author A. Unique TensorFlower <gardener@tensorflow.org> 2017-11-27 06:29:45 -0800
committer TensorFlower Gardener <gardener@tensorflow.org> 2017-11-27 06:33:15 -0800
commit 191825e63f341a4e7777b85254f616e541000d5c (patch)
tree 55e7a384e6dcea2e154a5419b5dc05326fb20c8b /tensorflow/contrib
parent a264269f523467ac018708a647eab02c1f1010fe (diff)
Delete trailing whitespace
PiperOrigin-RevId: 177008504
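This change removes trailing whitespace across tensorflow/contrib. As a rough illustration of the kind of cleanup involved (a sketch only; the tooling actually used for this commit is not recorded here), trailing blanks can be located and stripped with standard utilities:

```shell
# Sketch only: list lines with trailing spaces/tabs under tensorflow/contrib
grep -rnE '[[:blank:]]+$' tensorflow/contrib

# Sketch only: strip trailing spaces/tabs in place (GNU sed)
find tensorflow/contrib -type f \( -name '*.md' -o -name '*.cc' -o -name '*.sh' \) \
  -exec sed -i 's/[[:blank:]]*$//' {} +
```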
Diffstat (limited to 'tensorflow/contrib')
-rw-r--r--  tensorflow/contrib/android/cmake/README.md  2
-rw-r--r--  tensorflow/contrib/android/java/org/tensorflow/contrib/android/TensorFlowInferenceInterface.java  6
-rw-r--r--  tensorflow/contrib/cloud/kernels/bigquery_table_accessor_test.cc  2
-rw-r--r--  tensorflow/contrib/cmake/tf_grappler.cmake  2
-rw-r--r--  tensorflow/contrib/cmake/tf_shared_lib.cmake  2
-rw-r--r--  tensorflow/contrib/cmake/tf_stream_executor.cmake  6
-rwxr-xr-x  tensorflow/contrib/image/ops/single_image_random_dot_stereograms_ops.cc  6
-rw-r--r--  tensorflow/contrib/lite/g3doc/apis.md  2
-rw-r--r--  tensorflow/contrib/lite/java/demo/app/src/main/res/values/base-strings.xml  8
-rw-r--r--  tensorflow/contrib/makefile/README.md  32
-rwxr-xr-x  tensorflow/contrib/makefile/compile_ios_protobuf.sh  2
-rwxr-xr-x  tensorflow/contrib/makefile/compile_nsync.sh  2
-rwxr-xr-x  tensorflow/contrib/makefile/rename_protobuf.sh  4
-rw-r--r--  tensorflow/contrib/metrics/README.md  2
-rw-r--r--  tensorflow/contrib/mpi/README.md  10
-rw-r--r--  tensorflow/contrib/pi_examples/README.md  2
-rw-r--r--  tensorflow/contrib/pi_examples/camera/Makefile  2
-rw-r--r--  tensorflow/contrib/pi_examples/label_image/Makefile  2
-rw-r--r--  tensorflow/contrib/pi_examples/label_image/label_image.cc  14
-rw-r--r--  tensorflow/contrib/quantize/README.md  2
-rw-r--r--  tensorflow/contrib/tensor_forest/hybrid/core/ops/stochastic_hard_routing_function_op.cc  2
-rw-r--r--  tensorflow/contrib/timeseries/python/timeseries/state_space_models/g3doc/periodic_multires_derivation.md  2
-rw-r--r--  tensorflow/contrib/tpu/ops/outfeed_ops.cc  2
-rw-r--r--  tensorflow/contrib/verbs/README.md  2
24 files changed, 59 insertions, 59 deletions
diff --git a/tensorflow/contrib/android/cmake/README.md b/tensorflow/contrib/android/cmake/README.md
index 6f19b657fe..934b58c724 100644
--- a/tensorflow/contrib/android/cmake/README.md
+++ b/tensorflow/contrib/android/cmake/README.md
@@ -14,7 +14,7 @@ Add TensorFlow-Android-Inference as a dependency of your Android application
```
include ':TensorFlow-Android-Inference'
-findProject(":TensorFlow-Android-Inference").projectDir =
+findProject(":TensorFlow-Android-Inference").projectDir =
new File("${/path/to/tensorflow_repo}/contrib/android/cmake")
```
diff --git a/tensorflow/contrib/android/java/org/tensorflow/contrib/android/TensorFlowInferenceInterface.java b/tensorflow/contrib/android/java/org/tensorflow/contrib/android/TensorFlowInferenceInterface.java
index 1f423a7a5b..dc5b9fb887 100644
--- a/tensorflow/contrib/android/java/org/tensorflow/contrib/android/TensorFlowInferenceInterface.java
+++ b/tensorflow/contrib/android/java/org/tensorflow/contrib/android/TensorFlowInferenceInterface.java
@@ -160,7 +160,7 @@ public class TensorFlowInferenceInterface {
throw new RuntimeException("Failed to load model from the input stream", e);
}
}
-
+
/*
* Construct a TensorFlowInferenceInterface with provided Graph
*
@@ -168,7 +168,7 @@ public class TensorFlowInferenceInterface {
*/
public TensorFlowInferenceInterface(Graph g) {
prepareNativeRuntime();
-
+
// modelName is redundant here, here is for
// avoiding error in initialization as modelName is marked final.
this.modelName = "";
@@ -290,7 +290,7 @@ public class TensorFlowInferenceInterface {
*/
public void feed(String inputName, boolean[] src, long... dims) {
byte[] b = new byte[src.length];
-
+
for (int i = 0; i < src.length; i++) {
b[i] = src[i] ? (byte) 1 : (byte) 0;
}
diff --git a/tensorflow/contrib/cloud/kernels/bigquery_table_accessor_test.cc b/tensorflow/contrib/cloud/kernels/bigquery_table_accessor_test.cc
index b31b882fa1..e9b79a066d 100644
--- a/tensorflow/contrib/cloud/kernels/bigquery_table_accessor_test.cc
+++ b/tensorflow/contrib/cloud/kernels/bigquery_table_accessor_test.cc
@@ -421,7 +421,7 @@ TEST_F(BigQueryTableAccessorTest, MultiplePagesTest) {
TF_EXPECT_OK(accessor_->ReadRow(&row_id, &example));
EXPECT_EQ(3, row_id);
EXPECT_TRUE(accessor_->Done());
-
+
Example expected_example;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(kTestExampleProtoWithNulls,
&expected_example));
diff --git a/tensorflow/contrib/cmake/tf_grappler.cmake b/tensorflow/contrib/cmake/tf_grappler.cmake
index a7841c98e8..410490531a 100644
--- a/tensorflow/contrib/cmake/tf_grappler.cmake
+++ b/tensorflow/contrib/cmake/tf_grappler.cmake
@@ -23,7 +23,7 @@ file(GLOB tf_grappler_srcs
"${tensorflow_source_dir}/tensorflow/python/grappler/model_analyzer.cc"
"${tensorflow_source_dir}/tensorflow/python/grappler/model_analyzer.h"
)
-
+
add_library(tf_grappler OBJECT ${tf_grappler_srcs})
add_dependencies(tf_grappler tf_core_cpu)
\ No newline at end of file
diff --git a/tensorflow/contrib/cmake/tf_shared_lib.cmake b/tensorflow/contrib/cmake/tf_shared_lib.cmake
index 3e3fe0cdfa..dcedabb333 100644
--- a/tensorflow/contrib/cmake/tf_shared_lib.cmake
+++ b/tensorflow/contrib/cmake/tf_shared_lib.cmake
@@ -45,7 +45,7 @@ if(WIN32)
$<TARGET_FILE:tensorflow_static>
$<TARGET_FILE:tf_protos_cc>
)
-
+
set(tensorflow_deffile "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}/tensorflow.def")
set_source_files_properties(${tensorflow_deffile} PROPERTIES GENERATED TRUE)
diff --git a/tensorflow/contrib/cmake/tf_stream_executor.cmake b/tensorflow/contrib/cmake/tf_stream_executor.cmake
index 8d95f0d3e8..91ca33f4c4 100644
--- a/tensorflow/contrib/cmake/tf_stream_executor.cmake
+++ b/tensorflow/contrib/cmake/tf_stream_executor.cmake
@@ -61,18 +61,18 @@ file(GLOB tf_stream_executor_srcs
"${tensorflow_source_dir}/tensorflow/stream_executor/platform/default/*.h"
)
-if (tensorflow_ENABLE_GPU)
+if (tensorflow_ENABLE_GPU)
file(GLOB tf_stream_executor_gpu_srcs
"${tensorflow_source_dir}/tensorflow/stream_executor/cuda/*.cc"
)
list(APPEND tf_stream_executor_srcs ${tf_stream_executor_gpu_srcs})
-endif()
+endif()
#file(GLOB_RECURSE tf_stream_executor_test_srcs
# "${tensorflow_source_dir}/tensorflow/stream_executor/*_test.cc"
# "${tensorflow_source_dir}/tensorflow/stream_executor/*_test.h"
#)
-#list(REMOVE_ITEM tf_stream_executor_srcs ${tf_stream_executor_test_srcs})
+#list(REMOVE_ITEM tf_stream_executor_srcs ${tf_stream_executor_test_srcs})
if (NOT WIN32)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -lgomp")
diff --git a/tensorflow/contrib/image/ops/single_image_random_dot_stereograms_ops.cc b/tensorflow/contrib/image/ops/single_image_random_dot_stereograms_ops.cc
index 2b67992138..f8b56ab1c5 100755
--- a/tensorflow/contrib/image/ops/single_image_random_dot_stereograms_ops.cc
+++ b/tensorflow/contrib/image/ops/single_image_random_dot_stereograms_ops.cc
@@ -40,7 +40,7 @@ REGISTER_OP("SingleImageRandomDotStereograms")
.Doc(R"doc(
Outputs a single image random dot stereogram for export via encode_PNG/JPG OP.
-Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
+Given the 2-D tensor 'depth_values' with encoded Z values, this operation will
encode 3-D data into a 2-D image. The output of this Op is suitable for the
encode_PNG/JPG ops. Be careful with image compression as this may corrupt the
encode 3-D data witin the image.
@@ -68,14 +68,14 @@ with open('picture_out.png', 'wb') as f:
f.write(png)
```
-depth_values: Z values of data to encode into 'output_data_window' window,
+depth_values: Z values of data to encode into 'output_data_window' window,
lower values are further away {0.0 floor(far), 1.0 ceiling(near) after normalization}, must be 2-D tensor
hidden_surface_removal: Activate hidden surface removal
convergence_dots_size: Black dot size in pixels to help view converge image, drawn on bottom of image
dots_per_inch: Output device in dots/inch
eye_separation: Separation between eyes in inches
mu: Depth of field, Fraction of viewing distance (eg. 1/3 = .3333)
-normalize: Normalize input data to [0.0, 1.0]
+normalize: Normalize input data to [0.0, 1.0]
normalize_max: Fix MAX value for Normalization - if < MIN, autoscale
normalize_min: Fix MIN value for Normalization - if > MAX, autoscale
border_level: Value of border depth 0.0 {far} to 1.0 {near}
diff --git a/tensorflow/contrib/lite/g3doc/apis.md b/tensorflow/contrib/lite/g3doc/apis.md
index 311fc69696..e8f5566f11 100644
--- a/tensorflow/contrib/lite/g3doc/apis.md
+++ b/tensorflow/contrib/lite/g3doc/apis.md
@@ -52,7 +52,7 @@ typedef enum {
Failures can be easily verified with:
```c++
if (status != kTfLiteOk) {
- // ... error handling here ...
+ // ... error handling here ...
}
```
diff --git a/tensorflow/contrib/lite/java/demo/app/src/main/res/values/base-strings.xml b/tensorflow/contrib/lite/java/demo/app/src/main/res/values/base-strings.xml
index ab7d3fd496..0a71dbd0e8 100644
--- a/tensorflow/contrib/lite/java/demo/app/src/main/res/values/base-strings.xml
+++ b/tensorflow/contrib/lite/java/demo/app/src/main/res/values/base-strings.xml
@@ -19,12 +19,12 @@
<string name="app_name">TfLiteCameraDemo</string>
<string name="intro_message">
<![CDATA[
-
-
+
+
This sample demonstrates the basic use of TfLite API. Check the source code to see how
you can use TfLite for efficient, on-device inference with trained TensorFlow models.
-
-
+
+
]]>
</string>
</resources>
diff --git a/tensorflow/contrib/makefile/README.md b/tensorflow/contrib/makefile/README.md
index 65bd60c12a..9345303ff1 100644
--- a/tensorflow/contrib/makefile/README.md
+++ b/tensorflow/contrib/makefile/README.md
@@ -16,17 +16,17 @@ This static library will not contain:
- Python or other language bindings
- GPU support
-
+
You can target:
- iOS
- OS X (macOS)
- Android
- Raspberry-PI
-
+
You will compile tensorflow and protobuf libraries that you can link into other
applications. You will also compile the [benchmark](../../tools/benchmark/)
application that will let you check your application.
-
+
## Before you start (all platforms)
First, clone this TensorFlow repository.
@@ -58,9 +58,9 @@ You should then be able to run the `build_all_linux.sh` script to compile:
tensorflow/contrib/makefile/build_all_linux.sh
```
-This should compile a static library in
-`tensorflow/contrib/makefile/gen/lib/libtensorflow-core.a`,
-and create an example executable at `tensorflow/contrib/makefile/gen/bin/benchmark`.
+This should compile a static library in
+`tensorflow/contrib/makefile/gen/lib/libtensorflow-core.a`,
+and create an example executable at `tensorflow/contrib/makefile/gen/bin/benchmark`.
Get the graph file, if you have not already:
@@ -201,7 +201,7 @@ library in a simple app.
### Building by hand
This section covers each step of building. For all the code in one place, see
-[build_all_ios.sh](build_all_ios.sh).
+[build_all_ios.sh](build_all_ios.sh).
If you have not already, you will need to download dependencies:
@@ -232,7 +232,7 @@ make -f tensorflow/contrib/makefile/Makefile \
This creates a library in
`tensorflow/contrib/makefile/gen/lib/libtensorflow-core.a` that you can link any
-xcode project against.
+xcode project against.
To see TensorFlow running on iOS, the example Xcode project in
[tensorflow/examples/ios](../../examples/ios/) shows how to use the static
@@ -258,15 +258,15 @@ tensorflow/contrib/makefile/compile_ios_tensorflow.sh -f "-O3" -h tensorflow/con
In XCode, you will need to use -force_load in the linker flags
section of the build settings to pull in the global constructors that are used
-to register ops and kernels.
+to register ops and kernels.
#### Optimization
-
+
The `compile_ios_tensorflow.sh` script can take optional command-line arguments.
The first argument will be passed as a C++ optimization flag and defaults to
debug mode. If you are concerned about performance or are working on a release
build, you would likely want a higher optimization setting, like so:
-
+
```bash
compile_ios_tensorflow.sh -f "-Os"
```
@@ -330,7 +330,7 @@ what you need for your desired system.
## Dependency Management
The Makefile loads in a list of dependencies stored in text files. These files
-are generated from the main Bazel build by running
+are generated from the main Bazel build by running
`tensorflow/contrib/makefile/gen_file_lists.sh`. You'll need to re-run this i
you make changes to the files that are included in the build.
@@ -361,10 +361,10 @@ codebase can sometimes break the makefile build process. If you find that tests
relying on this makefile are failing with a change you're involved in, here are
some trouble-shooting steps:
- - Try to reproduce the issue on your platform. If you're on Linux, running
+ - Try to reproduce the issue on your platform. If you're on Linux, running
`make -f tensorflow/contrib/makefile/Makefile` should be enough to recreate
most issues. For other platforms, see the sections earlier in this document.
-
+
- The most common cause of breakages are files that have been added to the
Bazel build scripts, but that the makefile isn't aware of. Typical symptoms
of this include linker errors mentioning missing symbols or protobuf headers
@@ -377,11 +377,11 @@ some trouble-shooting steps:
`tensorflow/core/BUILD`, so if you change the wildcards there to include new
files you'll need to also update `CORE_CC_ALL_SRCS` and `CORE_CC_EXCLUDE_SRCS`
in the makefile.
-
+
- Some of the supported platforms use clang instead of gcc as their compiler,
so if you're hitting compile errors you may need to tweak your code to be more
friendly to different compilers by avoiding gcc extensions or idioms.
-
+
These are the most common reasons for makefile breakages, but it's also
possible you may hit something unusual, like a platform incompatibility. For
those, you'll need to see if you can reproduce the issue on that particular
diff --git a/tensorflow/contrib/makefile/compile_ios_protobuf.sh b/tensorflow/contrib/makefile/compile_ios_protobuf.sh
index 43e5809dd2..8fa2021363 100755
--- a/tensorflow/contrib/makefile/compile_ios_protobuf.sh
+++ b/tensorflow/contrib/makefile/compile_ios_protobuf.sh
@@ -270,7 +270,7 @@ case "$1" in
echo "Unknown ARCH"
exit 1
;;
-esac
+esac
}
for build_element in "${build_targets[@]}"
diff --git a/tensorflow/contrib/makefile/compile_nsync.sh b/tensorflow/contrib/makefile/compile_nsync.sh
index 930e6b8dea..7927997678 100755
--- a/tensorflow/contrib/makefile/compile_nsync.sh
+++ b/tensorflow/contrib/makefile/compile_nsync.sh
@@ -28,7 +28,7 @@ usage="usage: $prog [-t linux|ios|android|macos|native]
[-a architecture] [-v android_api_version]
A script to build nsync for tensorflow.
-This script can be run on Linux or MacOS host platforms, and can target
+This script can be run on Linux or MacOS host platforms, and can target
Linux, MacOS, iOS, or Android.
Options:
diff --git a/tensorflow/contrib/makefile/rename_protobuf.sh b/tensorflow/contrib/makefile/rename_protobuf.sh
index b3bff2d503..8d52c1a169 100755
--- a/tensorflow/contrib/makefile/rename_protobuf.sh
+++ b/tensorflow/contrib/makefile/rename_protobuf.sh
@@ -38,7 +38,7 @@
#
# Note that this script modifies the source code in-place, so once it's been run
# it's no longer suitable for further manual modifications, since the difference
-# with the top of tree will already be large.
+# with the top of tree will already be large.
mv tensorflow/contrib/makefile/downloads/protobuf/src/google/protobuf \
tensorflow/contrib/makefile/downloads/protobuf//src/google/protobuf3
@@ -71,7 +71,7 @@ sed -i '' 's%::google::protobuf;%google::protobuf3;%' \
# Fix up a couple of special build scripts that look for particular files.
sed -i '' 's%src/google/protobuf/message.cc%src/google/protobuf3/message.cc%' \
- tensorflow/contrib/makefile/downloads/protobuf/configure.ac
+ tensorflow/contrib/makefile/downloads/protobuf/configure.ac
sed -i '' 's%src/google/protobuf/stubs/common.h%src/google/protobuf3/stubs/common.h%' \
tensorflow/contrib/makefile/downloads/protobuf/autogen.sh
diff --git a/tensorflow/contrib/metrics/README.md b/tensorflow/contrib/metrics/README.md
index 247ebac5bb..e0f2d74fa3 100644
--- a/tensorflow/contrib/metrics/README.md
+++ b/tensorflow/contrib/metrics/README.md
@@ -4,7 +4,7 @@
Metrics are used in evaluation to assess the quality of a model. Most are
"streaming" ops, meaning they create variables to accumulate a running total,
-and return an update tensor to update these variables, and a value tensor to
+and return an update tensor to update these variables, and a value tensor to
read the accumulated value. Example:
value, update_op = metrics.streaming_mean_squared_error(
diff --git a/tensorflow/contrib/mpi/README.md b/tensorflow/contrib/mpi/README.md
index b0d03d05a2..75cb823048 100644
--- a/tensorflow/contrib/mpi/README.md
+++ b/tensorflow/contrib/mpi/README.md
@@ -23,7 +23,7 @@ The following environment variables can be set to modify the behavior at runtime
**MPI_DISABLED=[0,1]**
-This environment variable allows you to disable the MPI path before launch (e.g. for performance or correctness testing).
+This environment variable allows you to disable the MPI path before launch (e.g. for performance or correctness testing).
**MPI_OPTIMAL_PATH=[0,1]**
@@ -34,10 +34,10 @@ This path is disabled by default as it requires that the MPI library can directl
## Known problems
-For certain complex neural nets the implementation sometimes crashes inside the MPI libraries. This seems to be related to memory allocations/routines that register the memory for the Infiniband transfers. (The crashes do not happen when all MPI processes are within the same physical machine).
+For certain complex neural nets the implementation sometimes crashes inside the MPI libraries. This seems to be related to memory allocations/routines that register the memory for the Infiniband transfers. (The crashes do not happen when all MPI processes are within the same physical machine).
**MVAPICH**
-- The problem manifests itself with a segmentation fault inside a memory copy routine and during startup you will get the following warning: "WARNING: Error in initializing MVAPICH2 ptmalloc library. Continuing without InfiniBand registration cache support."
+- The problem manifests itself with a segmentation fault inside a memory copy routine and during startup you will get the following warning: "WARNING: Error in initializing MVAPICH2 ptmalloc library. Continuing without InfiniBand registration cache support."
**OpenMPI**
- With OpenMPI corrupt data will be received resulting in an assertion or the MPI library will print an error and exit. The error is "Attempt to free memory that is still in use by an ongoing MPI communication. MPI job will now abort."
@@ -58,11 +58,11 @@ Once a request has arrived from a remote process the request is forwarded to the
* Receive tensor request
The MPI thread will check if there are any incoming tensor request messages on the communication lines using MPI_Iprobe. Once a request has been received it will be passed on to the standard TensorFlow code and eventually will be placed on the sendQueue.
-* Receive tensor
+* Receive tensor
At some point after a request has been sent the remote process will transmit the tensor. This tensor will be received and we look-up the callback that is associated with this tensor in our request table and execute the callback on the received data.
-In the implementation all send operations are non-blocking, all probe operations are non-blocking and all receive-operations are blocking. The receive-operations are only executed after the probe has determined that there is something to receive.
+In the implementation all send operations are non-blocking, all probe operations are non-blocking and all receive-operations are blocking. The receive-operations are only executed after the probe has determined that there is something to receive.
The MPI processes identify each other using an MPI process ID. The TensorFlow gRPC processes identify each other using a name. During launch we create a mapping between the TensorFlow process name and the MPI process ID to allow the processes to communicate with the correct destinations when using MPI operations.
diff --git a/tensorflow/contrib/pi_examples/README.md b/tensorflow/contrib/pi_examples/README.md
index f550228083..177357bca6 100644
--- a/tensorflow/contrib/pi_examples/README.md
+++ b/tensorflow/contrib/pi_examples/README.md
@@ -13,7 +13,7 @@ sudo apt-get install -y libjpeg-dev
```
- To download the example model you'll need, run these commands:
-
+
```bash
curl https://storage.googleapis.com/download.tensorflow.org/models/inception_dec_2015_stripped.zip \
-o /tmp/inception_dec_2015_stripped.zip
diff --git a/tensorflow/contrib/pi_examples/camera/Makefile b/tensorflow/contrib/pi_examples/camera/Makefile
index 578f1336f3..b354c03b6e 100644
--- a/tensorflow/contrib/pi_examples/camera/Makefile
+++ b/tensorflow/contrib/pi_examples/camera/Makefile
@@ -76,7 +76,7 @@ $(EXECUTABLE_NAME): $(EXECUTABLE_OBJS) $(TFLIBS)
$(LIBFLAGS) $(LIB_PATH) $(LDFLAGS) $(LIBS)
# Matches on C++ source files.
-$(OBJDIR)%.o: %.cc
+$(OBJDIR)%.o: %.cc
@mkdir -p $(dir $@)
$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
diff --git a/tensorflow/contrib/pi_examples/label_image/Makefile b/tensorflow/contrib/pi_examples/label_image/Makefile
index 19652e581d..9d054a3133 100644
--- a/tensorflow/contrib/pi_examples/label_image/Makefile
+++ b/tensorflow/contrib/pi_examples/label_image/Makefile
@@ -75,7 +75,7 @@ $(EXECUTABLE_NAME): $(EXECUTABLE_OBJS) $(TFLIBS)
$(LIBFLAGS) $(LIB_PATH) $(LDFLAGS) $(LIBS)
# Matches on C++ source files.
-$(OBJDIR)%.o: %.cc
+$(OBJDIR)%.o: %.cc
@mkdir -p $(dir $@)
$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
diff --git a/tensorflow/contrib/pi_examples/label_image/label_image.cc b/tensorflow/contrib/pi_examples/label_image/label_image.cc
index 7817cd0c64..0b18045789 100644
--- a/tensorflow/contrib/pi_examples/label_image/label_image.cc
+++ b/tensorflow/contrib/pi_examples/label_image/label_image.cc
@@ -89,7 +89,7 @@ Status LoadJpegFile(string file_name, std::vector<tensorflow::uint8>* data,
FILE * infile;
JSAMPARRAY buffer;
int row_stride;
-
+
if ((infile = fopen(file_name.c_str(), "rb")) == NULL) {
LOG(ERROR) << "Can't open " << file_name;
return tensorflow::errors::NotFound("JPEG file ", file_name,
@@ -105,7 +105,7 @@ Status LoadJpegFile(string file_name, std::vector<tensorflow::uint8>* data,
fclose(infile);
return tensorflow::errors::Unknown("JPEG decoding failed");
}
-
+
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
jpeg_read_header(&cinfo, TRUE);
@@ -119,14 +119,14 @@ Status LoadJpegFile(string file_name, std::vector<tensorflow::uint8>* data,
buffer = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, row_stride, 1);
while (cinfo.output_scanline < cinfo.output_height) {
- tensorflow::uint8* row_address = &((*data)[cinfo.output_scanline * row_stride]);
+ tensorflow::uint8* row_address = &((*data)[cinfo.output_scanline * row_stride]);
jpeg_read_scanlines(&cinfo, buffer, 1);
memcpy(row_address, buffer[0], row_stride);
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
- fclose(infile);
+ fclose(infile);
return Status::OK();
}
@@ -167,7 +167,7 @@ Status ReadTensorFromImageFile(string file_name, const int wanted_height,
const int top_y_index = static_cast<int>(floorf(in_y));
const int bottom_y_index =
std::min(static_cast<int>(ceilf(in_y)), (image_height - 1));
- const float y_lerp = in_y - top_y_index;
+ const float y_lerp = in_y - top_y_index;
tensorflow::uint8* in_top_row = in + (top_y_index * image_rowlen);
tensorflow::uint8* in_bottom_row = in + (bottom_y_index * image_rowlen);
float *out_row = out + (y * wanted_width * wanted_channels);
@@ -186,7 +186,7 @@ Status ReadTensorFromImageFile(string file_name, const int wanted_height,
in_bottom_row + (right_x_index * wanted_channels);
const float x_lerp = in_x - left_x_index;
float *out_pixel = out_row + (x * wanted_channels);
- for (int c = 0; c < wanted_channels; ++c) {
+ for (int c = 0; c < wanted_channels; ++c) {
const float top_left((in_top_left_pixel[c] - input_mean) / input_std);
const float top_right((in_top_right_pixel[c] - input_mean) / input_std);
const float bottom_left((in_bottom_left_pixel[c] - input_mean) / input_std);
@@ -198,7 +198,7 @@ Status ReadTensorFromImageFile(string file_name, const int wanted_height,
}
}
}
-
+
out_tensors->push_back(image_tensor);
return Status::OK();
}
diff --git a/tensorflow/contrib/quantize/README.md b/tensorflow/contrib/quantize/README.md
index 782232e85f..40541729da 100644
--- a/tensorflow/contrib/quantize/README.md
+++ b/tensorflow/contrib/quantize/README.md
@@ -13,7 +13,7 @@ through estimator [2]. Note that during back propagation, the parameters are
updated at high precision as this is needed to ensure sufficient precision in
accumulating tiny adjustments to the parameters. However, for the forward pass,
the parameters and activations are quantized to the desired lower precision.
-
+
![drawing](g3doc/drawings/Fake_Quantization.jpg)
###Forward pass
diff --git a/tensorflow/contrib/tensor_forest/hybrid/core/ops/stochastic_hard_routing_function_op.cc b/tensorflow/contrib/tensor_forest/hybrid/core/ops/stochastic_hard_routing_function_op.cc
index 09b83e2af1..66aa293dc1 100644
--- a/tensorflow/contrib/tensor_forest/hybrid/core/ops/stochastic_hard_routing_function_op.cc
+++ b/tensorflow/contrib/tensor_forest/hybrid/core/ops/stochastic_hard_routing_function_op.cc
@@ -70,7 +70,7 @@ REGISTER_OP("StochasticHardRoutingFunction")
return Status::OK();
})
.Doc(R"doc(
- Samples a path for each instance in `input_data` and returns the
+ Samples a path for each instance in `input_data` and returns the
probability of the path and the path taken.
tree_depth: The depth of the decision tree.
diff --git a/tensorflow/contrib/timeseries/python/timeseries/state_space_models/g3doc/periodic_multires_derivation.md b/tensorflow/contrib/timeseries/python/timeseries/state_space_models/g3doc/periodic_multires_derivation.md
index b174bb6af3..872474aee1 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/state_space_models/g3doc/periodic_multires_derivation.md
+++ b/tensorflow/contrib/timeseries/python/timeseries/state_space_models/g3doc/periodic_multires_derivation.md
@@ -66,7 +66,7 @@ def make_eigval_mat_fn(to_power=1):
if i == j:
number = j // 2 + 1
powersign = ((j + 1) % 2) * 2 - 1
- return root_of_unity(matsize + 1, number=number,
+ return root_of_unity(matsize + 1, number=number,
to_power=powersign*to_power)
else:
return 0
diff --git a/tensorflow/contrib/tpu/ops/outfeed_ops.cc b/tensorflow/contrib/tpu/ops/outfeed_ops.cc
index ed5756cc54..5900c61a38 100644
--- a/tensorflow/contrib/tpu/ops/outfeed_ops.cc
+++ b/tensorflow/contrib/tpu/ops/outfeed_ops.cc
@@ -39,7 +39,7 @@ REGISTER_OP("OutfeedEnqueueTuple")
.Doc(R"doc(
An op which emits multiple Tensor values from an XLA computation.
-inputs: A list of tensors that will be inserted into the outfeed queue as an
+inputs: A list of tensors that will be inserted into the outfeed queue as an
XLA tuple.
)doc");
diff --git a/tensorflow/contrib/verbs/README.md b/tensorflow/contrib/verbs/README.md
index dcb390b0a5..7c1c8ea459 100644
--- a/tensorflow/contrib/verbs/README.md
+++ b/tensorflow/contrib/verbs/README.md
@@ -38,7 +38,7 @@ The following improvements can be made in the future. First, conversion to Tenso
* **RDMA channel:** Responsible for RDMA connection to a particular node. It manages multiple buffers. A channel has a callback table which stores all the callbacks for the requested tensors.
* **RDMA buffer:** Responsible for sending or receiving data. It has a fixed size memory to store the data. It has a queue to store the pending jobs. There are three types of buffers, message buffer, ACK buffer and tensor buffer. A channel has two message buffers, two ack buffers and many tensor buffers.
* **RDMA manager:** Manages the adapter and channels, including channel creation, channel setup via GRPC service, channel lookup, etc.
-* **RDMA rendezvous manager:** manages multiple rdma rendezvous.
+* **RDMA rendezvous manager:** manages multiple rdma rendezvous.
* **RDMA rendezvous:** a derived class of BaseRemoteRendezvous. This class is the back end for "send" and "recv" ops. When the sendrecv_op wants to send or receive a tensor, it calls the rendezvous' "send" and "recv" functions respectively. Rendezvous are identified by "step_id", a random number, so that tensors for different iterations don't get mixed up.
### The SEND operation