-rw-r--r--  README.md | 8
-rw-r--r--  RELEASE.md | 110
-rwxr-xr-x  configure | 36
-rw-r--r--  tensorflow/c/c_api.cc | 2
-rw-r--r--  tensorflow/cc/framework/grad_op_registry.h | 2
-rw-r--r--  tensorflow/cc/framework/gradient_checker.cc | 4
-rw-r--r--  tensorflow/cc/framework/ops.h | 4
-rw-r--r--  tensorflow/contrib/cmake/CMakeLists.txt | 79
-rw-r--r--  tensorflow/contrib/cmake/README.md | 395
-rw-r--r--  tensorflow/contrib/cmake/external/farmhash.cmake | 2
-rw-r--r--  tensorflow/contrib/cmake/external/gif.cmake | 64
-rw-r--r--  tensorflow/contrib/cmake/external/grpc.cmake | 6
-rw-r--r--  tensorflow/contrib/cmake/external/highwayhash.cmake | 50
-rw-r--r--  tensorflow/contrib/cmake/external/protobuf.cmake | 18
-rw-r--r--  tensorflow/contrib/cmake/install.cmake | 1
-rw-r--r--  tensorflow/contrib/cmake/patches/gif/CMakeLists.txt | 33
-rw-r--r--  tensorflow/contrib/cmake/patches/gif/unistd.h | 0
-rw-r--r--  tensorflow/contrib/cmake/patches/highwayhash/CMakeLists.txt | 5
-rw-r--r--  tensorflow/contrib/cmake/setup.py | 9
-rw-r--r--  tensorflow/contrib/cmake/tests.cmake | 1
-rw-r--r--  tensorflow/contrib/cmake/tf_cc_ops.cmake | 156
-rw-r--r--  tensorflow/contrib/cmake/tf_core_cpu.cmake | 28
-rw-r--r--  tensorflow/contrib/cmake/tf_core_direct_session.cmake | 24
-rw-r--r--  tensorflow/contrib/cmake/tf_core_distributed_runtime.cmake | 53
-rw-r--r--  tensorflow/contrib/cmake/tf_core_framework.cmake | 114
-rw-r--r--  tensorflow/contrib/cmake/tf_core_kernels.cmake | 132
-rw-r--r--  tensorflow/contrib/cmake/tf_core_ops.cmake | 112
-rw-r--r--  tensorflow/contrib/cmake/tf_models.cmake | 77
-rw-r--r--  tensorflow/contrib/cmake/tf_python.cmake | 95
-rw-r--r--  tensorflow/contrib/cmake/tf_stream_executor.cmake | 15
-rw-r--r--  tensorflow/contrib/cmake/tf_tools.cmake | 32
-rw-r--r--  tensorflow/contrib/cmake/tf_tutorials.cmake | 41
-rw-r--r--  tensorflow/contrib/layers/python/layers/feature_column_ops_test.py | 2
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/estimator.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/tests/base_test.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/tests/stability_test.py | 2
-rw-r--r--  tensorflow/contrib/makefile/Makefile | 1
-rwxr-xr-x  tensorflow/contrib/makefile/download_dependencies.sh | 2
-rw-r--r--  tensorflow/contrib/quantization/tools/graph_to_dot.py | 2
-rw-r--r--  tensorflow/contrib/quantization/tools/quantize_graph.py | 2
-rw-r--r--  tensorflow/contrib/rnn/python/kernel_tests/rnn_test.py | 137
-rw-r--r--  tensorflow/contrib/session_bundle/README.md | 8
-rw-r--r--  tensorflow/contrib/tfprof/README.md | 2
-rw-r--r--  tensorflow/core/BUILD | 1
-rw-r--r--  tensorflow/core/framework/numeric_types.h | 11
-rw-r--r--  tensorflow/core/kernels/conv_ops_fused.cc | 4
-rw-r--r--  tensorflow/core/kernels/conv_ops_using_gemm.cc | 4
-rw-r--r--  tensorflow/core/lib/gtl/inlined_vector.h | 3
-rw-r--r--  tensorflow/core/lib/random/random_distributions.h | 2
-rw-r--r--  tensorflow/core/ops/nn_ops.cc | 6
-rw-r--r--  tensorflow/core/platform/cloud/retrying_file_system.cc | 98
-rw-r--r--  tensorflow/core/platform/cloud/retrying_file_system.h | 7
-rw-r--r--  tensorflow/core/platform/cloud/retrying_file_system_test.cc | 56
-rw-r--r--  tensorflow/core/platform/default/logging.h | 3
-rw-r--r--  tensorflow/core/platform/dynamic_annotations.h | 2
-rw-r--r--  tensorflow/core/platform/file_system.h | 5
-rw-r--r--  tensorflow/core/platform/gif.h | 2
-rw-r--r--  tensorflow/core/platform/jpeg.h | 2
-rw-r--r--  tensorflow/core/platform/macros.h | 11
-rw-r--r--  tensorflow/core/platform/mutex.h | 2
-rw-r--r--  tensorflow/core/platform/notification.h | 2
-rw-r--r--  tensorflow/core/platform/platform.h | 4
-rw-r--r--  tensorflow/core/platform/png.h | 2
-rw-r--r--  tensorflow/core/platform/posix/error.cc | 20
-rw-r--r--  tensorflow/core/platform/thread_annotations.h | 2
-rw-r--r--  tensorflow/core/platform/types.h | 2
-rw-r--r--  tensorflow/core/platform/windows/env.cc | 113
-rw-r--r--  tensorflow/core/platform/windows/net.cc | 131
-rw-r--r--  tensorflow/core/platform/windows/port.cc | 99
-rw-r--r--  tensorflow/core/platform/windows/windows_file_system.cc | 266
-rw-r--r--  tensorflow/core/platform/windows/windows_file_system.h | 71
-rw-r--r--  tensorflow/core/public/version.h | 4
-rw-r--r--  tensorflow/examples/udacity/1_notmnist.ipynb | 2
-rw-r--r--  tensorflow/examples/udacity/3_regularization.ipynb | 2
-rw-r--r--  tensorflow/examples/udacity/Dockerfile | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/contrib.learn.md | 14
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.learn.LinearRegressor.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.nn.sampled_softmax_loss.md | 4
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.Graph.md | 4
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.learn.BaseEstimator.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.learn.Estimator.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.nn.fractional_max_pool.md | 5
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowEstimator.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowRNNRegressor.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.nn.nce_loss.md | 12
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.parse_example.md | 4
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.RMSPropOptimizer.md | 3
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.atrous_conv2d.md | 9
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.fractional_avg_pool.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.local_response_normalization.md | 5
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.Session.md | 7
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.resize_images.md | 9
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.DNNRegressor.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.TensorFlowRNNClassifier.md | 2
-rw-r--r--  tensorflow/g3doc/get_started/os_setup.md | 64
-rw-r--r--  tensorflow/g3doc/how_tos/image_retraining/index.md | 4
-rw-r--r--  tensorflow/g3doc/how_tos/quantization/index.md | 23
-rw-r--r--  tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md | 19
-rw-r--r--  tensorflow/g3doc/how_tos/tool_developers/index.md | 10
-rw-r--r--  tensorflow/g3doc/resources/faq.md | 10
-rw-r--r--  tensorflow/g3doc/resources/xla_prerelease.md | 31
-rw-r--r--  tensorflow/g3doc/tutorials/deep_cnn/index.md | 34
-rw-r--r--  tensorflow/g3doc/tutorials/image_recognition/index.md | 14
-rw-r--r--  tensorflow/g3doc/tutorials/index.md | 2
-rw-r--r--  tensorflow/g3doc/tutorials/input_fn/index.md | 36
-rw-r--r--  tensorflow/g3doc/tutorials/linear/overview.md | 7
-rw-r--r--  tensorflow/g3doc/tutorials/mnist/beginners/index.md | 5
-rw-r--r--  tensorflow/g3doc/tutorials/mnist/pros/index.md | 9
-rw-r--r--  tensorflow/g3doc/tutorials/monitors/index.md | 31
-rw-r--r--  tensorflow/g3doc/tutorials/recurrent/index.md | 7
-rw-r--r--  tensorflow/g3doc/tutorials/seq2seq/index.md | 14
-rw-r--r--  tensorflow/g3doc/tutorials/tflearn/index.md | 112
-rw-r--r--  tensorflow/g3doc/tutorials/wide/index.md | 35
-rw-r--r--  tensorflow/g3doc/tutorials/wide_and_deep/index.md | 18
-rw-r--r--  tensorflow/models/rnn/translate/data_utils.py | 2
-rw-r--r--  tensorflow/python/framework/common_shapes.py | 4
-rw-r--r--  tensorflow/python/lib/core/py_func.cc | 6
-rw-r--r--  tensorflow/python/ops/nn.py | 22
-rw-r--r--  tensorflow/python/ops/nn_grad.py | 2
-rw-r--r--  tensorflow/python/summary/impl/reservoir.py | 2
-rw-r--r--  tensorflow/tensorboard/README.md | 10
-rw-r--r--  tensorflow/tensorflow.bzl | 7
-rwxr-xr-x  tensorflow/tools/ci_build/builds/configured | 3
-rw-r--r--  tensorflow/tools/dist_test/Dockerfile | 35
-rw-r--r--  tensorflow/tools/dist_test/Dockerfile.local | 13
-rw-r--r--  tensorflow/tools/dist_test/README.md | 4
-rwxr-xr-x  tensorflow/tools/dist_test/local_test.sh | 48
-rwxr-xr-x  tensorflow/tools/dist_test/remote_test.sh | 37
-rw-r--r--  tensorflow/tools/dist_test/server/Dockerfile | 2
-rw-r--r--  tensorflow/tools/dist_test/server/Dockerfile.test | 2
-rw-r--r--  tensorflow/tools/docker/Dockerfile | 2
-rw-r--r--  tensorflow/tools/docker/Dockerfile.devel | 2
-rw-r--r--  tensorflow/tools/docker/Dockerfile.devel-gpu | 4
-rw-r--r--  tensorflow/tools/docker/Dockerfile.gpu | 2
-rw-r--r--  tensorflow/tools/docker/README.md | 2
-rwxr-xr-x  tensorflow/tools/docker/parameterized_docker_build.sh | 16
-rw-r--r--  tensorflow/tools/gcs_test/Dockerfile | 5
-rw-r--r--  tensorflow/tools/gcs_test/python/gcs_smoke.py | 2
-rwxr-xr-x  tensorflow/tools/git/gen_git_source.py | 5
-rw-r--r--  tensorflow/tools/pip_package/setup.py | 2
-rw-r--r--  third_party/eigen3/unsupported/Eigen/CXX11/Tensor | 9
-rw-r--r--  third_party/gpus/cuda/BUILD.tpl | 6
-rw-r--r--  third_party/gpus/cuda_configure.bzl | 15
-rwxr-xr-x  util/python/python_config.sh | 4
147 files changed, 2048 insertions(+), 1621 deletions(-)
diff --git a/README.md b/README.md
index 98e5e02a8d..e9b9228492 100644
--- a/README.md
+++ b/README.md
@@ -33,10 +33,10 @@ and discussion.**
People who are a little more adventurous can also try our nightly binaries:
-* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
-* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
-* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/))
-* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
+* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
+* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
+* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
+* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
* [Android](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/lastSuccessfulBuild/artifact/bazel-out/local_linux/bin/tensorflow/examples/android/tensorflow_demo.apk) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/))
#### *Try your first TensorFlow program*
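The `#### *Try your first TensorFlow program*` heading retained as context above introduces the README's interactive shell example. A minimal non-interactive Python sketch of the same idea, assuming a 0.11-era installation:

```python
import tensorflow as tf

# Build a trivial graph and run it in a session.
hello = tf.constant('Hello, TensorFlow!')
a = tf.constant(10)
b = tf.constant(32)

with tf.Session() as sess:
    print(sess.run(hello))   # Hello, TensorFlow!
    print(sess.run(a + b))   # 42
```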
diff --git a/RELEASE.md b/RELEASE.md
index 734aab095f..60a7304623 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,7 +1,42 @@
-# Changes since last release
+# Release 0.11.0
+
+## Major Features and Improvements
+
+* cuDNN 5 support.
+* HDFS Support.
+* Adds Fused LSTM support via cuDNN 5 in `tensorflow/contrib/cudnn_rnn`.
+* Improved support for NumPy style basic slicing including non-1 strides,
+ ellipses, newaxis, and negative indices. For example complicated expressions
+ like `foo[1, 2:4, tf.newaxis, ..., :-3:-1, :]` are now supported. In addition
+ we have preliminary (non-broadcasting) support for sliced assignment to
+ variables. In particular one can write `var[1:3].assign([1,11,111])`.
+* Deprecated `tf.op_scope` and `tf.variable_op_scope` in favor of a unified `tf.name_scope` and `tf.variable_scope`. The new argument order of `tf.variable_scope` is incompatible with previous versions.
+* Introducing `core/util/tensor_bundle` module: a module to efficiently
+ serialize/deserialize tensors to disk. Will be used in TF's new checkpoint
+ format.
+* Added tf.svd for computing the singular value decomposition (SVD) of dense
+ matrices or batches of matrices (CPU only).
+* Added gradients for eigenvalues and eigenvectors computed using
+ `self_adjoint_eig` or `self_adjoint_eigvals`.
+* Eliminated `batch_*` methods for most linear algebra and FFT ops and promoted
+ the non-batch version of the ops to handle batches of matrices.
+* Tracing/timeline support for distributed runtime (no GPU profiler yet).
+* C API gives access to inferred shapes with `TF_GraphGetTensorNumDims` and
+ `TF_GraphGetTensorShape`.
+* Shape functions for core ops have moved to C++ via
+ `REGISTER_OP(...).SetShapeFn(...)`. Python shape inference RegisterShape calls
+ use the C++ shape functions with `common_shapes.call_cpp_shape_fn`. A future
+ release will remove `RegisterShape` from python.
-## Breaking Changes to the API
+## Bug Fixes and Other Changes
+
+* Documentation now includes operator overloads on Tensor and Variable.
+* `tensorflow.__git_version__` now allows users to identify the version of the
+ code that TensorFlow was compiled with. We also have
+ `tensorflow.__git_compiler__` which identifies the compiler used to compile
+ TensorFlow's core.
+* Improved multi-threaded performance of `batch_matmul`.
* LSTMCell, BasicLSTMCell, and MultiRNNCell constructors now default to
`state_is_tuple=True`. For a quick fix while transitioning to the new
default, simply pass the argument `state_is_tuple=False`.
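The slicing improvements described in the release notes above can be exercised with a short script. This is a minimal sketch assuming the 0.11 Python API; the tensor contents and shapes are illustrative only:

```python
import tensorflow as tf

# NumPy-style basic slicing: non-1 strides, ellipsis, tf.newaxis,
# and negative indices, per the 0.11 release notes.
foo = tf.reshape(tf.range(24), [2, 3, 4])
sliced = foo[1, ..., tf.newaxis, ::-1]   # shape [3, 1, 4], last axis reversed

# Preliminary (non-broadcasting) sliced assignment to a variable.
var = tf.Variable([0, 0, 0, 0, 0])
update = var[1:3].assign([11, 111])

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print(sess.run(sliced))
    print(sess.run(update))   # [  0  11 111   0   0]
```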
@@ -10,20 +45,45 @@
* Int32 elements of list(type) arguments are no longer placed in host memory by
default. If necessary, a list(type) argument to a kernel can be placed in host
memory using a HostMemory annotation.
-* uniform_unit_scaling_initializer() no longer takes a full_shape arg, instead
- relying on the partition info passed to the initializer function when it's
- called.
-* The NodeDef protocol message is now defined in its own file node_def.proto
- instead of graph.proto.
-* ops.NoGradient was renamed ops.NotDifferentiable. ops.NoGradient will
+* `uniform_unit_scaling_initializer()` no longer takes a `full_shape` arg,
+ instead relying on the partition info passed to the initializer function when
+ it's called.
+* The NodeDef protocol message is now defined in its own file `node_def.proto`
+  instead of `graph.proto`.
+* `ops.NoGradient` was renamed `ops.NotDifferentiable`. `ops.NoGradient` will
be removed soon.
-* dot.h / DotGraph was removed (it was an early analysis tool prior
+* `dot.h` / DotGraph was removed (it was an early analysis tool prior
to TensorBoard, no longer that useful). It remains in history
should someone find the code useful.
* re2 / regexp.h was removed from being a public interface of TF.
Should users need regular expressions, they should depend on the RE2
library directly rather than via TensorFlow.
+## Thanks to our Contributors
+
+This release contains contributions from many people at Google, as well as:
+
+Abid K, @afshinrahimi, @AidanGG, Ajay Rao, Aki Sukegawa, Alex Rothberg,
+Alexander Rosenberg Johansen, Andrew Gibiansky, Andrew Thomas, @Appleholic,
+Bastiaan Quast, Ben Dilday, Bofu Chen, Brandon Amos, Bryon Gloden, Cissp®,
+@chanis, Chenyang Liu, Corey Wharton, Daeyun Shin, Daniel Julius Lasiman, Daniel
+Waterworth, Danijar Hafner, Darren Garvey, Denis Gorbachev, @DjangoPeng,
+Egor-Krivov, Elia Palme, Eric Platon, Fabrizio Milo, Gaetan Semet,
+Georg Nebehay, Gu Wang, Gustav Larsson, @haosdent, Harold Cooper, Hw-Zz,
+@ichuang, Igor Babuschkin, Igor Macedo Quintanilha, Ilya Edrenkin, @ironhead,
+Jakub Kolodziejczyk, Jennifer Guo, Jihun Choi, Jonas Rauber, Josh Bleecher
+Snyder, @jpangburn, Jules Gagnon-Marchand, Karen Brems, @kborer, Kirill Bobyrev,
+Laurent Mazare, Longqi Yang, Malith Yapa, Maniteja Nandana, Martin Englund,
+Matthias Winkelmann, @mecab, Mu-Ik Jeon, Nand Dalal, Niels Ole Salscheider,
+Nikhil Mishra, Park Jiin, Pieter De Rijk, @raix852, Ritwik Gupta, Sahil Sharma,
+@Sangheum, @SergejsRk, Shinichiro Hamaji, Simon Denel, @Steve, @suiyuan2009,
+Tiago Jorge, Tijmen Tieleman, @tvn, @tyfkda, Wang Yang, Wei-Ting Kuo, Wenjian
+Huang, Yan Chen, @YenChenLin, Yuan (Terry) Tang, Yuncheng Li, Yunfeng Wang, Zack
+Polizzi, @zhongzyd, Ziming Dong, @perhapszzy
+
+We are also grateful to all who filed issues or helped resolve them, asked and
+answered questions, and were part of inspiring discussions.
+
# Release 0.10.0
## Major Features and Improvements
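Two of the breaking changes listed in the hunk above (the removal of the `full_shape` argument from `uniform_unit_scaling_initializer()`, and the rename of `ops.NoGradient` to `ops.NotDifferentiable`) look roughly like this in updated user code. This is a hedged sketch against the 0.11 Python API; the variable and op-type names are placeholders:

```python
import tensorflow as tf
from tensorflow.python.framework import ops

# The initializer no longer takes full_shape; partition information is
# passed to it by get_variable when the initializer is invoked.
w = tf.get_variable(
    "w", shape=[256, 128],
    initializer=tf.uniform_unit_scaling_initializer(factor=1.0))

# ops.NoGradient still works for now but will be removed; new code should
# register non-differentiable ops with ops.NotDifferentiable.
ops.NotDifferentiable("MyOpaqueOp")  # "MyOpaqueOp" is a hypothetical op type
```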
@@ -36,7 +96,7 @@
* Full version of TF-Slim available as `tf.contrib.slim`
* Added k-Means clustering and WALS matrix factorization
-## Big Fixes and Other Changes
+## Bug Fixes and Other Changes
* Allow gradient computation for scalar values.
* Performance improvements for gRPC
@@ -58,8 +118,8 @@ This release contains contributions from many people at Google, as well as:
Alex Rothberg, Andrew Royer, Austin Marshall, @BlackCoal, Bob Adolf, Brian Diesel, Charles-Emmanuel Dias, @chemelnucfin, Chris Lesniewski, Daeyun Shin, Daniel Rodriguez, Danijar Hafner, Darcy Liu, Kristinn R. Thórisson, Daniel Castro, Dmitry Savintsev, Kashif Rasul, Dylan Paiton, Emmanuel T. Odeke, Ernest Grzybowski, Gavin Sherry, Gideon Dresdner, Gregory King, Harold Cooper, @heinzbeinz, Henry Saputra, Huarong Huo, Huazuo Gao, Igor Babuschkin, Igor Macedo Quintanilha, Ivan Ukhov, James Fysh, Jan Wilken Dörrie, Jihun Choi, Johnny Lim, Jonathan Raiman, Justin Francis, @lilac, Li Yi, Marc Khoury, Marco Marchesi, Max Melnick, Micael Carvalho, @mikowals, Mostafa Gazar, Nico Galoppo, Nishant Agrawal, Petr Janda, Yuncheng Li, @raix852, Robert Rose, @Robin-des-Bois, Rohit Girdhar, Sam Abrahams, satok16, Sergey Kishchenko, Sharkd Tu, @shotat, Siddharth Agrawal, Simon Denel, @sono-bfio, SunYeop Lee, Thijs Vogels, @tobegit3hub, @Undo1, Wang Yang, Wenjian Huang, Yaroslav Bulatov, Yuan Tang, Yunfeng Wang, Ziming Dong
-We are also grateful to all who filed issues or helped resolve them, asked and
-answered questions, and were part of inspiring discussions.
+We are also grateful to all who filed issues or helped resolve them, asked and
+answered questions, and were part of inspiring discussions.
# Release 0.9.0
@@ -77,7 +137,7 @@ answered questions, and were part of inspiring discussions.
`tf.nn.rnn`, and the classes in `tf.nn.rnn_cell`).
* TensorBoard now has an Audio Dashboard, with associated audio summaries.
-## Big Fixes and Other Changes
+## Bug Fixes and Other Changes
* Turned on CuDNN Autotune.
* Added support for using third-party Python optimization algorithms (contrib.opt).
@@ -93,8 +153,8 @@ answered questions, and were part of inspiring discussions.
* Performance improvements
* Many bugfixes
* Many documentation fixes
-* TensorBoard fixes: graphs with only one data point, Nan values,
- reload button and auto-reload, tooltips in scalar charts, run
+* TensorBoard fixes: graphs with only one data point, Nan values,
+ reload button and auto-reload, tooltips in scalar charts, run
filtering, stable colors
* Tensorboard graph visualizer now supports run metadata. Clicking on nodes
while viewing a stats for a particular run will show runtime statistics, such
@@ -106,8 +166,8 @@ This release contains contributions from many people at Google, as well as:
Aaron Schumacher, Aidan Dang, Akihiko ITOH, Aki Sukegawa, Arbit Chen, Aziz Alto, Danijar Hafner, Erik Erwitt, Fabrizio Milo, Felix Maximilian Möller, Henry Saputra, Sung Kim, Igor Babuschkin, Jan Zikes, Jeremy Barnes, Jesper Steen Møller, Johannes Mayer, Justin Harris, Kashif Rasul, Kevin Robinson, Loo Rong Jie, Lucas Moura, Łukasz Bieniasz-Krzywiec, Mario Cho, Maxim Grechkin, Michael Heilman, Mostafa Rahmani, Mourad Mourafiq, @ninotoshi, Orion Reblitz-Richardson, Yuncheng Li, @raoqiyu, Robert DiPietro, Sam Abrahams, Sebastian Raschka, Siddharth Agrawal, @snakecharmer1024, Stephen Roller, Sung Kim, SunYeop Lee, Thijs Vogels, Till Hoffmann, Victor Melo, Ville Kallioniemi, Waleed Abdulla, Wenjian Huang, Yaroslav Bulatov, Yeison Rodriguez, Yuan Tang, Yuxin Wu, @zhongzyd, Ziming Dong, Zohar Jackson
-We are also grateful to all who filed issues or helped resolve them, asked and
-answered questions, and were part of inspiring discussions.
+We are also grateful to all who filed issues or helped resolve them, asked and
+answered questions, and were part of inspiring discussions.
# Release 0.8.0
@@ -124,11 +184,11 @@ answered questions, and were part of inspiring discussions.
* Add an extension mechanism for adding network file system support
* TensorBoard displays metadata stats (running time, memory usage and device used) and tensor shapes
-## Big Fixes and Other Changes
+## Bug Fixes and Other Changes
* Utility for inspecting checkpoints
* Basic tracing and timeline support
-* Allow building against cuDNN 5 (not incl. RNN/LSTM support)
+* Allow building against cuDNN 5 (not incl. RNN/LSTM support)
* Added instructions and binaries for ProtoBuf library with fast serialization and without 64MB limit
* Added special functions
* `bool`-strictness: Tensors have to be explicitly compared to `None`
@@ -148,8 +208,8 @@ This release contains contributions from many people at Google, as well as:
Abhinav Upadhyay, Aggelos Avgerinos, Alan Wu, Alexander G. de G. Matthews, Aleksandr Yahnev, @amchercashin, Andy Kitchen, Aurelien Geron, Awni Hannun, @BanditCat, Bas Veeling, Cameron Chen, @cg31, Cheng-Lung Sung, Christopher Bonnett, Dan Becker, Dan Van Boxel, Daniel Golden, Danijar Hafner, Danny Goodman, Dave Decker, David Dao, David Kretch, Dongjoon Hyun, Dustin Dorroh, @e-lin, Eurico Doirado, Erik Erwitt, Fabrizio Milo, @gaohuazuo, Iblis Lin, Igor Babuschkin, Isaac Hodes, Isaac Turner, Iván Vallés, J Yegerlehner, Jack Zhang, James Wexler, Jan Zikes, Jay Young, Jeff Hodges, @jmtatsch, Johnny Lim, Jonas Meinertz Hansen, Kanit Wongsuphasawat, Kashif Rasul, Ken Shirriff, Kenneth Mitchner, Kenta Yonekura, Konrad Magnusson, Konstantin Lopuhin, @lahwran, @lekaha, @liyongsea, Lucas Adams, @makseq, Mandeep Singh, @manipopopo, Mark Amery, Memo Akten, Michael Heilman, Michael Peteuil, Nathan Daly, Nicolas Fauchereau, @ninotoshi, Olav Nymoen, @panmari, @papelita1234, Pedro Lopes, Pranav Sailesh Mani, RJ Ryan, Rob Culliton, Robert DiPietro, @ronrest, Sam Abrahams, Sarath Shekkizhar, Scott Graham, Sebastian Raschka, Sung Kim, Surya Bhupatiraju, Syed Ahmed, Till Hoffmann, @timsl, @urimend, @vesnica, Vlad Frolov, Vlad Zagorodniy, Wei-Ting Kuo, Wenjian Huang, William Dmitri Breaden Madden, Wladimir Schmidt, Yuan Tang, Yuwen Yan, Yuxin Wu, Yuya Kusakabe, @zhongzyd, @znah.
-We are also grateful to all who filed issues or helped resolve them, asked and
-answered questions, and were part of inspiring discussions.
+We are also grateful to all who filed issues or helped resolve them, asked and
+answered questions, and were part of inspiring discussions.
# Release 0.7.1
@@ -175,12 +235,12 @@ answered questions, and were part of inspiring discussions.
* Allow using any installed Cuda >= 7.0 and cuDNN >= R2, and add support
for cuDNN R4
-* Added a `contrib/` directory for unsupported or experimental features,
+* Added a `contrib/` directory for unsupported or experimental features,
including higher level `layers` module
* Added an easy way to add and dynamically load user-defined ops
* Built out a good suite of tests, things should break less!
* Added `MetaGraphDef` which makes it easier to save graphs with metadata
-* Added assignments for "Deep Learning with TensorFlow" udacity course
+* Added assignments for "Deep Learning with TensorFlow" udacity course
## Bug Fixes and Other Changes
@@ -270,8 +330,8 @@ Vlad Zavidovych, Yangqing Jia, Yi-Lin Juang, Yuxin Wu, Zachary Lipton,
Zero Chen, Alan Wu, @brchiu, @emmjaykay, @jalammar, @Mandar-Shinde,
@nsipplswezey, @ninotoshi, @panmari, @prolearner and @rizzomichaelg.
-We are also grateful to all who filed issues or helped resolve them, asked and
-answered questions, and were part of inspiring discussions.
+We are also grateful to all who filed issues or helped resolve them, asked and
+answered questions, and were part of inspiring discussions.
# Release 0.6.0
diff --git a/configure b/configure
index eccc204945..8bc271aaf3 100755
--- a/configure
+++ b/configure
@@ -1,5 +1,8 @@
#!/usr/bin/env bash
+set -e
+set -o pipefail
+
# Find out the absolute path to where ./configure resides
pushd `dirname $0` #> /dev/null
SOURCE_BASE_DIR=`pwd -P`
@@ -14,7 +17,7 @@ function bazel_clean_and_fetch() {
while true; do
fromuser=""
if [ -z "$PYTHON_BIN_PATH" ]; then
- default_python_bin_path=$(which python)
+ default_python_bin_path=$(which python || which python3 || true)
read -p "Please specify the location of python. [Default is $default_python_bin_path]: " PYTHON_BIN_PATH
fromuser="1"
if [ -z "$PYTHON_BIN_PATH" ]; then
@@ -47,7 +50,6 @@ while [ "$TF_NEED_GCP" == "" ]; do
done
if [ "$TF_NEED_GCP" == "1" ]; then
-
## Verify that libcurl header files are available.
# Only check Linux, since on MacOS the header files are installed with XCode.
if [[ $(uname -a) =~ Linux ]] && [[ ! -f "/usr/include/curl/curl.h" ]]; then
@@ -96,7 +98,7 @@ fi
echo "$SWIG_PATH" > tensorflow/tools/swig/swig_path
# Invoke python_config and set up symlinks to python includes
-(./util/python/python_config.sh --setup "$PYTHON_BIN_PATH";) || exit -1
+./util/python/python_config.sh --setup "$PYTHON_BIN_PATH"
# Run the gen_git_source to create links where bazel can track dependencies for
# git hash propagation
@@ -127,7 +129,7 @@ fi
while true; do
fromuser=""
if [ -z "$GCC_HOST_COMPILER_PATH" ]; then
- default_gcc_host_compiler_path=$(which gcc)
+ default_gcc_host_compiler_path=$(which gcc || true)
read -p "Please specify which gcc should be used by nvcc as the host compiler. [Default is $default_gcc_host_compiler_path]: " GCC_HOST_COMPILER_PATH
fromuser="1"
if [ -z "$GCC_HOST_COMPILER_PATH" ]; then
@@ -214,18 +216,36 @@ while true; do
if [[ -z "$TF_CUDNN_VERSION" ]]; then
TF_CUDNN_EXT=""
+ cudnn_lib_path=""
+ cudnn_alt_lib_path=""
+ if [ "$OSNAME" == "Linux" ]; then
+ cudnn_lib_path="${CUDNN_INSTALL_PATH}/lib64/libcudnn.so"
+ cudnn_alt_lib_path="${CUDNN_INSTALL_PATH}/libcudnn.so"
+ elif [ "$OSNAME" == "Darwin" ]; then
+ cudnn_lib_path="${CUDNN_INSTALL_PATH}/lib/libcudnn.dylib"
+ cudnn_alt_lib_path="${CUDNN_INSTALL_PATH}/libcudnn.dylib"
+ fi
# Resolve to the SONAME of the symlink. Use readlink without -f
# to resolve exactly once to the SONAME. E.g, libcudnn.so ->
- # libcudnn.so.4
- REALVAL=`readlink ${CUDNN_INSTALL_PATH}/lib64/libcudnn.so`
+ # libcudnn.so.4.
+ # If the path is not a symlink, readlink will exit with an error code, so
+ # in that case, we return the path itself.
+ if [ -f "$cudnn_lib_path" ]; then
+ REALVAL=`readlink ${cudnn_lib_path} || echo "${cudnn_lib_path}"`
+ else
+ REALVAL=`readlink ${cudnn_alt_lib_path} || echo "${cudnn_alt_lib_path}"`
+ fi
# Extract the version of the SONAME, if it was indeed symlinked to
# the SONAME version of the file.
- if [[ "$REALVAL" =~ .so[.]+([0-9]*) ]];
- then
+ if [[ "$REALVAL" =~ .so[.]+([0-9]*) ]]; then
TF_CUDNN_EXT="."${BASH_REMATCH[1]}
TF_CUDNN_VERSION=${BASH_REMATCH[1]}
echo "libcudnn.so resolves to libcudnn${TF_CUDNN_EXT}"
+ elif [[ "$REALVAL" =~ ([0-9]*).dylib ]]; then
+ TF_CUDNN_EXT=${BASH_REMATCH[1]}".dylib"
+ TF_CUDNN_VERSION=${BASH_REMATCH[1]}
+ echo "libcudnn.dylib resolves to libcudnn${TF_CUDNN_EXT}"
fi
else
TF_CUDNN_EXT=".$TF_CUDNN_VERSION"
diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc
index 4e2d574f39..781f1984ac 100644
--- a/tensorflow/c/c_api.cc
+++ b/tensorflow/c/c_api.cc
@@ -1544,7 +1544,7 @@ TF_Operation* TF_GraphOperationByName(TF_Graph* graph, const char* oper_name) {
TF_Operation* TF_GraphNextOperation(TF_Graph* graph, size_t* pos) {
if (*pos == 0) {
- // Advance past the first sentinal nodes in every graph (the source & sink).
+ // Advance past the first sentinel nodes in every graph (the source & sink).
*pos += 2;
} else {
// Advance to the next node.
diff --git a/tensorflow/cc/framework/grad_op_registry.h b/tensorflow/cc/framework/grad_op_registry.h
index 4c07352a48..fd3c94aa69 100644
--- a/tensorflow/cc/framework/grad_op_registry.h
+++ b/tensorflow/cc/framework/grad_op_registry.h
@@ -37,7 +37,7 @@ typedef Status (*GradFunc)(const Scope& scope, const Operation& op,
class GradOpRegistry {
public:
// Registers 'func' as the the gradient function for 'op'.
- // Returns true if registration was succesful, check fails otherwise.
+ // Returns true if registration was successful, check fails otherwise.
bool Register(const string& op, GradFunc func);
// Sets 'func' to the gradient function for 'op' and returns Status OK if
diff --git a/tensorflow/cc/framework/gradient_checker.cc b/tensorflow/cc/framework/gradient_checker.cc
index a85035896c..a729bdd24d 100644
--- a/tensorflow/cc/framework/gradient_checker.cc
+++ b/tensorflow/cc/framework/gradient_checker.cc
@@ -47,7 +47,7 @@ Status ComputeTheoreticalJacobianTranspose(
auto dy_data_flat = dy_data.flat<T>();
dy_data_flat.setZero();
- // Compute the theoretical Jacobian one row at a time by backproping '1.0'
+ // Compute the theoretical Jacobian one row at a time by back propagating '1.0'
// for each element of 'dy', while holding all other elements of 'dy' at zero.
ClientSession session(scope);
std::vector<Tensor> dxout;
@@ -133,7 +133,7 @@ Status ComputeGradientError(const Scope& scope, const ops::Output& x,
TF_RETURN_IF_ERROR(ComputeTheoreticalJacobianTranspose<T>(
scope, x, x_shape, x_data, y, y_shape, &jacobian_t));
- // Inititalize numeric Jacobian to zeros.
+ // Initialize numeric Jacobian to zeros.
Tensor jacobian_n(x.type(), {x_size, y_size});
auto jacobian_n_flat = jacobian_n.flat<T>();
jacobian_n_flat.setZero();
diff --git a/tensorflow/cc/framework/ops.h b/tensorflow/cc/framework/ops.h
index 55ef2d60a3..71bfc6617c 100644
--- a/tensorflow/cc/framework/ops.h
+++ b/tensorflow/cc/framework/ops.h
@@ -95,7 +95,7 @@ class Input {
// constants such as simple primitive constants and nested initializer lists
// representing a multi-dimensional array. Initializer constructors are all
// templates, so the aforementioned kinds of C++ constants can be used to
- // construct an Initializer. Intializer stores the value it got constructed
+ // construct an Initializer. Initializer stores the value it got constructed
// with in a Tensor object.
struct Initializer {
// Construct from a scalar value of an arithmetic type or a type that can be
@@ -156,7 +156,7 @@ class Input {
}
// Construct a multi-dimensional tensor from a nested initializer list. Note
- // that C++ syntax allows nesting of arbitrarily typed intializer lists, so
+ // that C++ syntax allows nesting of arbitrarily typed initializer lists, so
// such invalid initializers cannot be disallowed at compile time. This
// function performs checks to make sure that the nested initializer list is
// indeed a valid multi-dimensional tensor.
diff --git a/tensorflow/contrib/cmake/CMakeLists.txt b/tensorflow/contrib/cmake/CMakeLists.txt
index 4f4497618b..a664312b0c 100644
--- a/tensorflow/contrib/cmake/CMakeLists.txt
+++ b/tensorflow/contrib/cmake/CMakeLists.txt
@@ -15,10 +15,13 @@ cmake_policy(SET CMP0022 NEW)
# Options
option(tensorflow_VERBOSE "Enable for verbose output" OFF)
-option(tensorflow_BUILD_TESTS "Build tests" ON)
option(tensorflow_ENABLE_SSL_SUPPORT "Enable boringssl support" OFF)
+option(tensorflow_ENABLE_GRPC_SUPPORT "Enable gRPC support" ON)
option(tensorflow_BUILD_CC_EXAMPLE "Build the C++ tutorial example" ON)
option(tensorflow_BUILD_PYTHON_BINDINGS "Build the Python bindings" ON)
+option(tensorflow_BUILD_ALL_KERNELS "Build all OpKernels" ON)
+option(tensorflow_BUILD_CONTRIB_KERNELS "Build OpKernels from tensorflow/contrib/..." ON)
+
#Threads: defines CMAKE_THREAD_LIBS_INIT and adds -pthread compile option for
# targets that link ${CMAKE_THREAD_LIBS_INIT}.
@@ -42,6 +45,15 @@ set (DOWNLOAD_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/downloads"
mark_as_advanced(DOWNLOAD_LOCATION)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+add_definitions(-DEIGEN_AVOID_STL_ARRAY)
+if(WIN32)
+ add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00 -DLANG_CXX11 -DCOMPILER_MSVC -D__VERSION__=\"MSVC\")
+ set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} /MP)
+endif()
+
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} "-fno-exceptions -std=c++11")
+endif()
# External dependencies
include(gif)
@@ -49,35 +61,76 @@ include(png)
include(jpeg)
include(eigen)
include(jsoncpp)
-if(tensorflow_ENABLE_SSL_SUPPORT)
- include(boringssl)
-endif()
include(farmhash)
include(highwayhash)
include(protobuf)
-include(grpc)
+find_package(ZLIB REQUIRED)
+
+set(tensorflow_EXTERNAL_LIBRARIES
+ ${gif_STATIC_LIBRARIES}
+ ${png_STATIC_LIBRARIES}
+ ${jpeg_STATIC_LIBRARIES}
+ ${jsoncpp_STATIC_LIBRARIES}
+ ${farmhash_STATIC_LIBRARIES}
+ ${highwayhash_STATIC_LIBRARIES}
+ ${protobuf_STATIC_LIBRARIES}
+ ${ZLIB_LIBRARIES}
+)
+set(tensorflow_EXTERNAL_DEPENDENCIES
+ gif_copy_headers_to_destination png_copy_headers_to_destination jpeg_copy_headers_to_destination jsoncpp farmhash_copy_headers_to_destination highwayhash_copy_headers_to_destination protobuf eigen)
+
+include_directories(
+ # Source and generated code.
+ ${tensorflow_source_dir}
+ ${CMAKE_CURRENT_BINARY_DIR}
+ # External dependencies.
+ ${gif_INCLUDE_DIR}
+ ${png_INCLUDE_DIR}
+ ${jpeg_INCLUDE_DIR}
+ ${eigen_INCLUDE_DIRS}
+ ${jsoncpp_INCLUDE_DIR}
+ ${farmhash_INCLUDE_DIR}
+ ${highwayhash_INCLUDE_DIR}
+ ${PROTOBUF_INCLUDE_DIRS}
+ ${ZLIB_INCLUDE_DIRS}
+)
+
+if(tensorflow_ENABLE_SSL_SUPPORT)
+ include(boringssl)
+ list(APPEND tensorflow_EXTERNAL_LIBRARIES ${boringssl_STATIC_LIBRARIES})
+ list(APPEND tensorflow_EXTERNAL_DEPENDENCIES boringssl)
+ include_directories(${boringssl_INCLUDE_DIR})
+endif()
+if(tensorflow_ENABLE_GRPC_SUPPORT)
+ include(grpc)
+ list(APPEND tensorflow_EXTERNAL_LIBRARIES ${grpc_STATIC_LIBRARIES})
+ list(APPEND tensorflow_EXTERNAL_DEPENDENCIES grpc)
+ include_directories(${GRPC_INCLUDE_DIRS})
+endif()
+if(WIN32)
+ list(APPEND tensorflow_EXTERNAL_LIBRARIES wsock32 ws2_32 shlwapi)
+endif()
+if(UNIX)
+ list(APPEND tensorflow_EXTERNAL_LIBRARIES ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS})
+endif()
# Let's get to work!
include(tf_core_framework.cmake)
+include(tf_tools.cmake)
# NOTE: Disabled until issue #3996 is fixed.
# include(tf_stream_executor.cmake)
include(tf_core_cpu.cmake)
include(tf_models.cmake)
include(tf_core_ops.cmake)
include(tf_core_direct_session.cmake)
-include(tf_core_distributed_runtime.cmake)
+if(tensorflow_ENABLE_GRPC_SUPPORT)
+ include(tf_core_distributed_runtime.cmake)
+endif()
include(tf_core_kernels.cmake)
include(tf_cc_ops.cmake)
-include(tf_tools.cmake)
if(tensorflow_BUILD_CC_EXAMPLE)
include(tf_tutorials.cmake)
endif()
if(tensorflow_BUILD_PYTHON_BINDINGS)
include(tf_python.cmake)
endif()
-
-if (tensorflow_BUILD_TESTS)
- include(tests.cmake)
-endif (tensorflow_BUILD_TESTS)
-
-include(install.cmake)
diff --git a/tensorflow/contrib/cmake/README.md b/tensorflow/contrib/cmake/README.md
index 6d4c0eb79d..daf510139d 100644
--- a/tensorflow/contrib/cmake/README.md
+++ b/tensorflow/contrib/cmake/README.md
@@ -1,283 +1,218 @@
-This directory contains *CMake* files that can be used to build TensorFlow
-core library.
+TensorFlow CMake build
+======================
+This directory contains CMake files for building TensorFlow on Microsoft
+Windows. [CMake](https://cmake.org) is a cross-platform tool that can
+generate build scripts for multiple build systems, including Microsoft
+Visual Studio.
+
+**N.B.** We provide Linux build instructions primarily for the purpose of
+testing the build. We recommend using the standard Bazel-based build on
+Linux.
Current Status
--------------
-CMake build is not yet ready for general usage!
-
-We are actively working on CMake support. Please help us improve it.
-Pull requests are welcomed!
-
-
-Linux CMake + Docker (very simple)
-----------------------------------
-
-```bash
-git clone --recursive https://github.com/tensorflow/tensorflow.git
-cd tensorflow
-tensorflow/tools/ci_build/ci_build.sh CPU tensorflow/tools/ci_build/builds/cmake.sh
-```
-
-That's it. Dependencies included. Otherwise read the rest of this readme...
-
-
-Prerequisites
-=============
-
-You need to have [CMake](http://www.cmake.org) and [Git](http://git-scm.com)
-installed on your computer before proceeding.
-
-Most of the instructions will be given to the *Сommand Prompt*, but the same
-actions can be performed using appropriate GUI tools.
-
-
-Environment Setup
-=================
-
-Open the appropriate *Command Prompt* from the *Start* menu.
-
-For example *VS2013 x64 Native Tools Command Prompt*:
-
- C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\bin\amd64>
-
-Change to your working directory:
-
- C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\bin\amd64>cd C:\Path\to
- C:\Path\to>
-
-Where *C:\Path\to* is the path to your real working directory.
-
-Create a folder where TensorFlow headers/libraries/binaries will be installed
-after they are built:
-
- C:\Path\to>mkdir install
-
-If *cmake* command is not available from *Command Prompt*, add it to system
-*PATH* variable:
+The CMake files in this directory can build the core TensorFlow runtime, an
+example C++ binary, and a PIP package containing the runtime and Python
+bindings. Currently, only CPU builds are supported, but we are working on
+providing a GPU build as well.
- C:\Path\to>set PATH=%PATH%;C:\Program Files (x86)\CMake\bin
+Note: Windows support is in an **alpha** state, and we welcome your feedback.
-If *git* command is not available from *Command Prompt*, add it to system
-*PATH* variable:
+### Pre-requisites
- C:\Path\to>set PATH=%PATH%;C:\Program Files\Git\cmd
+* CMake version 3.1 or later
-Good. Now you are ready to continue.
+* [Git](http://git-scm.com)
-Getting Sources
-===============
+* [SWIG](http://www.swig.org/download.html)
-You can get the latest stable source packages from the
-[releases](https://github.com/tensorflow/tensorflow/releases) page.
-Or you can type:
+* Additional pre-requisites for Microsoft Windows:
+ - Visual Studio 2015
+ - Python 3.5
+ - NumPy 1.11.0 or later
- C:\Path\to> git clone --recursive -b [release_tag] https://github.com/tensorflow/tensorflow.git
+* Additional pre-requisites for Linux:
+ - Python 2.7 or later
+ - [Docker](https://www.docker.com/) (for automated testing)
+ - NumPy 1.11.0 or later
-Where *[release_tag]* is a git tag like *v0.6.0* or a branch name like *master*
-if you want to get the latest code.
+### Known-good configurations
-Go to the project folder:
+* Microsoft Windows 10
+ - Microsoft Visual Studio Enterprise 2015 with Visual C++ 2015
+ - [Anaconda 4.1.1 (Python 3.5 64-bit)](https://www.continuum.io/downloads)
+ - [Git for Windows version 2.9.2.windows.1](https://git-scm.com/download/win)
+ - [swigwin-3.0.10](http://www.swig.org/download.html)
- C:\Path\to>cd tensorflow
- C:\Path\to\tensorflow>
+* Ubuntu 14.04
+ - Makefile generator
+ - Docker 1.9.1 (for automated testing)
-Now go to *tensorflow\contrib\cmake* folder in TensorFlow's contrib sources:
+### Current known limitations
- C:\Path\to\tensorflow>cd tensorflow\contrib\cmake
- C:\Path\to\tensorflow\tensorflow\contrib\cmake>
+* CPU support only
-Good. Now you are ready to configure *CMake*.
+ - We are in the process of porting the GPU code in
+ `tensorflow/stream_executor` to build with CMake and work on non-POSIX
+ platforms.
-CMake Configuration
-===================
+* Additional limitations for the Windows build:
-*CMake* supports a lot of different
-[generators](http://www.cmake.org/cmake/help/latest/manual/cmake-generators.7.html)
-for various native build systems. We are only interested in
-[Makefile](http://www.cmake.org/cmake/help/latest/manual/cmake-generators.7.html#makefile-generators)
-and
-[Visual Studio](http://www.cmake.org/cmake/help/latest/manual/cmake-generators.7.html#visual-studio-generators)
-generators.
+ - The Python package supports **Python 3.5 only**, because that is the only
+ version for which standard Python binaries exist and those binaries are
+ compatible with the TensorFlow runtime. (On Windows, the standard Python
+ binaries for versions earlier than 3.5 were compiled with older compilers
+ that do not have all of the features (e.g. C++11 support) needed to compile
+ TensorFlow. We welcome patches for making TensorFlow work with Python 2.7
+ on Windows, but have not yet committed to supporting that configuration.)
-We will use shadow building to separate the temporary files from the TensorFlow
-source code.
+ - The following Python APIs are not currently implemented:
+ * Loading custom op libraries via `tf.load_op_library()`.
+ * Path manipulation functions (such as `tf.gfile.ListDirectory()`) are not
+ functional.
-Create a temporary *build* folder and change your working directory to it:
+ - The `tf.contrib` libraries are not currently included in the PIP package.
- C:\Path\to\tensorflow\tensorflow\contrib\cmake>mkdir build & cd build
- C:\Path\to\tensorflow\tensorflow\contrib\cmake\build>
+ - The following operations are not currently implemented:
+ * `DepthwiseConv2dNative`
+ * `Digamma`
+ * `Erf`
+ * `Erfc`
+ * `Igamma`
+ * `Igammac`
+ * `ImmutableConst`
+ * `Lgamma`
+ * `Polygamma`
+ * `SparseMatmul`
+ * `Zeta`
-The *Makefile* generator can build the project in only one configuration, so
-you need to build a separate folder for each configuration.
+ - Google Cloud Storage support is not currently implemented. The GCS library
+ currently depends on `libcurl` and `boringssl`, and the Windows version
+ could use standard Windows APIs for making HTTP requests and cryptography
+ (for OAuth). Contributions are welcome for this feature.
-To start using a *Release* configuration:
+We are actively working on improving CMake and Windows support, and addressing
+these limitations. We would appreciate pull requests that implement missing
+ops or APIs.
- [...]\contrib\cmake\build>mkdir release & cd release
- [...]\contrib\cmake\build\release>cmake -G "NMake Makefiles" ^
- -DCMAKE_BUILD_TYPE=Release ^
- -DCMAKE_INSTALL_PREFIX=../../../../../../install ^
- ../..
-It will generate *nmake* *Makefile* in current directory.
-
-To use *Debug* configuration:
-
- [...]\contrib\cmake\build>mkdir debug & cd debug
- [...]\contrib\cmake\build\debug>cmake -G "NMake Makefiles" ^
- -DCMAKE_BUILD_TYPE=Debug ^
- -DCMAKE_INSTALL_PREFIX=../../../../../../install ^
- ../..
-
-It will generate *nmake* *Makefile* in current directory.
-
-To create *Visual Studio* solution file:
-
- [...]\contrib\cmake\build>mkdir solution & cd solution
- [...]\contrib\cmake\build\solution>cmake -G "Visual Studio 12 2013 Win64" ^
- -DCMAKE_INSTALL_PREFIX=../../../../../../install ^
- ../..
-
-It will generate *Visual Studio* solution file *tensorflow.sln* in current
-directory.
-
-If the *gmock* directory does not exist, and/or you do not want to build
-TensorFlow unit tests, you need to add *cmake* command argument
-`-Dtensorflow_BUILD_TESTS=OFF` to disable testing.
-
-Compiling
-=========
-
-To compile tensorflow:
-
- [...]\contrib\cmake\build\release>nmake
-
-or
-
- [...]\contrib\cmake\build\debug>nmake
-
-And wait for the compilation to finish.
-
-If you prefer to use the IDE:
-
- * Open the generated tensorflow.sln file in Microsoft Visual Studio.
- * Choose "Debug" or "Release" configuration as desired.
- * From the Build menu, choose "Build Solution".
-
-And wait for the compilation to finish.
-
-Testing
-=======
-
-To run unit-tests:
+Step-by-step Windows build
+==========================
- [...]\contrib\cmake\build\release>nmake check
+1. Install the pre-requisites detailed above, and set up your environment.
-or
+ * The following commands assume that you are using the Windows Command
+ Prompt (`cmd.exe`). You will need to set up your environment to use the
+ appropriate toolchain, i.e. the 64-bit tools. (Some of the binary targets
+ we will build are too large for the 32-bit tools, and they will fail with
+     out-of-memory errors.) The typical command to set up your
+ environment is:
- [...]\contrib\cmake\build\debug>nmake check
+ ```
+ D:\temp> "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64\vcvarsall.bat"
+ ```
-You can also build project *check* from Visual Studio solution.
-Yes, it may sound strange, but it works.
+ * We assume that `cmake` and `git` are installed and in your `%PATH%`. If
+ for example `cmake` is not in your path and it is installed in
+ `C:\Program Files (x86)\CMake\bin\cmake.exe`, you can add this directory
+ to your `%PATH%` as follows:
-You should see an output similar to:
+ ```
+ D:\temp> set PATH="%PATH%;C:\Program Files (x86)\CMake\bin\cmake.exe"
+ ```
- Running main() from gmock_main.cc
- [==========] Running 1546 tests from 165 test cases.
-
- ...
-
- [==========] 1546 tests from 165 test cases ran. (2529 ms total)
- [ PASSED ] 1546 tests.
+2. Clone the TensorFlow repository and create a working directory for your
+ build:
-To run specific tests:
+ ```
+ D:\temp> git clone https://github.com/tensorflow/tensorflow.git
+ D:\temp> cd tensorflow\tensorflow\contrib\cmake
+ D:\temp\tensorflow\tensorflow\contrib\cmake> mkdir build
+ D:\temp\tensorflow\tensorflow\contrib\cmake> cd build
+ D:\temp\tensorflow\tensorflow\contrib\cmake\build>
+ ```
- C:\Path\to\tensorflow>tensorflow\contrib\cmake\build\release\tests.exe ^
- --gtest_filter=AnyTest*
- Running main() from gmock_main.cc
- Note: Google Test filter = AnyTest*
- [==========] Running 3 tests from 1 test case.
- [----------] Global test environment set-up.
- [----------] 3 tests from AnyTest
- [ RUN ] AnyTest.TestPackAndUnpack
- [ OK ] AnyTest.TestPackAndUnpack (0 ms)
- [ RUN ] AnyTest.TestPackAndUnpackAny
- [ OK ] AnyTest.TestPackAndUnpackAny (0 ms)
- [ RUN ] AnyTest.TestIs
- [ OK ] AnyTest.TestIs (0 ms)
- [----------] 3 tests from AnyTest (1 ms total)
-
- [----------] Global test environment tear-down
- [==========] 3 tests from 1 test case ran. (2 ms total)
- [ PASSED ] 3 tests.
+3. Invoke CMake to create Visual Studio solution and project files.
-Note that the tests must be run from the source folder.
+ **N.B.** This assumes that `cmake.exe` is in your `%PATH%` environment
+ variable. The other paths are for illustrative purposes only, and may
+ be different on your platform. The `^` character is a line continuation
+ and must be the last character on each line.
-If all tests are passed, safely continue.
+ ```
+ D:\...\build> cmake .. -A x64 -DCMAKE_BUILD_TYPE=Release ^
+ More? -DSWIG_EXECUTABLE=C:/tools/swigwin-3.0.10/swig.exe ^
+ More? -DPYTHON_EXECUTABLE=C:/Users/%USERNAME%/AppData/Local/Continuum/Anaconda3/python.exe ^
+ More? -DPYTHON_LIBRARIES=C:/Users/%USERNAME%/AppData/Local/Continuum/Anaconda3/libs/python35.lib
+ ```
-Installing
-==========
+ Note that the `-DCMAKE_BUILD_TYPE=Release` flag must match the build
+ configuration that you choose when invoking `msbuild`. The known-good
+ values are `Release` and `RelWithDebInfo`. The `Debug` build type is
+ not currently supported, because it relies on a `Debug` library for
+ Python (`python35d.lib`) that is not distributed by default.
-To install TensorFlow to the specified *install* folder:
+ There are various options that can be specified when generating the
+ solution and project files:
- [...]\contrib\cmake\build\release>nmake install
+ * `-DCMAKE_BUILD_TYPE=(Release|RelWithDebInfo)`: Note that the
+ `CMAKE_BUILD_TYPE` option must match the build configuration that you
+ choose when invoking MSBuild in step 4. The known-good values are
+ `Release` and `RelWithDebInfo`. The `Debug` build type is not currently
+ supported, because it relies on a `Debug` library for Python
+ (`python35d.lib`) that is not distributed by default.
-or
+ * `-Dtensorflow_BUILD_ALL_KERNELS=(ON|OFF)`. Defaults to `ON`. You can
+ build a small subset of the kernels for a faster build by setting this
+ option to `OFF`.
- [...]\contrib\cmake\build\debug>nmake install
+ * `-Dtensorflow_BUILD_CC_EXAMPLE=(ON|OFF)`. Defaults to `ON`. Generate
+ project files for a simple C++
+ [example training program](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/cc/tutorials/example_trainer.cc).
-You can also build project *INSTALL* from Visual Studio solution.
-It sounds not so strange and it works.
+ * `-Dtensorflow_BUILD_PYTHON_BINDINGS=(ON|OFF)`. Defaults to `ON`. Generate
+ project files for building a PIP package containing the TensorFlow runtime
+ and its Python bindings.
-This will create the following folders under the *install* location:
- * bin - that contains tensorflow binaries;
- * include - that contains C++ headers and TensorFlow *.proto files;
- * lib - that contains linking libraries and *CMake* configuration files for
- *tensorflow* package.
+ * `-Dtensorflow_ENABLE_GRPC_SUPPORT=(ON|OFF)`. Defaults to `ON`. Include
+ gRPC support and the distributed client and server code in the TensorFlow
+ runtime.
-Now you can if needed:
- * Copy the contents of the include directory to wherever you want to put
- headers.
- * Copy binaries wherever you put build tools (probably somewhere in your
- PATH).
- * Copy linking libraries libtensorflow[d].lib wherever you put libraries.
+ * `-Dtensorflow_ENABLE_SSL_SUPPORT=(ON|OFF)`. Defaults to `OFF`. Include
+ SSL support (for making secure HTTP requests) in the TensorFlow runtime.
+ This support is incomplete, and will be used for Google Cloud Storage
+ support.
-To avoid conflicts between the MSVC debug and release runtime libraries, when
-compiling a debug build of your application, you may need to link against a
-debug build of libtensorflowd.lib with "d" postfix. Similarly, release builds
-should link against release libtensorflow.lib library.
+4. Invoke MSBuild to build TensorFlow.
-DLLs vs. static linking
-=======================
+ To build the C++ example program, which will be created as a `.exe`
+ executable in the subdirectory `.\Release`:
-Static linking is now the default for the TensorFlow Buffer libraries. Due to
-issues with Win32's use of a separate heap for each DLL, as well as binary
-compatibility issues between different versions of MSVC's STL library, it is
-recommended that you use static linkage only. However, it is possible to
-build libtensorflow as DLLs if you really want. To do this, do the following:
+ ```
+ D:\...\build> MSBuild /p:Configuration=Release tf_tutorials_example_trainer.vcxproj
+ D:\...\build> Release\tf_tutorials_example_trainer.exe
+ ```
- * Add an additional flag `-Dtensorflow_BUILD_SHARED_LIBS=ON` when invoking
- cmake
- * Follow the same steps as described in the above section.
- * When compiling your project, make sure to `#define TENSORFLOW_USE_DLLS`.
+ To build the PIP package, which will be created as a `.whl` file in the
+ subdirectory `.\tf_python\dist`:
+
+ ```
+ D:\...\build> MSBuild /p:Configuration=Release tf_python_build_pip_package.vcxproj
+ ```
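+
+ The resulting wheel can then be installed with `pip`. The exact filename
+ encodes the version and platform tags, so the name below is a placeholder
+ rather than the literal build output:
+
+ ```
+ D:\...\build> pip install --upgrade tf_python\dist\<generated-wheel>.whl
+ ```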
-When distributing your software to end users, we strongly recommend that you
-do NOT install libtensorflow.dll to any shared location.
-Instead, keep these libraries next to your binaries, in your application's
-own install directory. C++ makes it very difficult to maintain binary
-compatibility between releases, so it is likely that future versions of these
-libraries will *not* be usable as drop-in replacements.
-If your project is itself a DLL intended for use by third-party software, we
-recommend that you do NOT expose TensorFlow objects in your library's
-public interface, and that you statically link them into your library.
-
-Notes on Compiler Warnings
-==========================
+Linux Continuous Integration build
+==================================
+
+This build requires [Docker](https://www.docker.com/) to be installed on the
+local machine.
-The following warnings have been disabled while building the tensorflow
-libraries and binaries. You may have to disable some of them in your own
-project as well, or live with them.
+```bash
+$ git clone --recursive https://github.com/tensorflow/tensorflow.git
+$ cd tensorflow
+$ tensorflow/tools/ci_build/ci_build.sh CMAKE tensorflow/tools/ci_build/builds/cmake.sh
+```
-* [TODO]
+That's it. Dependencies included.
diff --git a/tensorflow/contrib/cmake/external/farmhash.cmake b/tensorflow/contrib/cmake/external/farmhash.cmake
index a68e4cc422..b2c13a14fb 100644
--- a/tensorflow/contrib/cmake/external/farmhash.cmake
+++ b/tensorflow/contrib/cmake/external/farmhash.cmake
@@ -1,6 +1,6 @@
include (ExternalProject)
-set(farmhash_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/external/farmhash_archive)
+set(farmhash_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/external/farmhash_archive ${CMAKE_CURRENT_BINARY_DIR}/external/farmhash_archive/util)
set(farmhash_URL https://github.com/google/farmhash/archive/34c13ddfab0e35422f4c3979f360635a8c050260.zip)
set(farmhash_HASH SHA256=e3d37a59101f38fd58fb799ed404d630f0eee18bfc2a2433910977cc8fea9c28)
set(farmhash_BUILD ${CMAKE_BINARY_DIR}/farmhash/src/farmhash)
diff --git a/tensorflow/contrib/cmake/external/gif.cmake b/tensorflow/contrib/cmake/external/gif.cmake
index 021c2d676e..da20561b88 100644
--- a/tensorflow/contrib/cmake/external/gif.cmake
+++ b/tensorflow/contrib/cmake/external/gif.cmake
@@ -4,28 +4,58 @@ set(gif_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/external/gif_archive/giflib-5.1.
set(gif_URL http://ufpr.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz)
set(gif_HASH SHA256=34a7377ba834397db019e8eb122e551a49c98f49df75ec3fcc92b9a794a4f6d1)
set(gif_INSTALL ${CMAKE_BINARY_DIR}/gif/install)
-set(gif_STATIC_LIBRARIES ${gif_INSTALL}/lib/libgif.a)
+set(gif_BUILD ${CMAKE_BINARY_DIR}/gif/src/gif)
+
set(gif_HEADERS
"${gif_INSTALL}/include/gif_lib.h"
)
-set(ENV{CFLAGS} "$ENV{CFLAGS} -fPIC")
-
-ExternalProject_Add(gif
- PREFIX gif
- URL ${gif_URL}
- URL_HASH ${gif_HASH}
- INSTALL_DIR ${gif_INSTALL}
- DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
- BUILD_COMMAND $(MAKE)
- INSTALL_COMMAND $(MAKE) install
- CONFIGURE_COMMAND
- ${CMAKE_CURRENT_BINARY_DIR}/gif/src/gif/configure
- --with-pic
- --prefix=${gif_INSTALL}
- --enable-shared=yes
-)
+if(WIN32)
+
+ set(gif_STATIC_LIBRARIES ${gif_INSTALL}/lib/giflib.lib)
+
+ ExternalProject_Add(gif
+ PREFIX gif
+ URL ${gif_URL}
+ URL_HASH ${gif_HASH}
+ PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/patches/gif/CMakeLists.txt ${gif_BUILD}
+ INSTALL_DIR ${gif_INSTALL}
+ DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
+ CMAKE_CACHE_ARGS
+ -DCMAKE_BUILD_TYPE:STRING=Release
+ -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
+ -DCMAKE_INSTALL_PREFIX:STRING=${gif_INSTALL}
+ )
+
+ ExternalProject_Add_Step(gif copy_unistd
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_SOURCE_DIR}/patches/gif/unistd.h ${gif_BUILD}/lib/unistd.h
+ DEPENDEES patch
+ DEPENDERS build
+ )
+
+else()
+
+ set(gif_STATIC_LIBRARIES ${gif_INSTALL}/lib/libgif.a)
+ set(ENV{CFLAGS} "$ENV{CFLAGS} -fPIC")
+
+ ExternalProject_Add(gif
+ PREFIX gif
+ URL ${gif_URL}
+ URL_HASH ${gif_HASH}
+ INSTALL_DIR ${gif_INSTALL}
+ DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
+ BUILD_COMMAND $(MAKE)
+ INSTALL_COMMAND $(MAKE) install
+ CONFIGURE_COMMAND
+ ${CMAKE_CURRENT_BINARY_DIR}/gif/src/gif/configure
+ --with-pic
+ --prefix=${gif_INSTALL}
+ --enable-shared=yes
+ )
+
+endif()
# put gif includes in the directory where they are expected
add_custom_target(gif_create_destination_dir
diff --git a/tensorflow/contrib/cmake/external/grpc.cmake b/tensorflow/contrib/cmake/external/grpc.cmake
index 6684c05142..ecb381f115 100644
--- a/tensorflow/contrib/cmake/external/grpc.cmake
+++ b/tensorflow/contrib/cmake/external/grpc.cmake
@@ -6,12 +6,12 @@ set(GRPC_BUILD ${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc)
set(GRPC_TAG 3bc78cd0b5bd784a235c01612d634b1ec5f8fb97)
if(WIN32)
- set(GRPC_LIBRARIES
+ set(grpc_STATIC_LIBRARIES
${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/${CMAKE_BUILD_TYPE}/grpc++_unsecure.lib
${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/${CMAKE_BUILD_TYPE}/grpc_unsecure.lib
${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/${CMAKE_BUILD_TYPE}/gpr.lib)
else()
- set(GRPC_LIBRARIES
+ set(grpc_STATIC_LIBRARIES
${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/libgrpc++_unsecure.a
${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/libgrpc_unsecure.a
${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/libgpr.a)
@@ -30,6 +30,6 @@ ExternalProject_Add(grpc
-DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
-DPROTOBUF_INCLUDE_DIRS:STRING=${PROTOBUF_INCLUDE_DIRS}
- -DPROTOBUF_LIBRARIES:STRING=${PROTOBUF_LIBRARIES}
+ -DPROTOBUF_LIBRARIES:STRING=${protobuf_STATIC_LIBRARIES}
)
diff --git a/tensorflow/contrib/cmake/external/highwayhash.cmake b/tensorflow/contrib/cmake/external/highwayhash.cmake
index 7de67daee9..a19ebfe24f 100644
--- a/tensorflow/contrib/cmake/external/highwayhash.cmake
+++ b/tensorflow/contrib/cmake/external/highwayhash.cmake
@@ -17,41 +17,23 @@ add_custom_target(highwayhash_copy_headers_to_destination
if(WIN32)
set(highwayhash_HEADERS "${highwayhash_BUILD}/highwayhash/*.h")
set(highwayhash_STATIC_LIBRARIES ${highwayhash_INSTALL}/lib/highwayhash.lib)
-
- ExternalProject_Add(highwayhash
- PREFIX highwayhash
- GIT_REPOSITORY ${highwayhash_URL}
- GIT_TAG ${highwayhash_TAG}
- DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
- BUILD_IN_SOURCE 1
- PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/patches/highwayhash/CMakeLists.txt ${highwayhash_BUILD}
- INSTALL_DIR ${highwayhash_INSTALL}
- CMAKE_CACHE_ARGS
- -DCMAKE_BUILD_TYPE:STRING=Release
- -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
- -DCMAKE_INSTALL_PREFIX:STRING=${highwayhash_INSTALL})
-
- add_custom_command(TARGET highwayhash_copy_headers_to_destination PRE_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy_directory ${highwayhash_INSTALL}/include/ ${highwayhash_INCLUDE_DIR}/highwayhash)
-
else()
-
set(highwayhash_HEADERS "${highwayhash_BUILD}/highwayhash/*.h")
set(highwayhash_STATIC_LIBRARIES ${highwayhash_INSTALL}/lib/libhighwayhash.a)
-
- ExternalProject_Add(highwayhash
- PREFIX highwayhash
- GIT_REPOSITORY ${highwayhash_URL}
- GIT_TAG ${highwayhash_TAG}
- DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
- BUILD_IN_SOURCE 1
- BUILD_COMMAND $(MAKE)
- CONFIGURE_COMMAND ""
- INSTALL_COMMAND "")
-
- foreach(header_file ${highwayhash_HEADERS})
- add_custom_command(TARGET highwayhash_copy_headers_to_destination PRE_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy ${header_file} ${highwayhash_INCLUDE_DIR}/highwayhash)
- endforeach()
-
endif()
+
+ExternalProject_Add(highwayhash
+ PREFIX highwayhash
+ GIT_REPOSITORY ${highwayhash_URL}
+ GIT_TAG ${highwayhash_TAG}
+ DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
+ BUILD_IN_SOURCE 1
+ PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/patches/highwayhash/CMakeLists.txt ${highwayhash_BUILD}
+ INSTALL_DIR ${highwayhash_INSTALL}
+ CMAKE_CACHE_ARGS
+ -DCMAKE_BUILD_TYPE:STRING=Release
+ -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
+ -DCMAKE_INSTALL_PREFIX:STRING=${highwayhash_INSTALL})
+
+add_custom_command(TARGET highwayhash_copy_headers_to_destination PRE_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy_directory ${highwayhash_INSTALL}/include/ ${highwayhash_INCLUDE_DIR}/highwayhash)
diff --git a/tensorflow/contrib/cmake/external/protobuf.cmake b/tensorflow/contrib/cmake/external/protobuf.cmake
index abde06ad31..28f946ec3e 100644
--- a/tensorflow/contrib/cmake/external/protobuf.cmake
+++ b/tensorflow/contrib/cmake/external/protobuf.cmake
@@ -1,28 +1,32 @@
include (ExternalProject)
set(PROTOBUF_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR}/protobuf/src/protobuf/src)
-set(PROTOBUF_URL https://github.com/google/protobuf/releases/download/v3.1.0/protobuf-cpp-3.1.0.zip)
-set(PROTOBUF_HASH SHA256=0c18ccc99e921c407f359047f9b56cca196c3ab36eed79e5979df6c1f9e623b7)
+set(PROTOBUF_URL https://github.com/mrry/protobuf.git) # Includes MSVC fix.
+set(PROTOBUF_TAG 1d2c7b6c7376f396c8c7dd9b6afd2d4f83f3cb05)
if(WIN32)
- set(PROTOBUF_LIBRARIES ${CMAKE_CURRENT_BINARY_DIR}/protobuf/src/protobuf/${CMAKE_BUILD_TYPE}/libprotobuf.lib)
+ set(protobuf_STATIC_LIBRARIES ${CMAKE_CURRENT_BINARY_DIR}/protobuf/src/protobuf/${CMAKE_BUILD_TYPE}/libprotobuf.lib)
set(PROTOBUF_PROTOC_EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/protobuf/src/protobuf/${CMAKE_BUILD_TYPE}/protoc.exe)
+ set(PROTOBUF_ADDITIONAL_CMAKE_OPTIONS -Dprotobuf_MSVC_STATIC_RUNTIME:BOOL=OFF -A x64)
else()
- set(PROTOBUF_LIBRARIES ${CMAKE_CURRENT_BINARY_DIR}/protobuf/src/protobuf/libprotobuf.a)
+ set(protobuf_STATIC_LIBRARIES ${CMAKE_CURRENT_BINARY_DIR}/protobuf/src/protobuf/libprotobuf.a)
set(PROTOBUF_PROTOC_EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/protobuf/src/protobuf/protoc)
endif()
ExternalProject_Add(protobuf
PREFIX protobuf
- URL ${PROTOBUF_URL}
+ GIT_REPOSITORY ${PROTOBUF_URL}
+ GIT_TAG ${PROTOBUF_TAG}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
BUILD_IN_SOURCE 1
SOURCE_DIR ${CMAKE_BINARY_DIR}/protobuf/src/protobuf
- CONFIGURE_COMMAND ${CMAKE_COMMAND} cmake/ -Dprotobuf_BUILD_TESTS=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON -Dprotobuf_MSVC_STATIC_RUNTIME:BOOL=OFF
+ CONFIGURE_COMMAND ${CMAKE_COMMAND} cmake/
+ -Dprotobuf_BUILD_TESTS=OFF
+ -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+ ${PROTOBUF_ADDITIONAL_CMAKE_OPTIONS}
INSTALL_COMMAND ""
CMAKE_CACHE_ARGS
-DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
- -Dprotobuf_MSVC_STATIC_RUNTIME:BOOL=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
)
diff --git a/tensorflow/contrib/cmake/install.cmake b/tensorflow/contrib/cmake/install.cmake
deleted file mode 100644
index a3fe2bcf06..0000000000
--- a/tensorflow/contrib/cmake/install.cmake
+++ /dev/null
@@ -1 +0,0 @@
-# [TODO] \ No newline at end of file
diff --git a/tensorflow/contrib/cmake/patches/gif/CMakeLists.txt b/tensorflow/contrib/cmake/patches/gif/CMakeLists.txt
new file mode 100644
index 0000000000..0fe919d89e
--- /dev/null
+++ b/tensorflow/contrib/cmake/patches/gif/CMakeLists.txt
@@ -0,0 +1,33 @@
+cmake_minimum_required(VERSION 2.8.3)
+
+project(giflib)
+
+set(GIFLIB_SRCS
+ "lib/dgif_lib.c"
+ "lib/egif_lib.c"
+ "lib/gif_font.c"
+ "lib/gif_hash.h"
+ "lib/gifalloc.c"
+ "lib/openbsd-reallocarray.c"
+ "lib/gif_err.c"
+ "lib/quantize.c"
+ "lib/gif_hash.c"
+ "lib/gif_lib.h"
+ "lib/gif_lib_private.h"
+)
+set(GIFLIB_INCLUDES
+ "lib/gif_lib.h"
+)
+
+include_directories("${CMAKE_CURRENT_SOURCE_DIR}/lib")
+
+add_library(giflib ${GIFLIB_SRCS})
+
+install(TARGETS giflib
+ RUNTIME DESTINATION bin COMPONENT RuntimeLibraries
+ LIBRARY DESTINATION lib COMPONENT RuntimeLibraries
+ ARCHIVE DESTINATION lib COMPONENT Development)
+
+foreach(GIFLIB_INCLUDE ${GIFLIB_INCLUDES})
+ install(FILES ${GIFLIB_INCLUDE} DESTINATION include COMPONENT Development)
+endforeach()
diff --git a/tensorflow/contrib/cmake/patches/gif/unistd.h b/tensorflow/contrib/cmake/patches/gif/unistd.h
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tensorflow/contrib/cmake/patches/gif/unistd.h
diff --git a/tensorflow/contrib/cmake/patches/highwayhash/CMakeLists.txt b/tensorflow/contrib/cmake/patches/highwayhash/CMakeLists.txt
index 94bb62f95f..8313d1c9f2 100644
--- a/tensorflow/contrib/cmake/patches/highwayhash/CMakeLists.txt
+++ b/tensorflow/contrib/cmake/patches/highwayhash/CMakeLists.txt
@@ -40,6 +40,11 @@ include_directories("${CMAKE_CURRENT_SOURCE_DIR}")
add_library(highwayhash ${HIGHWAYHASH_SRCS})
+# C++11
+target_compile_features(highwayhash PRIVATE
+ cxx_rvalue_references
+)
+
install(TARGETS highwayhash
LIBRARY DESTINATION lib COMPONENT RuntimeLibraries
ARCHIVE DESTINATION lib COMPONENT Development)
diff --git a/tensorflow/contrib/cmake/setup.py b/tensorflow/contrib/cmake/setup.py
index 574fa57b7e..bd1dadedff 100644
--- a/tensorflow/contrib/cmake/setup.py
+++ b/tensorflow/contrib/cmake/setup.py
@@ -26,7 +26,7 @@ from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
-_VERSION = '0.10.0-cmake-experimental'
+_VERSION = '0.11.0rc0-cmake-experimental'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
@@ -140,6 +140,10 @@ def find_files(pattern, root):
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]
+if os.name == 'nt':
+ EXTENSION_NAME = 'python/_pywrap_tensorflow.pyd'
+else:
+ EXTENSION_NAME = 'python/_pywrap_tensorflow.so'
# TODO(mrry): Add support for development headers.
@@ -168,8 +172,7 @@ setup(
# Add in any packaged data.
include_package_data=True,
package_data={
- 'tensorflow': ['python/_pywrap_tensorflow.so',
- ] + matches,
+ 'tensorflow': [EXTENSION_NAME] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
diff --git a/tensorflow/contrib/cmake/tests.cmake b/tensorflow/contrib/cmake/tests.cmake
deleted file mode 100644
index a3fe2bcf06..0000000000
--- a/tensorflow/contrib/cmake/tests.cmake
+++ /dev/null
@@ -1 +0,0 @@
-# [TODO] \ No newline at end of file
diff --git a/tensorflow/contrib/cmake/tf_cc_ops.cmake b/tensorflow/contrib/cmake/tf_cc_ops.cmake
index b5c33d66ee..6d9c495574 100644
--- a/tensorflow/contrib/cmake/tf_cc_ops.cmake
+++ b/tensorflow/contrib/cmake/tf_cc_ops.cmake
@@ -12,21 +12,6 @@ add_library(tf_cc_framework OBJECT ${tf_cc_framework_srcs})
add_dependencies(tf_cc_framework tf_core_framework)
-target_include_directories(tf_cc_framework PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-target_compile_options(tf_cc_framework PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_cc_framework PRIVATE
- cxx_rvalue_references
-)
-
########################################################
# tf_cc_op_gen_main library
########################################################
@@ -40,67 +25,10 @@ add_library(tf_cc_op_gen_main OBJECT ${tf_cc_op_gen_main_srcs})
add_dependencies(tf_cc_op_gen_main tf_core_framework)
-target_include_directories(tf_cc_op_gen_main PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-#target_link_libraries(tf_cc_op_gen_main
-# ${CMAKE_THREAD_LIBS_INIT}
-# ${PROTOBUF_LIBRARIES}
-# tf_protos_cc
-# tf_core_lib
-# tf_core_framework
-#)
-
-target_compile_options(tf_cc_op_gen_main PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_cc_op_gen_main PRIVATE
- cxx_rvalue_references
-)
-
########################################################
# tf_gen_op_wrapper_cc executables
########################################################
-#
-# # Run the op generator.
-# if name == "sendrecv_ops":
-# include_internal = "1"
-# else:
-# include_internal = "0"
-# native.genrule(
-# name=name + "_genrule",
-# outs=[out_ops_file + ".h", out_ops_file + ".cc"],
-# tools=[":" + tool],
-# cmd=("$(location :" + tool + ") $(location :" + out_ops_file + ".h) " +
-# "$(location :" + out_ops_file + ".cc) " + include_internal))
-
-
-
-#def tf_gen_op_wrappers_cc(name,
-# op_lib_names=[],
-# other_srcs=[],
-# other_hdrs=[],
-# pkg=""):
-# subsrcs = other_srcs
-# subhdrs = other_hdrs
-# for n in op_lib_names:
-# tf_gen_op_wrapper_cc(n, "ops/" + n, pkg=pkg)
-# subsrcs += ["ops/" + n + ".cc"]
-# subhdrs += ["ops/" + n + ".h"]
-#
-# native.cc_library(name=name,
-# srcs=subsrcs,
-# hdrs=subhdrs,
-# deps=["//tensorflow/core:core_cpu"],
-# copts=tf_copts(),
-# alwayslink=1,)
-
# create directory for ops generated files
set(cc_ops_target_dir ${CMAKE_CURRENT_BINARY_DIR}/tensorflow/cc/ops)
@@ -115,18 +43,6 @@ set(tf_cc_op_lib_names
"user_ops"
)
foreach(tf_cc_op_lib_name ${tf_cc_op_lib_names})
- #tf_gen_op_wrapper_cc(name, out_ops_file, pkg=""):
- # # Construct an op generator binary for these ops.
- # tool = out_ops_file + "_gen_cc" #example ops/array_ops_gen_cc
- # native.cc_binary(
- # name = tool,
- # copts = tf_copts(),
- # linkopts = ["-lm"],
- # linkstatic = 1, # Faster to link this one-time-use binary dynamically
- # deps = (["//tensorflow/cc:cc_op_gen_main",
- # pkg + ":" + name + "_op_lib"])
- # )
-
# Using <TARGET_OBJECTS:...> to work around an issue where no ops were
# registered (static initializers dropped by the linker because the ops
# are not used explicitly in the *_gen_cc executables).
@@ -137,39 +53,9 @@ foreach(tf_cc_op_lib_name ${tf_cc_op_lib_names})
$<TARGET_OBJECTS:tf_core_framework>
)
- target_include_directories(${tf_cc_op_lib_name}_gen_cc PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
- )
-
- find_package(ZLIB REQUIRED)
-
target_link_libraries(${tf_cc_op_lib_name}_gen_cc PRIVATE
- ${CMAKE_THREAD_LIBS_INIT}
- ${PROTOBUF_LIBRARIES}
tf_protos_cc
- ${gif_STATIC_LIBRARIES}
- ${jpeg_STATIC_LIBRARIES}
- ${png_STATIC_LIBRARIES}
- ${ZLIB_LIBRARIES}
- ${jsoncpp_STATIC_LIBRARIES}
- ${boringssl_STATIC_LIBRARIES}
- ${CMAKE_DL_LIBS}
- )
- if(tensorflow_ENABLE_SSL_SUPPORT)
- target_link_libraries(${tf_cc_op_lib_name}_gen_cc PRIVATE
- ${boringssl_STATIC_LIBRARIES})
- endif()
-
- target_compile_options(${tf_cc_op_lib_name}_gen_cc PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
- -lm
- )
-
- # C++11
- target_compile_features(${tf_cc_op_lib_name}_gen_cc PRIVATE
- cxx_rvalue_references
+ ${tensorflow_EXTERNAL_LIBRARIES}
)
set(cc_ops_include_internal 0)
@@ -198,43 +84,3 @@ add_library(tf_cc_ops OBJECT
"${tensorflow_source_dir}/tensorflow/cc/ops/const_op.cc"
"${tensorflow_source_dir}/tensorflow/cc/ops/standard_ops.h"
)
-
-target_include_directories(tf_cc_ops PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-#target_link_libraries(tf_cc_ops
-# ${CMAKE_THREAD_LIBS_INIT}
-# ${PROTOBUF_LIBRARIES}
-# tf_protos_cc
-# tf_core_lib
-# tf_core_cpu
-# tf_models_word2vec_ops
-#)
-
-target_compile_options(tf_cc_ops PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_cc_ops PRIVATE
- cxx_rvalue_references
-)
-
-
-#tf_gen_op_wrappers_cc(
-# name = "cc_ops",
-# op_lib_names = [
-# ...
-# ],
-# other_hdrs = [
-# "ops/const_op.h",
-# "ops/standard_ops.h",
-# ],
-# other_srcs = [
-# "ops/const_op.cc",
-# ] + glob(["ops/*_grad.cc"]),
-# pkg = "//tensorflow/core",
-#)
diff --git a/tensorflow/contrib/cmake/tf_core_cpu.cmake b/tensorflow/contrib/cmake/tf_core_cpu.cmake
index 58635a5266..143f2e7fb5 100644
--- a/tensorflow/contrib/cmake/tf_core_cpu.cmake
+++ b/tensorflow/contrib/cmake/tf_core_cpu.cmake
@@ -30,30 +30,4 @@ list(APPEND tf_core_cpu_srcs
)
add_library(tf_core_cpu OBJECT ${tf_core_cpu_srcs})
-
-target_include_directories(tf_core_cpu PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-add_dependencies(tf_core_cpu
- tf_core_framework
-)
-#target_link_libraries(tf_core_cpu
-# ${CMAKE_THREAD_LIBS_INIT}
-# ${PROTOBUF_LIBRARIES}
-# tf_core_framework
-# tf_core_lib
-# tf_protos_cc
-#)
-
-target_compile_options(tf_core_cpu PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_core_cpu PRIVATE
- cxx_rvalue_references
-)
-
+add_dependencies(tf_core_cpu tf_core_framework)
diff --git a/tensorflow/contrib/cmake/tf_core_direct_session.cmake b/tensorflow/contrib/cmake/tf_core_direct_session.cmake
index ba274d7f79..113e7b97f6 100644
--- a/tensorflow/contrib/cmake/tf_core_direct_session.cmake
+++ b/tensorflow/contrib/cmake/tf_core_direct_session.cmake
@@ -18,27 +18,3 @@ list(REMOVE_ITEM tf_core_direct_session_srcs ${tf_core_direct_session_test_srcs}
add_library(tf_core_direct_session OBJECT ${tf_core_direct_session_srcs})
add_dependencies(tf_core_direct_session tf_core_cpu)
-
-target_include_directories(tf_core_direct_session PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-#target_link_libraries(tf_core_direct_session
-# ${CMAKE_THREAD_LIBS_INIT}
-# ${PROTOBUF_LIBRARIES}
-# tf_core_cpu
-# tf_core_framework
-# tf_core_lib
-# tf_protos_cc
-#)
-
-target_compile_options(tf_core_direct_session PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_core_direct_session PRIVATE
- cxx_rvalue_references
-)
diff --git a/tensorflow/contrib/cmake/tf_core_distributed_runtime.cmake b/tensorflow/contrib/cmake/tf_core_distributed_runtime.cmake
index e1f8bdd609..cf41e92e4a 100644
--- a/tensorflow/contrib/cmake/tf_core_distributed_runtime.cmake
+++ b/tensorflow/contrib/cmake/tf_core_distributed_runtime.cmake
@@ -20,22 +20,6 @@ add_dependencies(tf_core_distributed_runtime
tf_core_cpu grpc
)
-target_include_directories(tf_core_distributed_runtime PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
- ${GRPC_INCLUDE_DIRS}
-)
-
-target_compile_options(tf_core_distributed_runtime PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_core_distributed_runtime PRIVATE
- cxx_rvalue_references
-)
-
########################################################
# grpc_tensorflow_server executable
########################################################
@@ -56,42 +40,7 @@ add_executable(grpc_tensorflow_server
$<TARGET_OBJECTS:tf_core_distributed_runtime>
)
-add_dependencies(tf_core_distributed_runtime
- grpc
-)
-
-target_include_directories(grpc_tensorflow_server PUBLIC
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
- ${GRPC_INCLUDE_DIRS}
-)
-
-find_package(ZLIB REQUIRED)
-
target_link_libraries(grpc_tensorflow_server PUBLIC
- ${CMAKE_THREAD_LIBS_INIT}
- ${PROTOBUF_LIBRARIES}
- ${GRPC_LIBRARIES}
tf_protos_cc
- ${farmhash_STATIC_LIBRARIES}
- ${gif_STATIC_LIBRARIES}
- ${jpeg_STATIC_LIBRARIES}
- ${jsoncpp_STATIC_LIBRARIES}
- ${png_STATIC_LIBRARIES}
- ${ZLIB_LIBRARIES}
- ${CMAKE_DL_LIBS}
-)
-if(tensorflow_ENABLE_SSL_SUPPORT)
- target_link_libraries(grpc_tensorflow_server PUBLIC
- ${boringssl_STATIC_LIBRARIES})
-endif()
-
-target_compile_options(grpc_tensorflow_server PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(grpc_tensorflow_server PRIVATE
- cxx_rvalue_references
+ ${tensorflow_EXTERNAL_LIBRARIES}
)
diff --git a/tensorflow/contrib/cmake/tf_core_framework.cmake b/tensorflow/contrib/cmake/tf_core_framework.cmake
index cad3b7864d..d80db2121b 100644
--- a/tensorflow/contrib/cmake/tf_core_framework.cmake
+++ b/tensorflow/contrib/cmake/tf_core_framework.cmake
@@ -71,8 +71,6 @@ endfunction()
# tf_protos_cc library
########################################################
-include_directories(${PROTOBUF_INCLUDE_DIRS})
-include_directories(${CMAKE_CURRENT_BINARY_DIR})
file(GLOB_RECURSE tf_protos_cc_srcs RELATIVE ${tensorflow_source_dir}
"${tensorflow_source_dir}/tensorflow/core/*.proto"
)
@@ -114,16 +112,6 @@ RELATIVE_PROTOBUF_TEXT_GENERATE_CPP(PROTO_TEXT_SRCS PROTO_TEXT_HDRS
)
add_library(tf_protos_cc ${PROTO_SRCS} ${PROTO_HDRS})
-target_include_directories(tf_protos_cc PUBLIC
- ${CMAKE_CURRENT_BINARY_DIR}
-)
-target_link_libraries(tf_protos_cc PUBLIC
- ${PROTOBUF_LIBRARIES}
-)
-# C++11
-target_compile_features(tf_protos_cc PRIVATE
- cxx_rvalue_references
-)
########################################################
# tf_core_lib library
@@ -131,11 +119,43 @@ target_compile_features(tf_protos_cc PRIVATE
file(GLOB_RECURSE tf_core_lib_srcs
"${tensorflow_source_dir}/tensorflow/core/lib/*.h"
"${tensorflow_source_dir}/tensorflow/core/lib/*.cc"
- "${tensorflow_source_dir}/tensorflow/core/platform/*.h"
- "${tensorflow_source_dir}/tensorflow/core/platform/*.cc"
"${tensorflow_source_dir}/tensorflow/core/public/*.h"
)
+file(GLOB tf_core_platform_srcs
+ "${tensorflow_source_dir}/tensorflow/core/platform/*.h"
+ "${tensorflow_source_dir}/tensorflow/core/platform/*.cc"
+ "${tensorflow_source_dir}/tensorflow/core/platform/default/*.h"
+ "${tensorflow_source_dir}/tensorflow/core/platform/default/*.cc")
+list(APPEND tf_core_lib_srcs ${tf_core_platform_srcs})
+
+if(UNIX)
+ file(GLOB tf_core_platform_posix_srcs
+ "${tensorflow_source_dir}/tensorflow/core/platform/posix/*.h"
+ "${tensorflow_source_dir}/tensorflow/core/platform/posix/*.cc"
+ )
+ list(APPEND tf_core_lib_srcs ${tf_core_platform_posix_srcs})
+endif(UNIX)
+
+if(WIN32)
+ file(GLOB tf_core_platform_windows_srcs
+ "${tensorflow_source_dir}/tensorflow/core/platform/windows/*.h"
+ "${tensorflow_source_dir}/tensorflow/core/platform/windows/*.cc"
+ "${tensorflow_source_dir}/tensorflow/core/platform/posix/error.h"
+ "${tensorflow_source_dir}/tensorflow/core/platform/posix/error.cc"
+ )
+ list(APPEND tf_core_lib_srcs ${tf_core_platform_windows_srcs})
+endif(WIN32)
+
+if(tensorflow_ENABLE_SSL_SUPPORT)
+ # Cloud libraries require boringssl.
+ file(GLOB tf_core_platform_cloud_srcs
+ "${tensorflow_source_dir}/tensorflow/core/platform/cloud/*.h"
+ "${tensorflow_source_dir}/tensorflow/core/platform/cloud/*.cc"
+ )
+ list(APPEND tf_core_lib_srcs ${tf_core_platform_cloud_srcs})
+endif()
+
file(GLOB_RECURSE tf_core_lib_test_srcs
"${tensorflow_source_dir}/tensorflow/core/lib/*test*.h"
"${tensorflow_source_dir}/tensorflow/core/lib/*test*.cc"
@@ -143,50 +163,10 @@ file(GLOB_RECURSE tf_core_lib_test_srcs
"${tensorflow_source_dir}/tensorflow/core/platform/*test*.cc"
"${tensorflow_source_dir}/tensorflow/core/public/*test*.h"
)
-
-list(REMOVE_ITEM tf_core_lib_srcs ${tf_core_lib_test_srcs})
-
-if(NOT tensorflow_ENABLE_SSL_SUPPORT)
- file(GLOB_RECURSE tf_core_lib_cloud_srcs
- "${tensorflow_source_dir}/tensorflow/core/platform/cloud/*.h"
- "${tensorflow_source_dir}/tensorflow/core/platform/cloud/*.cc"
- )
- list(REMOVE_ITEM tf_core_lib_srcs ${tf_core_lib_cloud_srcs})
-endif()
+list(REMOVE_ITEM tf_core_lib_srcs ${tf_core_lib_test_srcs})
add_library(tf_core_lib OBJECT ${tf_core_lib_srcs})
-target_include_directories(tf_core_lib PUBLIC
- ${tensorflow_source_dir}
- ${gif_INCLUDE_DIR}
- ${jpeg_INCLUDE_DIR}
- ${png_INCLUDE_DIR}
- ${eigen_INCLUDE_DIRS}
- ${jsoncpp_INCLUDE_DIR}
-)
-target_compile_options(tf_core_lib PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_core_lib PRIVATE
- cxx_rvalue_references
-)
-
-add_dependencies(tf_core_lib
- gif_copy_headers_to_destination
- jpeg_copy_headers_to_destination
- png_copy_headers_to_destination
- eigen
- tf_protos_cc
- jsoncpp
- )
-
-if(tensorflow_ENABLE_SSL_SUPPORT)
- target_include_directories(tf_core_lib PUBLIC ${boringssl_INCLUDE_DIR})
- add_dependencies(tf_core_lib boringssl)
-endif()
-
+add_dependencies(tf_core_lib ${tensorflow_EXTERNAL_DEPENDENCIES} tf_protos_cc)
# Tricky setup to force always rebuilding
# force_rebuild always runs forcing ${VERSION_INFO_CC} target to run
@@ -197,13 +177,12 @@ add_custom_target(force_rebuild_target ALL DEPENDS ${VERSION_INFO_CC})
add_custom_command(OUTPUT __force_rebuild COMMAND cmake -E echo)
add_custom_command(OUTPUT
${VERSION_INFO_CC}
- COMMAND ${tensorflow_source_dir}/tensorflow/tools/git/gen_git_source.py
+ COMMAND ${PYTHON_EXECUTABLE} ${tensorflow_source_dir}/tensorflow/tools/git/gen_git_source.py
--raw_generate ${VERSION_INFO_CC}
DEPENDS __force_rebuild)
set(tf_version_srcs ${tensorflow_source_dir}/tensorflow/core/util/version_info.cc)
-
########################################################
# tf_core_framework library
########################################################
@@ -212,7 +191,6 @@ file(GLOB_RECURSE tf_core_framework_srcs
"${tensorflow_source_dir}/tensorflow/core/framework/*.cc"
"${tensorflow_source_dir}/tensorflow/core/util/*.h"
"${tensorflow_source_dir}/tensorflow/core/util/*.cc"
- "${tensorflow_source_dir}/tensorflow/core/client/tensor_c_api.cc"
"${tensorflow_source_dir}/tensorflow/core/common_runtime/session.cc"
"${tensorflow_source_dir}/tensorflow/core/common_runtime/session_factory.cc"
"${tensorflow_source_dir}/tensorflow/core/common_runtime/session_options.cc"
@@ -230,26 +208,18 @@ file(GLOB_RECURSE tf_core_framework_test_srcs
"${tensorflow_source_dir}/tensorflow/core/util/*main.cc"
)
-list(REMOVE_ITEM tf_core_framework_srcs ${tf_core_framework_test_srcs})
+list(REMOVE_ITEM tf_core_framework_srcs ${tf_core_framework_test_srcs}
+ "${tensorflow_source_dir}/tensorflow/core/util/memmapped_file_system.cc"
+ "${tensorflow_source_dir}/tensorflow/core/util/memmapped_file_system.h"
+ "${tensorflow_source_dir}/tensorflow/core/util/memmapped_file_system_writer.cc"
+)
add_library(tf_core_framework OBJECT
${tf_core_framework_srcs}
${tf_version_srcs}
${PROTO_TEXT_HDRS}
${PROTO_TEXT_SRCS})
-target_include_directories(tf_core_framework PUBLIC
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
add_dependencies(tf_core_framework
tf_core_lib
proto_text
)
-target_compile_options(tf_core_framework PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-# C++11
-target_compile_features(tf_core_framework PRIVATE
- cxx_rvalue_references
-)
diff --git a/tensorflow/contrib/cmake/tf_core_kernels.cmake b/tensorflow/contrib/cmake/tf_core_kernels.cmake
index 8f911e3cb1..4d52a00c86 100644
--- a/tensorflow/contrib/cmake/tf_core_kernels.cmake
+++ b/tensorflow/contrib/cmake/tf_core_kernels.cmake
@@ -1,10 +1,73 @@
########################################################
# tf_core_kernels library
########################################################
-file(GLOB_RECURSE tf_core_kernels_srcs
- "${tensorflow_source_dir}/tensorflow/core/kernels/*.h"
- "${tensorflow_source_dir}/tensorflow/core/kernels/*.cc"
-)
+
+if(tensorflow_BUILD_ALL_KERNELS)
+ file(GLOB_RECURSE tf_core_kernels_srcs
+ "${tensorflow_source_dir}/tensorflow/core/kernels/*.h"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/*.cc"
+ )
+else(tensorflow_BUILD_ALL_KERNELS)
+ # Build a minimal subset of kernels to be able to run a test program.
+ set(tf_core_kernels_srcs
+ "${tensorflow_source_dir}/tensorflow/core/kernels/bounds_check.h"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/constant_op.h"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/constant_op.cc"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/fill_functor.h"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/fill_functor.cc"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/matmul_op.h"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/matmul_op.cc"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/no_op.h"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/no_op.cc"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/sendrecv_ops.h"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/sendrecv_ops.cc"
+ )
+endif(tensorflow_BUILD_ALL_KERNELS)
+
+if(tensorflow_BUILD_CONTRIB_KERNELS)
+ set(tf_contrib_kernels_srcs
+ "${tensorflow_source_dir}/tensorflow/contrib/factorization/kernels/clustering_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/factorization/kernels/wals_solver_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/factorization/ops/clustering_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/factorization/ops/factorization_ops.cc"
+ #"${tensorflow_source_dir}/tensorflow/contrib/ffmpeg/decode_audio_op.cc"
+ #"${tensorflow_source_dir}/tensorflow/contrib/ffmpeg/encode_audio_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/layers/kernels/bucketization_kernel.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/layers/kernels/sparse_feature_cross_kernel.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/layers/ops/bucketization_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/layers/ops/sparse_feature_cross_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/metrics/kernels/set_kernels.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/metrics/ops/set_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/rnn/kernels/gru_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/rnn/kernels/lstm_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/rnn/ops/gru_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/rnn/ops/lstm_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/best_splits_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/count_extremely_random_stats_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/finished_nodes_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/grow_tree_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/sample_inputs_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/scatter_add_ndim_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/topn_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/tree_predictions_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/tree_utils.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/core/ops/update_fertile_slots_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/data/sparse_values_to_indices.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/data/string_to_float_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/hard_routing_function_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/k_feature_gradient_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/k_feature_routing_function_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/routing_function_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/routing_gradient_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/stochastic_hard_routing_function_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/stochastic_hard_routing_gradient_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/unpack_path_op.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/utils.cc"
+ )
+ list(APPEND tf_core_kernels_srcs ${tf_contrib_kernels_srcs})
+endif(tensorflow_BUILD_CONTRIB_KERNELS)
+
file(GLOB_RECURSE tf_core_kernels_exclude_srcs
"${tensorflow_source_dir}/tensorflow/core/kernels/*test*.h"
@@ -13,51 +76,28 @@ file(GLOB_RECURSE tf_core_kernels_exclude_srcs
"${tensorflow_source_dir}/tensorflow/core/kernels/*testutil.cc"
"${tensorflow_source_dir}/tensorflow/core/kernels/*main.cc"
"${tensorflow_source_dir}/tensorflow/core/kernels/*.cu.cc"
- "${tensorflow_source_dir}/tensorflow/core/kernels/debug_ops.h"
- "${tensorflow_source_dir}/tensorflow/core/kernels/debug_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/debug_ops.h" # stream_executor dependency
+ "${tensorflow_source_dir}/tensorflow/core/kernels/debug_ops.cc" # stream_executor dependency
)
+list(REMOVE_ITEM tf_core_kernels_srcs ${tf_core_kernels_exclude_srcs})
-list(REMOVE_ITEM tf_core_kernels_srcs ${tf_core_kernels_exclude_srcs})
+if(WIN32)
+ file(GLOB_RECURSE tf_core_kernels_windows_exclude_srcs
+ # Not currently working on Windows:
+ "${tensorflow_source_dir}/tensorflow/core/kernels/depthwise_conv_op.cc" # Cannot find symbol: tensorflow::LaunchConv2DOp<struct Eigen::ThreadPoolDevice, double>::launch(...).
+ "${tensorflow_source_dir}/tensorflow/core/kernels/fact_op.cc"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/immutable_constant_op.cc"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/immutable_constant_op.h"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/sparse_matmul_op.cc"
+ "${tensorflow_source_dir}/tensorflow/core/kernels/sparse_matmul_op.h"
+ )
+ list(REMOVE_ITEM tf_core_kernels_srcs ${tf_core_kernels_windows_exclude_srcs})
+endif(WIN32)
add_library(tf_core_kernels OBJECT ${tf_core_kernels_srcs})
-add_dependencies(tf_core_kernels
- tf_core_cpu
- farmhash
- highwayhash
- farmhash_copy_headers_to_destination
- highwayhash_copy_headers_to_destination
-)
-
-target_include_directories(tf_core_kernels PRIVATE
- ${tensorflow_source_dir}
- ${png_INCLUDE_DIR}
- ${eigen_INCLUDE_DIRS}
- ${farmhash_INCLUDE_DIR}
- ${highwayhash_INCLUDE_DIR}
-)
+if(WIN32)
+ target_compile_options(tf_core_kernels PRIVATE /MP)
+endif()
-#target_link_libraries(tf_core_kernels
-# ${CMAKE_THREAD_LIBS_INIT}
-# ${PROTOBUF_LIBRARIES}
-# tf_core_cpu
-# tf_core_framework
-# tf_core_lib
-# tf_protos_cc
-# tf_models_word2vec_kernels
-# tf_stream_executor
-# tf_core_ops
-# tf_core_cpu
-#)
-
-# "@gemmlowp//:eight_bit_int_gemm",
-
-target_compile_options(tf_core_kernels PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_core_kernels PRIVATE
- cxx_rvalue_references
-)
+add_dependencies(tf_core_kernels tf_core_cpu)
diff --git a/tensorflow/contrib/cmake/tf_core_ops.cmake b/tensorflow/contrib/cmake/tf_core_ops.cmake
index 2aa03facaf..ac7228ef68 100644
--- a/tensorflow/contrib/cmake/tf_core_ops.cmake
+++ b/tensorflow/contrib/cmake/tf_core_ops.cmake
@@ -1,39 +1,25 @@
-#def tf_gen_op_libs(op_lib_names):
-# # Make library out of each op so it can also be used to generate wrappers
-# # for various languages.
-# for n in op_lib_names:
-# native.cc_library(name=n + "_op_lib"
-# copts=tf_copts(),
-# srcs=["ops/" + n + ".cc"],
-# deps=(["//tensorflow/core:framework"]),
-# visibility=["//visibility:public"],
-# alwayslink=1,
-# linkstatic=1,)
-
-
set(tf_op_lib_names
"array_ops"
- "attention_ops"
"candidate_sampling_ops"
"control_flow_ops"
"ctc_ops"
"data_flow_ops"
+ "functional_ops"
"image_ops"
"io_ops"
"linalg_ops"
"logging_ops"
- "functional_ops"
"math_ops"
"nn_ops"
"no_op"
"parsing_ops"
"random_ops"
"script_ops"
+ "sdca_ops"
"sendrecv_ops"
"sparse_ops"
"state_ops"
"string_ops"
- "summary_ops"
"training_ops"
)
@@ -48,32 +34,8 @@ foreach(tf_op_lib_name ${tf_op_lib_names})
add_library(tf_${tf_op_lib_name} OBJECT ${tf_${tf_op_lib_name}_srcs})
add_dependencies(tf_${tf_op_lib_name} tf_core_framework)
-
- target_include_directories(tf_${tf_op_lib_name} PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
- )
-
- target_compile_options(tf_${tf_op_lib_name} PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
- )
-
- # C++11
- target_compile_features(tf_${tf_op_lib_name} PRIVATE
- cxx_rvalue_references
- )
endforeach()
-#cc_library(
-# name = "user_ops_op_lib"
-# srcs = glob(["user_ops/**/*.cc"]),
-# copts = tf_copts(),
-# linkstatic = 1,
-# visibility = ["//visibility:public"],
-# deps = [":framework"],
-# alwayslink = 1,
-#)
########################################################
# tf_user_ops library
########################################################
@@ -85,50 +47,6 @@ add_library(tf_user_ops OBJECT ${tf_user_ops_srcs})
add_dependencies(tf_user_ops tf_core_framework)
-target_include_directories(tf_user_ops PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-target_compile_options(tf_user_ops PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_user_ops PRIVATE
- cxx_rvalue_references
-)
-
-
-#tf_cuda_library(
-# name = "ops"
-# srcs = glob(
-# [
-# "ops/**/*.h"
-# "ops/**/*.cc"
-# "user_ops/**/*.h"
-# "user_ops/**/*.cc"
-# ],
-# exclude = [
-# "**/*test*"
-# "**/*main.cc"
-# "user_ops/**/*.cu.cc"
-# ],
-# ),
-# copts = tf_copts(),
-# linkstatic = 1,
-# visibility = ["//visibility:public"],
-# deps = [
-# ":core"
-# ":lib"
-# ":protos_cc"
-# "//tensorflow/models/embedding:word2vec_ops"
-# "//third_party/eigen3"
-# ],
-# alwayslink = 1,
-#)
-
########################################################
# tf_core_ops library
########################################################
@@ -154,29 +72,3 @@ list(REMOVE_ITEM tf_core_ops_srcs ${tf_core_ops_exclude_srcs})
add_library(tf_core_ops OBJECT ${tf_core_ops_srcs})
add_dependencies(tf_core_ops tf_core_cpu)
-
-target_include_directories(tf_core_ops PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-#target_link_libraries(tf_core_ops
-# ${CMAKE_THREAD_LIBS_INIT}
-# ${PROTOBUF_LIBRARIES}
-# tf_protos_cc
-# tf_core_lib
-# tf_core_cpu
-# tf_models_word2vec_ops
-#)
-
-target_compile_options(tf_core_ops PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_core_ops PRIVATE
- cxx_rvalue_references
-)
-
-
diff --git a/tensorflow/contrib/cmake/tf_models.cmake b/tensorflow/contrib/cmake/tf_models.cmake
index 9ba7608551..10aedf03cd 100644
--- a/tensorflow/contrib/cmake/tf_models.cmake
+++ b/tensorflow/contrib/cmake/tf_models.cmake
@@ -1,15 +1,3 @@
-#cc_library(
-# name = "word2vec_ops",
-# srcs = [
-# "word2vec_ops.cc",
-# ],
-# visibility = ["//tensorflow:internal"],
-# deps = [
-# "//tensorflow/core:framework",
-# ],
-# alwayslink = 1,
-#)
-
########################################################
# tf_models_word2vec_ops library
########################################################
@@ -19,43 +7,8 @@ file(GLOB tf_models_word2vec_ops_srcs
add_library(tf_models_word2vec_ops OBJECT ${tf_models_word2vec_ops_srcs})
-target_include_directories(tf_models_word2vec_ops PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-add_dependencies(tf_models_word2vec_ops
- tf_core_framework
-)
-#target_link_libraries(tf_models_word2vec_ops
-# ${CMAKE_THREAD_LIBS_INIT}
-# ${PROTOBUF_LIBRARIES}
-# tf_core_framework
-# tf_core_lib
-# tf_protos_cc
-#)
-
-target_compile_options(tf_models_word2vec_ops PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_models_word2vec_ops PRIVATE
- cxx_rvalue_references
-)
+add_dependencies(tf_models_word2vec_ops tf_core_framework)
-#cc_library(
-# name = "word2vec_kernels",
-# srcs = [
-# "word2vec_kernels.cc",
-# ],
-# visibility = ["//tensorflow:internal"],
-# deps = [
-# "//tensorflow/core",
-# ],
-# alwayslink = 1,
-#)
########################################################
# tf_models_word2vec_kernels library
########################################################
@@ -65,30 +18,4 @@ file(GLOB tf_models_word2vec_kernels_srcs
add_library(tf_models_word2vec_kernels OBJECT ${tf_models_word2vec_kernels_srcs})
-target_include_directories(tf_models_word2vec_kernels PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-add_dependencies(tf_models_word2vec_kernels
- tf_core_cpu
-)
-
-#target_link_libraries(tf_models_word2vec_kernels
-# ${CMAKE_THREAD_LIBS_INIT}
-# ${PROTOBUF_LIBRARIES}
-# tf_core_framework
-# tf_core_lib
-# tf_protos_cc
-# tf_core_cpu
-#)
-
-target_compile_options(tf_models_word2vec_kernels PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_models_word2vec_kernels PRIVATE
- cxx_rvalue_references
-)
+add_dependencies(tf_models_word2vec_kernels tf_core_cpu)
diff --git a/tensorflow/contrib/cmake/tf_python.cmake b/tensorflow/contrib/cmake/tf_python.cmake
index b31ef233cf..321528c584 100644
--- a/tensorflow/contrib/cmake/tf_python.cmake
+++ b/tensorflow/contrib/cmake/tf_python.cmake
@@ -18,7 +18,7 @@ include(FindPythonInterp)
if(NOT PYTHON_INCLUDE_DIR)
set(PYTHON_NOT_FOUND false)
exec_program("${PYTHON_EXECUTABLE}"
- ARGS "-c 'import distutils.sysconfig; print distutils.sysconfig.get_python_inc()'"
+ ARGS "-c \"import distutils.sysconfig; print(distutils.sysconfig.get_python_inc())\""
OUTPUT_VARIABLE PYTHON_INCLUDE_DIR
RETURN_VALUE PYTHON_NOT_FOUND)
if(${PYTHON_NOT_FOUND})
@@ -32,7 +32,7 @@ FIND_PACKAGE(PythonLibs)
if(NOT NUMPY_INCLUDE_DIR)
set(NUMPY_NOT_FOUND false)
exec_program("${PYTHON_EXECUTABLE}"
- ARGS "-c 'import numpy; print numpy.get_include()'"
+ ARGS "-c \"import numpy; print(numpy.get_include())\""
OUTPUT_VARIABLE NUMPY_INCLUDE_DIR
RETURN_VALUE NUMPY_NOT_FOUND)
if(${NUMPY_NOT_FOUND})
@@ -50,7 +50,6 @@ find_package(ZLIB REQUIRED)
########################################################
# TODO(mrry): Configure this to build in a directory other than tf_python/
-# TODO(mrry): Assemble the Python files into a PIP package.
# tf_python_srcs contains all static .py files
file(GLOB_RECURSE tf_python_srcs RELATIVE ${tensorflow_source_dir}
@@ -172,21 +171,6 @@ add_library(tf_python_op_gen_main OBJECT ${tf_python_op_gen_main_srcs})
add_dependencies(tf_python_op_gen_main tf_core_framework)
-target_include_directories(tf_python_op_gen_main PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
-target_compile_options(tf_python_op_gen_main PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_python_op_gen_main PRIVATE
- cxx_rvalue_references
-)
-
# create directory for ops generated files
set(python_ops_target_dir ${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/ops)
@@ -216,37 +200,13 @@ function(GENERATE_PYTHON_OP_LIB tf_python_op_lib_name)
$<TARGET_OBJECTS:tf_${tf_python_op_lib_name}>
$<TARGET_OBJECTS:tf_core_lib>
$<TARGET_OBJECTS:tf_core_framework>
- ${GENERATE_PYTHON_OP_LIB_ADDITIONAL_LIBRARIES}
- )
- target_include_directories(${tf_python_op_lib_name}_gen_python PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
+ ${GENERATE_PYTHON_OP_LIB_ADDITIONAL_LIBRARIES}
)
target_link_libraries(${tf_python_op_lib_name}_gen_python PRIVATE
- ${CMAKE_THREAD_LIBS_INIT}
- ${PROTOBUF_LIBRARIES}
tf_protos_cc
- ${gif_STATIC_LIBRARIES}
- ${jpeg_STATIC_LIBRARIES}
- ${png_STATIC_LIBRARIES}
- ${ZLIB_LIBRARIES}
- ${jsoncpp_STATIC_LIBRARIES}
- ${CMAKE_DL_LIBS}
- )
- target_compile_options(${tf_python_op_lib_name}_gen_python PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
- -lm
- )
- # C++11
- target_compile_features(${tf_python_op_lib_name}_gen_python PRIVATE
- cxx_rvalue_references
+ ${tensorflow_EXTERNAL_LIBRARIES}
)
- if(tensorflow_ENABLE_SSL_SUPPORT)
- target_link_libraries(${tf_python_op_lib_name}_gen_python PRIVATE
- ${boringssl_STATIC_LIBRARIES})
- endif()
-
+
# Use the generated C++ executable to create a Python file
# containing the wrappers.
add_custom_command(
@@ -275,6 +235,7 @@ GENERATE_PYTHON_OP_LIB("nn_ops")
GENERATE_PYTHON_OP_LIB("parsing_ops")
GENERATE_PYTHON_OP_LIB("random_ops")
GENERATE_PYTHON_OP_LIB("script_ops")
+GENERATE_PYTHON_OP_LIB("sdca_ops")
GENERATE_PYTHON_OP_LIB("state_ops")
GENERATE_PYTHON_OP_LIB("sparse_ops")
GENERATE_PYTHON_OP_LIB("string_ops")
@@ -328,6 +289,8 @@ add_library(pywrap_tensorflow SHARED
"${tensorflow_source_dir}/tensorflow/python/lib/io/py_record_reader.cc"
"${tensorflow_source_dir}/tensorflow/python/lib/io/py_record_writer.h"
"${tensorflow_source_dir}/tensorflow/python/lib/io/py_record_writer.cc"
+ "${tensorflow_source_dir}/tensorflow/python/util/kernel_registry.h"
+ "${tensorflow_source_dir}/tensorflow/python/util/kernel_registry.cc"
"${tensorflow_source_dir}/tensorflow/c/c_api.cc"
"${tensorflow_source_dir}/tensorflow/c/c_api.h"
"${tensorflow_source_dir}/tensorflow/c/checkpoint_reader.cc"
@@ -340,38 +303,18 @@ add_library(pywrap_tensorflow SHARED
$<TARGET_OBJECTS:tf_core_framework>
$<TARGET_OBJECTS:tf_core_ops>
$<TARGET_OBJECTS:tf_core_direct_session>
- $<TARGET_OBJECTS:tf_core_distributed_runtime>
+ $<$<BOOL:${tensorflow_ENABLE_GRPC_SUPPORT}>:$<TARGET_OBJECTS:tf_core_distributed_runtime>>
$<TARGET_OBJECTS:tf_core_kernels>
)
-target_link_libraries(pywrap_tensorflow
- ${CMAKE_THREAD_LIBS_INIT}
- tf_protos_cc
- ${GRPC_LIBRARIES}
- ${PROTOBUF_LIBRARY}
- ${farmhash_STATIC_LIBRARIES}
- ${gif_STATIC_LIBRARIES}
- ${jpeg_STATIC_LIBRARIES}
- ${jsoncpp_STATIC_LIBRARIES}
- ${png_STATIC_LIBRARIES}
- ${ZLIB_LIBRARIES}
- ${CMAKE_DL_LIBS}
-)
target_include_directories(pywrap_tensorflow PUBLIC
- ${tensorflow_source_dir}
- ${CMAKE_CURRENT_BINARY_DIR}
- ${eigen_INCLUDE_DIRS}
${PYTHON_INCLUDE_DIR}
${NUMPY_INCLUDE_DIR}
)
-# C++11
-target_compile_features(pywrap_tensorflow PRIVATE
- cxx_rvalue_references
+target_link_libraries(pywrap_tensorflow
+ ${tensorflow_EXTERNAL_LIBRARIES}
+ tf_protos_cc
+ ${PYTHON_LIBRARIES}
)
-if(tensorflow_ENABLE_SSL_SUPPORT)
- target_link_libraries(pywrap_tensorflow ${boringssl_STATIC_LIBRARIES})
-endif()
-
-
############################################################
# Build a PIP package containing the TensorFlow runtime.
@@ -385,9 +328,15 @@ add_dependencies(tf_python_build_pip_package
add_custom_command(TARGET tf_python_build_pip_package POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${tensorflow_source_dir}/tensorflow/contrib/cmake/setup.py
${CMAKE_CURRENT_BINARY_DIR}/tf_python/)
-add_custom_command(TARGET tf_python_build_pip_package POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/libpywrap_tensorflow.so
- ${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/_pywrap_tensorflow.so)
+if(WIN32)
+ add_custom_command(TARGET tf_python_build_pip_package POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}/pywrap_tensorflow.dll
+ ${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/_pywrap_tensorflow.pyd)
+else()
+ add_custom_command(TARGET tf_python_build_pip_package POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/libpywrap_tensorflow.so
+ ${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/_pywrap_tensorflow.so)
+endif()
add_custom_command(TARGET tf_python_build_pip_package POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${tensorflow_source_dir}/tensorflow/tools/pip_package/README
${CMAKE_CURRENT_BINARY_DIR}/tf_python/)
diff --git a/tensorflow/contrib/cmake/tf_stream_executor.cmake b/tensorflow/contrib/cmake/tf_stream_executor.cmake
index e1aa0cd7b5..b121ddf491 100644
--- a/tensorflow/contrib/cmake/tf_stream_executor.cmake
+++ b/tensorflow/contrib/cmake/tf_stream_executor.cmake
@@ -56,10 +56,6 @@ file(GLOB tf_stream_executor_srcs
add_library(tf_stream_executor OBJECT ${tf_stream_executor_srcs})
-target_include_directories(tf_stream_executor PRIVATE
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
add_dependencies(tf_stream_executor
tf_core_lib
)
@@ -69,14 +65,3 @@ add_dependencies(tf_stream_executor
# tf_protos_cc
# tf_core_lib
#)
-
-target_compile_options(tf_stream_executor PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_stream_executor PRIVATE
- cxx_rvalue_references
-)
-
diff --git a/tensorflow/contrib/cmake/tf_tools.cmake b/tensorflow/contrib/cmake/tf_tools.cmake
index 87c53502f8..91776fd5c8 100644
--- a/tensorflow/contrib/cmake/tf_tools.cmake
+++ b/tensorflow/contrib/cmake/tf_tools.cmake
@@ -13,37 +13,9 @@ add_executable(${proto_text}
$<TARGET_OBJECTS:tf_core_lib>
)
-target_include_directories(${proto_text} PUBLIC
- ${tensorflow_source_dir}
-)
-
-# TODO(mrry): Cut down the dependencies of this tool.
-target_link_libraries(${proto_text} PUBLIC
- ${CMAKE_THREAD_LIBS_INIT}
- ${PROTOBUF_LIBRARIES}
- ${gif_STATIC_LIBRARIES}
- ${jpeg_STATIC_LIBRARIES}
- ${png_STATIC_LIBRARIES}
- ${ZLIB_LIBRARIES}
- ${jsoncpp_STATIC_LIBRARIES}
- ${CMAKE_DL_LIBS}
- )
-if(tensorflow_ENABLE_SSL_SUPPORT)
- target_link_libraries(${proto_text} PUBLIC ${boringssl_STATIC_LIBRARIES})
-endif()
-
+target_link_libraries(${proto_text} PUBLIC ${tensorflow_EXTERNAL_LIBRARIES})
add_dependencies(${proto_text}
tf_core_lib
- protobuf
-)
-
-target_compile_options(${proto_text} PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(${proto_text} PRIVATE
- cxx_rvalue_references
+ grpc
)
diff --git a/tensorflow/contrib/cmake/tf_tutorials.cmake b/tensorflow/contrib/cmake/tf_tutorials.cmake
index ec45ac4ff8..8a23d02905 100644
--- a/tensorflow/contrib/cmake/tf_tutorials.cmake
+++ b/tensorflow/contrib/cmake/tf_tutorials.cmake
@@ -1,18 +1,3 @@
-#cc_binary(
-# name = "tutorials_example_trainer",
-# srcs = ["tutorials/example_trainer.cc"],
-# copts = tf_copts(),
-# linkopts = [
-# "-lpthread",
-# "-lm",
-# ],
-# deps = [
-# ":cc_ops",
-# "//tensorflow/core:kernels",
-# "//tensorflow/core:tensorflow",
-# ],
-#)
-
set(tf_tutorials_example_trainer_srcs
"${tensorflow_source_dir}/tensorflow/cc/tutorials/example_trainer.cc"
)
@@ -29,31 +14,7 @@ add_executable(tf_tutorials_example_trainer
$<TARGET_OBJECTS:tf_core_direct_session>
)
-target_include_directories(tf_tutorials_example_trainer PUBLIC
- ${tensorflow_source_dir}
- ${eigen_INCLUDE_DIRS}
-)
-
target_link_libraries(tf_tutorials_example_trainer PUBLIC
- ${CMAKE_THREAD_LIBS_INIT}
- ${PROTOBUF_STATIC_LIBRARIES}
tf_protos_cc
- ${boringssl_STATIC_LIBRARIES}
- ${farmhash_STATIC_LIBRARIES}
- ${gif_STATIC_LIBRARIES}
- ${jpeg_STATIC_LIBRARIES}
- ${jsoncpp_STATIC_LIBRARIES}
- ${png_STATIC_LIBRARIES}
- ${ZLIB_LIBRARIES}
- ${CMAKE_DL_LIBS}
-)
-
-target_compile_options(tf_tutorials_example_trainer PRIVATE
- -fno-exceptions
- -DEIGEN_AVOID_STL_ARRAY
-)
-
-# C++11
-target_compile_features(tf_tutorials_example_trainer PRIVATE
- cxx_rvalue_references
+ ${tensorflow_EXTERNAL_LIBRARIES}
)
diff --git a/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py b/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
index af12bcec36..7151da8c5c 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
@@ -1409,7 +1409,7 @@ class WeightedSumTest(tf.test.TestCase):
self.assertAllClose(output.eval(), [[1.6]])
def testMultivalentCrossUsageInPredictionsWithPartition(self):
- # bucket size has to be big enough to allwo sharding.
+ # bucket size has to be big enough to allow sharding.
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=64 << 19)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index c4c9f8d825..29e1ec7c40 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -143,7 +143,7 @@ def batch_norm(inputs,
updates = tf.group(*update_ops)
total_loss = control_flow_ops.with_dependencies([updates], total_loss)
- One can set update_collections=None to force the updates in place, but that
+ One can set updates_collections=None to force the updates in place, but that
can have speed penalty, specially in distributed settings.
Args:
diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator.py b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
index ac085c2e8a..99afefe084 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/estimator.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
@@ -491,7 +491,7 @@ class BaseEstimator(
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+      key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
diff --git a/tensorflow/contrib/learn/python/learn/estimators/linear_test.py b/tensorflow/contrib/learn/python/learn/estimators/linear_test.py
index 42e281a6ba..56eee0a6d0 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/linear_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/linear_test.py
@@ -665,7 +665,7 @@ class LinearClassifierTest(tf.test.TestCase):
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
- # Evaluate on trained mdoel
+ # Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
diff --git a/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py b/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
index d0e9b61f42..7d71aaf739 100644
--- a/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
+++ b/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
@@ -507,6 +507,8 @@ class StreamingDataFeeder(DataFeeder):
inp[i, :] = six.next(self._x)
except StopIteration:
self.stopped = True
+ if i == 0:
+ raise
inp = inp[:i, :]
if self._y is not None:
out = out[:i]
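
Editorial note: the hunk above makes the feeder re-raise `StopIteration` when the underlying iterator is exhausted before a single row was read, instead of silently returning an empty batch. A minimal numpy sketch of that truncate-or-reraise pattern (the helper name and shapes are illustrative, not the class's API):

```python
import numpy as np

def fill_batch(iterator, batch_size, n_features):
    # Fill up to batch_size rows; truncate on partial data, re-raise when
    # the iterator was already exhausted (mirrors the `if i == 0: raise`).
    batch = np.zeros((batch_size, n_features), dtype=np.float32)
    for i in range(batch_size):
        try:
            batch[i, :] = next(iterator)
        except StopIteration:
            if i == 0:
                raise                 # nothing read at all: signal end of data
            return batch[:i, :]       # partial batch: keep the rows read so far
    return batch

rows = iter([np.ones(3), 2 * np.ones(3)])
print(fill_batch(rows, batch_size=4, n_features=3).shape)  # (2, 3)
```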
diff --git a/tensorflow/contrib/learn/python/learn/tests/base_test.py b/tensorflow/contrib/learn/python/learn/tests/base_test.py
index 115f86aa7e..7d6e193e7c 100644
--- a/tensorflow/contrib/learn/python/learn/tests/base_test.py
+++ b/tensorflow/contrib/learn/python/learn/tests/base_test.py
@@ -84,7 +84,7 @@ class BaseTest(tf.test.TestCase):
classifier.fit(iris.data, iris.target, max_steps=100)
score = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
- # TODO(ipolosukhin): Check that summaries are correclty written.
+ # TODO(ipolosukhin): Check that summaries are correctly written.
def testIrisContinueTraining(self):
iris = datasets.load_iris()
diff --git a/tensorflow/contrib/learn/python/learn/tests/stability_test.py b/tensorflow/contrib/learn/python/learn/tests/stability_test.py
index 84ae45bb5d..c78fdb7043 100644
--- a/tensorflow/contrib/learn/python/learn/tests/stability_test.py
+++ b/tensorflow/contrib/learn/python/learn/tests/stability_test.py
@@ -30,7 +30,7 @@ def _get_input_fn(x, y, batch_size=None):
# We use a null optimizer since we can't get deterministic results out of
-# supervisor's mulitple threads.
+# supervisor's multiple threads.
class _NullOptimizer(tf.train.Optimizer):
def __init__(self):
diff --git a/tensorflow/contrib/makefile/Makefile b/tensorflow/contrib/makefile/Makefile
index 361aca5b95..59eabe2bbb 100644
--- a/tensorflow/contrib/makefile/Makefile
+++ b/tensorflow/contrib/makefile/Makefile
@@ -454,6 +454,7 @@ $(wildcard tensorflow/core/platform/google/*/*) \
$(wildcard tensorflow/core/platform/jpeg.*) \
$(wildcard tensorflow/core/platform/png.*) \
$(wildcard tensorflow/core/platform/stream_executor.*) \
+$(wildcard tensorflow/core/platform/windows/*) \
$(wildcard tensorflow/core/user_ops/*.cu.cc) \
$(wildcard tensorflow/core/common_runtime/gpu/*) \
$(wildcard tensorflow/core/common_runtime/gpu_device_factory.*)
diff --git a/tensorflow/contrib/makefile/download_dependencies.sh b/tensorflow/contrib/makefile/download_dependencies.sh
index e6622a26e9..c0e3e38a15 100755
--- a/tensorflow/contrib/makefile/download_dependencies.sh
+++ b/tensorflow/contrib/makefile/download_dependencies.sh
@@ -48,7 +48,7 @@ download_and_extract() {
local dir="${2:?${usage}}"
echo "downloading ${url}" >&2
mkdir -p "${dir}"
- tar -C "${dir}" --strip-components=1 -xz < <(curl -Ls "${url}")
+ curl -Ls "${url}" | tar -C "${dir}" --strip-components=1 -xz
}
download_and_extract "${EIGEN_URL}" "${DOWNLOADS_DIR}/eigen"
diff --git a/tensorflow/contrib/quantization/tools/graph_to_dot.py b/tensorflow/contrib/quantization/tools/graph_to_dot.py
index c1ee4ea9d3..c53f5e7afa 100644
--- a/tensorflow/contrib/quantization/tools/graph_to_dot.py
+++ b/tensorflow/contrib/quantization/tools/graph_to_dot.py
@@ -46,7 +46,7 @@ def main(unused_args):
return -1
graph = graph_pb2.GraphDef()
- with open(FLAGS.graph, "rb") as f:
+ with open(FLAGS.graph, "r") as f:
if FLAGS.input_binary:
graph.ParseFromString(f.read())
else:
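
As an aside on the open-mode choice in this hunk: `ParseFromString` consumes raw serialized bytes, while `text_format.Merge` consumes plain text, so the mode is usually chosen from the `input_binary` flag. A small illustrative sketch of that selection (a hypothetical helper, not taken from the patch):

```python
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2

def load_graph_def(path, input_binary):
    # Binary GraphDefs are typically read in "rb" mode (text mode can
    # translate newlines on some platforms); text-format graphs use "r".
    graph = graph_pb2.GraphDef()
    mode = "rb" if input_binary else "r"
    with open(path, mode) as f:
        data = f.read()
    if input_binary:
        graph.ParseFromString(data)
    else:
        text_format.Merge(data, graph)
    return graph
```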
diff --git a/tensorflow/contrib/quantization/tools/quantize_graph.py b/tensorflow/contrib/quantization/tools/quantize_graph.py
index d9982a5cb1..3bc71cd29c 100644
--- a/tensorflow/contrib/quantization/tools/quantize_graph.py
+++ b/tensorflow/contrib/quantization/tools/quantize_graph.py
@@ -213,7 +213,7 @@ def quantize_weight_rounded(input_node):
# Currently, the parameter FLAGS.bitdepth is used to compute the
# number of buckets as 1 << FLAGS.bitdepth, meaning the number of
# buckets can only be a power of 2.
- # This could be fixed by intorducing a new parameter, num_buckets,
+ # This could be fixed by introducing a new parameter, num_buckets,
# which would allow for more flexibility in chosing the right model
# size/accuracy tradeoff. But I didn't want to add more parameters
# to this script than absolutely necessary.
diff --git a/tensorflow/contrib/rnn/python/kernel_tests/rnn_test.py b/tensorflow/contrib/rnn/python/kernel_tests/rnn_test.py
index 293388cedf..d3743e83c9 100644
--- a/tensorflow/contrib/rnn/python/kernel_tests/rnn_test.py
+++ b/tensorflow/contrib/rnn/python/kernel_tests/rnn_test.py
@@ -136,46 +136,54 @@ class StackBidirectionalRNNTest(tf.test.TestCase):
# - Reset states, and iterate for 5 steps. Last state is state_5.
# - Reset the sets to state_3 and iterate for 2 more steps,
# last state will be state_5'.
- # - Check that state_5 and state_5' are the same.
- # (Check forward and backward).
- # - Check output_5 and output_5' as well.
+ # - Check that the state_5 and state_5' (forward and backward) are the
+ # same for the first layer (it does not apply for the second layer since
+ # it has forward-backward dependencies).
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
+ batch_size = 2
+ # Create states placeholders.
+ initial_states_fw = [tf.placeholder(tf.float32, shape=(batch_size, layer*2))
+ for layer in self.layers]
+ initial_states_bw = [tf.placeholder(tf.float32, shape=(batch_size, layer*2))
+ for layer in self.layers]
# Create the net
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
- self._createStackBidirectionalRNN(use_gpu, True, True))
+ self._createStackBidirectionalRNN(use_gpu, True, True,
+ initial_states_fw, initial_states_bw))
tf.initialize_all_variables().run()
+
# Run 3 steps.
+ feed_dict = {inputs[0]: input_value, sequence_length: [3, 2]}
+ # Initialize to empty state.
+ for i, layer in enumerate(self.layers):
+ feed_dict[initial_states_fw[i]] = np.zeros((batch_size, layer*2),
+ dtype=np.float32)
+ feed_dict[initial_states_bw[i]] = np.zeros((batch_size, layer*2),
+ dtype=np.float32)
_, st_3_fw, st_3_bw = sess.run([outputs, state_fw, state_bw],
- feed_dict={inputs[0]: input_value,
- sequence_length: [3, 3]})
+ feed_dict=feed_dict)
+
# Reset the net and run 5 steps.
- batch_size = 2
- zero_state = [cell.zero_state(
- batch_size, dtype=tf.float32).eval() for cell in self.cells_fw]
- feed_dict = {inputs[0]: input_value, sequence_length: [5, 5]}
- for i, _ in enumerate(self.layers):
- feed_dict[state_fw[i]] = zero_state[i]
- feed_dict[state_bw[i]] = zero_state[i]
- out_5, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
+ feed_dict = {inputs[0]: input_value, sequence_length: [5, 3]}
+ for i, layer in enumerate(self.layers):
+ feed_dict[initial_states_fw[i]] = np.zeros((batch_size, layer*2),
+ dtype=np.float32)
+ feed_dict[initial_states_bw[i]] = np.zeros((batch_size, layer*2),
+ dtype=np.float32)
+ _, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net to state_3 and run 2 more steps.
- feed_dict = {inputs[0]: input_value, sequence_length: [2, 2]}
+ feed_dict = {inputs[0]: input_value, sequence_length: [2, 1]}
for i, _ in enumerate(self.layers):
- feed_dict[state_fw[i]] = st_3_fw[i]
- feed_dict[state_bw[i]] = st_3_bw[i]
-
- out_5, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
- feed_dict=feed_dict)
+ feed_dict[initial_states_fw[i]] = st_3_fw[i]
+ feed_dict[initial_states_bw[i]] = st_3_bw[i]
out_5p, st_5p_fw, st_5p_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
- # Check that the 3+2 and 5 outputs are the same.
- self.assertAllEqual(out_5p[-1][0], out_5[-1][0])
- # Check that the 3+2 and 5 last states are the same.
- for i, _ in enumerate(self.layers):
- self.assertAllEqual(st_5_fw[i], st_5p_fw[i])
- self.assertAllEqual(st_5_bw[i], st_5p_bw[i])
+ # Check that the 3+2 and 5 step first layer states are the same.
+ self.assertAllEqual(st_5_fw[0], st_5p_fw[0])
+ self.assertAllEqual(st_5_bw[0], st_5p_bw[0])
def testStackBidirectionalRNN(self):
self._testStackBidirectionalRNN(use_gpu=False, use_shape=False)
@@ -288,54 +296,65 @@ class StackBidirectionalRNNTest(tf.test.TestCase):
self.assertNotEqual(out[2][1][1], out[0][1][4])
self.assertNotEqual(out[2][1][2], out[0][1][5])
- def _testStackBidirectionalDynamicRNNStates(self, use_gpu,
- use_state_tuple):
+ def _testStackBidirectionalDynamicRNNStates(self, use_gpu):
+
# Check that the states are correctly initialized.
# - Create a net and iterate for 3 states. Keep the state (state_3).
# - Reset states, and iterate for 5 steps. Last state is state_5.
# - Reset the sets to state_3 and iterate for 2 more steps,
# last state will be state_5'.
- # - Check that state_5 and state_5' are the same.
- # (Check forward and backward).
- # - Check output_5 and output_5' as well.
+ # - Check that the state_5 and state_5' (forward and backward) are the
+ # same for the first layer (it does not apply for the second layer since
+ # it has forward-backward dependencies).
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
+ batch_size = 2
+ # Create states placeholders.
+ initial_states_fw = [tf.placeholder(tf.float32, shape=(batch_size, layer*2))
+ for layer in self.layers]
+ initial_states_bw = [tf.placeholder(tf.float32, shape=(batch_size, layer*2))
+ for layer in self.layers]
# Create the net
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
- self._createStackBidirectionalDynamicRNN(use_gpu, False,
- use_state_tuple))
+ self._createStackBidirectionalDynamicRNN(
+ use_gpu,
+ use_shape=True,
+ use_state_tuple=False,
+ initial_states_fw=initial_states_fw,
+ initial_states_bw=initial_states_bw))
tf.initialize_all_variables().run()
+
# Run 3 steps.
+ feed_dict = {inputs[0]: input_value, sequence_length: [3, 2]}
+ # Initialize to empty state.
+ for i, layer in enumerate(self.layers):
+ feed_dict[initial_states_fw[i]] = np.zeros((batch_size, layer*2),
+ dtype=np.float32)
+ feed_dict[initial_states_bw[i]] = np.zeros((batch_size, layer*2),
+ dtype=np.float32)
_, st_3_fw, st_3_bw = sess.run([outputs, state_fw, state_bw],
- feed_dict={inputs[0]: input_value,
- sequence_length: [3, 3]})
+ feed_dict=feed_dict)
+
# Reset the net and run 5 steps.
- batch_size = 2
- zero_state = [cell.zero_state(
- batch_size, dtype=tf.float32).eval() for cell in self.cells_fw]
- feed_dict = {inputs[0]: input_value, sequence_length: [5, 5]}
- for i, _ in enumerate(self.layers):
- feed_dict[state_fw[i]] = zero_state[i]
- feed_dict[state_bw[i]] = zero_state[i]
- out_5, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
+ feed_dict = {inputs[0]: input_value, sequence_length: [5, 3]}
+ for i, layer in enumerate(self.layers):
+ feed_dict[initial_states_fw[i]] = np.zeros((batch_size, layer*2),
+ dtype=np.float32)
+ feed_dict[initial_states_bw[i]] = np.zeros((batch_size, layer*2),
+ dtype=np.float32)
+ _, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net to state_3 and run 2 more steps.
- feed_dict = {inputs[0]: input_value, sequence_length: [2, 2]}
+ feed_dict = {inputs[0]: input_value, sequence_length: [2, 1]}
for i, _ in enumerate(self.layers):
- feed_dict[state_fw[i]] = st_3_fw[i]
- feed_dict[state_bw[i]] = st_3_bw[i]
-
- out_5, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
- feed_dict=feed_dict)
+ feed_dict[initial_states_fw[i]] = st_3_fw[i]
+ feed_dict[initial_states_bw[i]] = st_3_bw[i]
out_5p, st_5p_fw, st_5p_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
- # Check that the 3+2 and 5 outputs are the same.
- self.assertAllEqual(out_5p[-1][0], out_5[-1][0])
- # Check that the 3+2 and 5 last states are the same.
- for i, _ in enumerate(self.layers):
- self.assertAllEqual(st_5_fw[i], st_5p_fw[i])
- self.assertAllEqual(st_5_bw[i], st_5p_bw[i])
+ # Check that the 3+2 and 5 step first layer states are the same.
+ self.assertAllEqual(st_5_fw[0], st_5p_fw[0])
+ self.assertAllEqual(st_5_bw[0], st_5p_bw[0])
def testBidirectionalRNN(self):
# Generate 2^3 option values
@@ -346,13 +365,9 @@ class StackBidirectionalRNNTest(tf.test.TestCase):
use_gpu=option[0], use_shape=option[1], use_state_tuple=option[2])
# Check States.
self._testStackBidirectionalDynamicRNNStates(
- use_gpu=False, use_state_tuple=False)
- self._testStackBidirectionalDynamicRNNStates(
- use_gpu=True, use_state_tuple=False)
- self._testStackBidirectionalDynamicRNNStates(
- use_gpu=False, use_state_tuple=True)
+ use_gpu=False)
self._testStackBidirectionalDynamicRNNStates(
- use_gpu=True, use_state_tuple=False)
+ use_gpu=True)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
# REMARKS: factory(scope) is a function accepting a scope
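
Editorial note: the rewritten state tests above feed per-layer initial-state placeholders — zeros for a fresh run, then a previously fetched state to resume — and only the first layer's final states are expected to match, since higher layers also depend on the other direction's pass over the full sequence. A toy numpy analogy of the resume check (not the test's API):

```python
import numpy as np

def step(state, x):
    return np.tanh(0.5 * state + x)      # stand-in for one RNN cell step

def run(inputs, initial_state):
    state = initial_state
    for x in inputs:
        state = step(state, x)
    return state

inputs = np.linspace(0.1, 0.5, 5)        # 5 time steps
zero = np.zeros(())

state_5 = run(inputs, zero)              # 5 steps from scratch
state_3 = run(inputs[:3], zero)          # 3 steps, keep the state...
state_5p = run(inputs[3:], state_3)      # ...then resume for 2 more steps

np.testing.assert_allclose(state_5, state_5p)  # forward-only states agree
```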
diff --git a/tensorflow/contrib/session_bundle/README.md b/tensorflow/contrib/session_bundle/README.md
index 48897557bc..64328fe596 100644
--- a/tensorflow/contrib/session_bundle/README.md
+++ b/tensorflow/contrib/session_bundle/README.md
@@ -4,8 +4,8 @@
## Overview
-This document describes the data formats and layouts for exporting [TensorFlow]
-(https://www.tensorflow.org/) models for inference.
+This document describes the data formats and layouts for exporting
+[TensorFlow](https://www.tensorflow.org/) models for inference.
These exports have the following properties:
@@ -50,8 +50,8 @@ binary.
### Exporting TF.learn models
-TF.learn uses an [Exporter wrapper]
-(https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/utils/export.py)
+TF.learn uses an
+[Exporter wrapper](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/utils/export.py)
that can be used for building signatures. Use the `BaseEstimator.export`
function to export your Estimator with a signature.
diff --git a/tensorflow/contrib/tfprof/README.md b/tensorflow/contrib/tfprof/README.md
index 27a08c514c..013be48676 100644
--- a/tensorflow/contrib/tfprof/README.md
+++ b/tensorflow/contrib/tfprof/README.md
@@ -362,7 +362,7 @@ statistics for those ops without accidently missing or including extra ops.
tfprof exposes the following Python API to add op information and logging.
```python
- def write_op_log(graph, log_dir, op_log=None)
+tf.contrib.tfprof.tfprof_logger.write_op_log(graph, log_dir, op_log=None)
```
<b>--checkpoint_path:</b>
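
A brief usage sketch for the fully qualified call documented above; the toy graph and log directory are illustrative only:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.Variable(tf.random_normal([100, 10]), name="x")
    y = tf.matmul(x, tf.random_normal([10, 1]), name="y")

# Records an OpLog for the graph so later tfprof runs can attribute
# parameter/flops statistics to these ops.
tf.contrib.tfprof.tfprof_logger.write_op_log(g, log_dir="/tmp/tfprof_log")
```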
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index 5d054c6b82..635f14d45d 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -679,6 +679,7 @@ filegroup(
"platform/png.*",
"platform/gif.*",
"platform/stream_executor.*",
+ "platform/windows/**/*",
"user_ops/**/*.cu.cc",
"common_runtime/gpu/**/*",
"common_runtime/gpu_device_factory.*",
diff --git a/tensorflow/core/framework/numeric_types.h b/tensorflow/core/framework/numeric_types.h
index 6b759c1010..31b88707e2 100644
--- a/tensorflow/core/framework/numeric_types.h
+++ b/tensorflow/core/framework/numeric_types.h
@@ -62,4 +62,15 @@ EIGEN_STRONG_INLINE bool operator==(const tensorflow::bfloat16 a,
} // namespace Eigen
+#ifdef COMPILER_MSVC
+namespace std {
+template <>
+struct hash<Eigen::half> {
+ std::size_t operator()(const Eigen::half& a) const {
+ return static_cast<std::size_t>(a.x);
+ }
+};
+} // namespace std
+#endif // COMPILER_MSVC
+
#endif // TENSORFLOW_FRAMEWORK_NUMERIC_TYPES_H_
diff --git a/tensorflow/core/kernels/conv_ops_fused.cc b/tensorflow/core/kernels/conv_ops_fused.cc
index 697ee5d25a..edf05ff8d6 100644
--- a/tensorflow/core/kernels/conv_ops_fused.cc
+++ b/tensorflow/core/kernels/conv_ops_fused.cc
@@ -43,8 +43,8 @@ namespace {
// going to be extremely large, so break it into chunks if it's bigger than
// a limit. Each chunk will be processed serially, so we can refill the
// buffer for the next chunk and reuse it, keeping maximum memory size down.
-// In this case, we've picked 16 megabytes as a reasonable limit.
-const size_t kMaxChunkSize = (16 * 1024 * 1024);
+// In this case, we've picked 1 megabyte as a reasonable limit.
+const size_t kMaxChunkSize = (1 * 1024 * 1024);
// Lookup method used when resizing.
enum SamplingMode {
diff --git a/tensorflow/core/kernels/conv_ops_using_gemm.cc b/tensorflow/core/kernels/conv_ops_using_gemm.cc
index 2b00b6dbeb..92aa10d1d6 100644
--- a/tensorflow/core/kernels/conv_ops_using_gemm.cc
+++ b/tensorflow/core/kernels/conv_ops_using_gemm.cc
@@ -256,8 +256,8 @@ class Im2ColConvFunctor {
// going to be extremely large, so break it into chunks if it's bigger than
// a limit. Each chunk will be processed serially, so we can refill the
// buffer for the next chunk and reuse it, keeping maximum memory size down.
- // In this case, we've picked 16 megabytes as a reasonable limit.
- const size_t max_chunk_size = (16 * 1024 * 1024);
+ // In this case, we've picked 1 megabyte as a reasonable limit.
+ const size_t max_chunk_size = (1 * 1024 * 1024);
OP_REQUIRES(context, (filter_value_count * sizeof(T1)) <= max_chunk_size,
errors::InvalidArgument("Im2Col patch too large for buffer"));
const size_t patches_per_chunk =
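
Rough arithmetic for what the lowered 1 MB chunk limit means in practice; the filter size below is an arbitrary example, not from the source:

```python
filter_value_count = 3 * 3 * 256              # values in one im2col patch
bytes_per_patch = filter_value_count * 4      # sizeof(float)
max_chunk_size = 1 * 1024 * 1024              # the new limit
patches_per_chunk = max_chunk_size // bytes_per_patch
print(bytes_per_patch, patches_per_chunk)     # 9216 bytes/patch, 113 patches/chunk
```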
diff --git a/tensorflow/core/lib/gtl/inlined_vector.h b/tensorflow/core/lib/gtl/inlined_vector.h
index 640bbf6597..fc439f9eb6 100644
--- a/tensorflow/core/lib/gtl/inlined_vector.h
+++ b/tensorflow/core/lib/gtl/inlined_vector.h
@@ -31,6 +31,7 @@ limitations under the License.
#ifndef TENSORFLOW_LIB_GTL_INLINED_VECTOR_H_
#define TENSORFLOW_LIB_GTL_INLINED_VECTOR_H_
+#include <cstddef>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
@@ -60,7 +61,7 @@ class InlinedVector {
typedef T& reference;
typedef const T& const_reference;
typedef size_t size_type;
- typedef ssize_t difference_type;
+ typedef std::ptrdiff_t difference_type;
typedef pointer iterator;
typedef const_pointer const_iterator;
diff --git a/tensorflow/core/lib/random/random_distributions.h b/tensorflow/core/lib/random/random_distributions.h
index dc29dc1eaa..03b155344c 100644
--- a/tensorflow/core/lib/random/random_distributions.h
+++ b/tensorflow/core/lib/random/random_distributions.h
@@ -18,9 +18,9 @@ limitations under the License.
#define _USE_MATH_DEFINES
#include <cmath>
+#include <math.h>
#undef _USE_MATH_DEFINES
-#include <math.h>
#include <string.h>
#include <algorithm>
diff --git a/tensorflow/core/ops/nn_ops.cc b/tensorflow/core/ops/nn_ops.cc
index e06e14966b..81b9036982 100644
--- a/tensorflow/core/ops/nn_ops.cc
+++ b/tensorflow/core/ops/nn_ops.cc
@@ -1071,8 +1071,7 @@ each component is divided by the weighted, squared sum of inputs within
output = input / (bias + alpha * sqr_sum) ** beta
For details, see [Krizhevsky et al., ImageNet classification with deep
-convolutional neural networks (NIPS 2012)]
-(http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
+convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
input: 4-D.
depth_radius: 0-D. Half-width of the 1-D normalization window.
@@ -1825,8 +1824,7 @@ Then, row_pooling_sequence should satisfy:
4. length(row_pooling_sequence) = output_row_length+1
For more details on fractional max pooling, see this paper:
-[Benjamin Graham, Fractional Max-Pooling]
-(http://arxiv.org/abs/1412.6071)
+[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
value: 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: Pooling ratio for each dimension of `value`, currently only
diff --git a/tensorflow/core/platform/cloud/retrying_file_system.cc b/tensorflow/core/platform/cloud/retrying_file_system.cc
index 6dd6383faf..2b6e0a3083 100644
--- a/tensorflow/core/platform/cloud/retrying_file_system.cc
+++ b/tensorflow/core/platform/cloud/retrying_file_system.cc
@@ -16,14 +16,18 @@ limitations under the License.
#include "tensorflow/core/platform/cloud/retrying_file_system.h"
#include <functional>
#include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/lib/random/random.h"
+#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system.h"
namespace tensorflow {
namespace {
-// In case of failure, every call will be retried kMaxAttempts-1 times.
-constexpr int kMaxAttempts = 4;
+// In case of failure, every call will be retried kMaxRetries times.
+constexpr int kMaxRetries = 3;
+// Maximum backoff time in microseconds.
+constexpr int64 kMaximumBackoffMicroseconds = 32000000;
bool IsRetriable(Status status) {
switch (status.code()) {
@@ -37,55 +41,76 @@ bool IsRetriable(Status status) {
}
}
-Status CallWithRetries(const std::function<Status()>& f) {
- int attempts = 0;
+void WaitBeforeRetry(const int64 delay_micros) {
+ const int64 random_micros = random::New64() % 1000000;
+
+ Env::Default()->SleepForMicroseconds(std::min(delay_micros + random_micros,
+ kMaximumBackoffMicroseconds));
+}
+
+Status CallWithRetries(const std::function<Status()>& f,
+ const int64 initial_delay_microseconds) {
+ int retries = 0;
while (true) {
- attempts++;
auto status = f();
- if (!IsRetriable(status) || attempts >= kMaxAttempts) {
+ if (!IsRetriable(status) || retries >= kMaxRetries) {
return status;
}
- LOG(ERROR) << "The operation resulted in an error and will be retried: "
- << status.ToString();
+ const int64 delay_micros = initial_delay_microseconds << retries;
+ LOG(ERROR) << "The operation resulted in an error: " << status.ToString()
+ << " will be retried after " << delay_micros << " microseconds";
+ WaitBeforeRetry(delay_micros);
+ retries++;
}
}
class RetryingRandomAccessFile : public RandomAccessFile {
public:
- RetryingRandomAccessFile(std::unique_ptr<RandomAccessFile> base_file)
- : base_file_(std::move(base_file)) {}
+ RetryingRandomAccessFile(std::unique_ptr<RandomAccessFile> base_file,
+ int64 delay_microseconds = 1000000)
+ : base_file_(std::move(base_file)),
+ initial_delay_microseconds_(delay_microseconds) {}
Status Read(uint64 offset, size_t n, StringPiece* result,
char* scratch) const override {
return CallWithRetries(std::bind(&RandomAccessFile::Read, base_file_.get(),
- offset, n, result, scratch));
+ offset, n, result, scratch),
+ initial_delay_microseconds_);
}
private:
std::unique_ptr<RandomAccessFile> base_file_;
+ const int64 initial_delay_microseconds_;
};
class RetryingWritableFile : public WritableFile {
public:
- RetryingWritableFile(std::unique_ptr<WritableFile> base_file)
- : base_file_(std::move(base_file)) {}
+ RetryingWritableFile(std::unique_ptr<WritableFile> base_file,
+ int64 delay_microseconds = 1000000)
+ : base_file_(std::move(base_file)),
+ initial_delay_microseconds_(delay_microseconds) {}
Status Append(const StringPiece& data) override {
return CallWithRetries(
- std::bind(&WritableFile::Append, base_file_.get(), data));
+ std::bind(&WritableFile::Append, base_file_.get(), data),
+ initial_delay_microseconds_);
}
Status Close() override {
- return CallWithRetries(std::bind(&WritableFile::Close, base_file_.get()));
+ return CallWithRetries(std::bind(&WritableFile::Close, base_file_.get()),
+ initial_delay_microseconds_);
}
Status Flush() override {
- return CallWithRetries(std::bind(&WritableFile::Flush, base_file_.get()));
+ return CallWithRetries(std::bind(&WritableFile::Flush, base_file_.get()),
+ initial_delay_microseconds_);
}
Status Sync() override {
- return CallWithRetries(std::bind(&WritableFile::Sync, base_file_.get()));
+ return CallWithRetries(std::bind(&WritableFile::Sync, base_file_.get()),
+ initial_delay_microseconds_);
}
private:
std::unique_ptr<WritableFile> base_file_;
+ const int64 initial_delay_microseconds_;
};
} // namespace
@@ -95,7 +120,8 @@ Status RetryingFileSystem::NewRandomAccessFile(
std::unique_ptr<RandomAccessFile> base_file;
TF_RETURN_IF_ERROR(CallWithRetries(std::bind(&FileSystem::NewRandomAccessFile,
base_file_system_.get(),
- filename, &base_file)));
+ filename, &base_file),
+ initial_delay_microseconds_));
result->reset(new RetryingRandomAccessFile(std::move(base_file)));
return Status::OK();
}
@@ -105,7 +131,8 @@ Status RetryingFileSystem::NewWritableFile(
std::unique_ptr<WritableFile> base_file;
TF_RETURN_IF_ERROR(CallWithRetries(std::bind(&FileSystem::NewWritableFile,
base_file_system_.get(),
- filename, &base_file)));
+ filename, &base_file),
+ initial_delay_microseconds_));
result->reset(new RetryingWritableFile(std::move(base_file)));
return Status::OK();
}
@@ -115,7 +142,8 @@ Status RetryingFileSystem::NewAppendableFile(
std::unique_ptr<WritableFile> base_file;
TF_RETURN_IF_ERROR(CallWithRetries(std::bind(&FileSystem::NewAppendableFile,
base_file_system_.get(),
- filename, &base_file)));
+ filename, &base_file),
+ initial_delay_microseconds_));
result->reset(new RetryingWritableFile(std::move(base_file)));
return Status::OK();
}
@@ -123,7 +151,8 @@ Status RetryingFileSystem::NewAppendableFile(
Status RetryingFileSystem::NewReadOnlyMemoryRegionFromFile(
const string& filename, std::unique_ptr<ReadOnlyMemoryRegion>* result) {
return CallWithRetries(std::bind(&FileSystem::NewReadOnlyMemoryRegionFromFile,
- base_file_system_.get(), filename, result));
+ base_file_system_.get(), filename, result),
+ initial_delay_microseconds_);
}
bool RetryingFileSystem::FileExists(const string& fname) {
@@ -133,49 +162,58 @@ bool RetryingFileSystem::FileExists(const string& fname) {
Status RetryingFileSystem::Stat(const string& fname, FileStatistics* stat) {
return CallWithRetries(
- std::bind(&FileSystem::Stat, base_file_system_.get(), fname, stat));
+ std::bind(&FileSystem::Stat, base_file_system_.get(), fname, stat),
+ initial_delay_microseconds_);
}
Status RetryingFileSystem::GetChildren(const string& dir,
std::vector<string>* result) {
return CallWithRetries(std::bind(&FileSystem::GetChildren,
- base_file_system_.get(), dir, result));
+ base_file_system_.get(), dir, result),
+ initial_delay_microseconds_);
}
Status RetryingFileSystem::GetMatchingPaths(const string& pattern,
std::vector<string>* result) {
return CallWithRetries(std::bind(&FileSystem::GetMatchingPaths,
- base_file_system_.get(), pattern, result));
+ base_file_system_.get(), pattern, result),
+ initial_delay_microseconds_);
}
Status RetryingFileSystem::DeleteFile(const string& fname) {
return CallWithRetries(
- std::bind(&FileSystem::DeleteFile, base_file_system_.get(), fname));
+ std::bind(&FileSystem::DeleteFile, base_file_system_.get(), fname),
+ initial_delay_microseconds_);
}
Status RetryingFileSystem::CreateDir(const string& dirname) {
return CallWithRetries(
- std::bind(&FileSystem::CreateDir, base_file_system_.get(), dirname));
+ std::bind(&FileSystem::CreateDir, base_file_system_.get(), dirname),
+ initial_delay_microseconds_);
}
Status RetryingFileSystem::DeleteDir(const string& dirname) {
return CallWithRetries(
- std::bind(&FileSystem::DeleteDir, base_file_system_.get(), dirname));
+ std::bind(&FileSystem::DeleteDir, base_file_system_.get(), dirname),
+ initial_delay_microseconds_);
}
Status RetryingFileSystem::GetFileSize(const string& fname, uint64* file_size) {
return CallWithRetries(std::bind(&FileSystem::GetFileSize,
- base_file_system_.get(), fname, file_size));
+ base_file_system_.get(), fname, file_size),
+ initial_delay_microseconds_);
}
Status RetryingFileSystem::RenameFile(const string& src, const string& target) {
return CallWithRetries(
- std::bind(&FileSystem::RenameFile, base_file_system_.get(), src, target));
+ std::bind(&FileSystem::RenameFile, base_file_system_.get(), src, target),
+ initial_delay_microseconds_);
}
Status RetryingFileSystem::IsDirectory(const string& dirname) {
return CallWithRetries(
- std::bind(&FileSystem::IsDirectory, base_file_system_.get(), dirname));
+ std::bind(&FileSystem::IsDirectory, base_file_system_.get(), dirname),
+ initial_delay_microseconds_);
}
} // namespace tensorflow
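
The retry path above now waits between attempts: the delay doubles with each retry (the initial delay shifted left by the retry count), gains up to one second of random jitter, and is capped at kMaximumBackoffMicroseconds. A minimal Python sketch of that schedule using the defaults from the patch:

```python
import random

INITIAL_DELAY_US = 1000000      # default initial delay (1 s)
MAX_BACKOFF_US = 32000000       # cap, mirrors kMaximumBackoffMicroseconds
MAX_RETRIES = 3                 # mirrors kMaxRetries

def retry_delays(initial_delay_us=INITIAL_DELAY_US):
    # Sleep time (in microseconds) before each retry: ~1s, ~2s, ~4s plus jitter.
    for retries in range(MAX_RETRIES):
        delay = initial_delay_us << retries
        jitter = random.randrange(1000000)      # mirrors random::New64() % 1000000
        yield min(delay + jitter, MAX_BACKOFF_US)

print([round(us / 1e6, 2) for us in retry_delays()])  # e.g. [1.42, 2.07, 4.93]
```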
diff --git a/tensorflow/core/platform/cloud/retrying_file_system.h b/tensorflow/core/platform/cloud/retrying_file_system.h
index d021ed2821..790ea61bd6 100644
--- a/tensorflow/core/platform/cloud/retrying_file_system.h
+++ b/tensorflow/core/platform/cloud/retrying_file_system.h
@@ -26,8 +26,10 @@ namespace tensorflow {
/// A wrapper to add retry logic to another file system.
class RetryingFileSystem : public FileSystem {
public:
- RetryingFileSystem(std::unique_ptr<FileSystem> base_file_system)
- : base_file_system_(std::move(base_file_system)) {}
+ RetryingFileSystem(std::unique_ptr<FileSystem> base_file_system,
+ int64 delay_microseconds = 1000000)
+ : base_file_system_(std::move(base_file_system)),
+ initial_delay_microseconds_(delay_microseconds) {}
Status NewRandomAccessFile(
const string& filename,
@@ -66,6 +68,7 @@ class RetryingFileSystem : public FileSystem {
private:
std::unique_ptr<FileSystem> base_file_system_;
+ const int64 initial_delay_microseconds_;
TF_DISALLOW_COPY_AND_ASSIGN(RetryingFileSystem);
};
diff --git a/tensorflow/core/platform/cloud/retrying_file_system_test.cc b/tensorflow/core/platform/cloud/retrying_file_system_test.cc
index cc50fd72a0..9ec1105aa8 100644
--- a/tensorflow/core/platform/cloud/retrying_file_system_test.cc
+++ b/tensorflow/core/platform/cloud/retrying_file_system_test.cc
@@ -158,7 +158,7 @@ TEST(RetryingFileSystemTest, NewRandomAccessFile_ImmediateSuccess) {
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->random_access_file_to_return = std::move(base_file);
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
// Retrieve the wrapped random access file.
std::unique_ptr<RandomAccessFile> random_access_file;
@@ -185,7 +185,7 @@ TEST(RetryingFileSystemTest, NewRandomAccessFile_SuccessWith3rdTry) {
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->random_access_file_to_return = std::move(base_file);
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
// Retrieve the wrapped random access file.
std::unique_ptr<RandomAccessFile> random_access_file;
@@ -213,7 +213,7 @@ TEST(RetryingFileSystemTest, NewRandomAccessFile_AllRetriesFailed) {
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->random_access_file_to_return = std::move(base_file);
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
// Retrieve the wrapped random access file.
std::unique_ptr<RandomAccessFile> random_access_file;
@@ -241,7 +241,7 @@ TEST(RetryingFileSystemTest, NewRandomAccessFile_NoRetriesForSomeErrors) {
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->random_access_file_to_return = std::move(base_file);
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
// Retrieve the wrapped random access file.
std::unique_ptr<RandomAccessFile> random_access_file;
@@ -266,7 +266,7 @@ TEST(RetryingFileSystemTest, NewWritableFile_ImmediateSuccess) {
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->writable_file_to_return = std::move(base_file);
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
// Retrieve the wrapped writable file.
std::unique_ptr<WritableFile> writable_file;
@@ -291,7 +291,7 @@ TEST(RetryingFileSystemTest, NewWritableFile_SuccessWith3rdTry) {
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->writable_file_to_return = std::move(base_file);
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
// Retrieve the wrapped writable file.
std::unique_ptr<WritableFile> writable_file;
@@ -316,7 +316,7 @@ TEST(RetryingFileSystemTest, NewAppendableFile_SuccessWith3rdTry) {
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->writable_file_to_return = std::move(base_file);
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
// Retrieve the wrapped appendable file.
std::unique_ptr<WritableFile> writable_file;
@@ -342,7 +342,7 @@ TEST(RetryingFileSystemTest, NewWritableFile_AllRetriesFailed) {
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->writable_file_to_return = std::move(base_file);
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
// Retrieve the wrapped writable file.
std::unique_ptr<WritableFile> writable_file;
@@ -360,7 +360,7 @@ TEST(RetryingFileSystemTest,
std::make_tuple("NewReadOnlyMemoryRegionFromFile", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::unique_ptr<ReadOnlyMemoryRegion> result;
TF_EXPECT_OK(fs.NewReadOnlyMemoryRegionFromFile("filename.txt", &result));
@@ -378,7 +378,7 @@ TEST(RetryingFileSystemTest, NewReadOnlyMemoryRegionFromFile_AllRetriesFailed) {
errors::Unavailable("Last error"))});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::unique_ptr<ReadOnlyMemoryRegion> result;
EXPECT_EQ("Last error",
@@ -393,7 +393,7 @@ TEST(RetryingFileSystemTest, GetChildren_SuccessWith2ndTry) {
std::make_tuple("GetChildren", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
TF_EXPECT_OK(fs.GetChildren("gs://path", &result));
@@ -409,7 +409,7 @@ TEST(RetryingFileSystemTest, GetChildren_AllRetriesFailed) {
std::make_tuple("GetChildren", errors::Unavailable("Last error"))});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
EXPECT_EQ("Last error", fs.GetChildren("gs://path", &result).error_message());
@@ -422,7 +422,7 @@ TEST(RetryingFileSystemTest, GetMatchingPaths_SuccessWith2ndTry) {
std::make_tuple("GetMatchingPaths", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
TF_EXPECT_OK(fs.GetMatchingPaths("gs://path/dir", &result));
@@ -438,7 +438,7 @@ TEST(RetryingFileSystemTest, GetMatchingPaths_AllRetriesFailed) {
std::make_tuple("GetMatchingPaths", errors::Unavailable("Last error"))});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
EXPECT_EQ("Last error",
@@ -451,7 +451,7 @@ TEST(RetryingFileSystemTest, DeleteFile_SuccessWith2ndTry) {
std::make_tuple("DeleteFile", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
TF_EXPECT_OK(fs.DeleteFile("gs://path/file.txt"));
@@ -466,7 +466,7 @@ TEST(RetryingFileSystemTest, DeleteFile_AllRetriesFailed) {
std::make_tuple("DeleteFile", errors::Unavailable("Last error"))});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
EXPECT_EQ("Last error", fs.DeleteFile("gs://path/file.txt").error_message());
@@ -478,7 +478,7 @@ TEST(RetryingFileSystemTest, CreateDir_SuccessWith2ndTry) {
std::make_tuple("CreateDir", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
TF_EXPECT_OK(fs.CreateDir("gs://path/newdir"));
@@ -493,7 +493,7 @@ TEST(RetryingFileSystemTest, CreateDir_AllRetriesFailed) {
std::make_tuple("CreateDir", errors::Unavailable("Last error"))});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
EXPECT_EQ("Last error", fs.CreateDir("gs://path/newdir").error_message());
@@ -505,7 +505,7 @@ TEST(RetryingFileSystemTest, DeleteDir_SuccessWith2ndTry) {
std::make_tuple("DeleteDir", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
TF_EXPECT_OK(fs.DeleteDir("gs://path/dir"));
@@ -520,7 +520,7 @@ TEST(RetryingFileSystemTest, DeleteDir_AllRetriesFailed) {
std::make_tuple("DeleteDir", errors::Unavailable("Last error"))});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
std::vector<string> result;
EXPECT_EQ("Last error", fs.DeleteDir("gs://path/dir").error_message());
@@ -533,7 +533,7 @@ TEST(RetryingFileSystemTest, GetFileSize_SuccessWith2ndTry) {
std::make_tuple("GetFileSize", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
uint64 size;
TF_EXPECT_OK(fs.GetFileSize("gs://path/file.txt", &size));
@@ -549,7 +549,7 @@ TEST(RetryingFileSystemTest, GetFileSize_AllRetriesFailed) {
std::make_tuple("GetFileSize", errors::Unavailable("Last error"))});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
uint64 size;
EXPECT_EQ("Last error",
@@ -562,7 +562,7 @@ TEST(RetryingFileSystemTest, RenameFile_SuccessWith2ndTry) {
std::make_tuple("RenameFile", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
TF_EXPECT_OK(fs.RenameFile("old_name", "new_name"));
}
@@ -577,7 +577,7 @@ TEST(RetryingFileSystemTest, RenameFile_AllRetriesFailed) {
});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
EXPECT_EQ("Last error",
fs.RenameFile("old_name", "new_name").error_message());
@@ -589,7 +589,7 @@ TEST(RetryingFileSystemTest, Stat_SuccessWith2ndTry) {
std::make_tuple("Stat", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("file_name", &stat));
@@ -604,7 +604,7 @@ TEST(RetryingFileSystemTest, Stat_AllRetriesFailed) {
});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
FileStatistics stat;
EXPECT_EQ("Last error", fs.Stat("file_name", &stat).error_message());
@@ -617,7 +617,7 @@ TEST(RetryingFileSystemTest, IsDirectory_SuccessWith2ndTry) {
std::make_tuple("IsDirectory", Status::OK())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
TF_EXPECT_OK(fs.IsDirectory("gs://path/dir"));
}
@@ -632,7 +632,7 @@ TEST(RetryingFileSystemTest, IsDirectory_AllRetriesFailed) {
std::make_tuple("IsDirectory", errors::Unavailable("Last error"))});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
- RetryingFileSystem fs(std::move(base_fs));
+ RetryingFileSystem fs(std::move(base_fs), 0);
EXPECT_EQ("Last error", fs.IsDirectory("gs://path/dir").error_message());
}
diff --git a/tensorflow/core/platform/default/logging.h b/tensorflow/core/platform/default/logging.h
index 1333bd00fc..eaae673464 100644
--- a/tensorflow/core/platform/default/logging.h
+++ b/tensorflow/core/platform/default/logging.h
@@ -24,6 +24,9 @@ limitations under the License.
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
+// TODO(mrry): Prevent this Windows.h #define from leaking out of our headers.
+#undef ERROR
+
namespace tensorflow {
const int INFO = 0; // base_logging::INFO;
const int WARNING = 1; // base_logging::WARNING;
diff --git a/tensorflow/core/platform/dynamic_annotations.h b/tensorflow/core/platform/dynamic_annotations.h
index 313ec6ba27..f51f3f33a3 100644
--- a/tensorflow/core/platform/dynamic_annotations.h
+++ b/tensorflow/core/platform/dynamic_annotations.h
@@ -22,7 +22,7 @@ limitations under the License.
#if defined(PLATFORM_GOOGLE)
#include "tensorflow/core/platform/google/build_config/dynamic_annotations.h"
#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \
- defined(PLATFORM_GOOGLE_ANDROID)
+ defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_WINDOWS)
#include "tensorflow/core/platform/default/dynamic_annotations.h"
#else
#error Define the appropriate PLATFORM_<foo> macro for this platform
diff --git a/tensorflow/core/platform/file_system.h b/tensorflow/core/platform/file_system.h
index a35117fc3f..8391817521 100644
--- a/tensorflow/core/platform/file_system.h
+++ b/tensorflow/core/platform/file_system.h
@@ -26,9 +26,14 @@ limitations under the License.
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/platform/file_statistics.h"
#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
+#ifdef PLATFORM_WINDOWS
+#undef DeleteFile
+#endif
+
namespace tensorflow {
class RandomAccessFile;
diff --git a/tensorflow/core/platform/gif.h b/tensorflow/core/platform/gif.h
index 4a2bebdd0a..8a719a8cf1 100644
--- a/tensorflow/core/platform/gif.h
+++ b/tensorflow/core/platform/gif.h
@@ -20,7 +20,7 @@ limitations under the License.
#if defined(PLATFORM_GOOGLE)
#include "tensorflow/core/platform/google/build_config/gif.h"
-#elif defined(PLATFORM_POSIX) && !defined(IS_MOBILE_PLATFORM)
+#elif (defined(PLATFORM_POSIX) && !defined(IS_MOBILE_PLATFORM)) || defined(PLATFORM_WINDOWS)
#include <gif_lib.h>
#else
#error Define the appropriate PLATFORM_<foo> macro for this platform
diff --git a/tensorflow/core/platform/jpeg.h b/tensorflow/core/platform/jpeg.h
index c9ddc23ff1..5b083f84ab 100644
--- a/tensorflow/core/platform/jpeg.h
+++ b/tensorflow/core/platform/jpeg.h
@@ -20,7 +20,7 @@ limitations under the License.
#if defined(PLATFORM_GOOGLE)
#include "tensorflow/core/platform/google/build_config/jpeg.h"
-#elif defined(PLATFORM_POSIX) && !defined(IS_MOBILE_PLATFORM)
+#elif (defined(PLATFORM_POSIX) && !defined(IS_MOBILE_PLATFORM)) || defined(PLATFORM_WINDOWS)
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
diff --git a/tensorflow/core/platform/macros.h b/tensorflow/core/platform/macros.h
index 2f79d33514..aad35890af 100644
--- a/tensorflow/core/platform/macros.h
+++ b/tensorflow/core/platform/macros.h
@@ -30,7 +30,16 @@ limitations under the License.
__attribute__((__format__(__printf__, string_index, first_to_check)))
#define TF_SCANF_ATTRIBUTE(string_index, first_to_check) \
__attribute__((__format__(__scanf__, string_index, first_to_check)))
-
+#elif defined(COMPILER_MSVC)
+// Non-GCC equivalents
+#define TF_ATTRIBUTE_NORETURN __declspec(noreturn)
+#define TF_ATTRIBUTE_NOINLINE
+#define TF_ATTRIBUTE_UNUSED
+#define TF_ATTRIBUTE_COLD
+#define TF_MUST_USE_RESULT
+#define TF_PACKED
+#define TF_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define TF_SCANF_ATTRIBUTE(string_index, first_to_check)
#else
// Non-GCC equivalents
#define TF_ATTRIBUTE_NORETURN
diff --git a/tensorflow/core/platform/mutex.h b/tensorflow/core/platform/mutex.h
index fbab4d9212..42d46ceb5b 100644
--- a/tensorflow/core/platform/mutex.h
+++ b/tensorflow/core/platform/mutex.h
@@ -27,7 +27,7 @@ enum ConditionResult { kCond_Timeout, kCond_MaybeNotified };
#if defined(PLATFORM_GOOGLE)
#include "tensorflow/core/platform/google/mutex.h"
#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \
- defined(PLATFORM_GOOGLE_ANDROID)
+ defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_WINDOWS)
#include "tensorflow/core/platform/default/mutex.h"
#else
#error Define the appropriate PLATFORM_<foo> macro for this platform
diff --git a/tensorflow/core/platform/notification.h b/tensorflow/core/platform/notification.h
index dfea9e8168..0d477e8e1c 100644
--- a/tensorflow/core/platform/notification.h
+++ b/tensorflow/core/platform/notification.h
@@ -22,7 +22,7 @@ limitations under the License.
#if defined(PLATFORM_GOOGLE)
#include "tensorflow/core/platform/google/notification.h"
#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \
- defined(PLATFORM_GOOGLE_ANDROID)
+ defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_WINDOWS)
#include "tensorflow/core/platform/default/notification.h"
#else
#error Define the appropriate PLATFORM_<foo> macro for this platform
diff --git a/tensorflow/core/platform/platform.h b/tensorflow/core/platform/platform.h
index 02731d9275..982a7b830b 100644
--- a/tensorflow/core/platform/platform.h
+++ b/tensorflow/core/platform/platform.h
@@ -29,7 +29,6 @@ limitations under the License.
#elif defined(__APPLE__)
#define PLATFORM_POSIX
-
#include "TargetConditionals.h"
#if TARGET_IPHONE_SIMULATOR
#define IS_MOBILE_PLATFORM
@@ -37,6 +36,9 @@ limitations under the License.
#define IS_MOBILE_PLATFORM
#endif
+#elif defined(_WIN32)
+#define PLATFORM_WINDOWS
+
#elif defined(__arm__)
#define PLATFORM_POSIX
diff --git a/tensorflow/core/platform/png.h b/tensorflow/core/platform/png.h
index dedb294843..4500d9abe2 100644
--- a/tensorflow/core/platform/png.h
+++ b/tensorflow/core/platform/png.h
@@ -20,7 +20,7 @@ limitations under the License.
#if defined(PLATFORM_GOOGLE)
#include "tensorflow/core/platform/google/build_config/png.h"
-#elif defined(PLATFORM_POSIX) && !defined(IS_MOBILE_PLATFORM)
+#elif (defined(PLATFORM_POSIX) && !defined(IS_MOBILE_PLATFORM)) || defined(PLATFORM_WINDOWS)
#include <png.h>
#else
#error Define the appropriate PLATFORM_<foo> macro for this platform
diff --git a/tensorflow/core/platform/posix/error.cc b/tensorflow/core/platform/posix/error.cc
index 30d135a255..94a3a6ab5d 100644
--- a/tensorflow/core/platform/posix/error.cc
+++ b/tensorflow/core/platform/posix/error.cc
@@ -72,15 +72,21 @@ error::Code ErrnoToCode(int err_number) {
case EBUSY: // Device or resource busy
case ECHILD: // No child processes
case EISCONN: // Socket is connected
+#if !defined(_WIN32)
case ENOTBLK: // Block device required
+#endif
case ENOTCONN: // The socket is not connected
case EPIPE: // Broken pipe
+#if !defined(_WIN32)
case ESHUTDOWN: // Cannot send after transport endpoint shutdown
+#endif
case ETXTBSY: // Text file busy
code = error::FAILED_PRECONDITION;
break;
case ENOSPC: // No space left on device
+#if !defined(_WIN32)
case EDQUOT: // Disk quota exceeded
+#endif
case EMFILE: // Too many open files
case EMLINK: // Too many links
case ENFILE: // Too many open files in system
@@ -88,7 +94,9 @@ error::Code ErrnoToCode(int err_number) {
case ENODATA: // No message is available on the STREAM read queue
case ENOMEM: // Not enough space
case ENOSR: // No STREAM resources
+#if !defined(_WIN32)
case EUSERS: // Too many users
+#endif
code = error::RESOURCE_EXHAUSTED;
break;
case EFBIG: // File too large
@@ -99,9 +107,13 @@ error::Code ErrnoToCode(int err_number) {
case ENOSYS: // Function not implemented
case ENOTSUP: // Operation not supported
case EAFNOSUPPORT: // Address family not supported
+#if !defined(_WIN32)
case EPFNOSUPPORT: // Protocol family not supported
+#endif
case EPROTONOSUPPORT: // Protocol not supported
+#if !defined(_WIN32)
case ESOCKTNOSUPPORT: // Socket type not supported
+#endif
case EXDEV: // Improper link
code = error::UNIMPLEMENTED;
break;
@@ -110,20 +122,24 @@ error::Code ErrnoToCode(int err_number) {
case ECONNABORTED: // Connection aborted
case ECONNRESET: // Connection reset
case EINTR: // Interrupted function call
+#if !defined(_WIN32)
case EHOSTDOWN: // Host is down
+#endif
case EHOSTUNREACH: // Host is unreachable
case ENETDOWN: // Network is down
case ENETRESET: // Connection aborted by network
case ENETUNREACH: // Network unreachable
case ENOLCK: // No locks available
case ENOLINK: // Link has been severed
-#if !defined(__APPLE__)
+#if !(defined(__APPLE__) || defined(_WIN32))
case ENONET: // Machine is not on the network
#endif
code = error::UNAVAILABLE;
break;
case EDEADLK: // Resource deadlock avoided
+#if !defined(_WIN32)
case ESTALE: // Stale file handle
+#endif
code = error::ABORTED;
break;
case ECANCELED: // Operation cancelled
@@ -140,7 +156,9 @@ error::Code ErrnoToCode(int err_number) {
case ENOEXEC: // Exec format error
case ENOMSG: // No message of the desired type
case EPROTO: // Protocol error
+#if !defined(_WIN32)
case EREMOTE: // Object is remote
+#endif
code = error::UNKNOWN;
break;
default: {
diff --git a/tensorflow/core/platform/thread_annotations.h b/tensorflow/core/platform/thread_annotations.h
index 6f79c28c5d..50195cbbc7 100644
--- a/tensorflow/core/platform/thread_annotations.h
+++ b/tensorflow/core/platform/thread_annotations.h
@@ -21,7 +21,7 @@ limitations under the License.
#if defined(PLATFORM_GOOGLE)
#include "tensorflow/core/platform/google/build_config/thread_annotations.h"
#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \
- defined(PLATFORM_GOOGLE_ANDROID)
+ defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_WINDOWS)
#include "tensorflow/core/platform/default/thread_annotations.h"
#else
#error Define the appropriate PLATFORM_<foo> macro for this platform
diff --git a/tensorflow/core/platform/types.h b/tensorflow/core/platform/types.h
index 30ada90437..93b82ecb7a 100644
--- a/tensorflow/core/platform/types.h
+++ b/tensorflow/core/platform/types.h
@@ -23,7 +23,7 @@ limitations under the License.
#if defined(PLATFORM_GOOGLE) || defined(GOOGLE_INTEGRAL_TYPES)
#include "tensorflow/core/platform/google/integral_types.h"
#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \
- defined(PLATFORM_GOOGLE_ANDROID)
+ defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_WINDOWS)
#include "tensorflow/core/platform/default/integral_types.h"
#else
#error Define the appropriate PLATFORM_<foo> macro for this platform
diff --git a/tensorflow/core/platform/windows/env.cc b/tensorflow/core/platform/windows/env.cc
new file mode 100644
index 0000000000..a2182a831c
--- /dev/null
+++ b/tensorflow/core/platform/windows/env.cc
@@ -0,0 +1,113 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/platform/env.h"
+
+#include <Shlwapi.h>
+#include <Windows.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <time.h>
+#undef LoadLibrary
+#undef ERROR
+
+#include <thread>
+#include <vector>
+
+#include "tensorflow/core/lib/core/error_codes.pb.h"
+#include "tensorflow/core/platform/load_library.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/windows/windows_file_system.h"
+
+namespace tensorflow {
+
+namespace {
+
+class StdThread : public Thread {
+ public:
+ // name and thread_options are both ignored.
+ StdThread(const ThreadOptions& thread_options, const string& name,
+ std::function<void()> fn)
+ : thread_(fn) {}
+ ~StdThread() { thread_.join(); }
+
+ private:
+ std::thread thread_;
+};
+
+class WindowsEnv : public Env {
+ public:
+ WindowsEnv() {}
+ ~WindowsEnv() override {
+ LOG(FATAL) << "Env::Default() must not be destroyed";
+ }
+
+ bool MatchPath(const string& path, const string& pattern) override {
+ return PathMatchSpec(path.c_str(), pattern.c_str()) == S_OK;
+ }
+
+ uint64 NowMicros() override {
+ FILETIME temp;
+ GetSystemTimeAsFileTime(&temp);
+ uint64 now_ticks =
+ (uint64)temp.dwLowDateTime + ((uint64)(temp.dwHighDateTime) << 32LL);
+ return now_ticks / 10LL;
+ }
+
+ void SleepForMicroseconds(int64 micros) override { Sleep(micros / 1000); }
+
+ Thread* StartThread(const ThreadOptions& thread_options, const string& name,
+ std::function<void()> fn) override {
+ return new StdThread(thread_options, name, fn);
+ }
+
+ void SchedClosure(std::function<void()> closure) override {
+ // TODO(b/27290852): Spawning a new thread here is wasteful, but
+ // needed to deal with the fact that many `closure` functions are
+ // blocking in the current codebase.
+ std::thread closure_thread(closure);
+ closure_thread.detach();
+ }
+
+ void SchedClosureAfter(int64 micros, std::function<void()> closure) override {
+ // TODO(b/27290852): Consuming a thread here is wasteful, but this
+ // code is (currently) only used in the case where a step fails
+ // (AbortStep). This could be replaced by a timer thread
+ SchedClosure([this, micros, closure]() {
+ SleepForMicroseconds(micros);
+ closure();
+ });
+ }
+
+ Status LoadLibrary(const char* library_filename, void** handle) override {
+ return errors::Unimplemented("WindowsEnv::LoadLibrary");
+ }
+
+ Status GetSymbolFromLibrary(void* handle, const char* symbol_name,
+ void** symbol) override {
+ return errors::Unimplemented("WindowsEnv::GetSymbolFromLibrary");
+ }
+};
+
+} // namespace
+
+REGISTER_FILE_SYSTEM("", WindowsFileSystem);
+Env* Env::Default() {
+ static Env* default_env = new WindowsEnv;
+ return default_env;
+}
+
+} // namespace tensorflow
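
One note on NowMicros above: FILETIME counts 100-nanosecond ticks since 1601-01-01 (UTC), so dividing the tick count by 10 yields microseconds on that epoch. A small sketch of the conversion, including the offset one would subtract to compare against a Unix-epoch clock (the helper name is illustrative):

```python
EPOCH_DIFF_US = 11644473600 * 1000000   # microseconds between the 1601 and 1970 epochs

def filetime_to_unix_micros(high, low):
    ticks = ((high & 0xFFFFFFFF) << 32) | (low & 0xFFFFFFFF)  # 100 ns ticks
    return ticks // 10 - EPOCH_DIFF_US

# The tick count corresponding to the Unix epoch itself maps to zero:
unix_epoch_ticks = 11644473600 * 10000000
print(filetime_to_unix_micros(unix_epoch_ticks >> 32, unix_epoch_ticks & 0xFFFFFFFF))  # 0
```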
diff --git a/tensorflow/core/platform/windows/net.cc b/tensorflow/core/platform/windows/net.cc
new file mode 100644
index 0000000000..fbc0c39c9c
--- /dev/null
+++ b/tensorflow/core/platform/windows/net.cc
@@ -0,0 +1,131 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/platform/net.h"
+
+#include <cerrno>
+#include <cstdlib>
+#include <unordered_set>
+
+#include <sys/types.h>
+#include <winsock.h>
+
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/platform/logging.h"
+
+#undef ERROR
+
+namespace tensorflow {
+namespace internal {
+
+namespace {
+bool IsPortAvailable(int* port, bool is_tcp) {
+ const int protocol = is_tcp ? IPPROTO_TCP : 0;
+ const int fd = socket(AF_INET, is_tcp ? SOCK_STREAM : SOCK_DGRAM, protocol);
+
+ struct sockaddr_in addr;
+ int addr_len = static_cast<int>(sizeof(addr));
+ int actual_port;
+
+ CHECK_GE(*port, 0);
+ CHECK_LE(*port, 65535);
+ if (fd < 0) {
+ LOG(ERROR) << "socket() failed: " << strerror(errno);
+ return false;
+ }
+
+ // SO_REUSEADDR lets us start up a server immediately after it exits.
+ int one = 1;
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (const char*)&one, sizeof(one)) <
+ 0) {
+ LOG(ERROR) << "setsockopt() failed: " << strerror(errno);
+ closesocket(fd);
+ return false;
+ }
+
+ // Try binding to port.
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = INADDR_ANY;
+ addr.sin_port = htons((uint16_t)*port);
+ if (bind(fd, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
+ LOG(WARNING) << "bind(port=" << *port << ") failed: " << strerror(errno);
+ closesocket(fd);
+ return false;
+ }
+
+ // Get the bound port number.
+ if (getsockname(fd, (struct sockaddr*)&addr, &addr_len) < 0) {
+ LOG(WARNING) << "getsockname() failed: " << strerror(errno);
+ closesocket(fd);
+ return false;
+ }
+ CHECK_LE(addr_len, sizeof(addr));
+ actual_port = ntohs(addr.sin_port);
+ CHECK_GT(actual_port, 0);
+ if (*port == 0) {
+ *port = actual_port;
+ } else {
+ CHECK_EQ(*port, actual_port);
+ }
+ closesocket(fd);
+ return true;
+}
+
+const int kNumRandomPortsToPick = 100;
+const int kMaximumTrials = 1000;
+
+} // namespace
+
+int PickUnusedPortOrDie() {
+ static std::unordered_set<int> chosen_ports;
+
+ // Type of port to first pick in the next iteration.
+ bool is_tcp = true;
+ int trial = 0;
+ while (true) {
+ int port;
+ trial++;
+ CHECK_LE(trial, kMaximumTrials)
+ << "Failed to pick an unused port for testing.";
+ if (trial == 1) {
+ port = GetCurrentProcessId() % (65536 - 30000) + 30000;
+ } else if (trial <= kNumRandomPortsToPick) {
+ port = rand() % (65536 - 30000) + 30000;
+ } else {
+ port = 0;
+ }
+
+ if (chosen_ports.find(port) != chosen_ports.end()) {
+ continue;
+ }
+ if (!IsPortAvailable(&port, is_tcp)) {
+ continue;
+ }
+
+ CHECK_GT(port, 0);
+ if (!IsPortAvailable(&port, !is_tcp)) {
+ is_tcp = !is_tcp;
+ continue;
+ }
+
+ chosen_ports.insert(port);
+ return port;
+ }
+
+ return 0;
+}
+
+} // namespace internal
+} // namespace tensorflow
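
A minimal sketch (not part of this patch) of how the helper above is typically used in tests; it assumes Winsock has already been initialized elsewhere, since the code above calls socket() directly.

#include <iostream>

#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/net.h"

int main() {
  // PickUnusedPortOrDie() checks that the port is free for both TCP and UDP
  // before returning, so it can be handed to either kind of test server.
  const int port = tensorflow::internal::PickUnusedPortOrDie();
  const tensorflow::string target =
      tensorflow::strings::StrCat("localhost:", port);
  std::cout << "test server target: " << target << std::endl;
  return 0;
}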
diff --git a/tensorflow/core/platform/windows/port.cc b/tensorflow/core/platform/windows/port.cc
new file mode 100644
index 0000000000..b08c1cf9f4
--- /dev/null
+++ b/tensorflow/core/platform/windows/port.cc
@@ -0,0 +1,99 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef SNAPPY
+#include <snappy.h>
+#endif
+#include <WinSock2.h>
+
+#include "tensorflow/core/platform/cpu_info.h"
+#include "tensorflow/core/platform/demangle.h"
+#include "tensorflow/core/platform/host_info.h"
+#include "tensorflow/core/platform/init_main.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/mem.h"
+#include "tensorflow/core/platform/snappy.h"
+#include "tensorflow/core/platform/types.h"
+
+namespace tensorflow {
+namespace port {
+
+void InitMain(const char* usage, int* argc, char*** argv) {}
+
+string Hostname() {
+ char hostname[1024];
+ gethostname(hostname, sizeof hostname);
+ hostname[sizeof hostname - 1] = 0;
+ return string(hostname);
+}
+
+int NumSchedulableCPUs() {
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ return system_info.dwNumberOfProcessors;
+}
+
+void* aligned_malloc(size_t size, int minimum_alignment) {
+ return _aligned_malloc(size, minimum_alignment);
+}
+
+void aligned_free(void* aligned_memory) { _aligned_free(aligned_memory); }
+
+void MallocExtension_ReleaseToSystem(std::size_t num_bytes) {
+ // No-op.
+}
+
+std::size_t MallocExtension_GetAllocatedSize(const void* p) { return 0; }
+
+void AdjustFilenameForLogging(string* filename) {
+ // Nothing to do
+}
+
+bool Snappy_Compress(const char* input, size_t length, string* output) {
+#ifdef SNAPPY
+ output->resize(snappy::MaxCompressedLength(length));
+ size_t outlen;
+ snappy::RawCompress(input, length, &(*output)[0], &outlen);
+ output->resize(outlen);
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool Snappy_GetUncompressedLength(const char* input, size_t length,
+ size_t* result) {
+#ifdef SNAPPY
+ return snappy::GetUncompressedLength(input, length, result);
+#else
+ return false;
+#endif
+}
+
+bool Snappy_Uncompress(const char* input, size_t length, char* output) {
+#ifdef SNAPPY
+ return snappy::RawUncompress(input, length, output);
+#else
+ return false;
+#endif
+}
+
+string Demangle(const char* mangled) { return mangled; }
+
+} // namespace port
+} // namespace tensorflow
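
A minimal sketch (not part of this patch) exercising a few of the port shims above; header paths are the usual TensorFlow platform headers and are assumed to be on the include path.

#include <iostream>

#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/mem.h"

int main() {
  std::cout << "host: " << tensorflow::port::Hostname() << "\n"
            << "schedulable CPUs: " << tensorflow::port::NumSchedulableCPUs()
            << std::endl;

  // aligned_malloc/aligned_free wrap _aligned_malloc/_aligned_free on
  // Windows, so the buffer must be released with the matching call.
  void* buf =
      tensorflow::port::aligned_malloc(1024 /* size */, 64 /* alignment */);
  tensorflow::port::aligned_free(buf);
  return 0;
}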
diff --git a/tensorflow/core/platform/windows/windows_file_system.cc b/tensorflow/core/platform/windows/windows_file_system.cc
new file mode 100644
index 0000000000..44b26d94e8
--- /dev/null
+++ b/tensorflow/core/platform/windows/windows_file_system.cc
@@ -0,0 +1,266 @@
+/* Copyright 2015 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <Windows.h>
+#include <direct.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <io.h>
+#include <Shlwapi.h>
+#undef StrCat
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <time.h>
+
+#include "tensorflow/core/lib/core/error_codes.pb.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/posix/error.h"
+#include "tensorflow/core/platform/windows/windows_file_system.h"
+
+// TODO(mrry): Prevent this Windows.h #define from leaking out of our headers.
+#undef DeleteFile
+
+namespace tensorflow {
+
+namespace {
+
+// fread() based random-access
+class WindowsRandomAccessFile : public RandomAccessFile {
+ private:
+ string filename_;
+ FILE* file_;
+
+ public:
+ WindowsRandomAccessFile(const string& fname, FILE* f)
+ : filename_(fname), file_(f) {}
+ ~WindowsRandomAccessFile() override {
+ if (file_ != NULL) {
+ // Ignoring any potential errors
+ fclose(file_);
+ }
+ }
+
+ Status Read(uint64 offset, size_t n, StringPiece* result,
+ char* scratch) const override {
+ Status s;
+ char* dst = scratch;
+ int seek_result = fseek(file_, offset, SEEK_SET);
+ if (seek_result) {
+ return IOError(filename_, errno);
+ }
+ while (n > 0 && s.ok()) {
+ size_t r = fread(dst, 1, n, file_);
+ if (r > 0) {
+ dst += r;
+ n -= r;
+ } else if (r == 0) {
+ s = Status(error::OUT_OF_RANGE, "Read fewer bytes than requested");
+ } else if (errno == EINTR || errno == EAGAIN) {
+ // Retry
+ } else {
+ s = IOError(filename_, errno);
+ }
+ }
+ *result = StringPiece(scratch, dst - scratch);
+ return s;
+ }
+};
+
+class WindowsWritableFile : public WritableFile {
+ private:
+ string filename_;
+ FILE* file_;
+
+ public:
+ WindowsWritableFile(const string& fname, FILE* f)
+ : filename_(fname), file_(f) {}
+
+ ~WindowsWritableFile() override {
+ if (file_ != NULL) {
+ // Ignoring any potential errors
+ fclose(file_);
+ }
+ }
+
+ Status Append(const StringPiece& data) override {
+ size_t r = fwrite(data.data(), 1, data.size(), file_);
+ if (r != data.size()) {
+ return IOError(filename_, errno);
+ }
+ return Status::OK();
+ }
+
+ Status Close() override {
+ Status result;
+ if (fclose(file_) != 0) {
+ result = IOError(filename_, errno);
+ }
+ file_ = NULL;
+ return result;
+ }
+
+ Status Flush() override {
+ if (fflush(file_) != 0) {
+ return IOError(filename_, errno);
+ }
+ return Status::OK();
+ }
+
+ Status Sync() override {
+ Status s;
+ if (fflush(file_) != 0) {
+ s = IOError(filename_, errno);
+ }
+ return s;
+ }
+};
+
+} // namespace
+
+Status WindowsFileSystem::NewRandomAccessFile(
+ const string& fname, std::unique_ptr<RandomAccessFile>* result) {
+ string translated_fname = TranslateName(fname);
+ result->reset();
+ Status s;
+ FILE* f = fopen(translated_fname.c_str(), "r");
+ if (f == NULL) {
+ s = IOError(fname, errno);
+ } else {
+ result->reset(new WindowsRandomAccessFile(translated_fname, f));
+ }
+ return s;
+}
+
+Status WindowsFileSystem::NewWritableFile(
+ const string& fname, std::unique_ptr<WritableFile>* result) {
+ string translated_fname = TranslateName(fname);
+ Status s;
+ FILE* f = fopen(translated_fname.c_str(), "w");
+ if (f == NULL) {
+ result->reset();
+ s = IOError(fname, errno);
+ } else {
+ result->reset(new WindowsWritableFile(translated_fname, f));
+ }
+ return s;
+}
+
+Status WindowsFileSystem::NewAppendableFile(
+ const string& fname, std::unique_ptr<WritableFile>* result) {
+ string translated_fname = TranslateName(fname);
+ Status s;
+ FILE* f = fopen(translated_fname.c_str(), "a");
+ if (f == NULL) {
+ result->reset();
+ s = IOError(fname, errno);
+ } else {
+ result->reset(new WindowsWritableFile(translated_fname, f));
+ }
+ return s;
+}
+
+Status WindowsFileSystem::NewReadOnlyMemoryRegionFromFile(
+ const string& fname, std::unique_ptr<ReadOnlyMemoryRegion>* result) {
+ return errors::Unimplemented(
+ "WindowsFileSystem::NewReadOnlyMemoryRegionFromFile");
+}
+
+bool WindowsFileSystem::FileExists(const string& fname) {
+ return _access(TranslateName(fname).c_str(), 0) == 0;
+}
+
+Status WindowsFileSystem::GetChildren(const string& dir,
+ std::vector<string>* result) {
+ string translated_dir = TranslateName(dir);
+ result->clear();
+
+ WIN32_FIND_DATA find_data;
+ HANDLE find_handle = FindFirstFile(translated_dir.c_str(), &find_data);
+ if (find_handle == INVALID_HANDLE_VALUE) {
+ // TODO(mrry): Convert to a more specific error.
+ return errors::Unknown("Error code: ", GetLastError());
+ }
+ result->push_back(find_data.cFileName);
+ while (FindNextFile(find_handle, &find_data)) {
+ result->push_back(find_data.cFileName);
+ }
+ if (!FindClose(find_handle)) {
+ // TODO(mrry): Convert to a more specific error.
+ return errors::Unknown("Error closing find handle: ", GetLastError());
+ }
+ return Status::OK();
+}
+
+Status WindowsFileSystem::DeleteFile(const string& fname) {
+ Status result;
+ if (unlink(TranslateName(fname).c_str()) != 0) {
+ result = IOError(fname, errno);
+ }
+ return result;
+}
+
+Status WindowsFileSystem::CreateDir(const string& name) {
+ Status result;
+ if (_mkdir(TranslateName(name).c_str()) != 0) {
+ result = IOError(name, errno);
+ }
+ return result;
+}
+
+Status WindowsFileSystem::DeleteDir(const string& name) {
+ Status result;
+ if (_rmdir(TranslateName(name).c_str()) != 0) {
+ result = IOError(name, errno);
+ }
+ return result;
+}
+
+Status WindowsFileSystem::GetFileSize(const string& fname, uint64* size) {
+ Status s;
+ struct _stat sbuf;
+ if (_stat(TranslateName(fname).c_str(), &sbuf) != 0) {
+ *size = 0;
+ s = IOError(fname, errno);
+ } else {
+ *size = sbuf.st_size;
+ }
+ return s;
+}
+
+Status WindowsFileSystem::RenameFile(const string& src, const string& target) {
+ Status result;
+ if (rename(TranslateName(src).c_str(), TranslateName(target).c_str()) != 0) {
+ result = IOError(src, errno);
+ }
+ return result;
+}
+
+Status WindowsFileSystem::Stat(const string& fname, FileStatistics* stat) {
+ Status s;
+ struct _stat sbuf;
+ if (_stat(TranslateName(fname).c_str(), &sbuf) != 0) {
+ s = IOError(fname, errno);
+ } else {
+ stat->mtime_nsec = sbuf.st_mtime * 1e9;
+ stat->length = sbuf.st_size;
+ stat->is_directory = PathIsDirectory(TranslateName(fname).c_str());
+ }
+ return s;
+}
+
+} // namespace tensorflow
\ No newline at end of file
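
A minimal sketch (not part of this patch) that round-trips a small file through Env::Default(), which routes schemeless paths to the WindowsFileSystem registered earlier; the path and contents are hypothetical.

#include <iostream>
#include <memory>

#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/env.h"

int main() {
  tensorflow::Env* env = tensorflow::Env::Default();
  const tensorflow::string fname = "C:\\temp\\tf_smoke_test.txt";  // hypothetical

  std::unique_ptr<tensorflow::WritableFile> out;
  TF_CHECK_OK(env->NewWritableFile(fname, &out));
  TF_CHECK_OK(out->Append("hello from WindowsFileSystem\n"));
  TF_CHECK_OK(out->Close());

  tensorflow::uint64 size = 0;
  TF_CHECK_OK(env->GetFileSize(fname, &size));
  std::cout << fname << " is " << size << " bytes" << std::endl;
  return 0;
}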
diff --git a/tensorflow/core/platform/windows/windows_file_system.h b/tensorflow/core/platform/windows/windows_file_system.h
new file mode 100644
index 0000000000..68b391fb10
--- /dev/null
+++ b/tensorflow/core/platform/windows/windows_file_system.h
@@ -0,0 +1,71 @@
+/* Copyright 2015 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_PLATFORM_WINDOWS_WINDOWS_FILE_SYSTEM_H_
+#define TENSORFLOW_CORE_PLATFORM_WINDOWS_WINDOWS_FILE_SYSTEM_H_
+
+#include "tensorflow/core/platform/file_system.h"
+
+#ifdef PLATFORM_WINDOWS
+#undef DeleteFile
+#endif
+
+namespace tensorflow {
+
+class WindowsFileSystem : public FileSystem {
+ public:
+ WindowsFileSystem() {}
+
+ ~WindowsFileSystem() {}
+
+ Status NewRandomAccessFile(
+ const string& fname, std::unique_ptr<RandomAccessFile>* result) override;
+
+ Status NewWritableFile(const string& fname,
+ std::unique_ptr<WritableFile>* result) override;
+
+ Status NewAppendableFile(const string& fname,
+ std::unique_ptr<WritableFile>* result) override;
+
+ Status NewReadOnlyMemoryRegionFromFile(
+ const string& fname,
+ std::unique_ptr<ReadOnlyMemoryRegion>* result) override;
+
+ bool FileExists(const string& fname) override;
+
+ Status GetChildren(const string& dir, std::vector<string>* result) override;
+
+ Status Stat(const string& fname, FileStatistics* stat) override;
+
+ Status DeleteFile(const string& fname) override;
+
+ Status CreateDir(const string& name) override;
+
+ Status DeleteDir(const string& name) override;
+
+ Status GetFileSize(const string& fname, uint64* size) override;
+
+ Status RenameFile(const string& src, const string& target) override;
+
+ string TranslateName(const string& name) const override {
+ return name;
+ }
+};
+
+Status IOError(const string& context, int err_number);
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CORE_PLATFORM_WINDOWS_WINDOWS_FILE_SYSTEM_H_
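
A minimal sketch (not part of this patch) calling the class directly instead of going through Env; the directory pattern is made up.

#include <iostream>
#include <vector>

#include "tensorflow/core/platform/windows/windows_file_system.h"

int main() {
  tensorflow::WindowsFileSystem fs;
  std::vector<tensorflow::string> children;
  // GetChildren above passes the string straight to FindFirstFile, so the
  // caller supplies the wildcard pattern itself.
  tensorflow::Status s = fs.GetChildren("C:\\temp\\*", &children);
  if (!s.ok()) {
    std::cout << s.ToString() << std::endl;
    return 1;
  }
  for (const auto& name : children) std::cout << name << "\n";
  return 0;
}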
diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h
index 50d86bff32..bab45727a3 100644
--- a/tensorflow/core/public/version.h
+++ b/tensorflow/core/public/version.h
@@ -19,8 +19,8 @@ limitations under the License.
// TensorFlow uses semantic versioning, see http://semver.org/.
#define TF_MAJOR_VERSION 0
-#define TF_MINOR_VERSION 10
-#define TF_PATCH_VERSION 0
+#define TF_MINOR_VERSION 11
+#define TF_PATCH_VERSION 0rc0
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
// "-beta", "-rc", "-rc.1")
diff --git a/tensorflow/examples/udacity/1_notmnist.ipynb b/tensorflow/examples/udacity/1_notmnist.ipynb
index 01c6231ab4..b60d4a9034 100644
--- a/tensorflow/examples/udacity/1_notmnist.ipynb
+++ b/tensorflow/examples/udacity/1_notmnist.ipynb
@@ -57,7 +57,7 @@
"from six.moves.urllib.request import urlretrieve\n",
"from six.moves import cPickle as pickle\n",
"\n",
- "# Config the matlotlib backend as plotting inline in IPython\n",
+ "# Config the matplotlib backend as plotting inline in IPython\n",
"%matplotlib inline"
],
"outputs": [],
diff --git a/tensorflow/examples/udacity/3_regularization.ipynb b/tensorflow/examples/udacity/3_regularization.ipynb
index 5e1d30f54f..2658c00b6c 100644
--- a/tensorflow/examples/udacity/3_regularization.ipynb
+++ b/tensorflow/examples/udacity/3_regularization.ipynb
@@ -180,7 +180,7 @@
"\n",
"def reformat(dataset, labels):\n",
" dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n",
- " # Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...]\n",
+ " # Map 1 to [0.0, 1.0, 0.0 ...], 2 to [0.0, 0.0, 1.0 ...]\n",
" labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n",
" return dataset, labels\n",
"train_dataset, train_labels = reformat(train_dataset, train_labels)\n",
diff --git a/tensorflow/examples/udacity/Dockerfile b/tensorflow/examples/udacity/Dockerfile
index 4af441018b..b7b094621a 100644
--- a/tensorflow/examples/udacity/Dockerfile
+++ b/tensorflow/examples/udacity/Dockerfile
@@ -1,6 +1,6 @@
FROM gcr.io/tensorflow/tensorflow:latest
MAINTAINER Vincent Vanhoucke <vanhoucke@google.com>
-RUN pip install scikit-learn
+RUN pip install scikit-learn pyreadline Pillow
RUN rm -rf /notebooks/*
ADD *.ipynb /notebooks/
WORKDIR /notebooks
diff --git a/tensorflow/g3doc/api_docs/python/contrib.learn.md b/tensorflow/g3doc/api_docs/python/contrib.learn.md
index 4783430710..1d647aea58 100644
--- a/tensorflow/g3doc/api_docs/python/contrib.learn.md
+++ b/tensorflow/g3doc/api_docs/python/contrib.learn.md
@@ -85,7 +85,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
@@ -384,7 +384,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
@@ -1018,7 +1018,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
@@ -1355,7 +1355,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
@@ -1996,7 +1996,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
@@ -2347,7 +2347,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
@@ -2744,7 +2744,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.learn.LinearRegressor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.learn.LinearRegressor.md
index 68b1e3e717..75bb09740b 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.learn.LinearRegressor.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.contrib.learn.LinearRegressor.md
@@ -163,7 +163,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.nn.sampled_softmax_loss.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.nn.sampled_softmax_loss.md
index 6d22f67352..44388cce0c 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.nn.sampled_softmax_loss.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.nn.sampled_softmax_loss.md
@@ -11,8 +11,8 @@ the full softmax loss.
At inference time, you can compute full softmax probabilities with the
expression `tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)`.
-See our [Candidate Sampling Algorithms Reference]
-(../../extras/candidate_sampling.pdf)
+See our
+[Candidate Sampling Algorithms Reference](../../extras/candidate_sampling.pdf)
Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.Graph.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.Graph.md
index 0faea32646..27258ff899 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.Graph.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.Graph.md
@@ -632,8 +632,8 @@ Note that this is unrelated to the
The GraphDef version information of this graph.
-For details on the meaning of each version, see [`GraphDef`]
-(https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
+For details on the meaning of each version, see
+[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
##### Returns:
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.learn.BaseEstimator.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.learn.BaseEstimator.md
index 3057aa7e2b..b90b5845a1 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.learn.BaseEstimator.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.learn.BaseEstimator.md
@@ -70,7 +70,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.learn.Estimator.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.learn.Estimator.md
index 28722cf7e6..6d698594f5 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.learn.Estimator.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.contrib.learn.Estimator.md
@@ -98,7 +98,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.nn.fractional_max_pool.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.nn.fractional_max_pool.md
index ef10897212..8f8fb0237c 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.nn.fractional_max_pool.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.nn.fractional_max_pool.md
@@ -29,8 +29,7 @@ Then, row_pooling_sequence should satisfy:
4. length(row_pooling_sequence) = output_row_length+1
For more details on fractional max pooling, see this paper:
-[Benjamin Graham, Fractional Max-Pooling]
-(http://arxiv.org/abs/1412.6071)
+[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
##### Args:
@@ -47,7 +46,7 @@ For more details on fractional max pooling, see this paper:
* <b>`pseudo_random`</b>: An optional `bool`. Defaults to `False`.
When set to True, generates the pooling sequence in a
pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
- Graham, Fractional Max-Pooling] (http://arxiv.org/abs/1412.6071) for
+ Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
difference between pseudorandom and random.
* <b>`overlapping`</b>: An optional `bool`. Defaults to `False`.
When set to True, it means when pooling, the values at the boundary
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowEstimator.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowEstimator.md
index 4c925ae212..7c220189a3 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowEstimator.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowEstimator.md
@@ -103,7 +103,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowRNNRegressor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowRNNRegressor.md
index a26808e829..bdac5ffbbc 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowRNNRegressor.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.contrib.learn.TensorFlowRNNRegressor.md
@@ -118,7 +118,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.nn.nce_loss.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.nn.nce_loss.md
index 3de6d1ae3f..bfa01aeaba 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.nn.nce_loss.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.nn.nce_loss.md
@@ -2,11 +2,10 @@
Computes and returns the noise-contrastive estimation training loss.
-See [Noise-contrastive estimation: A new estimation principle for
-unnormalized statistical models]
-(http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).
-Also see our [Candidate Sampling Algorithms Reference]
-(../../extras/candidate_sampling.pdf)
+See
+[Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).
+Also see our
+[Candidate Sampling Algorithms Reference](../../extras/candidate_sampling.pdf)
Note: By default this uses a log-uniform (Zipfian) distribution for sampling,
so your labels must be sorted in order of decreasing frequency to achieve
@@ -44,8 +43,7 @@ with an otherwise unused class.
where a sampled class equals one of the target classes. If set to
`True`, this is a "Sampled Logistic" loss instead of NCE, and we are
learning to generate log-odds instead of log probabilities. See
- our [Candidate Sampling Algorithms Reference]
- (../../extras/candidate_sampling.pdf).
+ our [Candidate Sampling Algorithms Reference](../../extras/candidate_sampling.pdf).
Default is False.
* <b>`partition_strategy`</b>: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.parse_example.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.parse_example.md
index 2f2f511196..b52ea6ad3c 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.parse_example.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.parse_example.md
@@ -2,8 +2,8 @@
Parses `Example` protos into a `dict` of tensors.
-Parses a number of serialized [`Example`]
-(https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
+Parses a number of serialized
+[`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
protos given in `serialized`.
`example_names` may contain descriptive names for the corresponding serialized
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.RMSPropOptimizer.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.RMSPropOptimizer.md
index 4fcce3cbff..499b65cc84 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.RMSPropOptimizer.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.RMSPropOptimizer.md
@@ -1,7 +1,6 @@
Optimizer that implements the RMSProp algorithm.
-See the [paper]
-(http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).
+See the [paper](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).
- - -
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.atrous_conv2d.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.atrous_conv2d.md
index c73c76ab50..e5f0d9e567 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.atrous_conv2d.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.atrous_conv2d.md
@@ -32,11 +32,10 @@ Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062).
The same operation is investigated further in [Multi-Scale Context Aggregation
by Dilated Convolutions](http://arxiv.org/abs/1511.07122). Previous works
that effectively use atrous convolution in different ways are, among others,
-[OverFeat: Integrated Recognition, Localization and Detection using
-Convolutional Networks](http://arxiv.org/abs/1312.6229) and [Fast Image
-Scanning with Deep Max-Pooling Convolutional Neural Networks]
-(http://arxiv.org/abs/1302.1700). Atrous convolution is also closely related
-to the so-called noble identities in multi-rate signal processing.
+[OverFeat: Integrated Recognition, Localization and Detection using Convolutional Networks](http://arxiv.org/abs/1312.6229)
+and [Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks](http://arxiv.org/abs/1302.1700).
+Atrous convolution is also closely related to the so-called noble identities in
+multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.fractional_avg_pool.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.fractional_avg_pool.md
index 595e664973..367205ffd6 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.fractional_avg_pool.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.fractional_avg_pool.md
@@ -22,7 +22,7 @@ pooling region.
* <b>`pseudo_random`</b>: An optional `bool`. Defaults to `False`.
When set to True, generates the pooling sequence in a
pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
- Graham, Fractional Max-Pooling] (http://arxiv.org/abs/1412.6071) for
+ Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
difference between pseudorandom and random.
* <b>`overlapping`</b>: An optional `bool`. Defaults to `False`.
When set to True, it means when pooling, the values at the boundary
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.local_response_normalization.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.local_response_normalization.md
index e4477dda41..2738a61f9d 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.local_response_normalization.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.nn.local_response_normalization.md
@@ -11,9 +11,8 @@ each component is divided by the weighted, squared sum of inputs within
sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
output = input / (bias + alpha * sqr_sum) ** beta
-For details, see [Krizhevsky et al., ImageNet classification with deep
-convolutional neural networks (NIPS 2012)]
-(http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
+For details, see
+[Krizhevsky et al., ImageNet classification with deep convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
##### Args:
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.Session.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.Session.md
index 7293205505..d9de06d5d0 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.Session.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.Session.md
@@ -36,8 +36,7 @@ with tf.Session() as sess:
sess.run(...)
```
-The [`ConfigProto`]
-(https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
+The [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
@@ -68,8 +67,8 @@ the session constructor.
* <b>`target`</b>: (Optional.) The execution engine to connect to.
- Defaults to using an in-process engine. See [Distributed Tensorflow]
- (https://www.tensorflow.org/how_tos/distributed/index.html)
+ Defaults to using an in-process engine. See
+ [Distributed Tensorflow](https://www.tensorflow.org/how_tos/distributed/index.html)
for more examples.
* <b>`graph`</b>: (Optional.) The `Graph` to be launched (described above).
* <b>`config`</b>: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.resize_images.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.resize_images.md
index 48d9cf1648..a4b7e8f57a 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.resize_images.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.resize_images.md
@@ -8,12 +8,9 @@ the same as `size`. To avoid distortions see
`method` can be one of:
-* <b>`ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.]
- (https://en.wikipedia.org/wiki/Bilinear_interpolation)
-* <b>`ResizeMethod.NEAREST_NEIGHBOR`</b>: [Nearest neighbor interpolation.]
- (https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
-* <b>`ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.]
- (https://en.wikipedia.org/wiki/Bicubic_interpolation)
+* <b>`ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.](https://en.wikipedia.org/wiki/Bilinear_interpolation)
+* <b>`ResizeMethod.NEAREST_NEIGHBOR`</b>: [Nearest neighbor interpolation.](https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
+* <b>`ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.](https://en.wikipedia.org/wiki/Bicubic_interpolation)
* <b>`ResizeMethod.AREA`</b>: Area interpolation.
##### Args:
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.DNNRegressor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.DNNRegressor.md
index 49dae2b843..4943db998e 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.DNNRegressor.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.DNNRegressor.md
@@ -176,7 +176,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.TensorFlowRNNClassifier.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.TensorFlowRNNClassifier.md
index 07c87bb001..852d9199f9 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.TensorFlowRNNClassifier.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.learn.TensorFlowRNNClassifier.md
@@ -117,7 +117,7 @@ The signature of the input_fn accepted by export is changing to be consistent wi
string key to `Tensor` and targets is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
- key into the features dict returned by `input_fn` that corresponds to
+ key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index f58620bca9..cbf64a32de 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -63,37 +63,37 @@ Then, select the correct binary to install:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py2-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.11.0rc0-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py2-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py3-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.11.0rc0-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py3-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc0-py3-none-any.whl
```
Install TensorFlow:
@@ -159,37 +159,37 @@ Now, install TensorFlow just as you would for a regular Pip installation. First
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.11.0rc0-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.11.0rc0-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc0-py3-none-any.whl
```
Finally install TensorFlow:
@@ -298,37 +298,37 @@ select the correct binary to install:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.11.0rc0-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 7.5 and CuDNN v5. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.11.0rc0-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc0-py3-none-any.whl
```
Finally install TensorFlow:
@@ -396,7 +396,7 @@ code.
code.
We also have tags with `latest` replaced by a released version (e.g.,
-`0.10.0-gpu`).
+`0.11.0-gpu`).
With Docker the installation is as follows:
@@ -784,7 +784,7 @@ $ bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_pack
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
# The name of the .whl file will depend on your platform.
-$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.10.0-py2-none-any.whl
+$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.11.0rc0-py2-none-any.whl
```
## Setting up TensorFlow for Development
diff --git a/tensorflow/g3doc/how_tos/image_retraining/index.md b/tensorflow/g3doc/how_tos/image_retraining/index.md
index 278398f2c0..c6a0467fb3 100644
--- a/tensorflow/g3doc/how_tos/image_retraining/index.md
+++ b/tensorflow/g3doc/how_tos/image_retraining/index.md
@@ -199,8 +199,8 @@ will end up basing its prediction on the background color, not the features of
the object you actually care about. To avoid this, try to take pictures in as
wide a variety of situations as you can, at different times, and with different
devices. If you want to know more about this problem, you can read about the
-classic (and possibly apocryphal) [tank recognition problem]
-(http://www.jefftk.com/p/detecting-tanks).
+classic (and possibly apocryphal)
+[tank recognition problem](http://www.jefftk.com/p/detecting-tanks).
You may also want to think about the categories you use. It might be worth
splitting big categories that cover a lot of different physical forms into
diff --git a/tensorflow/g3doc/how_tos/quantization/index.md b/tensorflow/g3doc/how_tos/quantization/index.md
index 35254edbe1..340d70f93e 100644
--- a/tensorflow/g3doc/how_tos/quantization/index.md
+++ b/tensorflow/g3doc/how_tos/quantization/index.md
@@ -200,10 +200,9 @@ Quantized | Float
The advantages of this format are that it can represent arbitrary magnitudes of
ranges, they don't have to be symmetrical, it can represent signed and unsigned
values, and the linear spread makes doing multiplications straightforward. There
-are alternatives like [Song Han's code books]
-(http://arxiv.org/pdf/1510.00149.pdf) that can use lower bit depths by
-non-linearly distributing the float values across the representation, but these
-tend to be more expensive to calculate on.
+are alternatives like [Song Han's code books](http://arxiv.org/pdf/1510.00149.pdf)
+that can use lower bit depths by non-linearly distributing the float values
+across the representation, but these tend to be more expensive to calculate on.
The advantage of having a strong and clear definition of the quantized format is
that it's always possible to convert back and forth from float for operations
@@ -226,11 +225,11 @@ results from 8-bit inputs.
We've found that we can get extremely good performance on mobile and embedded
devices by using eight-bit arithmetic rather than floating-point. You can see
-the framework we use to optimize matrix multiplications at [gemmlowp]
-(https://github.com/google/gemmlowp). We still need to apply all the lessons
-we've learned to the TensorFlow ops to get maximum performance on mobile, but
-we're actively working on that. Right now, this quantized implementation is a
-reasonably fast and accurate reference implementation that we're hoping will
-enable wider support for our eight-bit models on a wider variety of devices. We
-also hope that this demonstration will encourage the community to explore what's
-possible with low-precision neural networks.
+the framework we use to optimize matrix multiplications at
+[gemmlowp](https://github.com/google/gemmlowp). We still need to apply all the
+lessons we've learned to the TensorFlow ops to get maximum performance on
+mobile, but we're actively working on that. Right now, this quantized
+implementation is a reasonably fast and accurate reference implementation that
+we're hoping will enable wider support for our eight-bit models on a wider
+variety of devices. We also hope that this demonstration will encourage the
+community to explore what's possible with low-precision neural networks.
diff --git a/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md b/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md
index 2fad2f255b..9132132879 100644
--- a/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md
+++ b/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md
@@ -24,8 +24,7 @@ lifecycle for summary data within TensorBoard.
First, create the TensorFlow graph that you'd like to collect summary
data from, and decide which nodes you would like to annotate with
-[summary operations]
-(../../api_docs/python/train.md#summary-operations).
+[summary operations](../../api_docs/python/train.md#summary-operations).
For example, suppose you are training a convolutional neural network for
recognizing MNIST digits. You'd like to record how the learning rate
@@ -42,8 +41,7 @@ this data by attaching
the gradient outputs and to the variable that holds your weights, respectively.
For details on all of the summary operations available, check out the docs on
-[summary operations]
-(../../api_docs/python/train.md#summary-operations).
+[summary operations](../../api_docs/python/train.md#summary-operations).
Operations in TensorFlow don't do anything until you run them, or an op that
depends on their output. And the summary nodes that we've just created are
@@ -72,12 +70,13 @@ every single step, and record a ton of training data. That's likely to be more
data than you need, though. Instead, consider running the merged summary op
every `n` steps.
-The code example below is a modification of the [simple MNIST tutorial]
-(http://tensorflow.org/tutorials/mnist/beginners/index.md), in which we have
-added some summary ops, and run them every ten steps. If you run this and then
-launch `tensorboard --logdir=/tmp/mnist_logs`, you'll be able to visualize
-statistics, such as how the weights or accuracy varied during training.
-The code below is an excerpt; full source is [here](https://www.tensorflow.org/code/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py).
+The code example below is a modification of the
+[simple MNIST tutorial](http://tensorflow.org/tutorials/mnist/beginners/index.md),
+in which we have added some summary ops, and run them every ten steps. If you
+run this and then launch `tensorboard --logdir=/tmp/mnist_logs`, you'll be able
+to visualize statistics, such as how the weights or accuracy varied during
+training. The code below is an excerpt; full source is
+[here](https://www.tensorflow.org/code/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py).
```python
def variable_summaries(var, name):
diff --git a/tensorflow/g3doc/how_tos/tool_developers/index.md b/tensorflow/g3doc/how_tos/tool_developers/index.md
index 271e38eb8a..cf0116e07e 100644
--- a/tensorflow/g3doc/how_tos/tool_developers/index.md
+++ b/tensorflow/g3doc/how_tos/tool_developers/index.md
@@ -11,11 +11,11 @@ those kind of tools.
## Protocol Buffers
-All of TensorFlow's file formats are based on [Protocol Buffers]
-(https://developers.google.com/protocol-buffers/?hl=en), so to start
-it's worth getting familiar with how they work. The summary is that you define
-data structures in text files, and the protobuf tools generate classes in C,
-Python, and other languages that can load, save, and access the data in a
+All of TensorFlow's file formats are based on
+[Protocol Buffers](https://developers.google.com/protocol-buffers/?hl=en), so to
+start it's worth getting familiar with how they work. The summary is that you
+define data structures in text files, and the protobuf tools generate classes in
+C, Python, and other languages that can load, save, and access the data in a
friendly way. We often refer to Protocol Buffers as protobufs, and I'll use
that convention in this guide.
diff --git a/tensorflow/g3doc/resources/faq.md b/tensorflow/g3doc/resources/faq.md
index 6dc208f8dd..3f84b766d3 100644
--- a/tensorflow/g3doc/resources/faq.md
+++ b/tensorflow/g3doc/resources/faq.md
@@ -261,8 +261,8 @@ these summaries to a log directory. Then, start TensorBoard using
python tensorflow/tensorboard/tensorboard.py --logdir=path/to/log-directory
-For more details, see the [Summaries and TensorBoard tutorial]
-(../how_tos/summaries_and_tensorboard/index.md).
+For more details, see the
+[Summaries and TensorBoard tutorial](../how_tos/summaries_and_tensorboard/index.md).
#### Every time I launch TensorBoard, I get a network security popup!
@@ -279,9 +279,9 @@ See also the how-to documentation for
There are two main options for dealing with data in a custom format.
The easier option is to write parsing code in Python that transforms the data
-into a numpy array, then feed a [`tf.placeholder()`]
-(../api_docs/python/io_ops.md#placeholder) a tensor with that data. See the
-documentation on
+into a numpy array, then feed a
+[`tf.placeholder()`](../api_docs/python/io_ops.md#placeholder) a tensor with
+that data. See the documentation on
[using placeholders for input](../how_tos/reading_data/index.md#feeding) for
more details. This approach is easy to get up and running, but the parsing can
be a performance bottleneck.
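
As a rough sketch of the placeholder-feeding option described above (the parser stand-in, tensor shapes, and graph here are hypothetical, not taken from the FAQ):

```python
# Hypothetical sketch: parse data in Python, then feed the result to a placeholder.
import numpy as np
import tensorflow as tf

batch = np.random.rand(32, 10).astype(np.float32)  # stand-in for your custom parser

x = tf.placeholder(tf.float32, shape=[None, 10])
doubled = x * 2.0

with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={x: batch}))
```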
diff --git a/tensorflow/g3doc/resources/xla_prerelease.md b/tensorflow/g3doc/resources/xla_prerelease.md
index 9852668e42..932b5a2945 100644
--- a/tensorflow/g3doc/resources/xla_prerelease.md
+++ b/tensorflow/g3doc/resources/xla_prerelease.md
@@ -57,7 +57,7 @@ operations are high level, e.g., arbitrary sized vector and matrix
operations. This makes the compiler easy to target from TensorFlow, and
preserves enough information to allow sophisticated scheduling and optimization.
The following tutorial provides introductory information about XLA. More details
-follow in the [Operation Semantics] (#operation_semantics) section.
+follow in the [Operation Semantics](#operation_semantics) section.
It is important to note that the XLA framework is not set in stone. In
particular, while it is unlikely that the semantics of existing operations will
@@ -127,8 +127,8 @@ an `xla::Client` object.
The `ComputationBuilder` class provides a convenient programming interface to
construct XLA computations. The semantics of XLA operations with links
-to `ComputationBuilder` methods are documented in [Operation Semantics]
-(#operation_semantics).
+to `ComputationBuilder` methods are documented in
+[Operation Semantics](#operation_semantics).
Here is the part that JIT-compiles the graph (step 2):
@@ -688,11 +688,9 @@ dimension of size `r`.
Notes:
1. This follows the typical definition of a dot operator, as in other numeric
- libraries such as [numpy]
- (http://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html).
+ libraries such as [numpy](http://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html).
2. There is currently no support for the general tensor dot operator
- [numpy.tensordot]
- (http://docs.scipy.org/doc/numpy/reference/generated/numpy.tensordot.html#numpy.tensordot).
+ [numpy.tensordot](http://docs.scipy.org/doc/numpy/reference/generated/numpy.tensordot.html#numpy.tensordot).
### Element-wise binary arithmetic operations
@@ -1355,10 +1353,10 @@ shape of the output array. The array `pred` must have the same dimensionality as
For each element `P` of `pred`, the corresponding element of the output array is
taken from `on_true` if the value of `P` is `true`, and from `on_false` if the
-value of `P` is `false`. As a restricted form of [broadcasting]
-(#broadcasting_semantics), `pred` can be a scalar of type `PRED`. In this case,
-the output array is taken wholly from `on_true` if `pred` is `true`, and from
-`on_false` if `pred` is `false`.
+value of `P` is `false`. As a restricted form of
+[broadcasting](#broadcasting_semantics), `pred` can be a scalar of type `PRED`.
+In this case, the output array is taken wholly from `on_true` if `pred` is
+`true`, and from `on_false` if `pred` is `false`.
Example with non-scalar `pred`:
@@ -1477,8 +1475,8 @@ let s: s32 = 5;
let t: (f32[10], s32) = tuple(v, s);
```
-Tuples can be deconstructed (accessed) via the [`GetTupleElement`]
-(#gettupleelement) operation.
+Tuples can be deconstructed (accessed) via the
+[`GetTupleElement`](#gettupleelement) operation.
### While
@@ -1553,8 +1551,8 @@ by replicating it over rows to get:
|1 2 3| + |7 8 9| = |8 10 12|
|4 5 6| |7 8 9| |11 13 15|
-In Numpy, this is called [broadcasting]
-(http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
+In Numpy, this is called
+[broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
### Principles
@@ -1690,8 +1688,7 @@ Examples:
A special case arises, and is also supported, where each of the input arrays has
a degenerate dimension at a different index. In this case, we get an "outer
operation": (2,1) and (1,3) broadcast to (2,3). For more examples, consult the
-[Numpy documentation on broadcasting]
-(http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
+[Numpy documentation on broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
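
A quick numpy check of the "outer operation" example above, where shapes (2,1) and (1,3) broadcast to (2,3); the snippet is purely illustrative and not part of the XLA docs:

```python
import numpy as np

a = np.arange(2).reshape(2, 1)   # shape (2, 1)
b = np.arange(3).reshape(1, 3)   # shape (1, 3)
print((a + b).shape)             # (2, 3) -- the "outer operation" case
```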
### Broadcast composition
diff --git a/tensorflow/g3doc/tutorials/deep_cnn/index.md b/tensorflow/g3doc/tutorials/deep_cnn/index.md
index d1ef5f6405..89ba53ac6f 100644
--- a/tensorflow/g3doc/tutorials/deep_cnn/index.md
+++ b/tensorflow/g3doc/tutorials/deep_cnn/index.md
@@ -32,17 +32,15 @@ new ideas and experimenting with new techniques.
The CIFAR-10 tutorial demonstrates several important constructs for
designing larger and more sophisticated models in TensorFlow:
-* Core mathematical components including [convolution](
-../../api_docs/python/nn.md#conv2d) ([wiki](
-https://en.wikipedia.org/wiki/Convolution)), [rectified linear activations](
-../../api_docs/python/nn.md#relu) ([wiki](
-https://en.wikipedia.org/wiki/Rectifier_(neural_networks))), [max pooling](
-../../api_docs/python/nn.md#max_pool) ([wiki](
-https://en.wikipedia.org/wiki/Convolutional_neural_network#Pooling_layer))
-and [local response normalization](
-../../api_docs/python/nn.md#local_response_normalization)
-(Chapter 3.3 in [AlexNet paper](
-http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)).
+* Core mathematical components including [convolution](../../api_docs/python/nn.md#conv2d)
+([wiki](https://en.wikipedia.org/wiki/Convolution)),
+[rectified linear activations](../../api_docs/python/nn.md#relu)
+([wiki](https://en.wikipedia.org/wiki/Rectifier_(neural_networks))),
+[max pooling](../../api_docs/python/nn.md#max_pool)
+([wiki](https://en.wikipedia.org/wiki/Convolutional_neural_network#Pooling_layer))
+and [local response normalization](../../api_docs/python/nn.md#local_response_normalization)
+(Chapter 3.3 in
+[AlexNet paper](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)).
* [Visualization](../../how_tos/summaries_and_tensorboard/index.md)
of network activities during training, including input images,
losses and distributions of activations and gradients.
@@ -57,7 +55,7 @@ that systematically decrements over time.
for input
data to isolate the model from disk latency and expensive image pre-processing.
-We also provide a [multi-GPU version](#training-a-model-using-multiple-gpu-cards)
+We also provide a [multi-GPU version](#training-a-model-using-multiple-gpu-cards)
of the model which demonstrates:
* Configuring a model to train across multiple GPU cards in parallel.
@@ -111,7 +109,7 @@ adds operations that perform inference, i.e. classification, on supplied images.
add operations that compute the loss,
gradients, variable updates and visualization summaries.
-### Model Inputs
+### Model Inputs
The input part of the model is built by the functions `inputs()` and
`distorted_inputs()` which read images from the CIFAR-10 binary data files.
@@ -149,7 +147,7 @@ processing time. To prevent these operations from slowing down training, we run
them inside 16 separate threads which continuously fill a TensorFlow
[queue](../../api_docs/python/io_ops.md#shuffle_batch).
-### Model Prediction
+### Model Prediction
The prediction part of the model is constructed by the `inference()` function
which adds operations to compute the *logits* of the predictions. That part of
@@ -174,8 +172,8 @@ Here is a graph generated from TensorBoard describing the inference operation:
</div>
> **EXERCISE**: The output of `inference` are un-normalized logits. Try editing
-the network architecture to return normalized predictions using [`tf.nn.softmax()`]
-(../../api_docs/python/nn.md#softmax).
+the network architecture to return normalized predictions using
+[`tf.nn.softmax()`](../../api_docs/python/nn.md#softmax).
The `inputs()` and `inference()` functions provide all the components
necessary to perform evaluation on a model. We now shift our focus towards
@@ -188,7 +186,7 @@ layers of Alex's original model are locally connected and not fully connected.
Try editing the architecture to exactly reproduce the locally connected
architecture in the top layer.
-### Model Training
+### Model Training
The usual method for training a network to perform N-way classification is
[multinomial logistic regression](https://en.wikipedia.org/wiki/Multinomial_logistic_regression),
@@ -307,7 +305,7 @@ values. See how the scripts use
[`ExponentialMovingAverage`](../../api_docs/python/train.md#ExponentialMovingAverage)
for this purpose.
-## Evaluating a Model
+## Evaluating a Model
Let us now evaluate how well the trained model performs on a hold-out data set.
The model is evaluated by the script `cifar10_eval.py`. It constructs the model
diff --git a/tensorflow/g3doc/tutorials/image_recognition/index.md b/tensorflow/g3doc/tutorials/image_recognition/index.md
index 62b802c022..990f906a44 100644
--- a/tensorflow/g3doc/tutorials/image_recognition/index.md
+++ b/tensorflow/g3doc/tutorials/image_recognition/index.md
@@ -160,13 +160,13 @@ are between 0 and 255 to the floating point values that the graph operates on.
We control the scaling with the `input_mean` and `input_std` flags: we first subtract
`input_mean` from each pixel value, then divide it by `input_std`.
-These values probably look somewhat magical, but they are just defined by the
-original model author based on what he/she wanted to use as input images for
+These values probably look somewhat magical, but they are just defined by the
+original model author based on what he/she wanted to use as input images for
training. If you have a graph that you've trained yourself, you'll just need
to adjust the values to match whatever you used during your training process.
-You can see how they're applied to an image in the [`ReadTensorFromImageFile()`]
-(https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/label_image/main.cc#L88)
+You can see how they're applied to an image in the
+[`ReadTensorFromImageFile()`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/label_image/main.cc#L88)
function.
```C++
@@ -255,8 +255,8 @@ definition with the `ToGraphDef()` function.
TF_RETURN_IF_ERROR(session->Run({}, {output_name}, {}, out_tensors));
return Status::OK();
```
-Then we create a [`Session`](http://www.tensorflow.org/versions/master/api_docs/cc/ClassSession.html#class-tensorflow-session)
-object, which is the interface to actually running the graph, and run it,
+Then we create a [`Session`](http://www.tensorflow.org/versions/master/api_docs/cc/ClassSession.html#class-tensorflow-session)
+object, which is the interface to actually running the graph, and run it,
specifying which node we want to get the output from, and where to put the
output data.
@@ -434,7 +434,7 @@ TensorFlow within your own products.
> **EXERCISE**: Transfer learning is the idea that, if you know how to solve a task well, you
should be able to transfer some of that understanding to solving related
problems. One way to perform transfer learning is to remove the final
-classification layer of the network and extract
+classification layer of the network and extract
the [next-to-last layer of the CNN](http://arxiv.org/abs/1310.1531), in this case a 2048 dimensional vector.
There's a guide to doing this [in the how-to section](../../how_tos/image_retraining/index.html).
diff --git a/tensorflow/g3doc/tutorials/index.md b/tensorflow/g3doc/tutorials/index.md
index d21b7bc7dc..c191dc8851 100644
--- a/tensorflow/g3doc/tutorials/index.md
+++ b/tensorflow/g3doc/tutorials/index.md
@@ -112,7 +112,7 @@ Building on the Inception recognition model, we will release a TensorFlow
version of the [Deep Dream](https://github.com/google/deepdream) neural network
visual hallucination software.
-[View Tutorial](https://www.tensorflow.org/code/tensorflow/examples/tutorials/deepdream/deepdream.ipynb)
+[View Tutorial](https://nbviewer.jupyter.org/github/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb)
## Language and Sequence Processing
diff --git a/tensorflow/g3doc/tutorials/input_fn/index.md b/tensorflow/g3doc/tutorials/input_fn/index.md
index bec006f075..0e8b91caf2 100644
--- a/tensorflow/g3doc/tutorials/input_fn/index.md
+++ b/tensorflow/g3doc/tutorials/input_fn/index.md
@@ -104,8 +104,8 @@ This corresponds to the following dense tensor:
[0, 0, 0, 0, 0.5]]
```
-For more on `SparseTensor`, see the [TensorFlow API documentation]
-(../../api_docs/python/sparse_ops.md#SparseTensor).
+For more on `SparseTensor`, see the
+[TensorFlow API documentation](../../api_docs/python/sparse_ops.md#SparseTensor).
### Passing input_fn Data to Your Model
@@ -153,9 +153,9 @@ classifier.fit(input_fn=functools.partial(my_input_function,
data_set=training_set), steps=2000)
```
-A third option is to wrap your input_fn invocation in a [`lambda`]
-(https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions) and
-pass it to the `input_fn` parameter:
+A third option is to wrap your input_fn invocation in a
+[`lambda`](https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions)
+and pass it to the `input_fn` parameter:
```python
classifier.fit(input_fn=lambda: my_input_fn(training_set), steps=2000)
@@ -181,8 +181,8 @@ Set](https://archive.ics.uci.edu/ml/datasets/Housing) and use it to feed data to
a neural network regressor for predicting median house values.
The [Boston CSV data sets](#setup) you'll use to train your neural network
-contain the following [feature data]
-(https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.names)
+contain the following
+[feature data](https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.names)
for Boston suburbs:
Feature | Description
@@ -202,10 +202,10 @@ owner-occupied residences in thousands of dollars.
## Setup {#setup}
-Download the following data sets: [boston_train.csv]
-(http://download.tensorflow.org/data/boston_train.csv), [boston_test.csv]
-(http://download.tensorflow.org/data/boston_test.csv), and [boston_predict.csv]
-(http://download.tensorflow.org/data/boston_predict.csv).
+Download the following data sets:
+[boston_train.csv](http://download.tensorflow.org/data/boston_train.csv),
+[boston_test.csv](http://download.tensorflow.org/data/boston_test.csv), and
+[boston_predict.csv](http://download.tensorflow.org/data/boston_predict.csv).
The following sections provide a step-by-step walkthrough of how to create an
input function, feed these data sets into a neural network regressor, train and
@@ -230,9 +230,9 @@ tf.logging.set_verbosity(tf.logging.INFO)
Define the column names for the data set in `COLUMNS`. To distinguish features
from the label, also define `FEATURES` and `LABEL`. Then read the three CSVs
-([train](http://download.tensorflow.org/data/boston_train.csv), [test]
-(http://download.tensorflow.org/data/boston_test.csv), and [predict]
-(http://download.tensorflow.org/data/boston_predict.csv)) into _pandas_
+([train](http://download.tensorflow.org/data/boston_train.csv),
+[test](http://download.tensorflow.org/data/boston_test.csv), and
+[predict](http://download.tensorflow.org/data/boston_predict.csv)) into _pandas_
`DataFrame`s:
```python
@@ -262,10 +262,10 @@ feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
```
-NOTE: For a more in-depth overview of feature columns, see [this introduction]
-(../linear/overview.md#feature-columns-and-transformations), and for an example
-that illustrates how to define `FeatureColumns` for categorical data, see the
-[Linear Model Tutorial](../wide/index.md).
+NOTE: For a more in-depth overview of feature columns, see
+[this introduction](../linear/overview.md#feature-columns-and-transformations),
+and for an example that illustrates how to define `FeatureColumns` for
+categorical data, see the [Linear Model Tutorial](../wide/index.md).
Now, instantiate a `DNNRegressor` for the neural network regression model.
You'll need to provide two arguments here: `hidden_units`, a hyperparameter
diff --git a/tensorflow/g3doc/tutorials/linear/overview.md b/tensorflow/g3doc/tutorials/linear/overview.md
index 1fc4f67bce..b239ec2062 100644
--- a/tensorflow/g3doc/tutorials/linear/overview.md
+++ b/tensorflow/g3doc/tutorials/linear/overview.md
@@ -158,7 +158,7 @@ transformation lets you use continuous features in feature crosses, or learn
cases where specific value ranges have particular importance.
Bucketization divides the range of possible values into subranges called
-buckets:
+buckets:
```python
age_buckets = tf.contrib.layers.bucketized_column(
@@ -178,9 +178,8 @@ The input function must return a dictionary of tensors. Each key corresponds to
the name of a `FeatureColumn`. Each key's value is a tensor containing the
values of that feature for all data instances. See
[Building Input Functions with tf.contrib.learn](../input_fn/index.md) for a
-more comprehensive look at input functions, and `input_fn` in the [linear
-models tutorial code]
-(https://www.tensorflow.org/code/tensorflow/examples/learn/wide_n_deep_tutorial.py)
+more comprehensive look at input functions, and `input_fn` in the
+[linear models tutorial code](https://www.tensorflow.org/code/tensorflow/examples/learn/wide_n_deep_tutorial.py)
for an example implementation of an input function.
The input function is passed to the `fit()` and `evaluate()` calls that
diff --git a/tensorflow/g3doc/tutorials/mnist/beginners/index.md b/tensorflow/g3doc/tutorials/mnist/beginners/index.md
index 7de6981ba1..bd0a190099 100644
--- a/tensorflow/g3doc/tutorials/mnist/beginners/index.md
+++ b/tensorflow/g3doc/tutorials/mnist/beginners/index.md
@@ -368,9 +368,8 @@ In this case, we ask TensorFlow to minimize `cross_entropy` using the
with a learning rate of 0.5. Gradient descent is a simple procedure, where
TensorFlow simply shifts each variable a little bit in the direction that
reduces the cost. But TensorFlow also provides
-[many other optimization algorithms]
-(../../../api_docs/python/train.md#optimizers): using one is as simple as
-tweaking one line.
+[many other optimization algorithms](../../../api_docs/python/train.md#optimizers):
+using one is as simple as tweaking one line.
What TensorFlow actually does here, behind the scenes, is to add new operations
to your graph which implement backpropagation and gradient descent. Then it
diff --git a/tensorflow/g3doc/tutorials/mnist/pros/index.md b/tensorflow/g3doc/tutorials/mnist/pros/index.md
index d77e70c895..72792c6fbe 100644
--- a/tensorflow/g3doc/tutorials/mnist/pros/index.md
+++ b/tensorflow/g3doc/tutorials/mnist/pros/index.md
@@ -186,10 +186,9 @@ Now that we have defined our model and training loss function, it is
straightforward to train using TensorFlow. Because TensorFlow knows the entire
computation graph, it can use automatic differentiation to find the gradients of
the loss with respect to each of the variables. TensorFlow has a variety of
-[built-in optimization algorithms]
-(../../../api_docs/python/train.md#optimizers). For this example, we will use
-steepest gradient descent, with a step length of 0.5, to descend the cross
-entropy.
+[built-in optimization algorithms](../../../api_docs/python/train.md#optimizers).
+For this example, we will use steepest gradient descent, with a step length of
+0.5, to descend the cross entropy.
```python
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
@@ -380,7 +379,7 @@ y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
How well does this model do? To train and evaluate it we will use code that is
nearly identical to that for the simple one layer SoftMax network above.
-The differences are that:
+The differences are that:
- We will replace the steepest gradient descent optimizer with the more
sophisticated ADAM optimizer.
diff --git a/tensorflow/g3doc/tutorials/monitors/index.md b/tensorflow/g3doc/tutorials/monitors/index.md
index 5e2431eac4..6df5e9de6c 100644
--- a/tensorflow/g3doc/tutorials/monitors/index.md
+++ b/tensorflow/g3doc/tutorials/monitors/index.md
@@ -57,9 +57,9 @@ y = classifier.predict(new_samples)
print('Predictions: {}'.format(str(y)))
```
-Copy the above code into a file, and download the corresponding [training]
-(http://download.tensorflow.org/data/iris_training.csv) and [test]
-(http://download.tensorflow.org/data/iris_test.csv) data sets to the same
+Copy the above code into a file, and download the corresponding
+[training](http://download.tensorflow.org/data/iris_training.csv) and
+[test](http://download.tensorflow.org/data/iris_test.csv) data sets to the same
directory.
In the following sections, you'll progressively make updates to the above code
@@ -90,11 +90,12 @@ appropriate.
One way to address this problem would be to split model training into multiple
`fit` calls with smaller numbers of steps in order to evaluate accuracy more
progressively. However, this is not recommended practice, as it greatly slows down model
-training. Fortunately, tf.contrib.learn offers another solution: a [Monitor API]
-(../../api_docs/python/contrib.learn.monitors.md) designed to help you log metrics
-and evaluate your model while training is in progress. In the following sections,
-you'll learn how to enable logging in TensorFlow, set up a ValidationMonitor to do
-streaming evaluations, and visualize your metrics using TensorBoard.
+training. Fortunately, tf.contrib.learn offers another solution: a
+[Monitor API](../../api_docs/python/contrib.learn.monitors.md) designed to help
+you log metrics and evaluate your model while training is in progress. In the
+following sections, you'll learn how to enable logging in TensorFlow, set up a
+ValidationMonitor to do streaming evaluations, and visualize your metrics using
+TensorBoard.
## Enabling Logging with TensorFlow
@@ -178,8 +179,7 @@ Place this code right before the line instantiating the `classifier`.
`ValidationMonitor`s rely on saved checkpoints to perform evaluation operations,
so you'll want to modify instantiation of the `classifier` to add a
-[`RunConfig`]
-(../../api_docs/python/contrib.learn.md#RunConfig)
+[`RunConfig`](../../api_docs/python/contrib.learn.md#RunConfig)
that includes `save_checkpoints_secs`, which specifies how many seconds should
elapse between checkpoint saves during training. Because the Iris data set is
quite small, and thus trains quickly, it makes sense to set
@@ -270,8 +270,8 @@ INFO:tensorflow:Validation (step 1600): recall = 1.0, accuracy = 0.966667, globa
Note that in the above log output, by step 150, the model has already achieved
precision and recall rates of 1.0. This raises the question as to whether model
-training could benefit from [early stopping]
-(https://en.wikipedia.org/wiki/Early_stopping).
+training could benefit from
+[early stopping](https://en.wikipedia.org/wiki/Early_stopping).
In addition to logging eval metrics, `ValidationMonitor`s make it easy to
implement early stopping when specified conditions are met, via three params:
@@ -347,9 +347,8 @@ Then load the provided URL (here, `http://0.0.0.0:6006`) in your browser. If you
click on the accuracy field, you'll see an image like the following, which shows
accuracy plotted against step count:
-![Accuracy over step count in TensorBoard]
-(../../images/validation_monitor_tensorboard_accuracy.png "Accuracy over step count in TensorBoard")
+![Accuracy over step count in TensorBoard](../../images/validation_monitor_tensorboard_accuracy.png "Accuracy over step count in TensorBoard")
-For more on using TensorBoard, see [TensorBoard: Visualizing Learning]
-(../../how_tos/summaries_and_tensorboard/index.md)
+For more on using TensorBoard, see
+[TensorBoard: Visualizing Learning](../../how_tos/summaries_and_tensorboard/index.md)
and [TensorBoard: Graph Visualization](../../how_tos/graph_viz/index.md).
diff --git a/tensorflow/g3doc/tutorials/recurrent/index.md b/tensorflow/g3doc/tutorials/recurrent/index.md
index 3ab8061981..e22976100f 100644
--- a/tensorflow/g3doc/tutorials/recurrent/index.md
+++ b/tensorflow/g3doc/tutorials/recurrent/index.md
@@ -2,8 +2,7 @@
## Introduction
-Take a look at [this great article]
-(http://colah.github.io/posts/2015-08-Understanding-LSTMs/)
+Take a look at [this great article](http://colah.github.io/posts/2015-08-Understanding-LSTMs/)
for an introduction to recurrent neural networks and LSTMs in particular.
## Language Modeling
@@ -18,10 +17,10 @@ models, whilst being small and relatively fast to train.
Language modeling is key to many interesting problems such as speech
recognition, machine translation, or image captioning. It is also fun, too --
-take a look [here] (http://karpathy.github.io/2015/05/21/rnn-effectiveness/).
+take a look [here](http://karpathy.github.io/2015/05/21/rnn-effectiveness/).
For the purpose of this tutorial, we will reproduce the results from
-[Zaremba et al., 2014] (http://arxiv.org/abs/1409.2329)
+[Zaremba et al., 2014](http://arxiv.org/abs/1409.2329)
([pdf](http://arxiv.org/pdf/1409.2329.pdf)), which achieves very good results
on the PTB dataset.
diff --git a/tensorflow/g3doc/tutorials/seq2seq/index.md b/tensorflow/g3doc/tutorials/seq2seq/index.md
index 10f39553fd..7e8c3cb929 100644
--- a/tensorflow/g3doc/tutorials/seq2seq/index.md
+++ b/tensorflow/g3doc/tutorials/seq2seq/index.md
@@ -25,14 +25,14 @@ prepare it for training and train. It takes about 20GB of disk space,
and a while to download and prepare (see [later](#lets-run-it) for details),
so you can start and leave it running while reading this tutorial.
-This tutorial references the following files from `models/rnn`.
+This tutorial references the following files.
File | What's in it?
--- | ---
-`seq2seq.py` | Library for building sequence-to-sequence models.
-`translate/seq2seq_model.py` | Neural translation sequence-to-sequence model.
-`translate/data_utils.py` | Helper functions for preparing translation data.
-`translate/translate.py` | Binary that trains and runs the translation model.
+`python/ops/seq2seq.py` | Library for building sequence-to-sequence models.
+`models/rnn/translate/seq2seq_model.py` | Neural translation sequence-to-sequence model.
+`models/rnn/translate/data_utils.py` | Helper functions for preparing translation data.
+`models/rnn/translate/translate.py` | Binary that trains and runs the translation model.
## Sequence-to-Sequence Basics
@@ -74,7 +74,7 @@ attention mechanism in the decoder looks like this.
As you can see above, there are many different sequence-to-sequence
models. Each of these models can use different RNN cells, but all
of them accept encoder inputs and decoder inputs. This motivates
-the interfaces in the TensorFlow seq2seq library (`models/rnn/seq2seq.py`).
+the interfaces in the TensorFlow seq2seq library (`python/ops/seq2seq.py`).
The basic RNN encoder-decoder sequence-to-sequence model works as follows.
```python
@@ -151,7 +151,7 @@ have similar interfaces, so we will not describe them in detail. We will use
## Neural Translation Model
While the core of the sequence-to-sequence model is constructed by
-the functions in `models/rnn/seq2seq.py`, there are still a few tricks
+the functions in `python/ops/seq2seq.py`, there are still a few tricks
that are worth mentioning that are used in our translation model in
`models/rnn/translate/seq2seq_model.py`.
diff --git a/tensorflow/g3doc/tutorials/tflearn/index.md b/tensorflow/g3doc/tutorials/tflearn/index.md
index f7fc2df74e..a7cebaaba8 100644
--- a/tensorflow/g3doc/tutorials/tflearn/index.md
+++ b/tensorflow/g3doc/tutorials/tflearn/index.md
@@ -2,22 +2,21 @@
TensorFlow’s high-level machine learning API (tf.contrib.learn) makes it easy to
configure, train, and evaluate a variety of machine learning models. In this
-tutorial, you’ll use tf.contrib.learn to construct a [neural network]
-(https://en.wikipedia.org/wiki/Artificial_neural_network) classifier and train
-it on the [Iris data set](https://en.wikipedia.org/wiki/Iris_flower_data_set) to
-predict flower species based on sepal/petal geometry. You'll write code to
+tutorial, you’ll use tf.contrib.learn to construct a
+[neural network](https://en.wikipedia.org/wiki/Artificial_neural_network)
+classifier and train it on the [Iris data set](https://en.wikipedia.org/wiki/Iris_flower_data_set)
+to predict flower species based on sepal/petal geometry. You'll write code to
perform the following five steps:
1. Load CSVs containing Iris training/test data into a TensorFlow `Dataset`
-2. Construct a [neural network classifier]
- (../../api_docs/python/contrib.learn.md#DNNClassifier)
+2. Construct a [neural network classifier](../../api_docs/python/contrib.learn.md#DNNClassifier)
3. Fit the model using the training data
4. Evaluate the accuracy of the model
5. Classify new samples
-NOTE: Remember to [install TensorFlow on your machine]
-(../../get_started/os_setup.md#download-and-setup) before getting started with
-this tutorial.
+NOTE: Remember to
+[install TensorFlow on your machine](../../get_started/os_setup.md#download-and-setup)
+before getting started with this tutorial.
## Complete Neural Network Source Code
@@ -85,11 +84,11 @@ and [*Iris virginica*](https://www.flickr.com/photos/33397993@N05/3352169862)
(by [Frank Mayfield](https://www.flickr.com/photos/33397993@N05), CC BY-SA
2.0).**
-Each row contains the following data for each flower sample: [sepal]
-(https://en.wikipedia.org/wiki/Sepal) length, sepal width, [petal]
-(https://en.wikipedia.org/wiki/Petal) length, petal width, and flower species.
-Flower species are represented as integers, with 0 denoting *Iris setosa*, 1
-denoting *Iris versicolor*, and 2 denoting *Iris virginica*.
+Each row contains the following data for each flower sample:
+[sepal](https://en.wikipedia.org/wiki/Sepal) length, sepal width,
+[petal](https://en.wikipedia.org/wiki/Petal) length, petal width, and flower
+species. Flower species are represented as integers, with 0 denoting
+*Iris setosa*, 1 denoting *Iris versicolor*, and 2 denoting *Iris virginica*.
Sepal Length | Sepal Width | Petal Length | Petal Width | Species
:----------- | :---------- | :----------- | :---------- | :-------
@@ -108,10 +107,8 @@ Sepal Length | Sepal Width | Petal Length | Petal Width | Species
For this tutorial, the Iris data has been randomized and split into two separate
CSVs:
-* A training set of 120 samples ([iris_training.csv]
- (http://download.tensorflow.org/data/iris_training.csv))
-* A test set of 30 samples ([iris_test.csv]
- (http://download.tensorflow.org/data/iris_test.csv)).
+* A training set of 120 samples ([iris_training.csv](http://download.tensorflow.org/data/iris_training.csv))
+* A test set of 30 samples ([iris_test.csv](http://download.tensorflow.org/data/iris_test.csv)).
Place these files in the same directory as your Python code.
@@ -126,15 +123,14 @@ import tensorflow as tf
import numpy as np
```
-Next, load the training and test sets into `Dataset`s using the [`load_csv()`]
-(https://www.tensorflow.org/code/tensorflow/contrib/learn/python/learn/datasets/base.py)
+Next, load the training and test sets into `Dataset`s using the
+[`load_csv()`](https://www.tensorflow.org/code/tensorflow/contrib/learn/python/learn/datasets/base.py)
method in `learn.datasets.base`. The `load_csv()` method takes two required
arguments:
* `filename`, which takes the filepath to the CSV file
-* `target_dtype`, which takes the [`numpy` datatype]
- (http://docs.scipy.org/doc/numpy/user/basics.types.html) of the dataset's
- target value.
+* `target_dtype`, which takes the [`numpy` datatype](http://docs.scipy.org/doc/numpy/user/basics.types.html)
+ of the dataset's target value.
Here, the target (the value you're training the model to predict) is flower
species, which is an integer from 0&ndash;2, so the appropriate `numpy` datatype
@@ -152,28 +148,29 @@ test_set = tf.contrib.learn.datasets.base.load_csv(filename=IRIS_TEST,
target_dtype=np.int)
```
-`Dataset`s in tf.contrib.learn are [named tuples]
-(https://docs.python.org/2/library/collections.html#collections.namedtuple); you
-can access feature data and target values via the `data` and `target` fields.
-Here, `training_set.data` and `training_set.target` contain the feature data and
-target values for the training set, respectively, and `test_set.data` and
-`test_set.target` contain feature data and target values for the test set.
+`Dataset`s in tf.contrib.learn are
+[named tuples](https://docs.python.org/2/library/collections.html#collections.namedtuple);
+you can access feature data and target values via the `data` and `target`
+fields. Here, `training_set.data` and `training_set.target` contain the feature
+data and target values for the training set, respectively, and `test_set.data`
+and `test_set.target` contain feature data and target values for the test set.
-Later on, in ["Fit the DNNClassifier to the Iris Training Data,"]
-(#fit-dnnclassifier) you'll use `training_set.data` and `training_set.target` to
-train your model, and in ["Evaluate Model Accuracy,"](#evaluate-accuracy) you'll
-use `test_set.data` and `test_set.target`. But first, you'll construct your
-model in the next section.
+Later on, in
+["Fit the DNNClassifier to the Iris Training Data,"](#fit-dnnclassifier)
+you'll use `training_set.data` and `training_set.target` to train your model,
+and in ["Evaluate Model Accuracy,"](#evaluate-accuracy) you'll use
+`test_set.data` and `test_set.target`. But first, you'll construct your model in
+the next section.
## Construct a Deep Neural Network Classifier
-tf.contrib.learn offers a variety of predefined models, called [`Estimator`s]
-(../../api_docs/python/contrib.learn.md#estimators), which you can use "out of
-the box" to run training and evaluation operations on your data. Here, you'll
-configure a Deep Neural Network Classifier model to fit the Iris data. Using
-tf.contrib.learn, you can instantiate your [`DNNClassifier`]
-(../../api_docs/python/contrib.learn.md#DNNClassifier) with just a couple lines
-of code:
+tf.contrib.learn offers a variety of predefined models, called
+[`Estimator`s](../../api_docs/python/contrib.learn.md#estimators), which you can
+use "out of the box" to run training and evaluation operations on your data.
+Here, you'll configure a Deep Neural Network Classifier model to fit the Iris
+data. Using tf.contrib.learn, you can instantiate your
+[`DNNClassifier`](../../api_docs/python/contrib.learn.md#DNNClassifier)
+with just a couple lines of code:
```python
# Specify that all features have real-value data
@@ -196,21 +193,21 @@ accordingly to `4` to hold all the data.
Then, the code creates a `DNNClassifier` model using the following arguments:
* `feature_columns=feature_columns`. The set of feature columns defined above.
-* `hidden_units=[10, 20, 10]`. Three [hidden layers]
- (http://stats.stackexchange.com/questions/181/how-to-choose-the-number-of-hidden-layers-and-nodes-in-a-feedforward-neural-netw),
+* `hidden_units=[10, 20, 10]`. Three
+ [hidden layers](http://stats.stackexchange.com/questions/181/how-to-choose-the-number-of-hidden-layers-and-nodes-in-a-feedforward-neural-netw),
containing 10, 20, and 10 neurons, respectively.
* `n_classes=3`. Three target classes, representing the three Iris species.
* `model_dir=/tmp/iris_model`. The directory in which TensorFlow will save
checkpoint data during model training. For more on logging and monitoring
- with TensorFlow, see [Logging and Monitoring Basics with tf.contrib.learn]
- (../monitors/index.md).
+ with TensorFlow, see
+ [Logging and Monitoring Basics with tf.contrib.learn](../monitors/index.md).
## Fit the DNNClassifier to the Iris Training Data {#fit-dnnclassifier}
Now that you've configured your DNN `classifier` model, you can fit it to the
-Iris training data using the [`fit`]
-(../../api_docs/python/contrib.learn.md#BaseEstimator.fit) method. Pass as
-arguments your feature data (`training_set.data`), target values
+Iris training data using the
+[`fit`](../../api_docs/python/contrib.learn.md#BaseEstimator.fit) method. Pass
+as arguments your feature data (`training_set.data`), target values
(`training_set.target`), and the number of steps to train (here, 2000):
```python
@@ -228,8 +225,8 @@ classifier.fit(x=training_set.data, y=training_set.target, steps=1000)
```
However, if you're looking to track the model while it trains, you'll likely
-want to instead use a TensorFlow [`monitor`]
-(https://www.tensorflow.org/code/tensorflow/contrib/learn/python/learn/monitors.py)
+want to instead use a TensorFlow
+[`monitor`](https://www.tensorflow.org/code/tensorflow/contrib/learn/python/learn/monitors.py)
to perform logging operations. See the tutorial [&ldquo;Logging and Monitoring
Basics with tf.contrib.learn&rdquo;](../monitors/index.md) for more on this
topic.
@@ -237,12 +234,12 @@ topic.
## Evaluate Model Accuracy {#evaluate-accuracy}
You've fit your `DNNClassifier` model on the Iris training data; now, you can
-check its accuracy on the Iris test data using the [`evaluate`]
-(../../api_docs/python/contrib.learn.md#BaseEstimator.evaluate) method. Like
-`fit`, `evaluate` takes feature data and target values as arguments, and returns
-a `dict` with the evaluation results. The following code passes the Iris test
-data&mdash;`test_set.data` and `test_set.target`&mdash;to `evaluate` and prints
-the `accuracy` from the results:
+check its accuracy on the Iris test data using the
+[`evaluate`](../../api_docs/python/contrib.learn.md#BaseEstimator.evaluate)
+method. Like `fit`, `evaluate` takes feature data and target values as
+arguments, and returns a `dict` with the evaluation results. The following code
+passes the Iris test data&mdash;`test_set.data` and `test_set.target`&mdash;to
+`evaluate` and prints the `accuracy` from the results:
```python
accuracy_score = classifier.evaluate(x=test_set.data, y=test_set.target)["accuracy"]
@@ -296,8 +293,7 @@ second sample is *Iris virginica*.
[Large-scale Linear Models with TensorFlow](../linear/overview.md).
* To build your own Estimator using tf.contrib.learn APIs, check out [Building
- Machine Learning Estimator in TensorFlow]
- (http://terrytangyuan.github.io/2016/07/08/understand-and-build-tensorflow-estimator/).
+ Machine Learning Estimator in TensorFlow](http://terrytangyuan.github.io/2016/07/08/understand-and-build-tensorflow-estimator/).
* To experiment with neural network modeling and visualization in the browser,
check out [Deep Playground](http://playground.tensorflow.org/).
diff --git a/tensorflow/g3doc/tutorials/wide/index.md b/tensorflow/g3doc/tutorials/wide/index.md
index 4be238b72b..1bad7ea178 100644
--- a/tensorflow/g3doc/tutorials/wide/index.md
+++ b/tensorflow/g3doc/tutorials/wide/index.md
@@ -36,8 +36,9 @@ https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/w
$ sudo pip install pandas
```
- If you have trouble installing pandas, consult the [instructions]
-(http://pandas.pydata.org/pandas-docs/stable/install.html) on the pandas site.
+ If you have trouble installing pandas, consult the
+ [instructions](http://pandas.pydata.org/pandas-docs/stable/install.html)
+ on the pandas site.
4. Execute the tutorial code with the following command to train the linear
model described in this tutorial:
@@ -50,12 +51,11 @@ Read on to find out how this code builds its linear model.
## Reading The Census Data
-The dataset we'll be using is the [Census Income Dataset]
-(https://archive.ics.uci.edu/ml/datasets/Census+Income). You can download the
-[training data]
-(https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data) and
-[test data]
-(https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test)
+The dataset we'll be using is the
+[Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census+Income).
+You can download the
+[training data](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data)
+and [test data](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test)
manually or use code like this:
```python
@@ -67,8 +67,8 @@ urllib.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/ad
urllib.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", test_file.name)
```
-Once the CSV files are downloaded, let's read them into [Pandas]
-(http://pandas.pydata.org/) dataframes.
+Once the CSV files are downloaded, let's read them into
+[Pandas](http://pandas.pydata.org/) dataframes.
```python
import pandas as pd
@@ -147,10 +147,9 @@ When building a TF.Learn model, the input data is specified by means of an Input
Builder function. This builder function will not be called until it is later
passed to TF.Learn methods such as `fit` and `evaluate`. The purpose of this
function is to construct the input data, which is represented in the form of
-[Tensors]
-(https://www.tensorflow.org/versions/r0.9/api_docs/python/framework.html#Tensor)
-or [SparseTensors]
-(https://www.tensorflow.org/versions/r0.9/api_docs/python/sparse_ops.html#SparseTensor).
+[Tensors](https://www.tensorflow.org/versions/r0.9/api_docs/python/framework.html#Tensor)
+or
+[SparseTensors](https://www.tensorflow.org/versions/r0.9/api_docs/python/sparse_ops.html#SparseTensor).
In more detail, the Input Builder function returns the following as a pair:
1. `feature_cols`: A dict from feature column names to `Tensors` or
@@ -170,8 +169,7 @@ Our model represents the input data as *constant* tensors, meaning that the
tensor represents a constant value, in this case the values of a particular
column of `df_train` or `df_test`. This is the simplest way to pass data into
TensorFlow. Another more advanced way to represent input data would be to
-construct an [Input Reader]
-(https://www.tensorflow.org/versions/r0.9/api_docs/python/io_ops.html#inputs-and-readers)
+construct an [Input Reader](https://www.tensorflow.org/versions/r0.9/api_docs/python/io_ops.html#inputs-and-readers)
that represents a file or other data source, and iterates through the file as
TensorFlow runs the graph. Each continuous column in the train or test dataframe
will be converted into a `Tensor`, which in general is a good format to
@@ -387,9 +385,8 @@ The first line of the output should be something like `accuracy: 0.83557522`,
which means the accuracy is 83.6%. Feel free to try more features and
transformations and see if you can do even better!
-If you'd like to see a working end-to-end example, you can download our [example
-code]
-(https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/wide_n_deep_tutorial.py)
+If you'd like to see a working end-to-end example, you can download our
+[example code](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/wide_n_deep_tutorial.py)
and set the `model_type` flag to `wide`.
## Adding Regularization to Prevent Overfitting
diff --git a/tensorflow/g3doc/tutorials/wide_and_deep/index.md b/tensorflow/g3doc/tutorials/wide_and_deep/index.md
index 8f2041e7ee..da7b2f7d6d 100644
--- a/tensorflow/g3doc/tutorials/wide_and_deep/index.md
+++ b/tensorflow/g3doc/tutorials/wide_and_deep/index.md
@@ -16,8 +16,7 @@ large-scale regression and classification problems with sparse input features
you're interested in learning more about how Wide & Deep Learning works, please
check out our [research paper](http://arxiv.org/abs/1606.07792).
-![Wide & Deep Spectrum of Models]
-(../../images/wide_n_deep.svg "Wide & Deep")
+![Wide & Deep Spectrum of Models](../../images/wide_n_deep.svg "Wide & Deep")
The figure above shows a comparison of a wide model (logistic regression with
sparse features and transformations), a deep model (feed-forward neural network
@@ -62,8 +61,9 @@ https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/w
$ sudo pip install pandas
```
- If you have trouble installing pandas, consult the [instructions]
-(http://pandas.pydata.org/pandas-docs/stable/install.html) on the pandas site.
+ If you have trouble installing pandas, consult the
+ [instructions](http://pandas.pydata.org/pandas-docs/stable/install.html)
+ on the pandas site.
4. Execute the tutorial code with the following command to train the linear
model described in this tutorial:
@@ -130,9 +130,9 @@ concatenated with the continuous features, and then fed into the hidden layers
of a neural network in the forward pass. The embedding values are initialized
randomly, and are trained along with all other model parameters to minimize the
training loss. If you're interested in learning more about embeddings, check out
-the TensorFlow tutorial on [Vector Representations of Words]
-(https://www.tensorflow.org/versions/r0.9/tutorials/word2vec/index.html), or
-[Word Embedding](https://en.wikipedia.org/wiki/Word_embedding) on Wikipedia.
+the TensorFlow tutorial on
+[Vector Representations of Words](https://www.tensorflow.org/versions/r0.9/tutorials/word2vec/index.html),
+or [Word Embedding](https://en.wikipedia.org/wiki/Word_embedding) on Wikipedia.
We'll configure the embeddings for the categorical columns using
`embedding_column`, and concatenate them with the continuous columns:
@@ -259,8 +259,8 @@ for key in sorted(results):
The first line of the output should be something like `accuracy: 0.84429705`. We
can see that the accuracy was improved from about 83.6% using a wide-only linear
model to about 84.4% using a Wide & Deep model. If you'd like to see a working
-end-to-end example, you can download our [example code]
-(https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/wide_n_deep_tutorial.py).
+end-to-end example, you can download our
+[example code](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/wide_n_deep_tutorial.py).
Note that this tutorial is just a quick example on a small dataset to get you
familiar with the API. Wide & Deep Learning will be even more powerful if you
diff --git a/tensorflow/models/rnn/translate/data_utils.py b/tensorflow/models/rnn/translate/data_utils.py
index 551c49d532..e0e1d4bf05 100644
--- a/tensorflow/models/rnn/translate/data_utils.py
+++ b/tensorflow/models/rnn/translate/data_utils.py
@@ -26,6 +26,7 @@ import tarfile
from six.moves import urllib
from tensorflow.python.platform import gfile
+import tensorflow as tf
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
@@ -137,6 +138,7 @@ def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
counter += 1
if counter % 100000 == 0:
print(" processing line %d" % counter)
+ line = tf.compat.as_bytes(line)
tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
for w in tokens:
word = _DIGIT_RE.sub(b"0", w) if normalize_digits else w
diff --git a/tensorflow/python/framework/common_shapes.py b/tensorflow/python/framework/common_shapes.py
index f406be8371..a0c97f5f8f 100644
--- a/tensorflow/python/framework/common_shapes.py
+++ b/tensorflow/python/framework/common_shapes.py
@@ -183,7 +183,7 @@ def conv2d_shape(op):
data_format = None
if data_format == b"NCHW":
- # Convert input shape to the dfeault NHWC for inference.
+ # Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
@@ -368,7 +368,7 @@ def avg_pool_shape(op):
data_format = None
if data_format == b"NCHW":
- # Convert input shape to the dfeault NHWC for inference.
+ # Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
diff --git a/tensorflow/python/lib/core/py_func.cc b/tensorflow/python/lib/core/py_func.cc
index 61ebdecc6c..5700414149 100644
--- a/tensorflow/python/lib/core/py_func.cc
+++ b/tensorflow/python/lib/core/py_func.cc
@@ -15,6 +15,8 @@ limitations under the License.
#include "tensorflow/python/lib/core/py_func.h"
+#include <array>
+
#include <Python.h>
#include "numpy/arrayobject.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -162,8 +164,8 @@ bool IsSingleNone(PyObject* obj) {
if (PyArray_NDIM(array_obj) != 0 || PyArray_SIZE(array_obj) != 1) {
return false;
}
- npy_intp indices[] = {};
- char* item_ptr = static_cast<char*>(PyArray_GetPtr(array_obj, indices));
+ std::array<npy_intp, 0> indices;
+ char* item_ptr = static_cast<char*>(PyArray_GetPtr(array_obj, indices.data()));
PyObject* item = PyArray_GETITEM(array_obj, item_ptr);
CHECK(item);
return item == Py_None;
diff --git a/tensorflow/python/ops/nn.py b/tensorflow/python/ops/nn.py
index 992e0f6f79..0845c782b8 100644
--- a/tensorflow/python/ops/nn.py
+++ b/tensorflow/python/ops/nn.py
@@ -142,9 +142,8 @@ to the `Convolution` section for details about the padding calculation.
Morphological operators are non-linear filters used in image processing.
-[Greyscale morphological dilation]
-(https://en.wikipedia.org/wiki/Dilation_(morphology)) is the max-sum counterpart
-of standard sum-product convolution:
+[Greyscale morphological dilation](https://en.wikipedia.org/wiki/Dilation_(morphology))
+is the max-sum counterpart of standard sum-product convolution:
output[b, y, x, c] =
max_{dy, dx} input[b,
@@ -157,9 +156,8 @@ The `filter` is usually called structuring function. Max-pooling is a special
case of greyscale morphological dilation when the filter assumes all-zero
values (a.k.a. flat structuring function).
-[Greyscale morphological erosion]
-(https://en.wikipedia.org/wiki/Erosion_(morphology)) is the min-sum counterpart
-of standard sum-product convolution:
+[Greyscale morphological erosion](https://en.wikipedia.org/wiki/Erosion_(morphology))
+is the min-sum counterpart of standard sum-product convolution:
output[b, y, x, c] =
min_{dy, dx} input[b,
@@ -255,8 +253,8 @@ Candidate Sampling training algorithms can speed up your step times by
only considering a small randomly-chosen subset of contrastive classes
(called candidates) for each batch of training examples.
-See our [Candidate Sampling Algorithms Reference]
-(../../extras/candidate_sampling.pdf)
+See our
+[Candidate Sampling Algorithms Reference](../../extras/candidate_sampling.pdf)
### Sampled Loss Functions
@@ -936,9 +934,9 @@ def batch_normalization(x,
As described in http://arxiv.org/abs/1502.03167.
Normalizes a tensor by `mean` and `variance`, and applies (optionally) a
- `scale` \\\\(\gamma\\\\) to it, as well as an `offset` \\\\(\\beta\\\\):
+ `scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\):
- \\\\(\\frac{\gamma(x-\mu)}{\sigma}+\\beta\\\\)
+ \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\)
`mean`, `variance`, `offset` and `scale` are all expected to be of one of two
shapes:
@@ -964,9 +962,9 @@ def batch_normalization(x,
x: Input `Tensor` of arbitrary dimensionality.
mean: A mean `Tensor`.
variance: A variance `Tensor`.
- offset: An offset `Tensor`, often denoted \\\\(\\beta\\\\) in equations, or
+ offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or
None. If present, will be added to the normalized tensor.
- scale: A scale `Tensor`, often denoted \\\\(\gamma\\\\) in equations, or
+ scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or
`None`. If present, the scale is applied to the normalized tensor.
variance_epsilon: A small float number to avoid dividing by 0.
name: A name for this operation (optional).
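
A plain-numpy sketch of the normalization the docstring above describes, \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\), with `variance_epsilon` added under the square root to avoid division by zero; the values are made up and this is not the op's implementation:

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
mean, variance = x.mean(), x.var()
scale, offset = 1.5, 0.1          # gamma and beta in the formula above
variance_epsilon = 1e-3

normalized = scale * (x - mean) / np.sqrt(variance + variance_epsilon) + offset
print(normalized)
```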
diff --git a/tensorflow/python/ops/nn_grad.py b/tensorflow/python/ops/nn_grad.py
index 180a396adc..ec5024e445 100644
--- a/tensorflow/python/ops/nn_grad.py
+++ b/tensorflow/python/ops/nn_grad.py
@@ -225,7 +225,7 @@ def _BiasAddGradGrad(op, received_grad):
rank = array_ops.rank(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
- if data_format == "NCHW":
+ if data_format == b"NCHW":
expanded_shape = array_ops.concat(
0,
[array_ops.ones_like(shape[:-3]), bias_shape, array_ops.ones_like(shape[-2:])]
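
The switch to `b"NCHW"` above is presumably needed because, under Python 3, a str never compares equal to bytes, so the old comparison could silently never match; a minimal illustration (the attribute value shown is assumed):

```python
# Under Python 3, str and bytes never compare equal.
data_format = b"NCHW"            # e.g. a string attr fetched as bytes
print(data_format == "NCHW")     # False -- the old branch was never taken
print(data_format == b"NCHW")    # True  -- hence the fix above
```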
diff --git a/tensorflow/python/summary/impl/reservoir.py b/tensorflow/python/summary/impl/reservoir.py
index b644d23ad8..2a185898ce 100644
--- a/tensorflow/python/summary/impl/reservoir.py
+++ b/tensorflow/python/summary/impl/reservoir.py
@@ -72,7 +72,7 @@ class Reservoir(object):
raise ValueError('size must be nonegative integer, was %s' % size)
self._buckets = collections.defaultdict(
lambda: _ReservoirBucket(size, random.Random(seed)))
- # _mutex guards the keys - creating new keys, retreiving by key, etc
+ # _mutex guards the keys - creating new keys, retrieving by key, etc
# the internal items are guarded by the ReservoirBuckets' internal mutexes
self._mutex = threading.Lock()
diff --git a/tensorflow/tensorboard/README.md b/tensorflow/tensorboard/README.md
index 1881de76d4..edb8e4fe30 100644
--- a/tensorflow/tensorboard/README.md
+++ b/tensorflow/tensorboard/README.md
@@ -54,18 +54,18 @@ work, but there may be bugs or performance issues.
The first step in using TensorBoard is acquiring data from your TensorFlow run.
For this, you need [summary
-ops](https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html#summary-operations).
+ops](https://www.tensorflow.org/versions/r0.11/api_docs/python/train.html#summary-operations).
Summary ops are ops, like
-[`tf.matmul`](https://www.tensorflow.org/versions/r0.10/api_docs/python/math_ops.html#matmul)
+[`tf.matmul`](https://www.tensorflow.org/versions/r0.11/api_docs/python/math_ops.html#matmul)
or
-[`tf.nn.relu`](https://www.tensorflow.org/versions/r0.10/api_docs/python/nn.html#relu),
+[`tf.nn.relu`](https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#relu),
which means they take in tensors, produce tensors, and are evaluated from within
a TensorFlow graph. However, summary ops have a twist: the Tensors they produce
contain serialized protobufs, which are written to disk and sent to TensorBoard.
To visualize the summary data in TensorBoard, you should evaluate the summary
op, retrieve the result, and then write that result to disk using a
SummaryWriter. A full explanation, with examples, is in [the
-tutorial](https://www.tensorflow.org/versions/r0.10/how_tos/summaries_and_tensorboard/index.html).
+tutorial](https://www.tensorflow.org/versions/r0.11/how_tos/summaries_and_tensorboard/index.html).
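
A minimal sketch of that evaluate-then-write flow, assuming the r0.11-era API the links above point to (`tf.scalar_summary`, `tf.merge_all_summaries`, `tf.train.SummaryWriter`); the log directory is arbitrary:

```python
import tensorflow as tf

x = tf.constant(3.0)
tf.scalar_summary("x", x)                      # a summary op
merged = tf.merge_all_summaries()

with tf.Session() as sess:
    writer = tf.train.SummaryWriter("/tmp/demo_logs", sess.graph)
    summary = sess.run(merged)                 # serialized protobuf
    writer.add_summary(summary, global_step=0)
    writer.close()
```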
### Tags: Giving names to data
@@ -187,7 +187,7 @@ TensorFlow model. To get best use of the graph visualizer, you should use name
scopes to hierarchically group the ops in your graph - otherwise, the graph may
be difficult to decipher. For more information, including examples, see [the
graph visualizer
-tutorial](https://www.tensorflow.org/versions/r0.10/how_tos/graph_viz/index.html#tensorboard-graph-visualization).
+tutorial](https://www.tensorflow.org/versions/r0.11/how_tos/graph_viz/index.html#tensorboard-graph-visualization).
# Frequently Asked Questions
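To make the summary-op workflow described in the README diff above concrete, here is a minimal sketch using the API names current in r0.11 (`tf.scalar_summary`, `tf.merge_all_summaries`, `tf.train.SummaryWriter`); later releases renamed these under `tf.summary`, and the log directory used here is arbitrary.

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, name="x")
loss = tf.square(x, name="loss")
tf.scalar_summary("loss", loss)              # a summary op: its output is a serialized protobuf

merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("/tmp/tensorboard_logs")  # TensorBoard reads events from here

with tf.Session() as sess:
    for step in range(10):
        # Evaluate the summary op, retrieve the serialized result, write it to disk.
        summary_bytes = sess.run(merged, feed_dict={x: float(step)})
        writer.add_summary(summary_bytes, global_step=step)
    writer.close()
```

Pointing `tensorboard --logdir=/tmp/tensorboard_logs` at the same directory then visualizes the written events.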
diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl
index 4a13cd8754..1abac91e1a 100644
--- a/tensorflow/tensorflow.bzl
+++ b/tensorflow/tensorflow.bzl
@@ -23,7 +23,12 @@ def src_to_test_name(src):
# Check that a specific bazel version is being used.
def check_version(bazel_version):
- if "bazel_version" in dir(native) and native.bazel_version:
+ if "bazel_version" not in dir(native):
+ fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" % bazel_version)
+ elif not native.bazel_version:
+ print("\nCurrent Bazel is not a release version, cannot check for compatibility.")
+ print("Make sure that you are running at least Bazel %s.\n" % bazel_version)
+ else:
current_bazel_version = _parse_bazel_version(native.bazel_version)
minimum_bazel_version = _parse_bazel_version(bazel_version)
if minimum_bazel_version > current_bazel_version:
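The restructured check above still relies on `_parse_bazel_version` (defined elsewhere in `tensorflow.bzl` and not shown in this diff) to make version strings comparable. The sketch below shows the general idea with a hypothetical `parse_version` helper, not the actual Starlark implementation:

```python
def parse_version(version_string):
    """Hypothetical helper: turn "0.3.1" into (0, 3, 1) so versions compare numerically."""
    parts = []
    for piece in version_string.split("."):
        leading_digits = ""
        for ch in piece:
            if not ch.isdigit():
                break                      # stop at suffixes such as "rc1"
            leading_digits += ch
        parts.append(int(leading_digits) if leading_digits else 0)
    return tuple(parts)

# Tuple comparison gets "0.10.0" vs "0.9.0" right, where plain string comparison would not.
assert parse_version("0.10.0") > parse_version("0.9.0")
assert parse_version("0.3.1") >= parse_version("0.2.1")
print("version checks passed")
```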
diff --git a/tensorflow/tools/ci_build/builds/configured b/tensorflow/tools/ci_build/builds/configured
index e62a6ffe4d..2776942593 100755
--- a/tensorflow/tools/ci_build/builds/configured
+++ b/tensorflow/tools/ci_build/builds/configured
@@ -49,8 +49,7 @@ else
fi
pushd "${CI_TENSORFLOW_SUBMODULE_PATH:-.}"
-echo '' > test_input.txt
-./configure < test_input.txt
+yes "" | ./configure
popd
# Gather and print build information
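The `yes "" | ./configure` change above answers every interactive prompt of the configure script with an empty line, i.e. it accepts each default, instead of relying on a pre-written `test_input.txt`. A rough Python equivalent of the same idea, assuming it is invoked from a TensorFlow source checkout as the CI script is:

```python
import subprocess

# Feed a generous supply of empty answers to ./configure's prompts, mimicking `yes "" |`.
configure = subprocess.Popen(["./configure"], stdin=subprocess.PIPE)
configure.communicate(input=b"\n" * 64)
print("configure exited with code", configure.returncode)
```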
diff --git a/tensorflow/tools/dist_test/Dockerfile b/tensorflow/tools/dist_test/Dockerfile
index 3fc50de9d5..9888cfd14f 100644
--- a/tensorflow/tools/dist_test/Dockerfile
+++ b/tensorflow/tools/dist_test/Dockerfile
@@ -1,13 +1,35 @@
-FROM ubuntu:14.04
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Docker image for testing distributed (GRPC) TensorFlow on Google Container
+# Engine (GKE).
+#
+# See ./remote_test.sh for usage example.
+
+FROM ubuntu:16.04
MAINTAINER Shanqing Cai <cais@google.com>
RUN apt-get update
RUN apt-get install -y --no-install-recommends \
- curl \
python \
python-numpy \
- python-pip
+ python-pip \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
# Install Google Cloud SDK
RUN curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash
@@ -17,10 +39,11 @@ RUN ./install_google_cloud_sdk.bash --disable-prompts --install-dir=/var/gcloud
# Install kubectl
RUN /var/gcloud/google-cloud-sdk/bin/gcloud components install kubectl
-# Install nightly TensorFlow pip
+# Install TensorFlow pip whl
# TODO(cais): Should we build it locally instead?
-RUN pip install \
- https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+COPY tensorflow-*.whl /
+RUN pip install /tensorflow-*.whl
+RUN rm -f /tensorflow-*.whl
# Copy test files
COPY scripts /var/tf-dist-test/scripts
diff --git a/tensorflow/tools/dist_test/Dockerfile.local b/tensorflow/tools/dist_test/Dockerfile.local
index eb615be1fe..0cfb8d529e 100644
--- a/tensorflow/tools/dist_test/Dockerfile.local
+++ b/tensorflow/tools/dist_test/Dockerfile.local
@@ -23,19 +23,16 @@ MAINTAINER Shanqing Cai <cais@google.com>
# Pick up some TF dependencies.
RUN apt-get update && apt-get install -y \
- curl \
python-numpy \
python-pip \
&& \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
-RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
- python get-pip.py && \
- rm get-pip.py
-
-# Install TensorFlow CPU version from nightly build.
-RUN pip --no-cache-dir install \
- https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+# Install TensorFlow pip whl
+# TODO(cais): Should we build it locally instead?
+COPY tensorflow-*.whl /
+RUN pip install /tensorflow-*.whl
+RUN rm -f /tensorflow-*.whl
ADD . /var/tf_dist_test
diff --git a/tensorflow/tools/dist_test/README.md b/tensorflow/tools/dist_test/README.md
index 91f64dd9c3..39c040e051 100644
--- a/tensorflow/tools/dist_test/README.md
+++ b/tensorflow/tools/dist_test/README.md
@@ -112,5 +112,5 @@ servers. For example:
kubectl create -f tf-k8s-with-lb.yaml
-See [Kubernetes kubectl documentation]
-(http://kubernetes.io/docs/user-guide/kubectl-overview/) for more details.
+See [Kubernetes kubectl documentation](http://kubernetes.io/docs/user-guide/kubectl-overview/)
+for more details.
diff --git a/tensorflow/tools/dist_test/local_test.sh b/tensorflow/tools/dist_test/local_test.sh
index 727258c6d8..e46e60dd81 100755
--- a/tensorflow/tools/dist_test/local_test.sh
+++ b/tensorflow/tools/dist_test/local_test.sh
@@ -24,16 +24,20 @@
# 3) Call a script to launch a k8s TensorFlow GRPC cluster inside the container
# and run the distributed test suite.
#
-# Usage: local_test.sh [--leave_container_running]
+# Usage: local_test.sh <whl_url>
+# [--leave_container_running]
# [--model_name <MODEL_NAME>]
# [--num_workers <NUM_WORKERS>]
# [--num_parameter_servers <NUM_PARAMETER_SERVERS>]
# [--sync_replicas]
#
-# E.g., local_test.sh --model_name CENSUS_WIDENDEEP
-# local_test.sh --num_workers 3 --num_parameter_servers 3
+# E.g., local_test.sh <whl_url> --model_name CENSUS_WIDENDEEP
+# local_test.sh <whl_url> --num_workers 3 --num_parameter_servers 3
#
# Arguments:
+# <whl_url>
+# Specify custom TensorFlow whl file URL to install in the test Docker image.
+#
# --leave_container_running: Do not stop the docker-in-docker container after
# the termination of the tests, e.g., for debugging
#
@@ -48,6 +52,7 @@
# (workers) will be aggregated before applied, which avoids stale parameter
# updates.
#
+#
# In addition, this script obeys the following environment variables:
# TF_DIST_DOCKER_NO_CACHE: do not use cache when building docker images
@@ -72,6 +77,11 @@ NUM_WORKERS=2
NUM_PARAMETER_SERVERS=2
SYNC_REPLICAS_FLAG=""
+WHL_URL=${1}
+if [[ -z "${WHL_URL}" ]]; then
+ die "whl file URL is not specified"
+fi
+
while true; do
if [[ $1 == "--leave_container_running" ]]; then
LEAVE_CONTAINER_RUNNING=1
@@ -84,6 +94,8 @@ while true; do
NUM_PARAMETER_SERVERS=$2
elif [[ $1 == "--sync_replicas" ]]; then
SYNC_REPLICAS_FLAG="--sync_replicas"
+ elif [[ $1 == "--whl_url" ]]; then
+ WHL_URL=$2
fi
shift
@@ -104,25 +116,35 @@ DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Get utility functions
source ${DIR}/scripts/utils.sh
-
-# First, make sure that no docker-in-docker container of the same image
-# is already running
-if [[ ! -z $(get_container_id_by_image_name ${DOCKER_IMG_NAME}) ]]; then
- die "It appears that there is already at least one Docker container "\
-"of image name ${DOCKER_IMG_NAME} running. Please stop it before trying again"
-fi
-
-# Build docker-in-docker image for local k8s cluster
+# Build docker-in-docker image for local k8s cluster.
NO_CACHE_FLAG=""
if [[ ! -z "${TF_DIST_DOCKER_NO_CACHE}" ]] &&
[[ "${TF_DIST_DOCKER_NO_CACHE}" != "0" ]]; then
NO_CACHE_FLAG="--no-cache"
fi
+# Create docker build context directory.
+BUILD_DIR=$(mktemp -d)
+echo ""
+echo "Using whl file URL: ${WHL_URL}"
+echo "Building in temporary directory: ${BUILD_DIR}"
+
+cp -r ${DIR}/* "${BUILD_DIR}"/ || \
+ die "Failed to copy files to ${BUILD_DIR}"
+
+# Download whl file into the build context directory.
+wget -P "${BUILD_DIR}" ${WHL_URL} || \
+ die "Failed to download tensorflow whl file from URL: ${WHL_URL}"
+
+# Build docker image for test.
docker build ${NO_CACHE_FLAG} -t ${DOCKER_IMG_NAME} \
- -f ${DIR}/Dockerfile.local ${DIR} || \
+ -f "${BUILD_DIR}/Dockerfile.local" "${BUILD_DIR}" || \
die "Failed to build docker image: ${DOCKER_IMG_NAME}"
+# Clean up docker build context directory.
+rm -rf "${BUILD_DIR}"
+
+# Run docker image for test.
docker run ${DOCKER_IMG_NAME} \
/var/tf_dist_test/scripts/dist_mnist_test.sh \
--ps_hosts "localhost:2000,localhost:2001" \
diff --git a/tensorflow/tools/dist_test/remote_test.sh b/tensorflow/tools/dist_test/remote_test.sh
index 1d4a52c4c2..b1e6b1e71e 100755
--- a/tensorflow/tools/dist_test/remote_test.sh
+++ b/tensorflow/tools/dist_test/remote_test.sh
@@ -20,13 +20,17 @@
# runs from within a container based on the image.
#
# Usage:
-# remote_test.sh [--setup_cluster_only]
+# remote_test.sh <whl_url>
+# [--setup_cluster_only]
# [--num_workers <NUM_WORKERS>]
# [--num_parameter_servers <NUM_PARAMETER_SERVERS>]
# [--sync_replicas]
#
# Arguments:
-# --setup_cluster_only:
+# <whl_url>
+# Specify custom TensorFlow whl file URL to install in the test Docker image.
+#
+# --setup_cluster_only:
# Setup the TensorFlow k8s cluster only, and do not perform testing of
# the distributed runtime.
#
@@ -42,6 +46,7 @@
# updates.
#
#
+#
# If any of the following environment variable has non-empty values, it will
# be mapped into the docker container to override the default values (see
# dist_test.sh)
@@ -95,8 +100,34 @@ if [[ ! -z "${TF_DIST_DOCKER_NO_CACHE}" ]] &&
NO_CACHE_FLAG="--no-cache"
fi
+# Parse command-line arguments.
+WHL_URL=${1}
+if [[ -z "${WHL_URL}" ]]; then
+ die "whl URL is not specified"
+fi
+
+# Create docker build context directory.
+BUILD_DIR=$(mktemp -d)
+echo ""
+echo "Using custom whl file URL: ${WHL_URL}"
+echo "Building in temporary directory: ${BUILD_DIR}"
+
+cp -r ${DIR}/* ${BUILD_DIR}/ || \
+ die "Failed to copy files to ${BUILD_DIR}"
+
+# Download whl file into the build context directory.
+wget -P "${BUILD_DIR}" ${WHL_URL} || \
+ die "Failed to download tensorflow whl file from URL: ${WHL_URL}"
+
+# Build docker image for test.
docker build ${NO_CACHE_FLAG} \
- -t ${DOCKER_IMG_NAME} -f "${DIR}/Dockerfile" "${DIR}"
+ -t ${DOCKER_IMG_NAME} -f "${BUILD_DIR}/Dockerfile" "${BUILD_DIR}" || \
+ die "Failed to build docker image: ${DOCKER_IMG_NAME}"
+
+# Clean up docker build context directory.
+rm -rf "${BUILD_DIR}"
+
+# Run docker image for test.
KEY_FILE=${TF_DIST_GCLOUD_KEY_FILE:-"${HOME}/gcloud-secrets/tensorflow-testing.json"}
docker run --rm -v ${KEY_FILE}:/var/gcloud/secrets/tensorflow-testing.json \
diff --git a/tensorflow/tools/dist_test/server/Dockerfile b/tensorflow/tools/dist_test/server/Dockerfile
index d1d11e0524..9cc61a826b 100644
--- a/tensorflow/tools/dist_test/server/Dockerfile
+++ b/tensorflow/tools/dist_test/server/Dockerfile
@@ -36,7 +36,7 @@ RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
# Install TensorFlow CPU version from nightly build
RUN pip --no-cache-dir install \
- https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+ https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
# Copy files, including the GRPC server binary at
# server/grpc_tensorflow_server.py
diff --git a/tensorflow/tools/dist_test/server/Dockerfile.test b/tensorflow/tools/dist_test/server/Dockerfile.test
index 298d1854a7..5bafa29468 100644
--- a/tensorflow/tools/dist_test/server/Dockerfile.test
+++ b/tensorflow/tools/dist_test/server/Dockerfile.test
@@ -42,7 +42,7 @@ RUN pip install --upgrade pandas==0.18.1
# Install TensorFlow CPU version.
RUN pip --no-cache-dir install \
- https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+ https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
# Copy files, including the GRPC server binary at
# server/grpc_tensorflow_server.py
diff --git a/tensorflow/tools/docker/Dockerfile b/tensorflow/tools/docker/Dockerfile
index 68d71b0b2f..0178112320 100644
--- a/tensorflow/tools/docker/Dockerfile
+++ b/tensorflow/tools/docker/Dockerfile
@@ -32,7 +32,7 @@ RUN pip --no-cache-dir install \
&& \
python -m ipykernel.kernelspec
-ENV TENSORFLOW_VERSION 0.10.0
+ENV TENSORFLOW_VERSION 0.11.0rc0
# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
diff --git a/tensorflow/tools/docker/Dockerfile.devel b/tensorflow/tools/docker/Dockerfile.devel
index 794c18f196..1c1f2e2059 100644
--- a/tensorflow/tools/docker/Dockerfile.devel
+++ b/tensorflow/tools/docker/Dockerfile.devel
@@ -80,7 +80,7 @@ RUN mkdir /bazel && \
RUN git clone --recursive https://github.com/tensorflow/tensorflow.git && \
cd tensorflow && \
- git checkout r0.10
+ git checkout r0.11
WORKDIR /tensorflow
# TODO(craigcitro): Don't install the pip package, since it makes it
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu b/tensorflow/tools/docker/Dockerfile.devel-gpu
index 9bd6129322..b4dc923687 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu
@@ -79,9 +79,9 @@ RUN mkdir /bazel && \
# Download and build TensorFlow.
-RUN git clone -b r0.10 --recursive --recurse-submodules https://github.com/tensorflow/tensorflow.git && \
+RUN git clone -b r0.11 --recursive --recurse-submodules https://github.com/tensorflow/tensorflow.git && \
cd tensorflow && \
- git checkout r0.10
+ git checkout r0.11
WORKDIR /tensorflow
# Configure the build for our CUDA configuration.
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index 6cd4a57e25..22403223e1 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -32,7 +32,7 @@ RUN pip --no-cache-dir install \
&& \
python -m ipykernel.kernelspec
-ENV TENSORFLOW_VERSION 0.10.0
+ENV TENSORFLOW_VERSION 0.11.0rc0
# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
diff --git a/tensorflow/tools/docker/README.md b/tensorflow/tools/docker/README.md
index 921d4e5353..f897f0a2b4 100644
--- a/tensorflow/tools/docker/README.md
+++ b/tensorflow/tools/docker/README.md
@@ -10,7 +10,7 @@ General installation instructions are
quick links here:
* [OSX](https://docs.docker.com/installation/mac/): [docker toolbox](https://www.docker.com/toolbox)
-* [ubuntu](https://docs.docker.com/installation/ubuntulinux/)
+* [ubuntu](https://docs.docker.com/engine/installation/linux/ubuntulinux/)
## Which containers exist?
diff --git a/tensorflow/tools/docker/parameterized_docker_build.sh b/tensorflow/tools/docker/parameterized_docker_build.sh
index 00f14f9cfb..30a6bca4d4 100755
--- a/tensorflow/tools/docker/parameterized_docker_build.sh
+++ b/tensorflow/tools/docker/parameterized_docker_build.sh
@@ -58,8 +58,14 @@
# If set to a valid binary/script path, will call the script with the final
# tagged image name with an argument, to push the image to a central repo
# such as gcr.io or Docker Hub.
-
-# TODO(cais): Add support for TF_DOCKER_BUILD_PYTHON_VERSION (PYTHON2/PYTHON3)
+#
+# TF_DOCKER_BUILD_PYTHON_VERSION
+# (Optional)
+# Specifies the desired Python version. Defaults to PYTHON2.
+#
+# TF_DOCKER_BUILD_OPTIONS
+# (Optional)
+# Specifies the desired build options. Defaults to OPT.
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
@@ -76,6 +82,8 @@ mark_check_failed() {
TF_DOCKER_BUILD_TYPE=$(to_lower ${TF_DOCKER_BUILD_TYPE})
TF_DOCKER_BUILD_IS_DEVEL=$(to_lower ${TF_DOCKER_BUILD_IS_DEVEL})
TF_DOCKER_BUILD_CENTRAL_PIP=$(to_lower ${TF_DOCKER_BUILD_CENTRAL_PIP})
+TF_DOCKER_BUILD_PYTHON_VERSION=$(to_lower ${TF_DOCKER_BUILD_PYTHON_VERSION:-PYTHON2})
+TF_DOCKER_BUILD_OPTIONS=$(to_lower ${TF_DOCKER_BUILD_OPTIONS:-OPT})
echo "Required build parameters:"
echo " TF_DOCKER_BUILD_TYPE=${TF_DOCKER_BUILD_TYPE}"
@@ -175,8 +183,8 @@ if [[ "${DO_PIP_BUILD}" == "1" ]]; then
# Perform local build of the required PIP whl file
export TF_BUILD_CONTAINER_TYPE=${TF_DOCKER_BUILD_TYPE}
- export TF_BUILD_PYTHON_VERSION="PYTHON2"
- export TF_BUILD_IS_OPT="OPT"
+ export TF_BUILD_PYTHON_VERSION=${TF_DOCKER_BUILD_PYTHON_VERSION}
+ export TF_BUILD_OPTIONS=${TF_DOCKER_BUILD_OPTIONS}
export TF_BUILD_IS_PIP="PIP"
if [[ "${TF_DOCKER_BUILD_TYPE}" == "gpu" ]]; then
diff --git a/tensorflow/tools/gcs_test/Dockerfile b/tensorflow/tools/gcs_test/Dockerfile
index aebad1c5a7..0abe3d6304 100644
--- a/tensorflow/tools/gcs_test/Dockerfile
+++ b/tensorflow/tools/gcs_test/Dockerfile
@@ -1,10 +1,11 @@
-FROM ubuntu:14.04
+FROM ubuntu:16.04
MAINTAINER Shanqing Cai <cais@google.com>
RUN apt-get update
RUN apt-get install -y --no-install-recommends \
curl \
+ libcurl4-openssl-dev \
python \
python-numpy \
python-pip
@@ -16,7 +17,7 @@ RUN ./install_google_cloud_sdk.bash --disable-prompts --install-dir=/var/gcloud
# Install nightly TensorFlow pip
RUN pip install \
- https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0-cp27-none-linux_x86_64.whl
+ https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
# Copy test files
RUN mkdir -p /gcs-smoke/python
diff --git a/tensorflow/tools/gcs_test/python/gcs_smoke.py b/tensorflow/tools/gcs_test/python/gcs_smoke.py
index 90d32dc149..5db03afb4d 100644
--- a/tensorflow/tools/gcs_test/python/gcs_smoke.py
+++ b/tensorflow/tools/gcs_test/python/gcs_smoke.py
@@ -107,6 +107,6 @@ if __name__ == "__main__":
print("FAIL: Failed to catch the expected OutOfRangeError while "
"reading one more record than is available")
sys.exit(1)
- except tf.python.framework.errors.OutOfRangeError:
+ except tf.errors.OutOfRangeError:
print("Successfully caught the expected OutOfRangeError while "
"reading one more record than is available")
diff --git a/tensorflow/tools/git/gen_git_source.py b/tensorflow/tools/git/gen_git_source.py
index 3d2464a744..8b02c09064 100755
--- a/tensorflow/tools/git/gen_git_source.py
+++ b/tensorflow/tools/git/gen_git_source.py
@@ -138,10 +138,9 @@ def get_git_version(git_base_path):
Args:
git_base_path: where the .git directory is located
Returns:
- A string representing the git version
+ A bytestring representing the git version
"""
-
- unknown_label = "unknown"
+ unknown_label = b"unknown"
try:
val = subprocess.check_output(["git", "-C", git_base_path, "describe",
"--long", "--dirty", "--tags"]).strip()
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 385cdf1f86..b7d8f52617 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -26,7 +26,7 @@ from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
-_VERSION = '0.10.0'
+_VERSION = '0.11.0rc0'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
diff --git a/third_party/eigen3/unsupported/Eigen/CXX11/Tensor b/third_party/eigen3/unsupported/Eigen/CXX11/Tensor
index 41db119921..7fafd2a538 100644
--- a/third_party/eigen3/unsupported/Eigen/CXX11/Tensor
+++ b/third_party/eigen3/unsupported/Eigen/CXX11/Tensor
@@ -1 +1,10 @@
#include "unsupported/Eigen/CXX11/Tensor"
+
+#ifdef _WIN32
+// On Windows, Eigen will include Windows.h, which defines various
+// macros that conflict with TensorFlow symbols. Undefine them here to
+// prevent clashes.
+#undef DeleteFile
+#undef ERROR
+#undef LoadLibrary
+#endif // _WIN32
diff --git a/third_party/gpus/cuda/BUILD.tpl b/third_party/gpus/cuda/BUILD.tpl
index c920d64a79..cb985c2251 100644
--- a/third_party/gpus/cuda/BUILD.tpl
+++ b/third_party/gpus/cuda/BUILD.tpl
@@ -58,10 +58,8 @@ cc_library(
linkopts = [
"-ldl",
"-lpthread",
- ] + select({
- "@//tensorflow:darwin": [],
- "//conditions:default": ["-lrt"],
- }),
+ %{cudart_static_linkopt}
+ ],
visibility = ["//visibility:public"],
)
diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl
index 31bf8cc3d8..90729463bd 100644
--- a/third_party/gpus/cuda_configure.bzl
+++ b/third_party/gpus/cuda_configure.bzl
@@ -315,6 +315,11 @@ def _find_cudnn_lib_path(repository_ctx, cudnn_install_basedir, symlink_files):
cudnn_install_basedir))
+def _cudart_static_linkopt(cpu_value):
+ """Returns additional platform-specific linkopts for cudart."""
+ return "" if cpu_value == "Darwin" else "\"-lrt\","
+
+
def _tpl(repository_ctx, tpl, substitutions={}, out=None):
if not out:
out = tpl.replace(":", "/")
@@ -364,8 +369,11 @@ def _create_dummy_repository(repository_ctx):
_DEFAULT_CUDNN_VERSION)
# Set up BUILD file for cuda/.
- _file(repository_ctx, "cuda:BUILD")
_file(repository_ctx, "cuda:build_defs.bzl")
+ _tpl(repository_ctx, "cuda:BUILD",
+ {
+ "%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
+ })
_tpl(repository_ctx, "cuda:platform.bzl",
{
"%{cuda_version}": _DEFAULT_CUDA_VERSION,
@@ -460,8 +468,11 @@ def _create_cuda_repository(repository_ctx):
repository_ctx.symlink(cudnn_lib_path, "cuda/" + symlink_files.cuda_dnn_lib)
# Set up BUILD file for cuda/
- _file(repository_ctx, "cuda:BUILD")
_file(repository_ctx, "cuda:build_defs.bzl")
+ _tpl(repository_ctx, "cuda:BUILD",
+ {
+ "%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
+ })
_tpl(repository_ctx, "cuda:platform.bzl",
{
"%{cuda_version}": cuda_version,
diff --git a/util/python/python_config.sh b/util/python/python_config.sh
index c32603315f..d75a4d62dd 100755
--- a/util/python/python_config.sh
+++ b/util/python/python_config.sh
@@ -46,7 +46,7 @@ function main {
}
function python_path {
- python - <<END
+ $PYTHON_BIN_PATH - <<END
from __future__ import print_function
import site
import os
@@ -80,7 +80,7 @@ END
}
function default_python_path {
- PYTHON_ARG="$1" python - <<END
+ PYTHON_ARG="$1" $PYTHON_BIN_PATH - <<END
from __future__ import print_function
import os