author    Dandelion Mané <dandelion@google.com>    2017-03-10 14:43:23 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>    2017-03-10 15:18:15 -0800
commit    0386a01ad3beb28364599d82199be1c0837b3fa9 (patch)
tree      3a1d2ef947a7bf37286efc0e8ff760e0401ab319
parent    e73ceaebb209a1e577e7240fba41c692c89143d0 (diff)
Merge changes from github.
Change: 149800363
-rw-r--r--  README.md | 16
-rw-r--r--  RELEASE.md | 1
-rwxr-xr-x  configure | 24
-rw-r--r--  tensorflow/c/c_api.cc | 6
-rw-r--r--  tensorflow/c/c_api.h | 7
-rw-r--r--  tensorflow/compiler/jit/graph_to_functiondef.cc | 2
-rw-r--r--  tensorflow/compiler/jit/graphcycles/graphcycles.cc | 10
-rw-r--r--  tensorflow/compiler/tf2xla/xla_compiler.cc | 18
-rw-r--r--  tensorflow/compiler/xla/array2d.h | 10
-rw-r--r--  tensorflow/compiler/xla/array3d.h | 8
-rw-r--r--  tensorflow/compiler/xla/array4d.h | 16
-rw-r--r--  tensorflow/compiler/xla/client/client.cc | 3
-rw-r--r--  tensorflow/compiler/xla/client/computation_builder.cc | 14
-rw-r--r--  tensorflow/compiler/xla/client/padding.cc | 3
-rw-r--r--  tensorflow/compiler/xla/index_util.cc | 3
-rw-r--r--  tensorflow/compiler/xla/service/cpu/cpu_compiler.cc | 9
-rw-r--r--  tensorflow/compiler/xla/service/cpu/ir_emitter.cc | 10
-rw-r--r--  tensorflow/compiler/xla/service/executable.cc | 3
-rw-r--r--  tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/gpu/ir_emitter.cc | 8
-rw-r--r--  tensorflow/compiler/xla/service/hlo_graph_dumper.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/instruction_fusion.cc | 5
-rw-r--r--  tensorflow/contrib/BUILD | 7
-rw-r--r--  tensorflow/contrib/cmake/external/farmhash.cmake | 4
-rw-r--r--  tensorflow/contrib/cmake/external/gemmlowp.cmake | 2
-rw-r--r--  tensorflow/contrib/cmake/external/gif.cmake | 6
-rw-r--r--  tensorflow/contrib/cmake/external/googletest.cmake | 2
-rw-r--r--  tensorflow/contrib/cmake/external/grpc.cmake | 2
-rw-r--r--  tensorflow/contrib/cmake/external/highwayhash.cmake | 2
-rw-r--r--  tensorflow/contrib/cmake/external/jpeg.cmake | 6
-rw-r--r--  tensorflow/contrib/cmake/external/png.cmake | 2
-rw-r--r--  tensorflow/contrib/cmake/external/tensorboard.cmake | 4
-rw-r--r--  tensorflow/contrib/cmake/external/zlib.cmake | 2
-rw-r--r--  tensorflow/contrib/factorization/python/ops/factorization_ops.py | 2
-rw-r--r--  tensorflow/contrib/ffmpeg/default/ffmpeg_lib.cc | 5
-rw-r--r--  tensorflow/contrib/ios_examples/camera/CameraExampleViewController.h | 2
-rw-r--r--  tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm | 49
-rw-r--r--  tensorflow/contrib/ios_examples/camera/Info.plist | 6
-rw-r--r--  tensorflow/contrib/ios_examples/camera/camera_example.xcodeproj/project.pbxproj | 38
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers.py | 49
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers_test.py | 32
-rw-r--r--  tensorflow/contrib/learn/__init__.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/README.md | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/graph_actions.py | 4
-rw-r--r--  tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py | 4
-rw-r--r--  tensorflow/contrib/metrics/python/metrics/classification.py | 5
-rw-r--r--  tensorflow/contrib/opt/python/training/moving_average_optimizer.py | 2
-rw-r--r--  tensorflow/contrib/slim/python/slim/learning.py | 2
-rw-r--r--  tensorflow/core/BUILD | 3
-rw-r--r--  tensorflow/core/kernels/eigen_pooling.h | 15
-rw-r--r--  tensorflow/core/kernels/hexagon/hexagon_rewriter_transform.cc | 2
-rw-r--r--  tensorflow/core/kernels/quantized_conv_ops.cc | 4
-rw-r--r--  tensorflow/core/kernels/record_yielder.cc | 4
-rw-r--r--  tensorflow/core/kernels/record_yielder.h | 2
-rw-r--r--  tensorflow/core/kernels/sparse_matmul_op.h | 16
-rw-r--r--  tensorflow/core/ops/ops.pbtxt | 53
-rw-r--r--  tensorflow/core/public/version.h | 2
-rw-r--r--  tensorflow/docs_src/extend/adding_an_op.md | 10
-rw-r--r--  tensorflow/docs_src/get_started/mnist/beginners.md | 2
-rw-r--r--  tensorflow/docs_src/get_started/monitors.md | 8
-rw-r--r--  tensorflow/docs_src/install/install_linux.md | 48
-rw-r--r--  tensorflow/docs_src/install/install_mac.md | 14
-rw-r--r--  tensorflow/docs_src/install/install_sources.md | 7
-rw-r--r--  tensorflow/docs_src/install/install_windows.md | 4
-rw-r--r--  tensorflow/docs_src/performance/quantization.md | 4
-rw-r--r--  tensorflow/docs_src/performance/xla/index.md | 2
-rw-r--r--  tensorflow/docs_src/programmers_guide/debugger.md | 2
-rw-r--r--  tensorflow/docs_src/programmers_guide/supervisor.md | 18
-rw-r--r--  tensorflow/docs_src/tutorials/layers.md | 2
-rw-r--r--  tensorflow/docs_src/tutorials/wide.md | 2
-rw-r--r--  tensorflow/docs_src/tutorials/wide_and_deep.md | 4
-rw-r--r--  tensorflow/examples/image_retraining/retrain.py | 6
-rw-r--r--  tensorflow/examples/learn/iris.py | 4
-rwxr-xr-x  tensorflow/examples/learn/resnet.py | 2
-rw-r--r--  tensorflow/examples/tutorials/layers/cnn_mnist.py | 2
-rw-r--r--  tensorflow/examples/tutorials/monitors/iris_monitors.py | 18
-rw-r--r--  tensorflow/go/README.md | 12
-rw-r--r--  tensorflow/go/graph.go | 47
-rw-r--r--  tensorflow/java/BUILD | 13
-rw-r--r--  tensorflow/java/README.md | 2
-rw-r--r--  tensorflow/java/src/main/java/org/tensorflow/Graph.java | 5
-rw-r--r--  tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java | 101
-rw-r--r--  tensorflow/java/src/main/java/org/tensorflow/Session.java | 7
-rw-r--r--  tensorflow/java/src/main/java/org/tensorflow/Shape.java | 3
-rw-r--r--  tensorflow/java/src/main/native/saved_model_bundle_jni.cc | 107
-rw-r--r--  tensorflow/java/src/main/native/saved_model_bundle_jni.h | 37
-rw-r--r--  tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java | 53
-rw-r--r--  tensorflow/python/framework/ops.py | 4
-rw-r--r--  tensorflow/python/framework/test_util.py | 1
-rw-r--r--  tensorflow/python/kernel_tests/matmul_op_test.py | 54
-rw-r--r--  tensorflow/python/kernel_tests/variable_scope_test.py | 51
-rw-r--r--  tensorflow/python/kernel_tests/variables_test.py | 7
-rw-r--r--  tensorflow/python/layers/core.py | 21
-rw-r--r--  tensorflow/python/ops/control_flow_ops.py | 2
-rw-r--r--  tensorflow/python/ops/functional_ops.py | 2
-rw-r--r--  tensorflow/python/ops/math_ops.py | 8
-rw-r--r--  tensorflow/python/ops/rnn.py | 2
-rw-r--r--  tensorflow/python/ops/variable_scope.py | 16
-rw-r--r--  tensorflow/python/training/basic_loops.py | 2
-rw-r--r--  tensorflow/python/training/monitored_session.py | 4
-rw-r--r--  tensorflow/stream_executor/stream.cc | 1
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu | 10
-rwxr-xr-x  tensorflow/tools/ci_build/ci_parameterized_build.sh | 3
-rwxr-xr-x  tensorflow/tools/ci_build/update_version.sh | 10
-rwxr-xr-x  tensorflow/tools/ci_build/windows/libtensorflow_cpu.sh | 12
-rwxr-xr-x  tensorflow/tools/dist_test/local_test.sh | 6
-rw-r--r--  tensorflow/tools/docker/README.md | 14
-rw-r--r--  tensorflow/tools/docs/parser.py | 2
-rw-r--r--  tensorflow/tools/pip_package/BUILD | 11
-rw-r--r--  tensorflow/tools/pip_package/setup.py | 2
-rw-r--r--  tensorflow/workspace.bzl | 9
-rw-r--r--  third_party/curl.BUILD | 26
-rw-r--r--  third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint | 2
-rw-r--r--  third_party/gmock.BUILD | 18
114 files changed, 1033 insertions, 339 deletions
diff --git a/README.md b/README.md
index c99e9350dd..84c42aad18 100644
--- a/README.md
+++ b/README.md
@@ -25,20 +25,20 @@ guidelines](CONTRIBUTING.md).**
**We use [GitHub issues](https://github.com/tensorflow/tensorflow/issues) for
tracking requests and bugs, but please see
-[Community](tensorflow/g3doc/resources/index.md#community) for general questions
+[Community](tensorflow/docs_src/about/index.md#community) for general questions
and discussion.**
## Installation
-*See [Download and Setup](tensorflow/g3doc/get_started/os_setup.md) for instructions on how to install our release binaries or how to build from source.*
+*See [Installing TensorFlow](https://www.tensorflow.org/install/) for instructions on how to install our release binaries or how to build from source.*
People who are a little more adventurous can also try our nightly binaries:
-* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.0.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.0.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave))
-* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.0.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.0.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/))
-* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.0.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.0.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
-* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.0.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.0.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
-* Windows CPU-only: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=cpu,OS=windows/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow-1.0.0-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=cpu,OS=windows/))
-* Windows GPU: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=gpu,OS=windows/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow_gpu-1.0.0-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=gpu,OS=windows/))
+* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.0.1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.0.1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.0.1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
+* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.0.1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.0.1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.0.1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
+* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.0.1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.0.1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
+* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.0.1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.0.1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
+* Windows CPU-only: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=cpu,OS=windows/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow-1.0.1-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=cpu,OS=windows/))
+* Windows GPU: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=gpu,OS=windows/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow_gpu-1.0.1-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=gpu,OS=windows/))
* Android: [demo APK](https://ci.tensorflow.org/view/Nightly/job/nightly-android/lastSuccessfulBuild/artifact/out/tensorflow_demo.apk), [native libs](http://ci.tensorflow.org/view/Nightly/job/nightly-android/lastSuccessfulBuild/artifact/out/native/)
([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-android/))
diff --git a/RELEASE.md b/RELEASE.md
index f7f5e37bd7..b223f51730 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -87,6 +87,7 @@ To help you upgrade your existing TensorFlow Python code to match the API change
* In the C++ API (in tensorflow/cc), Input, Output, etc. have moved
from the tensorflow::ops namespace to tensorflow.
* Change arg order for `{softmax,sparse_softmax,sigmoid}_cross_entropy_with_logits` to be (labels, predictions), and force use of named args.
+* tf.nn.rnn_cell.* and most functions in tf.nn.rnn.* (with the exception of dynamic_rnn and raw_rnn) are temporarily in tf.contrib.rnn. They will be moved back into core for TF 1.1.
## Bug Fixes and Other Changes
* Numerous C++ API updates.
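The RELEASE.md note above describes the temporary relocation of the RNN cell classes into tf.contrib.rnn while dynamic_rnn and raw_rnn remain in core. A minimal illustrative sketch (not part of this commit, assuming the TF 1.0-era names `tf.contrib.rnn.BasicLSTMCell` and `tf.nn.dynamic_rnn`) of how code references the cells until they move back into core for TF 1.1:

```python
# Illustrative sketch only: assumes TF 1.0-era API names.
# Cell classes temporarily live in tf.contrib.rnn; dynamic_rnn stays in core tf.nn.
import tensorflow as tf

cell = tf.contrib.rnn.BasicLSTMCell(num_units=64)     # previously tf.nn.rnn_cell.BasicLSTMCell
inputs = tf.placeholder(tf.float32, [None, 10, 32])   # [batch, time, features]
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
```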
diff --git a/configure b/configure
index 5a9a7c0d30..0064d5574a 100755
--- a/configure
+++ b/configure
@@ -45,6 +45,14 @@ function bazel_clean_and_fetch() {
-//tensorflow/examples/android/..."
}
+function sed_hyphen_i() {
+ if is_macos; then
+ sed -i '' "$@"
+ else
+ sed -i "$@"
+ fi
+}
+
# Delete any leftover BUILD files from the Makefile build, which would interfere
# with Bazel parsing.
MAKEFILE_DOWNLOAD_DIR=tensorflow/contrib/makefile/downloads
@@ -173,9 +181,9 @@ else
fi
if [ "$TF_NEED_JEMALLOC" == "1" ]; then
- sed -i -e "s/WITH_JEMALLOC = False/WITH_JEMALLOC = True/" tensorflow/core/platform/default/build_config.bzl
+ sed_hyphen_i -e "s/WITH_JEMALLOC = False/WITH_JEMALLOC = True/" tensorflow/core/platform/default/build_config.bzl
else
- sed -i -e "s/WITH_JEMALLOC = True/WITH_JEMALLOC = False/" tensorflow/core/platform/default/build_config.bzl
+ sed_hyphen_i -e "s/WITH_JEMALLOC = True/WITH_JEMALLOC = False/" tensorflow/core/platform/default/build_config.bzl
fi
while [ "$TF_NEED_GCP" == "" ]; do
@@ -202,10 +210,10 @@ if [ "$TF_NEED_GCP" == "1" ]; then
fi
# Update Bazel build configuration.
- sed -i -e "s/WITH_GCP_SUPPORT = False/WITH_GCP_SUPPORT = True/" tensorflow/core/platform/default/build_config.bzl
+ sed_hyphen_i -e "s/WITH_GCP_SUPPORT = False/WITH_GCP_SUPPORT = True/" tensorflow/core/platform/default/build_config.bzl
else
# Update Bazel build configuration.
- sed -i -e "s/WITH_GCP_SUPPORT = True/WITH_GCP_SUPPORT = False/" tensorflow/core/platform/default/build_config.bzl
+ sed_hyphen_i -e "s/WITH_GCP_SUPPORT = True/WITH_GCP_SUPPORT = False/" tensorflow/core/platform/default/build_config.bzl
fi
while [ "$TF_NEED_HDFS" == "" ]; do
@@ -224,10 +232,10 @@ done
if [ "$TF_NEED_HDFS" == "1" ]; then
# Update Bazel build configuration.
- sed -i -e "s/WITH_HDFS_SUPPORT = False/WITH_HDFS_SUPPORT = True/" tensorflow/core/platform/default/build_config.bzl
+ sed_hyphen_i -e "s/WITH_HDFS_SUPPORT = False/WITH_HDFS_SUPPORT = True/" tensorflow/core/platform/default/build_config.bzl
else
# Update Bazel build configuration.
- sed -i -e "s/WITH_HDFS_SUPPORT = True/WITH_HDFS_SUPPORT = False/" tensorflow/core/platform/default/build_config.bzl
+ sed_hyphen_i -e "s/WITH_HDFS_SUPPORT = True/WITH_HDFS_SUPPORT = False/" tensorflow/core/platform/default/build_config.bzl
fi
## Enable XLA.
@@ -243,10 +251,10 @@ done
if [ "$TF_ENABLE_XLA" == "1" ]; then
# Update Bazel build configuration.
- sed -i -e "s/^WITH_XLA_SUPPORT = [FT].*/WITH_XLA_SUPPORT = True/" tensorflow/core/platform/default/build_config_root.bzl
+ sed_hyphen_i -e "s/^WITH_XLA_SUPPORT = [FT].*/WITH_XLA_SUPPORT = True/" tensorflow/core/platform/default/build_config_root.bzl
else
# Update Bazel build configuration.
- sed -i -e "s/^WITH_XLA_SUPPORT = [FT].*/WITH_XLA_SUPPORT = False/" tensorflow/core/platform/default/build_config_root.bzl
+ sed_hyphen_i -e "s/^WITH_XLA_SUPPORT = [FT].*/WITH_XLA_SUPPORT = False/" tensorflow/core/platform/default/build_config_root.bzl
fi
diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc
index 02aba54e43..295777e897 100644
--- a/tensorflow/c/c_api.cc
+++ b/tensorflow/c/c_api.cc
@@ -1701,6 +1701,12 @@ void TF_ImportGraphDefOptionsAddInputMapping(TF_ImportGraphDefOptions* opts,
opts->opts.input_map[TensorId(src_name, src_index)] = ToTensorId(dst);
}
+void TF_ImportGraphDefOptionsRemapControlDependency(
+ TF_ImportGraphDefOptions* opts, const char* src_name, TF_Operation* dst) {
+ opts->opts.input_map[TensorId(src_name, tensorflow::Graph::kControlSlot)] =
+ TensorId(dst->node.name(), tensorflow::Graph::kControlSlot);
+}
+
extern void TF_ImportGraphDefOptionsAddControlDependency(
TF_ImportGraphDefOptions* opts, TF_Operation* oper) {
opts->opts.control_dependencies.push_back(oper->node.name());
diff --git a/tensorflow/c/c_api.h b/tensorflow/c/c_api.h
index 02ba0ac509..f837b68d76 100644
--- a/tensorflow/c/c_api.h
+++ b/tensorflow/c/c_api.h
@@ -810,6 +810,13 @@ extern void TF_ImportGraphDefOptionsAddInputMapping(
TF_ImportGraphDefOptions* opts, const char* src_name, int src_index,
TF_Output dst);
+// Set any imported nodes with control input `src_name` to have that input
+// replaced with `dst`. `src_name` refers to a node in the graph to be imported,
+// `dst` references an operation already existing in the graph being imported
+// into.
+extern void TF_GraphImportGraphDefOptionsRemapControlDependency(
+ TF_ImportGraphDefOptions* opts, const char* src_name, TF_Operation* dst);
+
// Cause the imported graph to have a control dependency on `oper`. `oper`
// should exist in the graph being imported into.
extern void TF_ImportGraphDefOptionsAddControlDependency(
diff --git a/tensorflow/compiler/jit/graph_to_functiondef.cc b/tensorflow/compiler/jit/graph_to_functiondef.cc
index d4482c87ff..ce943471fb 100644
--- a/tensorflow/compiler/jit/graph_to_functiondef.cc
+++ b/tensorflow/compiler/jit/graph_to_functiondef.cc
@@ -185,7 +185,7 @@ Status GraphToFunctionDef(const Graph& graph, const string& name,
}
// Add regular inputs
- for (int i = 0; i < in_edges.size(); ++i) {
+ for (std::vector<const Edge*>::size_type i = 0; i < in_edges.size(); ++i) {
const Edge* edge = in_edges[i];
if (edge == nullptr) {
return errors::InvalidArgument(
diff --git a/tensorflow/compiler/jit/graphcycles/graphcycles.cc b/tensorflow/compiler/jit/graphcycles/graphcycles.cc
index 87d5de09d1..2139ffed4b 100644
--- a/tensorflow/compiler/jit/graphcycles/graphcycles.cc
+++ b/tensorflow/compiler/jit/graphcycles/graphcycles.cc
@@ -76,7 +76,7 @@ struct GraphCycles::Rep {
GraphCycles::GraphCycles() : rep_(new Rep) {}
GraphCycles::~GraphCycles() {
- for (int i = 0; i < rep_->nodes_.size(); i++) {
+ for (Vec<Node*>::size_type i = 0; i < rep_->nodes_.size(); i++) {
delete rep_->nodes_[i];
}
delete rep_;
@@ -85,7 +85,7 @@ GraphCycles::~GraphCycles() {
bool GraphCycles::CheckInvariants() const {
Rep* r = rep_;
NodeSet ranks; // Set of ranks seen so far.
- for (int32 x = 0; x < r->nodes_.size(); x++) {
+ for (Vec<Node*>::size_type x = 0; x < r->nodes_.size(); x++) {
Node* nx = r->nodes_[x];
if (nx->visited) {
LOG(FATAL) << "Did not clear visited marker on node " << x;
@@ -259,7 +259,7 @@ static void Reorder(GraphCycles::Rep* r) {
r->deltaf_.end(), r->merged_.begin());
// Assign the ranks in order to the collected list.
- for (int32 i = 0; i < r->list_.size(); i++) {
+ for (Vec<int32>::size_type i = 0; i < r->list_.size(); i++) {
r->nodes_[r->list_[i]]->rank = r->merged_[i];
}
}
@@ -277,7 +277,7 @@ static void Sort(const Vec<Node*>& nodes, Vec<int32>* delta) {
}
static void MoveToList(GraphCycles::Rep* r, Vec<int32>* src, Vec<int32>* dst) {
- for (int32 i = 0; i < src->size(); i++) {
+ for (Vec<int32>::size_type i = 0; i < src->size(); i++) {
int32 w = (*src)[i];
(*src)[i] = r->nodes_[w]->rank; // Replace src entry with its rank
r->nodes_[w]->visited = false; // Prepare for future DFS calls
@@ -286,7 +286,7 @@ static void MoveToList(GraphCycles::Rep* r, Vec<int32>* src, Vec<int32>* dst) {
}
static void ClearVisitedBits(GraphCycles::Rep* r, const Vec<int32>& nodes) {
- for (int32 i = 0; i < nodes.size(); i++) {
+ for (Vec<int32>::size_type i = 0; i < nodes.size(); i++) {
r->nodes_[nodes[i]]->visited = false;
}
}
diff --git a/tensorflow/compiler/tf2xla/xla_compiler.cc b/tensorflow/compiler/tf2xla/xla_compiler.cc
index efc8dfce93..71a994b451 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler.cc
+++ b/tensorflow/compiler/tf2xla/xla_compiler.cc
@@ -225,7 +225,8 @@ Status BuildArguments(const std::vector<XlaCompiler::Argument>& args,
parameters.reserve(args.size());
variables.reserve(args.size());
- for (int i = 0; i < args.size(); ++i) {
+ for (std::vector<XlaCompiler::Argument>::size_type i = 0; i < args.size();
+ ++i) {
XlaContext::Argument& context_arg = (*context_args)[i];
context_arg.name = args[i].name;
context_arg.value.constant_value = args[i].constant_value;
@@ -262,7 +263,7 @@ Status BuildArguments(const std::vector<XlaCompiler::Argument>& args,
input_shapes->resize(parameters.size());
input_mapping->resize(parameters.size());
- for (int i = 0; i < input_shapes->size(); ++i) {
+ for (std::vector<int>::size_type i = 0; i < input_shapes->size(); ++i) {
const XlaCompiler::Argument& arg = args[parameters[i]];
// Computes the shapes of non-constant arguments.
xla::PrimitiveType type;
@@ -276,12 +277,12 @@ Status BuildArguments(const std::vector<XlaCompiler::Argument>& args,
xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(*input_shapes);
xla::ComputationDataHandle tuple =
builder->Parameter(0, tuple_shape, "arg_tuple");
- for (int i = 0; i < input_shapes->size(); ++i) {
+ for (std::vector<int>::size_type i = 0; i < input_shapes->size(); ++i) {
(*context_args)[parameters[i]].value.handle =
builder->GetTupleElement(tuple, i);
}
} else {
- for (int i = 0; i < input_shapes->size(); ++i) {
+ for (std::vector<int>::size_type i = 0; i < input_shapes->size(); ++i) {
(*context_args)[parameters[i]].value.handle =
builder->Parameter(i, (*input_shapes)[i], strings::StrCat("arg", i));
}
@@ -413,7 +414,8 @@ Status XlaCompiler::CompileGraph(string const& name,
VLOG(2) << "Outputs: total: " << context->retvals().size()
<< " nonconstant: " << num_nonconst_outputs;
result->outputs.resize(context->retvals().size());
- for (int i = 0; i < context->retvals().size(); ++i) {
+ for (std::vector<XlaContext::HandleOrConstant>::size_type i = 0;
+ i < context->retvals().size(); ++i) {
const XlaContext::HandleOrConstant& retval = context->retvals()[i];
if (retval.is_constant) {
OutputDescription& output = result->outputs[i];
@@ -457,7 +459,8 @@ Status XlaCompiler::CompileGraph(string const& name,
// Converts the output shapes to TensorShapes.
int computation_output = 0;
- for (int i = 0; i < context->retvals().size(); ++i) {
+ for (std::vector<XlaContext::HandleOrConstant>::size_type i = 0;
+ i < context->retvals().size(); ++i) {
const XlaContext::HandleOrConstant& retval = context->retvals()[i];
if (!retval.is_constant) {
CHECK_LT(computation_output, num_nonconst_outputs);
@@ -474,7 +477,8 @@ Status XlaCompiler::CompileGraph(string const& name,
}
}
- for (int i = 0; i < result->variable_updates.size(); ++i) {
+ for (std::vector<VariableUpdate>::size_type i = 0;
+ i < result->variable_updates.size(); ++i) {
if (num_computation_outputs > 1) {
result->variable_updates[i].shape =
XLAShapeToTensorShape(xla::ShapeUtil::GetTupleElementShape(
diff --git a/tensorflow/compiler/xla/array2d.h b/tensorflow/compiler/xla/array2d.h
index ceed573f1f..f885821210 100644
--- a/tensorflow/compiler/xla/array2d.h
+++ b/tensorflow/compiler/xla/array2d.h
@@ -44,14 +44,12 @@ class Array2D {
Array2D() : n1_(0), n2_(0) {}
// Creates an array of dimensions n1 x n2, uninitialized values.
- Array2D(const int64 n1, const int64 n2) : n1_(n1), n2_(n2) {
- values_.resize(n1 * n2);
- }
+ Array2D(const int64 n1, const int64 n2)
+ : n1_(n1), n2_(n2), values_(n1 * n2) {}
// Creates an array of dimensions n1 x n2, initialized to value.
- Array2D(const int64 n1, const int64 n2, const T value) : Array2D(n1, n2) {
- Fill(value);
- }
+ Array2D(const int64 n1, const int64 n2, const T value)
+ : n1_(n1), n2_(n2), values_(n1 * n2, value) {}
// Creates an array from the given nested initializer list. The outer
// initializer list is the first dimension; the inner is the second dimension.
diff --git a/tensorflow/compiler/xla/array3d.h b/tensorflow/compiler/xla/array3d.h
index 46bc1a6392..654af8f030 100644
--- a/tensorflow/compiler/xla/array3d.h
+++ b/tensorflow/compiler/xla/array3d.h
@@ -39,15 +39,11 @@ class Array3D {
public:
// Creates an array of dimensions n1 x n2 x n3, uninitialized values.
Array3D(const int64 n1, const int64 n2, const int64 n3)
- : n1_(n1), n2_(n2), n3_(n3) {
- values_.resize(n1 * n2 * n3);
- }
+ : n1_(n1), n2_(n2), n3_(n3), values_(n1 * n2 * n3) {}
// Creates an array of dimensions n1 x n2 x n3, initialized to value.
Array3D(const int64 n1, const int64 n2, const int64 n3, const T value)
- : Array3D(n1, n2, n3) {
- Fill(value);
- }
+ : n1_(n1), n2_(n2), n3_(n3), values_(n1 * n2 * n3, value) {}
// Creates an array from the given nested initializer list. The outer
// initializer list is the first dimension, and so on.
diff --git a/tensorflow/compiler/xla/array4d.h b/tensorflow/compiler/xla/array4d.h
index db51a57cf2..199ad2baae 100644
--- a/tensorflow/compiler/xla/array4d.h
+++ b/tensorflow/compiler/xla/array4d.h
@@ -56,15 +56,19 @@ class Array4D {
public:
// Creates a 4D array, unitialized values.
Array4D(int64 planes, int64 depth, int64 height, int64 width)
- : planes_(planes), depth_(depth), height_(height), width_(width) {
- values_.resize(planes * depth * height * width);
- }
+ : planes_(planes),
+ depth_(depth),
+ height_(height),
+ width_(width),
+ values_(planes * depth * height * width) {}
// Creates a 4D array, initalized to value.
Array4D(int64 planes, int64 depth, int64 height, int64 width, T value)
- : Array4D(planes, depth, height, width) {
- Fill(value);
- }
+ : planes_(planes),
+ depth_(depth),
+ height_(height),
+ width_(width),
+ values_(planes * depth * height * width, value) {}
// Creates a 4D array, filled with values.
//
diff --git a/tensorflow/compiler/xla/client/client.cc b/tensorflow/compiler/xla/client/client.cc
index c4430dab65..2a8a4b321a 100644
--- a/tensorflow/compiler/xla/client/client.cc
+++ b/tensorflow/compiler/xla/client/client.cc
@@ -301,7 +301,8 @@ StatusOr<std::vector<std::unique_ptr<GlobalData>>> Client::ExecuteParallel(
}
std::vector<std::unique_ptr<GlobalData>> outputs;
- for (int64 i = 0; i < computations.size(); ++i) {
+ for (tensorflow::gtl::ArraySlice<ComputationInstance>::size_type i = 0;
+ i < computations.size(); ++i) {
outputs.push_back(
MakeUnique<GlobalData>(stub_, response.responses(i).output()));
if (computations[i].execution_profile != nullptr) {
diff --git a/tensorflow/compiler/xla/client/computation_builder.cc b/tensorflow/compiler/xla/client/computation_builder.cc
index fb3c3e0530..70afaf2ccb 100644
--- a/tensorflow/compiler/xla/client/computation_builder.cc
+++ b/tensorflow/compiler/xla/client/computation_builder.cc
@@ -106,7 +106,9 @@ bool ComputationBuilder::MakeWindow(
tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
tensorflow::gtl::ArraySlice<int64> lhs_dilation,
tensorflow::gtl::ArraySlice<int64> rhs_dilation, Window* window) {
- const auto verify_size = [&](const int64 x, const char* x_name) {
+ const auto verify_size = [&](const tensorflow::gtl::ArraySlice<
+ int64>::size_type x,
+ const char* x_name) {
if (x == 0 || x == window_dimensions.size()) {
return true;
} else {
@@ -449,7 +451,8 @@ ComputationDataHandle ComputationBuilder::Collapse(
// Don't support out-of-order collapse here.
// Checks that the collapsed dimensions are in order and consecutive.
- for (int i = 1; i < dims_to_collapse.size(); ++i) {
+ for (tensorflow::gtl::ArraySlice<int64>::size_type i = 1;
+ i < dims_to_collapse.size(); ++i) {
if (dims_to_collapse[i] - 1 != dims_to_collapse[i - 1]) {
NoteError(InvalidArgument(
"Collapsed dimensions are not in order and consecutive."));
@@ -693,14 +696,15 @@ ComputationDataHandle ComputationBuilder::ConvWithGeneralDimensions(
std::vector<int64> base_area_dimensions(
dimension_numbers.spatial_dimensions_size());
- for (int i = 0; i < base_area_dimensions.size(); ++i) {
+ for (std::vector<int64>::size_type i = 0; i < base_area_dimensions.size();
+ ++i) {
base_area_dimensions[i] =
lhs_shape->dimensions(dimension_numbers.spatial_dimensions(i));
}
std::vector<int64> window_dimensions(
dimension_numbers.kernel_spatial_dimensions_size());
- for (int i = 0; i < window_dimensions.size(); ++i) {
+ for (std::vector<int64>::size_type i = 0; i < window_dimensions.size(); ++i) {
window_dimensions[i] =
rhs_shape->dimensions(dimension_numbers.kernel_spatial_dimensions(i));
}
@@ -752,7 +756,7 @@ ComputationDataHandle ComputationBuilder::ConvGeneralDilated(
std::vector<int64> window_dimensions(
dimension_numbers.kernel_spatial_dimensions_size());
- for (int i = 0; i < window_dimensions.size(); ++i) {
+ for (std::vector<int64>::size_type i = 0; i < window_dimensions.size(); ++i) {
window_dimensions[i] =
rhs_shape->dimensions(dimension_numbers.kernel_spatial_dimensions(i));
}
diff --git a/tensorflow/compiler/xla/client/padding.cc b/tensorflow/compiler/xla/client/padding.cc
index 281fa10408..8d75815711 100644
--- a/tensorflow/compiler/xla/client/padding.cc
+++ b/tensorflow/compiler/xla/client/padding.cc
@@ -35,7 +35,8 @@ std::vector<std::pair<int64, int64>> MakePadding(
return low_high_padding;
case Padding::kSame:
- for (int64 i = 0; i < input_dimensions.size(); ++i) {
+ for (tensorflow::gtl::ArraySlice<int64>::size_type i = 0;
+ i < input_dimensions.size(); ++i) {
int64 input_dimension = input_dimensions[i];
int64 window_dimension = window_dimensions[i];
int64 window_stride = window_strides[i];
diff --git a/tensorflow/compiler/xla/index_util.cc b/tensorflow/compiler/xla/index_util.cc
index 4edbfd2482..eb937c3614 100644
--- a/tensorflow/compiler/xla/index_util.cc
+++ b/tensorflow/compiler/xla/index_util.cc
@@ -32,7 +32,8 @@ namespace xla {
// Padding and nested layouts not supported yet.
DCHECK_EQ(0, shape.layout().padded_dimensions_size());
- for (int i = 0; i < multi_index.size(); ++i) {
+ for (tensorflow::gtl::ArraySlice<int64>::size_type i = 0;
+ i < multi_index.size(); ++i) {
DCHECK_GE(multi_index[i], 0);
DCHECK_LT(multi_index[i], shape.dimensions(i))
<< "indexing beyond extent in dimension " << i << ":"
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
index c46fcf75ed..3c27809db3 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
@@ -519,9 +519,9 @@ CpuCompiler::CompileAheadOfTime(
error.c_str());
}
- llvm::Reloc::Model reloc_model;
- llvm::PICLevel::Level pic_level;
- llvm::PIELevel::Level pie_level;
+ llvm::Reloc::Model reloc_model = llvm::Reloc::Static;
+ llvm::PICLevel::Level pic_level = llvm::PICLevel::NotPIC;
+ llvm::PIELevel::Level pie_level = llvm::PIELevel::Default;
switch (options.relocation_model()) {
case CpuAotCompilationOptions::RelocationModel::Static:
reloc_model = llvm::Reloc::Static;
@@ -571,7 +571,8 @@ CpuCompiler::CompileAheadOfTime(
}
std::vector<std::unique_ptr<AotCompilationResult>> results;
- for (int i = 0; i < hlo_modules.size(); ++i) {
+ for (std::vector<std::unique_ptr<HloModule>>::size_type i = 0;
+ i < hlo_modules.size(); ++i) {
HloModule* hlo_module = hlo_modules[i].get();
HloModuleConfig* module_config = module_configs[i].get();
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
index c71756c18b..7e47558e94 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
@@ -506,7 +506,7 @@ Status IrEmitter::HandleReduceWindow(HloInstruction* reduce_window,
llvm_ir::IrArray::Index input_index(index.size());
llvm::Value* in_bounds_condition = nullptr;
- for (int64 i = 0; i < index.size(); ++i) {
+ for (size_t i = 0; i < index.size(); ++i) {
llvm::Value* strided_index = ir_builder_.CreateNSWMul(
index[i], ir_builder_.getInt64(window.dimensions(i).stride()));
input_index[i] = ir_builder_.CreateNSWSub(
@@ -1111,7 +1111,7 @@ Status IrEmitter::HandleReduce(HloInstruction* reduce, HloInstruction* arg,
llvm_ir::IrArray::Index input_index = reduced_dims_index;
llvm_ir::IrArray::Index::const_iterator it = index.begin();
- for (int64 i = 0; i < input_index.size(); ++i) {
+ for (auto i = 0; i < input_index.size(); ++i) {
if (input_index[i] == nullptr) {
input_index[i] = *it++;
}
@@ -1180,7 +1180,7 @@ Status IrEmitter::HandlePad(HloInstruction* pad) {
// output_index := edge_padding_low + operand_index * (interior_padding + 1)
const PaddingConfig& padding_config = pad->padding_config();
llvm_ir::IrArray::Index output_index;
- for (int64 i = 0; i < operand_index.size(); ++i) {
+ for (auto i = 0; i < operand_index.size(); ++i) {
llvm::Value* offset = ir_builder_.CreateMul(
operand_index[i],
ir_builder_.getInt64(padding_config.dimensions(i).interior_padding() +
@@ -1294,7 +1294,7 @@ Status IrEmitter::HandleCustomCall(
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
i8_ptr_type, ir_builder_.getInt32(operands.size()),
"cc_operands_alloca", &ir_builder_);
- for (int i = 0; i < operands.size(); ++i) {
+ for (auto i = 0; i < operands.size(); ++i) {
const HloInstruction* operand = operands[i];
llvm::Value* operand_as_i8ptr =
ir_builder_.CreatePointerCast(GetEmittedValueFor(operand), i8_ptr_type);
@@ -1659,7 +1659,7 @@ void IrEmitter::EmitArrayFunctionCallInto(
ir_builder_.getInt32(parameter_addresses.size()),
tensorflow::strings::StrCat(name, "_parameter_addresses"),
&ir_builder_);
- for (int i = 0; i < parameter_addresses.size(); ++i) {
+ for (auto i = 0; i < parameter_addresses.size(); ++i) {
llvm::Value* parameter_as_i8ptr = ir_builder_.CreateBitCast(
parameter_addresses[i], ir_builder_.getInt8PtrTy(),
llvm_ir::AsStringRef(tensorflow::strings::StrCat(name, "_parameter_", i,
diff --git a/tensorflow/compiler/xla/service/executable.cc b/tensorflow/compiler/xla/service/executable.cc
index b6c8714d8f..d9d60529d8 100644
--- a/tensorflow/compiler/xla/service/executable.cc
+++ b/tensorflow/compiler/xla/service/executable.cc
@@ -40,7 +40,8 @@ Executable::ExecuteOnStreams(
std::vector<perftools::gputools::DeviceMemoryBase> return_values(
run_options.size());
- for (int64 i = 0; i < run_options.size(); ++i) {
+ for (tensorflow::gtl::ArraySlice<const ExecutableRunOptions>::size_type i = 0;
+ i < run_options.size(); ++i) {
// We cannot BlockHostUntilDone() on the already-launched executions in case
// of error, since if the executions communicate, the initially launched
// executions may never complete if not all executions are running.
diff --git a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
index 67c80bf93b..1cb03db4ee 100644
--- a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
@@ -276,7 +276,7 @@ llvm_ir::ElementGenerator GpuElementalIrEmitter::MakeElementGenerator(
auto index = padded_index;
llvm::Value* in_bounds =
llvm::ConstantInt::get(ir_builder_->getInt1Ty(), 1);
- for (int i = 0; i < index.size(); ++i) {
+ for (auto i = 0; i < index.size(); ++i) {
auto index_typed_const = [=](int64 n) {
return llvm::ConstantInt::get(index[i]->getType(), n);
};
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
index be441929bb..d17ef5a67e 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
@@ -430,12 +430,12 @@ Status IrEmitter::HandleDot(HloInstruction* dot,
// and lhs indexes with the reduction dimensions removed. The terms from the
// rhs index are the lower dimensions in the index so we add them first.
llvm_ir::IrArray::Index target_index;
- for (int dimension = 0; dimension < lhs_index.size(); ++dimension) {
+ for (size_t dimension = 0; dimension < lhs_index.size(); ++dimension) {
if (dimension != lhs_reduction_dimension) {
target_index.push_back(lhs_index[dimension]);
}
}
- for (int dimension = 0; dimension < rhs_index.size(); ++dimension) {
+ for (size_t dimension = 0; dimension < rhs_index.size(); ++dimension) {
if (dimension != rhs_reduction_dimension) {
target_index.push_back(rhs_index[dimension]);
}
@@ -513,7 +513,7 @@ Status IrEmitter::HandleReduce(HloInstruction* reduce, HloInstruction* arg,
llvm_ir::IrArray::Index input_index = reduced_dims_index;
llvm_ir::IrArray::Index::const_iterator it = index.begin();
- for (int64 i = 0; i < input_index.size(); ++i) {
+ for (auto i = 0; i < input_index.size(); ++i) {
if (input_index[i] == nullptr) {
input_index[i] = *it++;
}
@@ -614,7 +614,7 @@ llvm_ir::IrArray::Index IrEmitter::EmitOperandArrayLoopNest(
llvm_ir::IrArray::Index index =
loop_nest->AddLoopsForShapeOnDimensions(shape, dimensions, name_suffix);
// Verify every dimension except the reduction dimension was set in the index.
- for (int dimension = 0; dimension < index.size(); ++dimension) {
+ for (auto dimension = 0; dimension < index.size(); ++dimension) {
if (dimension == reduction_dimension) {
DCHECK_EQ(nullptr, index[dimension]);
} else {
diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
index 2c63b0634b..4373180535 100644
--- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
+++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
@@ -136,7 +136,9 @@ string InstructionSequenceGraph(
std::vector<HloInstruction*> param_instructions;
for (auto& instruction : instructions) {
if (instruction->opcode() == HloOpcode::kParameter) {
- int64 param_number = instruction->parameter_number();
+ std::vector<HloInstruction*>::size_type param_number =
+ instruction->parameter_number();
+
if (param_instructions.size() < param_number + 1) {
param_instructions.resize(param_number + 1, nullptr);
}
diff --git a/tensorflow/compiler/xla/service/instruction_fusion.cc b/tensorflow/compiler/xla/service/instruction_fusion.cc
index 34a6bb8a52..c60e13fc9c 100644
--- a/tensorflow/compiler/xla/service/instruction_fusion.cc
+++ b/tensorflow/compiler/xla/service/instruction_fusion.cc
@@ -101,6 +101,8 @@ bool IsExpensive(const HloInstruction& instruction) {
case HloOpcode::kRecv:
return true;
}
+
+ return false;
}
bool FusionWouldDuplicate(HloInstruction* producer, HloInstruction* consumer) {
@@ -124,7 +126,8 @@ StatusOr<bool> InstructionFusion::Run(HloModule* module) {
std::vector<HloInstruction*> post_order(post_order_list.begin(),
post_order_list.end());
tensorflow::gtl::FlatMap<HloInstruction*, int> post_order_index;
- for (int i = 0; i < post_order.size(); ++i) {
+ for (std::vector<HloInstruction*>::size_type i = 0; i < post_order.size();
+ ++i) {
InsertOrDie(&post_order_index, post_order[i], i);
}
diff --git a/tensorflow/contrib/BUILD b/tensorflow/contrib/BUILD
index 2ab65a6074..bb59ddf621 100644
--- a/tensorflow/contrib/BUILD
+++ b/tensorflow/contrib/BUILD
@@ -7,6 +7,8 @@ exports_files(["LICENSE"])
package(default_visibility = ["//tensorflow:__subpackages__"])
+load("//tensorflow:tensorflow.bzl", "if_not_windows")
+
py_library(
name = "contrib_py",
srcs = glob(["**/*.py"]),
@@ -40,7 +42,6 @@ py_library(
"//tensorflow/contrib/losses:losses_py",
"//tensorflow/contrib/memory_stats:memory_stats_py",
"//tensorflow/contrib/metrics:metrics_py",
- "//tensorflow/contrib/nccl:nccl_py",
"//tensorflow/contrib/ndlstm",
"//tensorflow/contrib/nn:nn_py",
"//tensorflow/contrib/opt:opt_py",
@@ -61,7 +62,9 @@ py_library(
"//tensorflow/contrib/tfprof",
"//tensorflow/contrib/training:training_py",
"//tensorflow/contrib/util:util_py",
- ],
+ ] + if_not_windows([
+ "//tensorflow/contrib/nccl:nccl_py",
+ ]),
)
cc_library(
diff --git a/tensorflow/contrib/cmake/external/farmhash.cmake b/tensorflow/contrib/cmake/external/farmhash.cmake
index f6805a33aa..c256f5f303 100644
--- a/tensorflow/contrib/cmake/external/farmhash.cmake
+++ b/tensorflow/contrib/cmake/external/farmhash.cmake
@@ -19,7 +19,7 @@ if(WIN32)
URL_HASH ${farmhash_HASH}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
BUILD_IN_SOURCE 1
- PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/patches/farmhash/CMakeLists.txt ${farmhash_BUILD}
+ PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/patches/farmhash/CMakeLists.txt ${farmhash_BUILD}
INSTALL_DIR ${farmhash_INSTALL}
CMAKE_CACHE_ARGS
-DCMAKE_BUILD_TYPE:STRING=Release
@@ -53,5 +53,5 @@ add_custom_target(farmhash_copy_headers_to_destination
foreach(header_file ${farmhash_HEADERS})
add_custom_command(TARGET farmhash_copy_headers_to_destination PRE_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy ${header_file} ${farmhash_INCLUDE_DIR}/)
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${header_file} ${farmhash_INCLUDE_DIR}/)
endforeach()
diff --git a/tensorflow/contrib/cmake/external/gemmlowp.cmake b/tensorflow/contrib/cmake/external/gemmlowp.cmake
index 93a0c8d864..30ee21a13b 100644
--- a/tensorflow/contrib/cmake/external/gemmlowp.cmake
+++ b/tensorflow/contrib/cmake/external/gemmlowp.cmake
@@ -11,5 +11,5 @@ ExternalProject_Add(gemmlowp
URL_HASH ${gemmlowp_HASH}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
BUILD_IN_SOURCE 1
- PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/patches/gemmlowp/CMakeLists.txt ${gemmlowp_BUILD}
+ PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/patches/gemmlowp/CMakeLists.txt ${gemmlowp_BUILD}
INSTALL_COMMAND "")
diff --git a/tensorflow/contrib/cmake/external/gif.cmake b/tensorflow/contrib/cmake/external/gif.cmake
index da20561b88..aaa0787f8b 100644
--- a/tensorflow/contrib/cmake/external/gif.cmake
+++ b/tensorflow/contrib/cmake/external/gif.cmake
@@ -19,7 +19,7 @@ if(WIN32)
PREFIX gif
URL ${gif_URL}
URL_HASH ${gif_HASH}
- PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/patches/gif/CMakeLists.txt ${gif_BUILD}
+ PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/patches/gif/CMakeLists.txt ${gif_BUILD}
INSTALL_DIR ${gif_INSTALL}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
CMAKE_CACHE_ARGS
@@ -29,7 +29,7 @@ if(WIN32)
)
ExternalProject_Add_Step(gif copy_unistd
- COMMAND ${CMAKE_COMMAND} -E copy
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
${CMAKE_SOURCE_DIR}/patches/gif/unistd.h ${gif_BUILD}/lib/unistd.h
DEPENDEES patch
DEPENDERS build
@@ -67,5 +67,5 @@ add_custom_target(gif_copy_headers_to_destination
foreach(header_file ${gif_HEADERS})
add_custom_command(TARGET gif_copy_headers_to_destination PRE_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy ${header_file} ${gif_INCLUDE_DIR}/)
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${header_file} ${gif_INCLUDE_DIR}/)
endforeach()
diff --git a/tensorflow/contrib/cmake/external/googletest.cmake b/tensorflow/contrib/cmake/external/googletest.cmake
index e6daf62a51..fe1a936717 100644
--- a/tensorflow/contrib/cmake/external/googletest.cmake
+++ b/tensorflow/contrib/cmake/external/googletest.cmake
@@ -19,7 +19,7 @@ ExternalProject_Add(googletest
GIT_TAG ${googletest_TAG}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
BUILD_IN_SOURCE 1
- #PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/patches/grpc/CMakeLists.txt ${GRPC_BUILD}
+ #PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/patches/grpc/CMakeLists.txt ${GRPC_BUILD}
INSTALL_COMMAND ""
CMAKE_CACHE_ARGS
-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
diff --git a/tensorflow/contrib/cmake/external/grpc.cmake b/tensorflow/contrib/cmake/external/grpc.cmake
index c33b0dd81e..7065e5d60d 100644
--- a/tensorflow/contrib/cmake/external/grpc.cmake
+++ b/tensorflow/contrib/cmake/external/grpc.cmake
@@ -24,7 +24,7 @@ ExternalProject_Add(grpc
GIT_TAG ${GRPC_TAG}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
BUILD_IN_SOURCE 1
- PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/patches/grpc/CMakeLists.txt ${GRPC_BUILD}
+ PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/patches/grpc/CMakeLists.txt ${GRPC_BUILD}
INSTALL_COMMAND ""
CMAKE_CACHE_ARGS
-DCMAKE_BUILD_TYPE:STRING=Release
diff --git a/tensorflow/contrib/cmake/external/highwayhash.cmake b/tensorflow/contrib/cmake/external/highwayhash.cmake
index 9f80be32cb..972c97a55a 100644
--- a/tensorflow/contrib/cmake/external/highwayhash.cmake
+++ b/tensorflow/contrib/cmake/external/highwayhash.cmake
@@ -28,7 +28,7 @@ ExternalProject_Add(highwayhash
GIT_TAG ${highwayhash_TAG}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
BUILD_IN_SOURCE 1
- PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/patches/highwayhash/CMakeLists.txt ${highwayhash_BUILD}
+ PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/patches/highwayhash/CMakeLists.txt ${highwayhash_BUILD}
INSTALL_DIR ${highwayhash_INSTALL}
CMAKE_CACHE_ARGS
-DCMAKE_BUILD_TYPE:STRING=Release
diff --git a/tensorflow/contrib/cmake/external/jpeg.cmake b/tensorflow/contrib/cmake/external/jpeg.cmake
index cde037949c..fcfa9d3485 100644
--- a/tensorflow/contrib/cmake/external/jpeg.cmake
+++ b/tensorflow/contrib/cmake/external/jpeg.cmake
@@ -32,7 +32,7 @@ if (WIN32)
PREFIX jpeg
URL ${jpeg_URL}
URL_HASH ${jpeg_HASH}
- PATCH_COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/patches/jpeg/CMakeLists.txt ${jpeg_BUILD}
+ PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/patches/jpeg/CMakeLists.txt ${jpeg_BUILD}
INSTALL_DIR ${jpeg_INSTALL}
DOWNLOAD_DIR "${DOWNLOAD_LOCATION}"
CMAKE_CACHE_ARGS
@@ -42,7 +42,7 @@ if (WIN32)
)
ExternalProject_Add_Step(jpeg copy_jconfig
- COMMAND ${CMAKE_COMMAND} -E copy
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
${jpeg_BUILD}/jconfig.vc ${jpeg_BUILD}/jconfig.h
DEPENDEES patch
DEPENDERS build
@@ -76,5 +76,5 @@ add_custom_target(jpeg_copy_headers_to_destination
foreach(header_file ${jpeg_HEADERS})
add_custom_command(TARGET jpeg_copy_headers_to_destination PRE_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy ${header_file} ${jpeg_INCLUDE_DIR})
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${header_file} ${jpeg_INCLUDE_DIR})
endforeach()
diff --git a/tensorflow/contrib/cmake/external/png.cmake b/tensorflow/contrib/cmake/external/png.cmake
index bbf626f87b..bfad8e5a26 100644
--- a/tensorflow/contrib/cmake/external/png.cmake
+++ b/tensorflow/contrib/cmake/external/png.cmake
@@ -42,5 +42,5 @@ add_custom_target(png_copy_headers_to_destination
foreach(header_file ${png_HEADERS})
add_custom_command(TARGET png_copy_headers_to_destination PRE_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy ${header_file} ${png_INCLUDE_DIR}/)
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${header_file} ${png_INCLUDE_DIR}/)
endforeach()
diff --git a/tensorflow/contrib/cmake/external/tensorboard.cmake b/tensorflow/contrib/cmake/external/tensorboard.cmake
index dd2613a15c..a249af0705 100644
--- a/tensorflow/contrib/cmake/external/tensorboard.cmake
+++ b/tensorflow/contrib/cmake/external/tensorboard.cmake
@@ -20,7 +20,7 @@ function(tb_new_http_archive)
foreach(src_file ${_TB_FILES})
add_custom_command(
TARGET tensorboard_copy_dependencies PRE_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy ${src_dir}/${src_file} ${dst_dir}/${src_file}
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src_dir}/${src_file} ${dst_dir}/${src_file}
)
endforeach()
@@ -37,7 +37,7 @@ function(tb_http_file)
add_custom_command(
TARGET tensorboard_copy_dependencies PRE_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy ${src_dir}/${src_file} ${dst_dir}/${src_file}
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src_dir}/${src_file} ${dst_dir}/${src_file}
)
add_custom_target(${_TB_NAME} DEPENDS ${src_dir}/${src_file})
diff --git a/tensorflow/contrib/cmake/external/zlib.cmake b/tensorflow/contrib/cmake/external/zlib.cmake
index afe5e366ae..eb7d4bc38b 100644
--- a/tensorflow/contrib/cmake/external/zlib.cmake
+++ b/tensorflow/contrib/cmake/external/zlib.cmake
@@ -42,5 +42,5 @@ add_custom_target(zlib_copy_headers_to_destination
foreach(header_file ${ZLIB_HEADERS})
add_custom_command(TARGET zlib_copy_headers_to_destination PRE_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy ${header_file} ${zlib_INCLUDE_DIR})
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${header_file} ${zlib_INCLUDE_DIR})
endforeach()
diff --git a/tensorflow/contrib/factorization/python/ops/factorization_ops.py b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
index 167b442dbc..b853652629 100644
--- a/tensorflow/contrib/factorization/python/ops/factorization_ops.py
+++ b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
@@ -128,7 +128,7 @@ class WALSModel(object):
# model_init_op is passed to Supervisor. Chief trainer runs it. Other
# trainers wait.
- sv = tf.Supervisor(is_chief=is_chief,
+ sv = tf.train.Supervisor(is_chief=is_chief,
...,
init_op=tf.group(..., model_init_op, ...), ...)
...
diff --git a/tensorflow/contrib/ffmpeg/default/ffmpeg_lib.cc b/tensorflow/contrib/ffmpeg/default/ffmpeg_lib.cc
index a34c64d328..a758bb92aa 100644
--- a/tensorflow/contrib/ffmpeg/default/ffmpeg_lib.cc
+++ b/tensorflow/contrib/ffmpeg/default/ffmpeg_lib.cc
@@ -69,7 +69,10 @@ bool IsBinaryInstalled(const string& binary_name) {
for (const string& dir : str_util::Split(path, ':')) {
const string binary_path = io::JoinPath(dir, binary_name);
char absolute_path[PATH_MAX + 1];
- ::realpath(binary_path.c_str(), absolute_path);
+ if (::realpath(binary_path.c_str(), absolute_path) == NULL) {
+ LOG(ERROR) << "Invalid binary path: " << binary_path;
+ return false;
+ }
struct stat statinfo;
int result = ::stat(absolute_path, &statinfo);
if (result < 0) {
diff --git a/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.h b/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.h
index eb9d5aed68..0aefbc6eed 100644
--- a/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.h
+++ b/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.h
@@ -39,7 +39,7 @@
std::unique_ptr<tensorflow::MemmappedEnv> tf_memmapped_env;
std::vector<std::string> labels;
}
-@property(retain, nonatomic) CATextLayer *predictionTextLayer;
+@property(strong, nonatomic) CATextLayer *predictionTextLayer;
- (IBAction)takePicture:(id)sender;
- (IBAction)switchCameras:(id)sender;
diff --git a/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm b/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm
index 86570b19d2..e975a25b5e 100644
--- a/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm
+++ b/tensorflow/contrib/ios_examples/camera/CameraExampleViewController.mm
@@ -43,8 +43,8 @@ const float input_std = 1.0f;
const std::string input_layer_name = "input";
const std::string output_layer_name = "softmax1";
-static const NSString *AVCaptureStillImageIsCapturingStillImageContext =
- @"AVCaptureStillImageIsCapturingStillImageContext";
+static void *AVCaptureStillImageIsCapturingStillImageContext =
+ &AVCaptureStillImageIsCapturingStillImageContext;
@interface CameraExampleViewController (InternalMethods)
- (void)setupAVCapture;
@@ -105,28 +105,23 @@ static const NSString *AVCaptureStillImageIsCapturingStillImageContext =
[rootLayer addSublayer:previewLayer];
[session startRunning];
- [session release];
if (error) {
- UIAlertView *alertView = [[UIAlertView alloc]
- initWithTitle:[NSString stringWithFormat:@"Failed with error %d",
- (int)[error code]]
- message:[error localizedDescription]
- delegate:nil
- cancelButtonTitle:@"Dismiss"
- otherButtonTitles:nil];
- [alertView show];
- [alertView release];
+ NSString *title = [NSString stringWithFormat:@"Failed with error %d", (int)[error code]];
+ UIAlertController *alertController =
+ [UIAlertController alertControllerWithTitle:title
+ message:[error localizedDescription]
+ preferredStyle:UIAlertControllerStyleAlert];
+ UIAlertAction *dismiss =
+ [UIAlertAction actionWithTitle:@"Dismiss" style:UIAlertActionStyleDefault handler:nil];
+ [alertController addAction:dismiss];
+ [self presentViewController:alertController animated:YES completion:nil];
[self teardownAVCapture];
}
}
- (void)teardownAVCapture {
- [videoDataOutput release];
- if (videoDataOutputQueue) dispatch_release(videoDataOutputQueue);
[stillImageOutput removeObserver:self forKeyPath:@"isCapturingStillImage"];
- [stillImageOutput release];
[previewLayer removeFromSuperlayer];
- [previewLayer release];
}
- (void)observeValueForKeyPath:(NSString *)keyPath
@@ -155,7 +150,6 @@ static const NSString *AVCaptureStillImageIsCapturingStillImageContext =
}
completion:^(BOOL finished) {
[flashView removeFromSuperview];
- [flashView release];
flashView = nil;
}];
}
@@ -194,7 +188,6 @@ static const NSString *AVCaptureStillImageIsCapturingStillImageContext =
}
completion:^(BOOL finished) {
[flashView removeFromSuperview];
- [flashView release];
flashView = nil;
}];
}];
@@ -256,7 +249,9 @@ static const NSString *AVCaptureStillImageIsCapturingStillImageContext =
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
+ CFRetain(pixelBuffer);
[self runCNNOnFrame:pixelBuffer];
+ CFRelease(pixelBuffer);
}
- (void)runCNNOnFrame:(CVPixelBufferRef)pixelBuffer {
@@ -275,7 +270,10 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
const int sourceRowBytes = (int)CVPixelBufferGetBytesPerRow(pixelBuffer);
const int image_width = (int)CVPixelBufferGetWidth(pixelBuffer);
const int fullHeight = (int)CVPixelBufferGetHeight(pixelBuffer);
- CVPixelBufferLockBaseAddress(pixelBuffer, 0);
+
+ CVPixelBufferLockFlags unlockFlags = kNilOptions;
+ CVPixelBufferLockBaseAddress(pixelBuffer, unlockFlags);
+
unsigned char *sourceBaseAddr =
(unsigned char *)(CVPixelBufferGetBaseAddress(pixelBuffer));
int image_height;
@@ -312,6 +310,8 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
}
}
+ CVPixelBufferUnlockBaseAddress(pixelBuffer, unlockFlags);
+
if (tf_session.get()) {
std::vector<tensorflow::Tensor> outputs;
tensorflow::Status run_status = tf_session->Run(
@@ -327,7 +327,7 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
const float predictionValue = predictions(index);
if (predictionValue > 0.05f) {
std::string label = labels[index % predictions.size()];
- NSString *labelObject = [NSString stringWithCString:label.c_str()];
+ NSString *labelObject = [NSString stringWithUTF8String:label.c_str()];
NSNumber *valueObject = [NSNumber numberWithFloat:predictionValue];
[newValues setObject:valueObject forKey:labelObject];
}
@@ -337,12 +337,11 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
});
}
}
+ CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
}
- (void)dealloc {
[self teardownAVCapture];
- [square release];
- [super dealloc];
}
// use front/back camera
@@ -376,7 +375,7 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
- (void)viewDidLoad {
[super viewDidLoad];
- square = [[UIImage imageNamed:@"squarePNG"] retain];
+ square = [UIImage imageNamed:@"squarePNG"];
synth = [[AVSpeechSynthesizer alloc] init];
labelLayers = [[NSMutableArray alloc] init];
oldPredictionValues = [[NSMutableDictionary alloc] init];
@@ -402,7 +401,6 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
- (void)viewDidUnload {
[super viewDidUnload];
- [oldPredictionValues release];
}
- (void)viewWillAppear:(BOOL)animated {
@@ -449,7 +447,6 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
forKey:label];
}
}
- [oldPredictionValues release];
oldPredictionValues = decayedPredictionValues;
for (NSString *label in newValues) {
@@ -553,7 +550,7 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
width:(float)width
height:(float)height
alignment:(NSString *)alignment {
- NSString *const font = @"Menlo-Regular";
+ CFTypeRef font = (CFTypeRef) @"Menlo-Regular";
const float fontSize = 20.0f;
const float marginSizeX = 5.0f;
diff --git a/tensorflow/contrib/ios_examples/camera/Info.plist b/tensorflow/contrib/ios_examples/camera/Info.plist
index 0cd75f61f7..82978ca278 100644
--- a/tensorflow/contrib/ios_examples/camera/Info.plist
+++ b/tensorflow/contrib/ios_examples/camera/Info.plist
@@ -24,8 +24,12 @@
<string>1.0</string>
<key>LSRequiresIPhoneOS</key>
<true/>
+ <key>NSCameraUsageDescription</key>
+ <string>Capture images to detect object</string>
<key>UIMainStoryboardFile</key>
<string>MainStoryboard_iPhone</string>
+ <key>UIRequiresFullScreen</key>
+ <true/>
<key>UIStatusBarHidden</key>
<true/>
<key>UISupportedInterfaceOrientations</key>
@@ -36,7 +40,5 @@
<array>
<string>UIInterfaceOrientationPortrait</string>
</array>
- <key>NSCameraUsageDescription</key>
- <string>Capture images to detect object</string>
</dict>
</plist>
diff --git a/tensorflow/contrib/ios_examples/camera/camera_example.xcodeproj/project.pbxproj b/tensorflow/contrib/ios_examples/camera/camera_example.xcodeproj/project.pbxproj
index e122fc3012..1134d0e117 100644
--- a/tensorflow/contrib/ios_examples/camera/camera_example.xcodeproj/project.pbxproj
+++ b/tensorflow/contrib/ios_examples/camera/camera_example.xcodeproj/project.pbxproj
@@ -238,7 +238,24 @@
592FF8AF18ECBD3600C164F8 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INFINITE_RECURSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_SUSPICIOUS_MOVE = YES;
+ CLANG_WARN_UNREACHABLE_CODE = YES;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ ENABLE_STRICT_OBJC_MSGSEND = YES;
ENABLE_TESTABILITY = YES;
+ GCC_NO_COMMON_BLOCKS = YES;
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
ONLY_ACTIVE_ARCH = YES;
};
name = Debug;
@@ -246,6 +263,23 @@
592FF8B018ECBD3600C164F8 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INFINITE_RECURSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_SUSPICIOUS_MOVE = YES;
+ CLANG_WARN_UNREACHABLE_CODE = YES;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ ENABLE_STRICT_OBJC_MSGSEND = YES;
+ GCC_NO_COMMON_BLOCKS = YES;
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
};
name = Release;
};
@@ -258,7 +292,7 @@
CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "compiler-default";
CLANG_ENABLE_MODULES = YES;
- CLANG_ENABLE_OBJC_ARC = NO;
+ CLANG_ENABLE_OBJC_ARC = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
@@ -323,7 +357,7 @@
CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "compiler-default";
CLANG_ENABLE_MODULES = YES;
- CLANG_ENABLE_OBJC_ARC = NO;
+ CLANG_ENABLE_OBJC_ARC = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index 3667f23697..ac55576980 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -359,25 +359,25 @@ def _fused_batch_norm(
@add_arg_scope
-def batch_norm(
- inputs,
- decay=0.999,
- center=True,
- scale=False,
- epsilon=0.001,
- activation_fn=None,
- param_initializers=None,
- updates_collections=ops.GraphKeys.UPDATE_OPS,
- is_training=True,
- reuse=None,
- variables_collections=None,
- outputs_collections=None,
- trainable=True,
- batch_weights=None,
- fused=False,
- data_format=DATA_FORMAT_NHWC,
- zero_debias_moving_mean=False,
- scope=None):
+def batch_norm(inputs,
+ decay=0.999,
+ center=True,
+ scale=False,
+ epsilon=0.001,
+ activation_fn=None,
+ param_initializers=None,
+ param_regularizers=None,
+ updates_collections=ops.GraphKeys.UPDATE_OPS,
+ is_training=True,
+ reuse=None,
+ variables_collections=None,
+ outputs_collections=None,
+ trainable=True,
+ batch_weights=None,
+ fused=False,
+ data_format=DATA_FORMAT_NHWC,
+ zero_debias_moving_mean=False,
+ scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
@@ -419,6 +419,7 @@ def batch_norm(
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
+ param_regularizers: Optional regularizer for beta and gamma.
updates_collections: Collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
@@ -450,6 +451,7 @@ def batch_norm(
Raises:
ValueError: If `batch_weights` is not None and `fused` is True.
+ ValueError: If `param_regularizers` is not None and `fused` is True.
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or channels dimension of `inputs` is undefined.
@@ -458,6 +460,9 @@ def batch_norm(
if batch_weights is not None:
raise ValueError('Weighted mean and variance is not currently '
'supported for fused batch norm.')
+ if param_regularizers is not None:
+ raise ValueError('Regularizers are not currently '
+ 'supported for fused batch norm.')
return _fused_batch_norm(
inputs,
decay=decay,
@@ -501,6 +506,10 @@ def batch_norm(
'moving_mean', init_ops.zeros_initializer())
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
+ if not param_regularizers:
+ param_regularizers = {}
+ beta_regularizer = param_regularizers.get('beta')
+ gamma_regularizer = param_regularizers.get('gamma')
layer = normalization_layers.BatchNormalization(
axis=axis,
momentum=decay,
@@ -511,6 +520,8 @@ def batch_norm(
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
+ beta_regularizer=beta_regularizer,
+ gamma_regularizer=gamma_regularizer,
trainable=trainable,
name=sc.name,
_scope=sc,
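For reference, the hunk above adds a `param_regularizers` argument to `tf.contrib.layers.batch_norm`. A minimal usage sketch (illustrative values, assuming the non-fused path since the new check raises `ValueError` when `fused=True`):

```python
import tensorflow as tf

inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 16))

# A regularizer is any callable mapping the variable to a scalar loss term.
reg = lambda w: 0.1 * tf.reduce_sum(tf.abs(w))

net = tf.contrib.layers.batch_norm(
    inputs,
    scale=True,  # gamma is only created (and hence regularized) when scale=True
    param_regularizers={'beta': reg, 'gamma': reg})

# The regularization terms end up in the standard collection,
# which is what the new tests below verify.
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
```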
diff --git a/tensorflow/contrib/layers/python/layers/layers_test.py b/tensorflow/contrib/layers/python/layers/layers_test.py
index 1ba0534b5d..a5eb0a725c 100644
--- a/tensorflow/contrib/layers/python/layers/layers_test.py
+++ b/tensorflow/contrib/layers/python/layers/layers_test.py
@@ -1679,6 +1679,13 @@ class BatchNormTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, 'Weighted mean and variance'):
_layers.batch_norm(inputs, batch_weights=batch_weights, fused=True)
+ def testParamRegularizersFused(self):
+ with ops.Graph().as_default() as g, self.test_session(g):
+ inputs = array_ops.placeholder(dtype=dtypes.float32, shape=(5, 3, 3, 7))
+ with self.assertRaisesRegexp(ValueError,
+ 'Regularizers are not currently'):
+ _layers.batch_norm(inputs, param_regularizers={}, fused=True)
+
def _testCreateOp(self, fused):
height, width = 3, 3
with self.test_session():
@@ -1688,6 +1695,8 @@ class BatchNormTest(test.TestCase):
'BatchNorm/batchnorm')
self.assertTrue(output.op.name.startswith(expected_name))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
+ self.assertEqual(
+ ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES), [])
def testCreateOpDefault(self):
self._testCreateOp(False)
@@ -1695,6 +1704,29 @@ class BatchNormTest(test.TestCase):
def testCreateOpFused(self):
self._testCreateOp(True)
+ def testCreateOpBetaRegularizer(self):
+ height, width = 3, 3
+ with self.test_session():
+ reg = lambda x: 0.1 * math_ops.reduce_sum(x)
+ images = np.random.uniform(size=(5, height, width, 3)).astype('f')
+ output = _layers.batch_norm(images, param_regularizers={'beta': reg})
+ self.assertEqual(
+ len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
+ beta_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
+ self.assertEqual(beta_decay.op.name, 'BatchNorm/beta/Regularizer/mul')
+
+ def testCreateOpGammaRegularizer(self):
+ height, width = 3, 3
+ with self.test_session():
+ reg = lambda x: 0.1 * math_ops.reduce_sum(x)
+ images = np.random.uniform(size=(5, height, width, 3)).astype('f')
+ output = _layers.batch_norm(
+ images, param_regularizers={'gamma': reg}, scale=True)
+ self.assertEqual(
+ len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
+ gamma_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
+ self.assertEqual(gamma_decay.op.name, 'BatchNorm/gamma/Regularizer/mul')
+
def testCreateVariables(self):
height, width = 3, 3
with self.test_session():
diff --git a/tensorflow/contrib/learn/__init__.py b/tensorflow/contrib/learn/__init__.py
index b8c6e75061..85cef3d8db 100644
--- a/tensorflow/contrib/learn/__init__.py
+++ b/tensorflow/contrib/learn/__init__.py
@@ -37,6 +37,8 @@ See the @{$python/contrib.learn} guide.
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
+@@SVM
+@@SKCompat
@@Head
@@multi_class_head
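The two new `@@` entries export `SVM` and `SKCompat` from `tf.contrib.learn`. As a hedged sketch of the `SKCompat` wrapper (the estimator, data, and step count here are illustrative), it adapts an estimator to a scikit-learn style numpy interface:

```python
import numpy as np
import tensorflow as tf

feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
estimator = tf.contrib.learn.DNNClassifier(
    feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

# SKCompat wraps an Estimator so fit/predict accept numpy arrays directly.
classifier = tf.contrib.learn.SKCompat(estimator)
x = np.random.rand(100, 4).astype(np.float32)
y = np.random.randint(0, 3, size=100)
classifier.fit(x, y, steps=200)
```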
diff --git a/tensorflow/contrib/learn/python/learn/README.md b/tensorflow/contrib/learn/python/learn/README.md
index 84c195d0b8..f412c83a97 100644
--- a/tensorflow/contrib/learn/python/learn/README.md
+++ b/tensorflow/contrib/learn/python/learn/README.md
@@ -14,7 +14,7 @@ TF Learn is a simplified interface for TensorFlow, to get people started on pred
## Installation
-[Install TensorFlow](../../../../g3doc/get_started/os_setup.md), and then simply import `learn` via `from tensorflow.contrib.learn` or use `tf.contrib.learn`.
+[Install TensorFlow](https://www.tensorflow.org/install/), and then simply import `learn` via `from tensorflow.contrib.learn` or use `tf.contrib.learn`.
Optionally you can install [scikit-learn](http://scikit-learn.org/stable/) and [pandas](http://pandas.pydata.org/) for additional functionality.
diff --git a/tensorflow/contrib/learn/python/learn/graph_actions.py b/tensorflow/contrib/learn/python/learn/graph_actions.py
index 45a2dc1846..b2e4815691 100644
--- a/tensorflow/contrib/learn/python/learn/graph_actions.py
+++ b/tensorflow/contrib/learn/python/learn/graph_actions.py
@@ -183,7 +183,7 @@ def _monitored_train(graph,
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
- arg to `tf.Saver` constructor.
+ arg to `tf.train.Saver` constructor.
keep_checkpoint_every_n_hours: In addition to keeping the most recent
`keep_checkpoint_max` checkpoint files, you might want to keep one checkpoint file
for every N hours of training. This can be useful if you want to later
@@ -378,7 +378,7 @@ def train(graph,
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
- arg to tf.Saver constructor.
+ arg to tf.train.Saver constructor.
supervisor_save_summaries_steps: Save summaries every
`supervisor_save_summaries_steps` seconds when training.
feed_fn: A function that is called every iteration to produce a `feed_dict`
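The docstring fixes above point `keep_checkpoint_max` at `tf.train.Saver` (there is no `tf.Saver`). For reference, the corresponding `Saver` arguments look like this (values are illustrative):

```python
import tensorflow as tf

saver = tf.train.Saver(
    max_to_keep=5,                    # keep only the 5 most recent checkpoints
    keep_checkpoint_every_n_hours=2)  # plus one checkpoint every 2 hours
```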
diff --git a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
index 3d1589f27e..1202f961cc 100644
--- a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
+++ b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
@@ -1111,7 +1111,7 @@ def sequence_loss(logits,
average_across_timesteps: If set, divide the returned cost by the total
label weight.
average_across_batch: If set, divide the returned cost by the batch size.
- softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
+ softmax_loss_function: Function (labels-batch, inputs-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, defaults to "sequence_loss".
@@ -1160,7 +1160,7 @@ def model_with_buckets(encoder_inputs,
seq2seq: A sequence-to-sequence model function; it takes 2 input that
agree with encoder_inputs and decoder_inputs, and returns a pair
consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
- softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
+ softmax_loss_function: Function (labels-batch, inputs-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
per_example_loss: Boolean. If set, the returned loss will be a batch-sized
tensor of losses for each sequence in the batch. If unset, it will be
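Both docstring corrections above swap the documented argument order of `softmax_loss_function` to `(labels-batch, inputs-batch)`. A minimal sketch of a custom loss matching that order, assuming the runtime call order agrees with the corrected docstring:

```python
import tensorflow as tf

def my_softmax_loss(labels, logits):
  # (labels-batch, inputs-batch), per the corrected documentation.
  return tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)

# e.g. sequence_loss(..., softmax_loss_function=my_softmax_loss)
```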
diff --git a/tensorflow/contrib/metrics/python/metrics/classification.py b/tensorflow/contrib/metrics/python/metrics/classification.py
index 9c58d0c619..26aba1cc51 100644
--- a/tensorflow/contrib/metrics/python/metrics/classification.py
+++ b/tensorflow/contrib/metrics/python/metrics/classification.py
@@ -26,7 +26,7 @@ from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
-def accuracy(predictions, labels, weights=None):
+def accuracy(predictions, labels, weights=None, name=None):
"""Computes the percentage of times that predictions matches labels.
Args:
@@ -35,6 +35,7 @@ def accuracy(predictions, labels, weights=None):
labels: the ground truth values, a `Tensor` of any shape and
bool, integer, or string dtype.
weights: None or `Tensor` of float values to reweight the accuracy.
+ name: A name for the operation (optional).
Returns:
Accuracy `Tensor`.
@@ -52,7 +53,7 @@ def accuracy(predictions, labels, weights=None):
raise ValueError('Dtypes of predictions and labels should match. '
'Given: predictions (%r) and labels (%r)' %
(predictions.dtype, labels.dtype))
- with ops.name_scope('accuracy', values=[predictions, labels]):
+ with ops.name_scope(name, 'accuracy', values=[predictions, labels]):
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
if weights is not None:
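With the new `name` argument, callers can control the metric's name scope. A small usage sketch of the updated `tf.contrib.metrics.accuracy`:

```python
import tensorflow as tf

predictions = tf.constant([1, 0, 1, 1])
labels = tf.constant([1, 0, 0, 1])
acc = tf.contrib.metrics.accuracy(predictions, labels, name='eval_accuracy')

with tf.Session() as sess:
  print(sess.run(acc))  # 0.75
```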
diff --git a/tensorflow/contrib/opt/python/training/moving_average_optimizer.py b/tensorflow/contrib/opt/python/training/moving_average_optimizer.py
index 00cb6bfba9..c48494585e 100644
--- a/tensorflow/contrib/opt/python/training/moving_average_optimizer.py
+++ b/tensorflow/contrib/opt/python/training/moving_average_optimizer.py
@@ -122,7 +122,7 @@ class MovingAverageOptimizer(optimizer.Optimizer):
**kwargs: Keyword arguments of `Saver()`.
Returns:
- A `tf.Saver` object.
+ A `tf.train.Saver` object.
Raises:
RuntimeError: If apply_gradients or minimize has not been called before.
diff --git a/tensorflow/contrib/slim/python/slim/learning.py b/tensorflow/contrib/slim/python/slim/learning.py
index db496105b4..bc687e5cdf 100644
--- a/tensorflow/contrib/slim/python/slim/learning.py
+++ b/tensorflow/contrib/slim/python/slim/learning.py
@@ -18,7 +18,7 @@ This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
-to user-specified arguments. Note that the training loop uses the tf.Supervisor
+to user-specified arguments. Note that the training loop uses the tf.train.Supervisor
and its managed_session in its implementation to ensure the ability of worker
processes to recover from failures.
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index 8ba7cb60d2..502aa32686 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -756,13 +756,12 @@ cc_library(
":proto_text",
":protos_all_cc",
":shape_inference_testutil",
- ":sycl_runtime",
":tensor_testutil",
":test",
"//tensorflow/core/kernels:constant_op",
"//tensorflow/core/kernels:ops_testutil",
"//tensorflow/core/kernels:ops_util",
- "//tensorflow/core/platform/default/build_config:gtest",
+ "//tensorflow/core/platform/default/build_config:gtest", # + if_sycl([":sycl_runtime"]),
],
)
diff --git a/tensorflow/core/kernels/eigen_pooling.h b/tensorflow/core/kernels/eigen_pooling.h
index e13c8b9835..492c358a52 100644
--- a/tensorflow/core/kernels/eigen_pooling.h
+++ b/tensorflow/core/kernels/eigen_pooling.h
@@ -330,9 +330,18 @@ struct AvgPoolMeanReducer {
#if (EIGEN_ARCH_i386 || EIGEN_ARCH_x86_64) && !defined(__CUDACC__)
#ifdef EIGEN_VECTORIZE_AVX512
-#define pequal(a, b) \
- _mm512_maskz_set1_epi32(_mm512_cmp_ps_mask(a, b, _CMP_EQ_UQ), -1)
-#define psel(a, b, false_mask) _mm512_ternarylogic_epi64(false_mask, a, b, 0xca)
+#define pequal(a, b) \
+ _mm512_castsi512_ps( \
+ _mm512_maskz_set1_epi32(_mm512_cmp_ps_mask(a, b, _CMP_EQ_UQ), -1))
+
+// The ternarylogic function immediate determines the values in the result
+// In the case below, 0xd8 implies (false_mask) ? (b) : (a)
+// For details, refer to the vpternlogd instruction table at
+// http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-vol-2c-manual.pdf
+#define psel(a, b, false_mask) \
+ _mm512_castsi512_ps(_mm512_ternarylogic_epi32( \
+ _mm512_castps_si512(a), _mm512_castps_si512(b), \
+ _mm512_castps_si512(false_mask), 0xd8))
#elif defined EIGEN_VECTORIZE_AVX
#define pequal(a, b) _mm256_cmp_ps(a, b, _CMP_EQ_UQ)
#define psel(a, b, false_mask) _mm256_blendv_ps(a, b, false_mask)
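The new comment states that the immediate 0xd8 encodes `(false_mask) ? (b) : (a)` for `vpternlogd`. That byte can be derived by enumerating the eight input combinations; a short, purely illustrative Python check (unrelated to the TensorFlow build itself):

```python
# Bit i of the immediate is the desired output for inputs
# (src1, src2, src3) = (a, b, false_mask), with i = a*4 + b*2 + false_mask.
imm = 0
for i in range(8):
    a, b, mask = (i >> 2) & 1, (i >> 1) & 1, i & 1
    out = b if mask else a  # select b where the mask bit is set, else a
    imm |= out << i
print(hex(imm))  # 0xd8
```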
diff --git a/tensorflow/core/kernels/hexagon/hexagon_rewriter_transform.cc b/tensorflow/core/kernels/hexagon/hexagon_rewriter_transform.cc
index 802eee34c1..71034f5a7e 100644
--- a/tensorflow/core/kernels/hexagon/hexagon_rewriter_transform.cc
+++ b/tensorflow/core/kernels/hexagon/hexagon_rewriter_transform.cc
@@ -47,7 +47,7 @@ Status RewriteQuantizedStrippedModelForHexagon(
"graph execute op...";
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> outputs;
- for (int i = 0; i < context.input_names.size(); ++i) {
+ for (auto i = 0; i < context.input_names.size(); ++i) {
const string& input_name = context.input_names.at(i);
// Get input shape
diff --git a/tensorflow/core/kernels/quantized_conv_ops.cc b/tensorflow/core/kernels/quantized_conv_ops.cc
index c3f19c729c..afa1f65aef 100644
--- a/tensorflow/core/kernels/quantized_conv_ops.cc
+++ b/tensorflow/core/kernels/quantized_conv_ops.cc
@@ -88,9 +88,9 @@ class ReferenceConvFunctor {
int filter_top_offset;
if (padding == VALID) {
filter_left_offset =
- ((output_width - 1) * stride + filter_width - input_width) / 2;
+ ((output_width - 1) * stride + filter_width - input_width + 1) / 2;
filter_top_offset =
- ((output_height - 1) * stride + filter_height - input_height) / 2;
+ ((output_height - 1) * stride + filter_height - input_height + 1) / 2;
} else {
filter_left_offset =
((output_width - 1) * stride + filter_width - input_width) / 2;
diff --git a/tensorflow/core/kernels/record_yielder.cc b/tensorflow/core/kernels/record_yielder.cc
index 5fd8ba2c68..8386e9f60e 100644
--- a/tensorflow/core/kernels/record_yielder.cc
+++ b/tensorflow/core/kernels/record_yielder.cc
@@ -115,7 +115,7 @@ void RecordYielder::MainLoop() {
std::shuffle(filenames.begin(), filenames.end(), shuffle_rnd);
// Left-shift the filename list.
- const int64 num = filenames.size();
+ const std::vector<string>::size_type num = filenames.size();
int64 shift;
if (0 <= opts_.file_shuffle_shift_ratio &&
opts_.file_shuffle_shift_ratio < 1) {
@@ -130,7 +130,7 @@ void RecordYielder::MainLoop() {
for (int i = 0; i < N; ++i) {
Shard* shard = &shards[i];
shard->index = i;
- for (int j = i; j < filenames.size(); j += N) {
+ for (std::vector<string>::size_type j = i; j < filenames.size(); j += N) {
shard->filenames.push_back(filenames[j]);
}
thread_->Schedule([this, shard]() { ShardLoop(shard); });
diff --git a/tensorflow/core/kernels/record_yielder.h b/tensorflow/core/kernels/record_yielder.h
index 503644f3b8..44f7c9511f 100644
--- a/tensorflow/core/kernels/record_yielder.h
+++ b/tensorflow/core/kernels/record_yielder.h
@@ -142,7 +142,7 @@ class RecordYielder {
// any.
return stop_ || !status_.ok() || (epoch_end_ && !buf_.empty()) ||
(!epoch_end_ &&
- buf_.size() >= std::max<int64>(1, opts_.bufsize / 2));
+ buf_.size() >= std::max<uint64>(1, opts_.bufsize / 2));
}
void MainLoop();
diff --git a/tensorflow/core/kernels/sparse_matmul_op.h b/tensorflow/core/kernels/sparse_matmul_op.h
index 61bd6593c3..bff6a0c9b3 100644
--- a/tensorflow/core/kernels/sparse_matmul_op.h
+++ b/tensorflow/core/kernels/sparse_matmul_op.h
@@ -255,12 +255,13 @@ EIGEN_STRONG_INLINE Packet8d pbroadcast_second<Packet8d>(const Packet8d& a_in) {
}
template <>
EIGEN_STRONG_INLINE Packet8d pbroadcast_third<Packet8d>(const Packet8d& a_in) {
- Packet2d a = _mm512_extractf32x4_ps(a_in, 1);
+ Packet2d a = _mm256_extractf128_pd(_mm512_castpd512_pd256(a_in), 1);
return _mm512_broadcastsd_pd(a);
}
template <>
EIGEN_STRONG_INLINE Packet8d pbroadcast_fourth<Packet8d>(const Packet8d& a_in) {
- Packet2d a = _mm_permute_pd(_mm512_extractf32x4_ps(a_in, 1), 3);
+ Packet2d a =
+ _mm_permute_pd(_mm256_extractf128_pd(_mm512_castpd512_pd256(a_in), 1), 3);
return _mm512_broadcastsd_pd(a);
}
template <>
@@ -417,14 +418,17 @@ EIGEN_STRONG_INLINE Packet8f pbroadcast_fourth<Packet8f>(const Packet8f& a) {
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet16f pexpand_bf16_l(const Packet16f& from) {
- return _mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm512_castsi512_si256(from)),
- 16);
+ return _mm512_castsi512_ps(_mm512_slli_epi32(
+ _mm512_cvtepu16_epi32(_mm512_castsi512_si256(_mm512_castps_si512(from))),
+ 16));
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet16f pexpand_bf16_u(const Packet16f& from) {
- return _mm512_slli_epi32(
- _mm512_cvtepu16_epi32(_mm512_extractf64x4_pd(from, 1)), 16);
+ return _mm512_castsi512_ps(
+ _mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_castpd_si256(
+ _mm512_extractf64x4_pd(_mm512_castps_pd(from), 1))),
+ 16));
}
#endif
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 3b8579ef09..f758935a87 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -25598,6 +25598,59 @@ op {
description: "Read [the section on\nSegmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation\nof segments.\n\nComputes a tensor such that\n`(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such\nthat `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`\nneed not be sorted and need not cover all values in the full\nrange of valid values.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\n\n`num_segments` should equal the number of distinct segment IDs.\n\n<div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n<img style=\"width:100%\" src=\"../../images/UnsortedSegmentSum.png\" alt>\n</div>"
}
op {
+ name: "UnsortedSegmentSum"
+ input_arg {
+ name: "data"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "segment_ids"
+ description: "A tensor whose shape is a prefix of `data.shape`."
+ type_attr: "Tindices"
+ }
+ input_arg {
+ name: "num_segments"
+ type: DT_INT32
+ }
+ output_arg {
+ name: "output"
+ description: "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`."
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "Tindices"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+  summary: "Computes the sum along segments of a tensor."
+ description: "Read [the section on\nSegmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation\nof segments.\n\nComputes a tensor such that\n\\\\(output_i = \\sum_j data_j\\\\) where sum is over `j` such\nthat `segment_ids[j] == i`. Unlike `SegmentSum`, `segment_ids`\nneed not be sorted and need not cover all values in the full\n range of valid values.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\n\n`num_segments` should equal the number of distinct segment IDs.\n\n<div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n<img style=\"width:100%\" src=\"../../images/UnsortedSegmentSum.png\" alt>\n</div>"
+}
+op {
name: "Unstage"
output_arg {
name: "values"
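The block above registers the `UnsortedSegmentSum` op in ops.pbtxt. From Python it is exposed as `tf.unsorted_segment_sum`; a tiny worked example:

```python
import tensorflow as tf

data = tf.constant([1, 2, 3, 4])
segment_ids = tf.constant([0, 1, 0, 1])  # need not be sorted
result = tf.unsorted_segment_sum(data, segment_ids, num_segments=2)

with tf.Session() as sess:
  print(sess.run(result))  # [4 6]
```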
diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h
index f0859ed23f..d175107be0 100644
--- a/tensorflow/core/public/version.h
+++ b/tensorflow/core/public/version.h
@@ -20,7 +20,7 @@ limitations under the License.
#define TF_MAJOR_VERSION 1
#define TF_MINOR_VERSION 0
-#define TF_PATCH_VERSION 0
+#define TF_PATCH_VERSION 1
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
// "-beta", "-rc", "-rc.1")
diff --git a/tensorflow/docs_src/extend/adding_an_op.md b/tensorflow/docs_src/extend/adding_an_op.md
index 85f3aa2ce4..bfaf80300d 100644
--- a/tensorflow/docs_src/extend/adding_an_op.md
+++ b/tensorflow/docs_src/extend/adding_an_op.md
@@ -188,6 +188,8 @@ building the `.so` file.
> the older ABI. If you compile your op library with gcc5, add
> `-D_GLIBCXX_USE_CXX11_ABI=0` to the command line to make the library
> compatible with the older abi.
+> Furthermore, if you are using a TensorFlow package built from source, remember to add `-cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"`
+> to the bazel command when compiling the Python package.
### Compile the op using bazel (TensorFlow source installation)
@@ -241,7 +243,7 @@ named `ZeroOut` in the C++ files, the python function will be called `zero_out`.
To make the op available as a regular function `import`-able from a Python
module, it maybe useful to have the `load_op_library` call in a Python source
-file as follows (see [zero_out_op_1.py](https://www.tensorflow.org/code/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_1.py))
+file as follows (see [zero_out_op_1.py](https://www.tensorflow.org/code/tensorflow/examples/adding_an_op/zero_out_op_1.py))
:
```python
@@ -1033,16 +1035,16 @@ kept on the CPU, add a `HostMemory()` call to the kernel registration, e.g.:
#### Compiling the kernel for the GPU device {#compiling-kernel}
Look at
-[cuda_op_kernel.cu.cc](https://www.tensorflow.org/code/tensorflow/g3doc/how_tos/adding_an_op/cuda_op_kernel.cu.cc)
+[cuda_op_kernel.cu.cc](https://www.tensorflow.org/code/tensorflow/examples/adding_an_op/cuda_op_kernel.cu.cc)
for an example that uses a CUDA kernel to implement an op. The
`tf_custom_op_library` accepts a `gpu_srcs` argument in which the list of source
files containing the CUDA kernels (`*.cu.cc` files) can be specified. For use
with a binary installation of TensorFlow, the CUDA kernels have to be compiled
with NVIDIA's `nvcc` compiler. Here is the sequence of commands you can use to
compile the
-[cuda_op_kernel.cu.cc](https://www.tensorflow.org/code/tensorflow/g3doc/how_tos/adding_an_op/cuda_op_kernel.cu.cc)
+[cuda_op_kernel.cu.cc](https://www.tensorflow.org/code/tensorflow/examples/adding_an_op/cuda_op_kernel.cu.cc)
and
-[cuda_op_kernel.cc](https://www.tensorflow.org/code/tensorflow/g3doc/how_tos/adding_an_op/cuda_op_kernel.cc)
+[cuda_op_kernel.cc](https://www.tensorflow.org/code/tensorflow/examples/adding_an_op/cuda_op_kernel.cc)
into a single dynamically loadable library:
```bash
diff --git a/tensorflow/docs_src/get_started/mnist/beginners.md b/tensorflow/docs_src/get_started/mnist/beginners.md
index ce7e23c5e1..f6d6b230b3 100644
--- a/tensorflow/docs_src/get_started/mnist/beginners.md
+++ b/tensorflow/docs_src/get_started/mnist/beginners.md
@@ -368,7 +368,7 @@ In this case, we ask TensorFlow to minimize `cross_entropy` using the
with a learning rate of 0.5. Gradient descent is a simple procedure, where
TensorFlow simply shifts each variable a little bit in the direction that
reduces the cost. But TensorFlow also provides
-@{$python/train#optimizers$many other optimization algorithms}:
+@{$python/train#Optimizers$many other optimization algorithms}:
using one is as simple as tweaking one line.
What TensorFlow actually does here, behind the scenes, is to add new operations
diff --git a/tensorflow/docs_src/get_started/monitors.md b/tensorflow/docs_src/get_started/monitors.md
index a51194326f..99d583b23d 100644
--- a/tensorflow/docs_src/get_started/monitors.md
+++ b/tensorflow/docs_src/get_started/monitors.md
@@ -280,17 +280,17 @@ metrics to log during model evaluation:
```python
validation_metrics = {
"accuracy":
- tf.contrib.learn.metric_spec.MetricSpec(
+ tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key=tf.contrib.learn.prediction_key.PredictionKey.
CLASSES),
"precision":
- tf.contrib.learn.metric_spec.MetricSpec(
+ tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key=tf.contrib.learn.prediction_key.PredictionKey.
CLASSES),
"recall":
- tf.contrib.learn.metric_spec.MetricSpec(
+ tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_recall,
prediction_key=tf.contrib.learn.prediction_key.PredictionKey.
CLASSES)
@@ -300,7 +300,7 @@ validation_metrics = {
Add the above code before the `ValidationMonitor` constructor. Then revise the
`ValidationMonitor` constructor as follows to add a `metrics` parameter to log
the accuracy, precision, and recall metrics specified in `validation_metrics`
-(loss is always logged, and doesn't need to be explicity specified):
+(loss is always logged, and doesn't need to be explicitly specified):
```python
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
diff --git a/tensorflow/docs_src/install/install_linux.md b/tensorflow/docs_src/install/install_linux.md
index 082f34de0a..94738e753c 100644
--- a/tensorflow/docs_src/install/install_linux.md
+++ b/tensorflow/docs_src/install/install_linux.md
@@ -162,8 +162,8 @@ Take the following steps to install TensorFlow with Virtualenv:
issue the following command to install TensorFlow in the active
virtualenv environment:
- <pre> (tensorflow)$ <b>pip3 install --upgrade \\
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.0-cp27-none-linux_x86_64.whl</pre>
+ <pre>(tensorflow)$ <b>pip3 install --upgrade \
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp34-cp34m-linux_x86_64.whl</b></pre>
If you encounter installation problems, see
[Common Installation Problems](#CommonInstallationProblems).
@@ -254,20 +254,22 @@ take the following steps:
2. (Optional.) If Step 1 failed, install the latest version of TensorFlow
by issuing a command of the following format:
- <pre> $ <b>sudo pip install --upgrade</b> <i>TF_BINARY_URL</i> # Python 2.7
- $ <b>sudo pip3 install --upgrade</b> <i>TF_BINARY_URL</i> # Python 3.N </pre>
+ <pre>$ <b>sudo pip install --upgrade</b> <i>TF_PYTHON_URL</i> # Python 2.7
+ $ <b>sudo pip3 install --upgrade</b> <i>TF_PYTHON_URL</i> # Python 3.N </pre>
- where <code></em>TF_BINARY_URL</em></code> identifies the URL of the
+ where <code><em>TF_PYTHON_URL</em></code> identifies the URL of the
TensorFlow Python package. The appropriate value of
- <code><em>TF_BINARY_URL</em></code> depends on the operating system,
+     <code><em>TF_PYTHON_URL</em></code> depends on the operating system,
Python version, and GPU support. Find the appropriate value for
- <code><em>TF_BINARY_URL</em></code>
- [here](#the_url_of_the_tensorflow_python_package). For example, to
- install TensorFlow for Linux, Python 3.4, and CPU-only support, issue
- the following command:
+ <code><em>TF_PYTHON_URL</em></code> for your system
+ [here](#TF_PYTHON_URL). For example, if you are installing TensorFlow
+ for Linux, Python version 3.4, and CPU-only support, issue the following
+ command:
- <pre> $ <b>sudo pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.0-cp34-cp34m-linux_x86_64.whl</b> </pre>
+ <pre>
+ $ <b>sudo pip3 install --upgrade \
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp34-cp34m-linux_x86_64.whl</b>
+ </pre>
If this step fails, see
[Common Installation Problems](#CommonInstallationProblems).
@@ -333,9 +335,9 @@ where:
* <tt>gcr.io/tensorflow/tensorflow:latest-devel</tt>, which is the latest
TensorFlow CPU Binary image plus source code.
* <tt>gcr.io/tensorflow/tensorflow:<i>version</i></tt>, which is the
- specified version (for example, 1.0.0) of TensorFlow CPU binary image.
+ specified version (for example, 1.0.1) of TensorFlow CPU binary image.
* <tt>gcr.io/tensorflow/tensorflow:<i>version</i>-devel</tt>, which is
- the specified version (for example, 1.0.0) of the TensorFlow GPU
+ the specified version (for example, 1.0.1) of the TensorFlow GPU
binary image plus source code.
<tt>gcr.io</tt> is the Google Container Registry. Note that some
@@ -453,7 +455,7 @@ Take the following steps to install TensorFlow in an Anaconda environment:
<pre>
(tensorflow)$ <b>pip install --ignore-installed --upgrade \
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.0-cp34-cp34m-linux_x86_64.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp34-cp34m-linux_x86_64.whl</b></pre>
<a name="ValidateYourInstallation"></a>
@@ -624,14 +626,14 @@ This section documents the relevant values for Linux installations.
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.0-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp27-none-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.0-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp27-none-linux_x86_64.whl
</pre>
Note that GPU support requires the NVIDIA hardware and software described in
@@ -643,14 +645,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.0-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp34-cp34m-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.0-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp34-cp34m-linux_x86_64.whl
</pre>
Note that GPU support requires the NVIDIA hardware and software described in
@@ -662,14 +664,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.0-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp35-cp35m-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.0-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp35-cp35m-linux_x86_64.whl
</pre>
@@ -681,14 +683,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.0-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp36-cp36m-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.0-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp36-cp36m-linux_x86_64.whl
</pre>
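After upgrading to one of the 1.0.1 wheels listed above, the installed version can be confirmed from Python:

```python
import tensorflow as tf
print(tf.__version__)  # expected: 1.0.1
```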
diff --git a/tensorflow/docs_src/install/install_mac.md b/tensorflow/docs_src/install/install_mac.md
index f1c36f904f..72c7b3e4eb 100644
--- a/tensorflow/docs_src/install/install_mac.md
+++ b/tensorflow/docs_src/install/install_mac.md
@@ -174,7 +174,7 @@ Take the following steps to install TensorFlow with Virtualenv:
<pre>
$ <b>pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.0-py3-none-any.whl</b>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py3-none-any.whl</b>
</pre>
If you encounter installation problems, see
@@ -311,7 +311,7 @@ take the following steps:
support, issue the following command:
<pre>
- $ <b>sudo pip3 install --upgrade https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.0-py3-none-any.whl</b>
+ $ <b>sudo pip3 install --upgrade https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py3-none-any.whl</b>
</pre>
If the preceding command fails, see
@@ -426,7 +426,7 @@ Take the following steps to install TensorFlow in an Anaconda environment:
TensorFlow for Python 3.4:
<pre>
- (tensorflow)$ <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.0-py3-none-any.whl</b>
+ (tensorflow)$ <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py3-none-any.whl</b>
</pre>
@@ -623,13 +623,13 @@ This section documents the relevant values for Mac OS installations.
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.0-py2-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py2-none-any.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-1.0.0-py2-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-1.0.1-py2-none-any.whl
</pre>
Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see
@@ -641,13 +641,13 @@ Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.0-py3-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py3-none-any.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-1.0.0-py3-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-1.0.1-py3-none-any.whl
</pre>
Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see
diff --git a/tensorflow/docs_src/install/install_sources.md b/tensorflow/docs_src/install/install_sources.md
index 0fdfe5e546..9f0ae475c1 100644
--- a/tensorflow/docs_src/install/install_sources.md
+++ b/tensorflow/docs_src/install/install_sources.md
@@ -317,12 +317,15 @@ $ <b>bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pk
Invoke `pip install` to install that pip package.
The filename of the `.whl `file depends on your platform.
For example, the following command will install the pip package
-for TensorFlow 1.0.0 on Linux:
+for TensorFlow 1.0.1 on Linux:
<pre>
-$ <b>sudo pip install /tmp/tensorflow_pkg/tensorflow-1.0.0-py2-none-any.whl</b>
+$ <b>sudo pip install /tmp/tensorflow_pkg/tensorflow-1.0.1-py2-none-any.whl</b>
</pre>
+**NOTE on gcc version 5:** the binary pip packages
+available on the TensorFlow website are built with gcc 4, which uses the older ABI.
+To make your library compatible with the older ABI, add `-cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"` to your bazel build command.
<a name="#ValidateYourInstallation"></a>
## Validate your installation
diff --git a/tensorflow/docs_src/install/install_windows.md b/tensorflow/docs_src/install/install_windows.md
index 9c3f2b92f5..0491524c36 100644
--- a/tensorflow/docs_src/install/install_windows.md
+++ b/tensorflow/docs_src/install/install_windows.md
@@ -114,12 +114,12 @@ Take the following steps to install TensorFlow in an Anaconda environment:
environment. To install the CPU-only version of TensorFlow, enter the
following command:
- <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-1.0.0-cp35-cp35m-win_amd64.whl</b> </pre>
+ <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-1.0.1-cp35-cp35m-win_amd64.whl</b> </pre>
To install the GPU version of TensorFlow, enter the following command
(on a single line):
- <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-1.0.0-cp35-cp35m-win_amd64.whl</b> </pre>
+ <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-1.0.1-cp35-cp35m-win_amd64.whl</b> </pre>
<a name="#ValidateYourInstallation"></a>
## Validate your installation
diff --git a/tensorflow/docs_src/performance/quantization.md b/tensorflow/docs_src/performance/quantization.md
index 878371f674..86d2b92494 100644
--- a/tensorflow/docs_src/performance/quantization.md
+++ b/tensorflow/docs_src/performance/quantization.md
@@ -82,7 +82,7 @@ them directly is very convenient.
## How Can You Quantize Your Models?
-TensorFlow has production-grade support for eight-bit calculations built it. It
+TensorFlow has production-grade support for eight-bit calculations built in. It
also has a process for converting many models trained in floating-point over to
equivalent graphs using quantized calculations for inference. For example,
here's how you can translate the latest GoogLeNet model into a version that uses
@@ -153,7 +153,7 @@ bit.
The min and max operations actually look at the values in the input float
tensor, and then feeds them into the Dequantize operation that converts the
-tensor into eight-bits. There's more details on how the quantized representation
+tensor into eight-bits. There are more details on how the quantized representation
works later on.
Once the individual operations have been converted, the next stage is to remove
diff --git a/tensorflow/docs_src/performance/xla/index.md b/tensorflow/docs_src/performance/xla/index.md
index 222b2ba887..9c23e79845 100644
--- a/tensorflow/docs_src/performance/xla/index.md
+++ b/tensorflow/docs_src/performance/xla/index.md
@@ -10,7 +10,7 @@ XLA (Accelerated Linear Algebra) is a domain-specific compiler for linear
algebra that optimizes TensorFlow computations. The results are improvements in
speed, memory usage, and portability on server and mobile platforms. Initially,
most users will not see large benefits from XLA, but are welcome to experiment
-by using XLA via @{$jit$just-in-time (JIT) compilaton} or @{$tfcompile$ahead-of-time (AOT) compilation}. Developers targeting new hardware accelerators are
+by using XLA via @{$jit$just-in-time (JIT) compilation} or @{$tfcompile$ahead-of-time (AOT) compilation}. Developers targeting new hardware accelerators are
especially encouraged to try out XLA.
The XLA framework is experimental and in active development. In particular,
diff --git a/tensorflow/docs_src/programmers_guide/debugger.md b/tensorflow/docs_src/programmers_guide/debugger.md
index 19820421d3..f559ee3c6d 100644
--- a/tensorflow/docs_src/programmers_guide/debugger.md
+++ b/tensorflow/docs_src/programmers_guide/debugger.md
@@ -102,7 +102,7 @@ left corner of the screen to proceed.
This will bring up another screen
right after the `run()` call has ended, which will display all dumped
-intermedate tensors from the run. (These tensors can also be obtained by
+intermediate tensors from the run. (These tensors can also be obtained by
running the command `lt` after you executed `run`.) This is called the
**run-end UI**:
diff --git a/tensorflow/docs_src/programmers_guide/supervisor.md b/tensorflow/docs_src/programmers_guide/supervisor.md
index 5ea88cf64c..82ed1c2cf7 100644
--- a/tensorflow/docs_src/programmers_guide/supervisor.md
+++ b/tensorflow/docs_src/programmers_guide/supervisor.md
@@ -49,7 +49,7 @@ The simplest scenario for using a supervisor is to:
...create graph...
my_train_op = ...
- sv = tf.Supervisor(logdir="/my/training/directory")
+ sv = tf.train.Supervisor(logdir="/my/training/directory")
with sv.managed_session() as sess:
for step in range(100000):
if sv.should_stop():
@@ -130,7 +130,7 @@ For example this code runs the summary op every 100 steps in the training loop:
my_train_op = ...
my_summary_op = tf.summary.merge_all()
- sv = tf.Supervisor(logdir="/my/training/directory",
+ sv = tf.train.Supervisor(logdir="/my/training/directory",
summary_op=None) # Do not run the summary service
with sv.managed_session() as sess:
for step in range(100000):
@@ -169,7 +169,7 @@ the new model from the pre-trained checkpoint.
```python
...create graph...
# Create a saver that restores only the pre-trained variables.
- pre_train_saver = tf.Saver([pre_train_var1, pre_train_var2])
+ pre_train_saver = tf.train.Saver([pre_train_var1, pre_train_var2])
# Define an init function that loads the pretrained checkpoint.
def load_pretrain(sess):
@@ -179,7 +179,7 @@ the new model from the pre-trained checkpoint.
#
# The init function is called _after_ the variables have been initialized
# by running the init_op.
- sv = tf.Supervisor(logdir="/my/training/directory",
+ sv = tf.train.Supervisor(logdir="/my/training/directory",
init_fn=load_pretrain)
with sv.managed_session() as sess:
# Here sess was either initialized from the pre-trained-checkpoint or
@@ -207,7 +207,7 @@ def my_additional_sumaries(sv, sess):
...fetch and write summaries, see below...
...
- sv = tf.Supervisor(logdir="/my/training/directory")
+ sv = tf.train.Supervisor(logdir="/my/training/directory")
with sv.managed_session() as sess:
# Call my_additional_sumaries() every 1200s, or 20mn,
# passing (sv, sess) as arguments.
@@ -277,15 +277,15 @@ constructor:
for checkpointing.
If you do not pass one, the supervisor creates one for you by calling
- `tf.Saver()`, which add ops to save and restore all variables in your model.
- This is usally what you need.
+ `tf.train.Saver()`, which add ops to save and restore all variables in your model.
+ This is usually what you need.
Example: Use a custom Saver and checkpoint every 30 seconds.
```python
...create graph...
- my_saver = tf.Saver(<only some variables>)
- sv = tf.Supervisor(logdir="/my/training/directory",
+ my_saver = tf.train.Saver(<only some variables>)
+ sv = tf.train.Supervisor(logdir="/my/training/directory",
saver=my_saver,
save_model_secs=30)
with sv.managed_session() as sess:
diff --git a/tensorflow/docs_src/tutorials/layers.md b/tensorflow/docs_src/tutorials/layers.md
index fd305b58ba..2550bd3e42 100644
--- a/tensorflow/docs_src/tutorials/layers.md
+++ b/tensorflow/docs_src/tutorials/layers.md
@@ -674,7 +674,7 @@ that calculates accuracy. Add the following to `main()`:
# Configure the accuracy metric for evaluation
metrics = {
"accuracy":
- learn.metric_spec.MetricSpec(
+ learn.MetricSpec(
metric_fn=tf.metrics.accuracy, prediction_key="classes"),
}
```
diff --git a/tensorflow/docs_src/tutorials/wide.md b/tensorflow/docs_src/tutorials/wide.md
index 312acdd45d..079efb201e 100644
--- a/tensorflow/docs_src/tutorials/wide.md
+++ b/tensorflow/docs_src/tutorials/wide.md
@@ -294,7 +294,7 @@ only learn one of the three cases:
1. Income stays the same no matter at what age (no correlation)
If we want to learn the fine-grained correlation between income and each age
-group seperately, we can leverage **bucketization**. Bucketization is a process
+group separately, we can leverage **bucketization**. Bucketization is a process
of dividing the entire range of a continuous feature into a set of consecutive
bins/buckets, and then converting the original numerical feature into a bucket
ID (as a categorical feature) depending on which bucket that value falls into.
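As the paragraph above explains, bucketization maps a continuous feature to a bucket ID. A hedged sketch using the contrib feature-column API from this tutorial (the boundary values are illustrative):

```python
import tensorflow as tf

age = tf.contrib.layers.real_valued_column('age')
age_buckets = tf.contrib.layers.bucketized_column(
    age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```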
diff --git a/tensorflow/docs_src/tutorials/wide_and_deep.md b/tensorflow/docs_src/tutorials/wide_and_deep.md
index 9d435da1db..b5e5981fe1 100644
--- a/tensorflow/docs_src/tutorials/wide_and_deep.md
+++ b/tensorflow/docs_src/tutorials/wide_and_deep.md
@@ -233,7 +233,7 @@ def input_fn(df):
categorical_cols = {k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
- shape=[df[k].size, 1])
+ dense_shape=[df[k].size, 1])
for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols.items() + categorical_cols.items())
@@ -269,4 +269,4 @@ familiar with the API. Wide & Deep Learning will be even more powerful if you
try it on a large dataset with many sparse feature columns that have a large
number of possible feature values. Again, feel free to take a look at our
[research paper](http://arxiv.org/abs/1606.07792) for more ideas about how to
-apply Wide & Deep Learning in real-world large-scale maching learning problems.
+apply Wide & Deep Learning in real-world large-scale machine learning problems.
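The tutorial fix above renames the `shape` keyword to `dense_shape` in the `tf.SparseTensor` constructor. A standalone sketch of the same pattern (toy data):

```python
import tensorflow as tf

sparse = tf.SparseTensor(
    indices=[[0, 0], [1, 0], [2, 0]],
    values=['a', 'b', 'c'],
    dense_shape=[3, 1])
```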
diff --git a/tensorflow/examples/image_retraining/retrain.py b/tensorflow/examples/image_retraining/retrain.py
index 575dea5584..a3a4ba310e 100644
--- a/tensorflow/examples/image_retraining/retrain.py
+++ b/tensorflow/examples/image_retraining/retrain.py
@@ -39,8 +39,8 @@ The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
-bazel build third_party/tensorflow/examples/image_retraining:retrain && \
-bazel-bin/third_party/tensorflow/examples/image_retraining/retrain \
+bazel build tensorflow/examples/image_retraining:retrain && \
+bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
You can replace the image_dir argument with any folder containing subfolders of
@@ -893,7 +893,7 @@ def main(_):
print('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
- print('%70s %s' % (test_filename,
+ print('%70s %s' % (test_filename,
list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as constants.
diff --git a/tensorflow/examples/learn/iris.py b/tensorflow/examples/learn/iris.py
index ad01f3544a..0c29caf9c7 100644
--- a/tensorflow/examples/learn/iris.py
+++ b/tensorflow/examples/learn/iris.py
@@ -16,7 +16,7 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from sklearn import cross_validation
+from sklearn import model_selection
from sklearn import metrics
import tensorflow as tf
@@ -24,7 +24,7 @@ import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = tf.contrib.learn.datasets.load_dataset('iris')
- x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+ x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
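`sklearn.cross_validation` was deprecated in favor of `sklearn.model_selection` (around scikit-learn 0.18), and `train_test_split` keeps the same signature, so only the import changes. A self-contained sketch using scikit-learn's bundled iris data:

```python
from sklearn import datasets, model_selection

iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)
```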
diff --git a/tensorflow/examples/learn/resnet.py b/tensorflow/examples/learn/resnet.py
index f822903208..7737f10495 100755
--- a/tensorflow/examples/learn/resnet.py
+++ b/tensorflow/examples/learn/resnet.py
@@ -181,7 +181,7 @@ result = classifier.evaluate(
y=mnist.test.labels,
metrics={
'accuracy':
- tf.contrib.learn.metric_spec.MetricSpec(
+ tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='accuracy'),
})
diff --git a/tensorflow/examples/tutorials/layers/cnn_mnist.py b/tensorflow/examples/tutorials/layers/cnn_mnist.py
index 09dbffd517..aa92b1758a 100644
--- a/tensorflow/examples/tutorials/layers/cnn_mnist.py
+++ b/tensorflow/examples/tutorials/layers/cnn_mnist.py
@@ -148,7 +148,7 @@ def main(unused_argv):
# Configure the accuracy metric for evaluation
metrics = {
"accuracy":
- learn.metric_spec.MetricSpec(
+ learn.MetricSpec(
metric_fn=tf.metrics.accuracy, prediction_key="classes"),
}
diff --git a/tensorflow/examples/tutorials/monitors/iris_monitors.py b/tensorflow/examples/tutorials/monitors/iris_monitors.py
index 041592b9b0..a4bf353856 100644
--- a/tensorflow/examples/tutorials/monitors/iris_monitors.py
+++ b/tensorflow/examples/tutorials/monitors/iris_monitors.py
@@ -39,20 +39,20 @@ def main(unused_argv):
validation_metrics = {
"accuracy":
- tf.contrib.learn.metric_spec.MetricSpec(
+ tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
- prediction_key=tf.contrib.learn.prediction_key.PredictionKey.
- CLASSES),
+ prediction_key=
+ tf.contrib.learn.prediction_key.PredictionKey.CLASSES),
"precision":
- tf.contrib.learn.metric_spec.MetricSpec(
+ tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
- prediction_key=tf.contrib.learn.prediction_key.PredictionKey.
- CLASSES),
+ prediction_key=
+ tf.contrib.learn.prediction_key.PredictionKey.CLASSES),
"recall":
- tf.contrib.learn.metric_spec.MetricSpec(
+ tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_recall,
- prediction_key=tf.contrib.learn.prediction_key.PredictionKey.
- CLASSES)
+ prediction_key=
+ tf.contrib.learn.prediction_key.PredictionKey.CLASSES)
}
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
test_set.data,
diff --git a/tensorflow/go/README.md b/tensorflow/go/README.md
index 75d8b1dec8..e32c21ca72 100644
--- a/tensorflow/go/README.md
+++ b/tensorflow/go/README.md
@@ -118,6 +118,18 @@ from source.
go test github.com/tensorflow/tensorflow/tensorflow/go
```
+### Generate wrapper functions for ops
+
+Go functions corresponding to TensorFlow operations are generated in `op/wrappers.go`. To regenerate them:
+
+Prerequisites:
+- [Protocol buffer compiler (protoc) 3.x](https://github.com/google/protobuf/releases/)
+- The TensorFlow repository under GOPATH
+
+```sh
+go generate github.com/tensorflow/tensorflow/tensorflow/go/op
+```
+
## Support
Use [stackoverflow](http://stackoverflow.com/questions/tagged/tensorflow) and/or
diff --git a/tensorflow/go/graph.go b/tensorflow/go/graph.go
index c0f91ffb30..c64ba84432 100644
--- a/tensorflow/go/graph.go
+++ b/tensorflow/go/graph.go
@@ -162,7 +162,11 @@ func (g *Graph) AddOperation(args OpSpec) (*Operation, error) {
for i, v := range in {
list[i] = v.c()
}
- C.TF_AddInputList(cdesc, &list[0], C.int(size))
+ if size > 0 {
+ C.TF_AddInputList(cdesc, &list[0], C.int(size))
+ } else {
+ C.TF_AddInputList(cdesc, nil, 0)
+ }
}
}
status := newStatus()
@@ -202,7 +206,11 @@ func setAttr(cdesc *C.TF_OperationDescription, status *status, name string, valu
list[i] = unsafe.Pointer(C.CString(s))
lens[i] = C.size_t(len(s))
}
- C.TF_SetAttrStringList(cdesc, cAttrName, &list[0], &lens[0], C.int(size))
+ if size > 0 {
+ C.TF_SetAttrStringList(cdesc, cAttrName, &list[0], &lens[0], C.int(size))
+ } else {
+ C.TF_SetAttrStringList(cdesc, cAttrName, nil, nil, 0)
+ }
for _, s := range list {
C.free(s)
}
@@ -214,7 +222,11 @@ func setAttr(cdesc *C.TF_OperationDescription, status *status, name string, valu
for i, v := range value {
list[i] = C.int64_t(v)
}
- C.TF_SetAttrIntList(cdesc, cAttrName, &list[0], C.int(size))
+ if size > 0 {
+ C.TF_SetAttrIntList(cdesc, cAttrName, &list[0], C.int(size))
+ } else {
+ C.TF_SetAttrIntList(cdesc, cAttrName, nil, 0)
+ }
case float32:
C.TF_SetAttrFloat(cdesc, cAttrName, C.float(value))
case []float32:
@@ -223,7 +235,11 @@ func setAttr(cdesc *C.TF_OperationDescription, status *status, name string, valu
for i, v := range value {
list[i] = C.float(v)
}
- C.TF_SetAttrFloatList(cdesc, cAttrName, &list[0], C.int(size))
+ if size > 0 {
+ C.TF_SetAttrFloatList(cdesc, cAttrName, &list[0], C.int(size))
+ } else {
+ C.TF_SetAttrFloatList(cdesc, cAttrName, nil, 0)
+ }
case bool:
v := C.uchar(0)
if value {
@@ -238,11 +254,18 @@ func setAttr(cdesc *C.TF_OperationDescription, status *status, name string, valu
list[i] = 1
}
}
- C.TF_SetAttrBoolList(cdesc, cAttrName, &list[0], C.int(size))
+ if size > 0 {
+ C.TF_SetAttrBoolList(cdesc, cAttrName, &list[0], C.int(size))
+ } else {
+ C.TF_SetAttrBoolList(cdesc, cAttrName, nil, 0)
+ }
case DataType:
C.TF_SetAttrType(cdesc, cAttrName, C.TF_DataType(value))
case []DataType:
- list := (*C.TF_DataType)(&value[0])
+ var list *C.TF_DataType
+ if len(value) > 0 {
+ list = (*C.TF_DataType)(&value[0])
+ }
C.TF_SetAttrTypeList(cdesc, cAttrName, list, C.int(len(value)))
case *Tensor:
C.TF_SetAttrTensor(cdesc, cAttrName, value.c, status.c)
@@ -255,7 +278,11 @@ func setAttr(cdesc *C.TF_OperationDescription, status *status, name string, valu
for i, v := range value {
list[i] = v.c
}
- C.TF_SetAttrTensorList(cdesc, cAttrName, &list[0], C.int(size), status.c)
+ var plist **C.TF_Tensor
+ if size > 0 {
+ plist = &list[0]
+ }
+ C.TF_SetAttrTensorList(cdesc, cAttrName, plist, C.int(size), status.c)
if err := status.Err(); err != nil {
return fmt.Errorf("bad value for attribute %q: %v", name, err)
}
@@ -276,7 +303,11 @@ func setAttr(cdesc *C.TF_OperationDescription, status *status, name string, valu
dimsp[i] = &dims[i][0]
}
}
- C.TF_SetAttrShapeList(cdesc, cAttrName, &dimsp[0], &ndims[0], C.int(len(value)))
+ if len(value) > 0 {
+ C.TF_SetAttrShapeList(cdesc, cAttrName, &dimsp[0], &ndims[0], C.int(len(value)))
+ } else {
+ C.TF_SetAttrShapeList(cdesc, cAttrName, nil, nil, 0)
+ }
default:
return fmt.Errorf("attribute %q has a type (%T) which is not valid for operation attributes", name, value)
}
diff --git a/tensorflow/java/BUILD b/tensorflow/java/BUILD
index 78ef152689..f2904ad5a6 100644
--- a/tensorflow/java/BUILD
+++ b/tensorflow/java/BUILD
@@ -55,6 +55,19 @@ java_test(
)
java_test(
+ name = "SavedModelBundleTest",
+ size = "small",
+ srcs = ["src/test/java/org/tensorflow/SavedModelBundleTest.java"],
+ data = ["//tensorflow/cc/saved_model:saved_model_half_plus_two"],
+ test_class = "org.tensorflow.SavedModelBundleTest",
+ deps = [
+ ":tensorflow",
+ ":testutil",
+ "@junit",
+ ],
+)
+
+java_test(
name = "SessionTest",
size = "small",
srcs = ["src/test/java/org/tensorflow/SessionTest.java"],
diff --git a/tensorflow/java/README.md b/tensorflow/java/README.md
index f3cf435686..26377ba0d2 100644
--- a/tensorflow/java/README.md
+++ b/tensorflow/java/README.md
@@ -121,7 +121,7 @@ libraries will need to be built from source.
```
The JAR (`libtensorflow.jar`) and native library (`libtensorflow_jni.so`) will
-be in `bazel-genfiles/tensorflow/tensorflow/java`.
+be in `bazel-bin/tensorflow/java`.
### Maven
diff --git a/tensorflow/java/src/main/java/org/tensorflow/Graph.java b/tensorflow/java/src/main/java/org/tensorflow/Graph.java
index 6a1dd4c113..42d7f48464 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/Graph.java
+++ b/tensorflow/java/src/main/java/org/tensorflow/Graph.java
@@ -30,6 +30,11 @@ public final class Graph implements AutoCloseable {
nativeHandle = allocate();
}
+ /** Create a Graph from an existing handle (takes ownership). */
+ Graph(long nativeHandle) {
+ this.nativeHandle = nativeHandle;
+ }
+
/**
* Release resources associated with the Graph.
*
diff --git a/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java b/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
new file mode 100644
index 0000000000..c3938fe23f
--- /dev/null
+++ b/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
@@ -0,0 +1,101 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+package org.tensorflow;
+
+/**
+ * SavedModelBundle represents a model loaded from storage.
+ *
+ * <p>The model consists of a description of the computation (a {@link Graph}), a {@link Session}
+ * with tensors (e.g., parameters or variables in the graph) initialized to values saved in storage,
+ * and a description of the model (a serialized representation of a <a
+ * href="https://www.tensorflow.org/code/tensorflow/core/protobuf/meta_graph.proto">MetaGraphDef
+ * protocol buffer</a>).
+ */
+public class SavedModelBundle implements AutoCloseable {
+
+ /**
+ * Load a saved model from an export directory.
+ *
+ * @param exportDir the directory path containing a saved model.
+ * @param tags the tags identifying the specific metagraphdef to load.
+ * @return a bundle containing the graph and associated session.
+ */
+ public static SavedModelBundle load(String exportDir, String... tags) {
+ return load(exportDir, tags, null);
+ }
+
+ /**
+ * Returns the serialized <a
+ * href="https://www.tensorflow.org/code/tensorflow/core/protobuf/meta_graph.proto">MetaGraphDef
+ * protocol buffer</a> associated with the saved model.
+ */
+ public byte[] metaGraphDef() {
+ return metaGraphDef;
+ }
+
+ /** Returns the graph that describes the computation performed by the model. */
+ public Graph graph() {
+ return graph;
+ }
+
+ /**
+ * Returns the {@link Session} with which to perform computation using the model.
+ *
+ * @return the initialized session
+ */
+ public Session session() {
+ return session;
+ }
+
+ /**
+ * Releases resources (the {@link Graph} and {@link Session}) associated with the saved model
+ * bundle.
+ */
+ @Override
+ public void close() {
+ session.close();
+ graph.close();
+ }
+
+ private final Graph graph;
+ private final Session session;
+ private final byte[] metaGraphDef;
+
+ private SavedModelBundle(Graph graph, Session session, byte[] metaGraphDef) {
+ this.graph = graph;
+ this.session = session;
+ this.metaGraphDef = metaGraphDef;
+ }
+
+ /**
+ * Create a SavedModelBundle object from a handle to the C TF_Graph object and to the C TF_Session
+ * object, plus the serialized MetaGraphDef.
+ *
+ * <p>Invoked from the native load method. Takes ownership of the handles.
+ */
+ private static SavedModelBundle fromHandle(
+ long graphHandle, long sessionHandle, byte[] metaGraphDef) {
+ Graph graph = new Graph(graphHandle);
+ Session session = new Session(graph, sessionHandle);
+ return new SavedModelBundle(graph, session, metaGraphDef);
+ }
+
+ private static native SavedModelBundle load(String exportDir, String[] tags, byte[] runOptions);
+
+ static {
+ TensorFlow.init();
+ }
+}
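
For orientation, the new Java `SavedModelBundle.load(exportDir, tags...)` entry point mirrors the existing Python-side SavedModel loader. Below is a minimal, hypothetical Python sketch of the equivalent workflow; the export path and the `serve` tag are taken from the Java test added later in this change, and `tf.saved_model.loader.load` is the pre-existing Python API rather than something introduced here.

```python
import tensorflow as tf

export_dir = "tensorflow/cc/saved_model/testdata/half_plus_two/00000123"

with tf.Session(graph=tf.Graph()) as sess:
    # Restores the graph and its variables and returns the MetaGraphDef,
    # matching the graph()/session()/metaGraphDef() accessors on the Java side.
    meta_graph_def = tf.saved_model.loader.load(sess, ["serve"], export_dir)
    print(len(meta_graph_def.SerializeToString()))  # analogous to metaGraphDef()
```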
diff --git a/tensorflow/java/src/main/java/org/tensorflow/Session.java b/tensorflow/java/src/main/java/org/tensorflow/Session.java
index 575fe5901a..0d51297846 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/Session.java
+++ b/tensorflow/java/src/main/java/org/tensorflow/Session.java
@@ -59,6 +59,13 @@ public final class Session implements AutoCloseable {
}
}
+ /** Wrap an existing session with the associated {@link Graph}. */
+ Session(Graph g, long nativeHandle) {
+ graph = g;
+ this.nativeHandle = nativeHandle;
+ graphRef = g.ref();
+ }
+
/**
* Release resources associated with the Session.
*
diff --git a/tensorflow/java/src/main/java/org/tensorflow/Shape.java b/tensorflow/java/src/main/java/org/tensorflow/Shape.java
index ed74e9bb1b..f6677e9a15 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/Shape.java
+++ b/tensorflow/java/src/main/java/org/tensorflow/Shape.java
@@ -77,7 +77,8 @@ public final class Shape {
}
   /** Succinct description of the shape meant for debugging. */
- @Override public String toString() {
+ @Override
+ public String toString() {
if (shape == null) {
return "<unknown>";
}
diff --git a/tensorflow/java/src/main/native/saved_model_bundle_jni.cc b/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
new file mode 100644
index 0000000000..de6382a79c
--- /dev/null
+++ b/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
@@ -0,0 +1,107 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <limits>
+#include <memory>
+
+#include "tensorflow/c/c_api.h"
+#include "tensorflow/java/src/main/native/exception_jni.h"
+#include "tensorflow/java/src/main/native/saved_model_bundle_jni.h"
+
+JNIEXPORT jobject JNICALL Java_org_tensorflow_SavedModelBundle_load(
+ JNIEnv* env, jclass clazz, jstring export_dir, jobjectArray tags,
+ jbyteArray run_options) {
+ TF_Status* status = TF_NewStatus();
+ jobject bundle = nullptr;
+
+ // allocate parameters for TF_LoadSessionFromSavedModel
+ TF_SessionOptions* opts = TF_NewSessionOptions();
+ TF_Buffer* crun_options = nullptr;
+ if (run_options != nullptr) {
+ size_t sz = env->GetArrayLength(run_options);
+ if (sz > 0) {
+ jbyte* run_options_data = env->GetByteArrayElements(run_options, nullptr);
+ crun_options =
+ TF_NewBufferFromString(static_cast<void*>(run_options_data), sz);
+ env->ReleaseByteArrayElements(run_options, run_options_data, JNI_ABORT);
+ }
+ }
+ const char* cexport_dir = env->GetStringUTFChars(export_dir, nullptr);
+ std::unique_ptr<const char* []> tags_ptrs;
+ size_t tags_len = env->GetArrayLength(tags);
+ tags_ptrs.reset(new const char*[tags_len]);
+ for (size_t i = 0; i < tags_len; ++i) {
+ jstring tag = static_cast<jstring>(env->GetObjectArrayElement(tags, i));
+ tags_ptrs[i] = env->GetStringUTFChars(tag, nullptr);
+ env->DeleteLocalRef(tag);
+ }
+
+ // load the session
+ TF_Graph* graph = TF_NewGraph();
+ TF_Buffer* metagraph_def = TF_NewBuffer();
+ TF_Session* session = TF_LoadSessionFromSavedModel(
+ opts, crun_options, cexport_dir, tags_ptrs.get(), tags_len, graph,
+ metagraph_def, status);
+
+ // release the parameters
+ TF_DeleteSessionOptions(opts);
+ if (crun_options != nullptr) {
+ TF_DeleteBuffer(crun_options);
+ }
+ env->ReleaseStringUTFChars(export_dir, cexport_dir);
+ for (size_t i = 0; i < tags_len; ++i) {
+ jstring tag = static_cast<jstring>(env->GetObjectArrayElement(tags, i));
+ env->ReleaseStringUTFChars(tag, tags_ptrs[i]);
+ env->DeleteLocalRef(tag);
+ }
+
+ // handle the result
+ if (throwExceptionIfNotOK(env, status)) {
+ // sizeof(jsize) is less than sizeof(size_t) on some platforms.
+ if (metagraph_def->length > std::numeric_limits<jint>::max()) {
+ throwException(
+ env, kIndexOutOfBoundsException,
+ "MetaGraphDef is too large to serialize into a byte[] array");
+ } else {
+ static_assert(sizeof(jbyte) == 1, "unexpected size of the jbyte type");
+ jint jmetagraph_len = static_cast<jint>(metagraph_def->length);
+ jbyteArray jmetagraph_def = env->NewByteArray(jmetagraph_len);
+ env->SetByteArrayRegion(jmetagraph_def, 0, jmetagraph_len,
+ static_cast<const jbyte*>(metagraph_def->data));
+
+ jmethodID method = env->GetStaticMethodID(
+ clazz, "fromHandle", "(JJ[B)Lorg/tensorflow/SavedModelBundle;");
+ bundle = env->CallStaticObjectMethod(
+ clazz, method, reinterpret_cast<jlong>(graph),
+ reinterpret_cast<jlong>(session), jmetagraph_def);
+ graph = nullptr;
+ session = nullptr;
+ env->DeleteLocalRef(jmetagraph_def);
+ }
+ }
+
+ if (session != nullptr) {
+ TF_CloseSession(session, status);
+ // Result of close is ignored, delete anyway.
+ TF_DeleteSession(session, status);
+ }
+ if (graph != nullptr) {
+ TF_DeleteGraph(graph);
+ }
+ TF_DeleteBuffer(metagraph_def);
+ TF_DeleteStatus(status);
+
+ return bundle;
+}
diff --git a/tensorflow/java/src/main/native/saved_model_bundle_jni.h b/tensorflow/java/src/main/native/saved_model_bundle_jni.h
new file mode 100644
index 0000000000..6cce6a81bd
--- /dev/null
+++ b/tensorflow/java/src/main/native/saved_model_bundle_jni.h
@@ -0,0 +1,37 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_JAVA_SAVEDMODELBUNDLE_JNI_H_
+#define TENSORFLOW_JAVA_SAVEDMODELBUNDLE_JNI_H_
+
+#include <jni.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Class: org_tensorflow_SavedModelBundle
+ * Method: load
+ * Signature:
+ * (Ljava/lang/String;[Ljava/lang/String;[B)Lorg/tensorflow/SavedModelBundle;
+ */
+JNIEXPORT jobject JNICALL Java_org_tensorflow_SavedModelBundle_load(
+ JNIEnv *, jclass, jstring, jobjectArray, jbyteArray);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+#endif // TENSORFLOW_JAVA_SAVEDMODELBUNDLE_JNI_H_
diff --git a/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java b/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
new file mode 100644
index 0000000000..7922f3329c
--- /dev/null
+++ b/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
@@ -0,0 +1,53 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+package org.tensorflow;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Unit tests for {@link org.tensorflow.SavedModelBundle}. */
+@RunWith(JUnit4.class)
+public class SavedModelBundleTest {
+
+ private static final String SAVED_MODEL_PATH =
+ "tensorflow/cc/saved_model/testdata/half_plus_two/00000123";
+
+ @Test
+ public void load() {
+ try (SavedModelBundle bundle = SavedModelBundle.load(SAVED_MODEL_PATH, "serve")) {
+ assertNotNull(bundle.session());
+ assertNotNull(bundle.graph());
+ assertNotNull(bundle.metaGraphDef());
+ }
+ }
+
+ @Test
+ public void loadNonExistentBundle() {
+ try {
+ SavedModelBundle bundle = SavedModelBundle.load("__BAD__", "serve");
+ bundle.close();
+ fail("not expected");
+ } catch (org.tensorflow.TensorFlowException e) {
+ // expected exception
+ assertTrue(e.getMessage().contains("SavedModel not found"));
+ }
+ }
+}
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index 7c67354c8b..08572955d9 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -265,7 +265,9 @@ class Tensor(_TensorLike):
# Unary.
"__invert__",
"__neg__",
- "__abs__"
+ "__abs__",
+ "__matmul__",
+ "__rmatmul__"
}
def __init__(self, op, value_index, dtype):
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index f65bb4c10e..e9e10ccb67 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -54,7 +54,6 @@ def gpu_device_name():
return x.name
return ""
-
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
diff --git a/tensorflow/python/kernel_tests/matmul_op_test.py b/tensorflow/python/kernel_tests/matmul_op_test.py
index 69d4198877..bfdd896422 100644
--- a/tensorflow/python/kernel_tests/matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/matmul_op_test.py
@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import numpy as np
+import operator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
@@ -152,6 +153,59 @@ class MatMulStatsTest(test_lib.TestCase):
self.assertEqual(7200, flops)
+try:
+ # @ operator supported since python 3.5.
+ infix_matmul = operator.matmul
+except AttributeError:
+ # For earlier versions of python, emulate regular behavior.
+ # Useful to build and test for 3.5+ on earlier versions.
+ def infix_matmul(x, y):
+ try:
+ r = type(x).__matmul__(x, y)
+ except AttributeError:
+ r = NotImplemented
+ if r is NotImplemented and type(x) is not type(y):
+ try:
+ r = type(y).__rmatmul__(y, x)
+ except AttributeError:
+ r = NotImplemented
+ if r is NotImplemented:
+ raise TypeError("unsupported operand type(s) for @: '{}' and '{}'"
+ .format(type(x).__name__, type(y).__name__))
+ return r
+
+
+class MatMulInfixOperatorTest(test_lib.TestCase):
+
+ def testMismatchedShape(self):
+ with self.assertRaisesWithPredicateMatch(ValueError,
+ lambda e: "Shape must" in str(e)):
+ infix_matmul(
+ ops.convert_to_tensor([10.0, 20.0, 30.0]),
+ ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
+
+ def testMismatchedDimensions(self):
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: "Dimensions must" in str(e)):
+ infix_matmul(
+ ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
+ ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
+
+ def testInfixMatmulIsTfMatmul(self):
+ a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
+ b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
+ c = infix_matmul(a, b)
+ self.assertEqual(c.op.type, "MatMul")
+
+ def testInfixMatmulDoesDotProduct(self):
+ a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
+ b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
+ c = infix_matmul(a, b)
+ d = math_ops.matmul(a, b)
+ with self.test_session():
+ self.assertAllEqual(c.eval(), d.eval())
+
+
if __name__ == "__main__":
sizes = [1, 3, 5]
trans_options = [[False, False], [True, False], [False, True]]
diff --git a/tensorflow/python/kernel_tests/variable_scope_test.py b/tensorflow/python/kernel_tests/variable_scope_test.py
index fb27782562..2b56054629 100644
--- a/tensorflow/python/kernel_tests/variable_scope_test.py
+++ b/tensorflow/python/kernel_tests/variable_scope_test.py
@@ -721,6 +721,57 @@ class VariableScopeTest(test.TestCase):
self.assertEqual(varname_type[0], ("x", dtypes.float32))
self.assertEqual(varname_type[1], ("y", dtypes.int64))
+ def testGetCollection(self):
+ with self.test_session():
+ a = variable_scope.get_variable("a", [])
+ b = variable_scope.get_variable("b", [], trainable=False)
+ with variable_scope.variable_scope("foo_") as scope1:
+ a = variable_scope.get_variable("a", [])
+ b = variable_scope.get_variable("b", [], trainable=False)
+ self.assertEqual([
+ v.name
+ for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ ], ["foo_/a:0"])
+ self.assertEqual([
+ v.name
+ for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ ], ["foo_/a:0", "foo_/b:0"])
+ with variable_scope.variable_scope("foo") as scope2:
+ a = variable_scope.get_variable("a", [])
+ b = variable_scope.get_variable("b", [], trainable=False)
+ self.assertEqual([
+ v.name
+ for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ ], ["foo/a:0"])
+ self.assertEqual([
+ v.name
+ for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ ], ["foo/a:0", "foo/b:0"])
+ scope = variable_scope.get_variable_scope()
+ self.assertEqual([
+ v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ ], ["a:0", "b:0", "foo_/a:0", "foo_/b:0", "foo/a:0", "foo/b:0"])
+ self.assertEqual([
+ v.name
+ for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+ ], ["a:0", "foo_/a:0", "foo/a:0"])
+
+ def testGetTrainableVariables(self):
+ with self.test_session():
+ a = variable_scope.get_variable("a", [])
+ with variable_scope.variable_scope("foo") as scope:
+ b = variable_scope.get_variable("b", [])
+ c = variable_scope.get_variable("c", [], trainable=False)
+ self.assertEqual([v.name
+ for v in scope.trainable_variables()], ["foo/b:0"])
+
+ def testGetGlobalVariables(self):
+ with self.test_session():
+ a = variable_scope.get_variable("a", [])
+ with variable_scope.variable_scope("foo") as scope:
+ b = variable_scope.get_variable("b", [])
+ self.assertEqual([v.name
+ for v in scope.global_variables()], ["foo/b:0"])
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
diff --git a/tensorflow/python/kernel_tests/variables_test.py b/tensorflow/python/kernel_tests/variables_test.py
index 48be8cff4f..0f4d11dd3f 100644
--- a/tensorflow/python/kernel_tests/variables_test.py
+++ b/tensorflow/python/kernel_tests/variables_test.py
@@ -318,6 +318,10 @@ class VariablesTestCase(test.TestCase):
var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
+ var_m = variables.Variable([[2.0, 3.0]])
+ matmul = var_m.__matmul__([[10.0], [20.0]])
+ rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
+
variables.global_variables_initializer().run()
self.assertAllClose([2.0], add.eval())
self.assertAllClose([3.0], radd.eval())
@@ -348,6 +352,9 @@ class VariablesTestCase(test.TestCase):
self.assertAllClose(rnd[2, 0:0], slice_v.eval())
+ self.assertAllClose([[80.0]], matmul.eval())
+ self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], rmatmul.eval())
+
def testSession(self):
with self.test_session() as sess:
var = variables.Variable([1, 12])
diff --git a/tensorflow/python/layers/core.py b/tensorflow/python/layers/core.py
index 92177f932e..fc027ca95c 100644
--- a/tensorflow/python/layers/core.py
+++ b/tensorflow/python/layers/core.py
@@ -133,24 +133,17 @@ class Dense(base._Layer): # pylint: disable=protected-access
def call(self, inputs):
shape = inputs.get_shape().as_list()
- input_dim = shape[-1]
output_shape = shape[:-1] + [self.units]
if len(output_shape) > 2:
- # Reshape the input to 2D.
- output_shape_tensors = array_ops.unstack(array_ops.shape(inputs))
- output_shape_tensors[-1] = self.units
- output_shape_tensor = array_ops.stack(output_shape_tensors)
- inputs = array_ops.reshape(inputs, [-1, input_dim])
-
- outputs = standard_ops.matmul(inputs, self.kernel)
- if self.use_bias:
- outputs = nn.bias_add(outputs, self.bias)
-
- if len(output_shape) > 2:
+ # Broadcasting is required for the inputs.
+ outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
+ [0]])
# Reshape the output back to the original ndim of the input.
- outputs = array_ops.reshape(outputs, output_shape_tensor)
outputs.set_shape(output_shape)
-
+ else:
+ outputs = standard_ops.matmul(inputs, self.kernel)
+ if self.use_bias:
+ outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
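
The rewrite above replaces the reshape-to-2-D / matmul / reshape-back sequence with a single tensordot over the last input axis. A minimal NumPy sketch (an illustration of the algebra, not TensorFlow code) of why the two formulations agree for inputs with more than two dimensions:

```python
import numpy as np

batch, time, input_dim, units = 4, 7, 3, 5
inputs = np.random.randn(batch, time, input_dim)
kernel = np.random.randn(input_dim, units)

# Old formulation: flatten all but the last axis, multiply, restore the shape.
flat = inputs.reshape(-1, input_dim)
old = np.matmul(flat, kernel).reshape(batch, time, units)

# New formulation: contract the last axis of `inputs` with the first axis of `kernel`.
new = np.tensordot(inputs, kernel, axes=[[inputs.ndim - 1], [0]])

assert np.allclose(old, new)  # same values, shape (batch, time, units)
```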
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index 308e762320..80170ef021 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -1693,7 +1693,7 @@ def cond(pred, fn1, fn2, name=None):
Args:
pred: A scalar determining whether to return the result of `fn1` or `fn2`.
fn1: The callable to be performed if pred is true.
- fn2: The callable to be performed if pref is false.
+ fn2: The callable to be performed if pred is false.
name: Optional name prefix for the returned tensors.
Returns:
diff --git a/tensorflow/python/ops/functional_ops.py b/tensorflow/python/ops/functional_ops.py
index 5c012704b9..413c29850e 100644
--- a/tensorflow/python/ops/functional_ops.py
+++ b/tensorflow/python/ops/functional_ops.py
@@ -210,7 +210,7 @@ def map_fn(fn, elems, dtype=None, parallel_iterations=10, back_prop=True,
swap_memory=False, infer_shape=True, name=None):
"""map on the list of tensors unpacked from `elems` on dimension 0.
- The simplest version of `map` repeatedly applies the callable `fn` to a
+ The simplest version of `map_fn` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the
tensors unpacked from `elems`. `dtype` is the data type of the return
value of `fn`. Users must provide `dtype` if it is different from
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index c3cddb48f5..f81fc9edd6 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -1702,6 +1702,12 @@ def matmul(a,
[229 244]],
[[508 532]
[697 730]]]
+
+ # Since python >= 3.5 the @ operator is supported (see PEP 465).
+ # In TensorFlow, it simply calls the `tf.matmul()` function, so the
+ # following lines are equivalent:
+ d = a @ b @ [[10.], [11.]]
+ d = tf.matmul(tf.matmul(a, b), [[10.], [11.]])
```
Args:
@@ -1788,6 +1794,8 @@ def matmul(a,
a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
+_OverrideBinaryOperatorHelper(matmul, "matmul")
+
sparse_matmul = gen_math_ops._sparse_mat_mul
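
With `_OverrideBinaryOperatorHelper(matmul, "matmul")` registered here, and `__matmul__`/`__rmatmul__` added to the `Tensor` operator list earlier in this change, Python 3.5's infix `@` operator dispatches to `tf.matmul`. A minimal sketch, assuming a build that includes this change and Python >= 3.5:

```python
import tensorflow as tf

a = tf.constant([[1., 2., 3.]])        # shape (1, 3)
b = tf.constant([[4.], [5.], [6.]])    # shape (3, 1)

c = a @ b            # resolved via Tensor.__matmul__, i.e. tf.matmul(a, b)
d = tf.matmul(a, b)

with tf.Session() as sess:
    print(sess.run(c))   # [[32.]]
    print(sess.run(d))   # [[32.]], same "MatMul" op type and value
```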
diff --git a/tensorflow/python/ops/rnn.py b/tensorflow/python/ops/rnn.py
index 6129c4ed93..a83fb9fb48 100644
--- a/tensorflow/python/ops/rnn.py
+++ b/tensorflow/python/ops/rnn.py
@@ -305,8 +305,6 @@ def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
- dtype: (optional) The data type for the initial state. Required if
- either of the initial states are not provided.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py
index 8637d7513b..1de95f1291 100644
--- a/tensorflow/python/ops/variable_scope.py
+++ b/tensorflow/python/ops/variable_scope.py
@@ -683,6 +683,9 @@ class _VariableStore(object):
init_val = initializer
variable_dtype = None
else:
+ # Instantiate initializer if provided initializer is a type object.
+ if isinstance(initializer, type(init_ops.Initializer)):
+ initializer = initializer(dtype=dtype)
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
variable_dtype = dtype.base_dtype
@@ -881,6 +884,19 @@ class VariableScope(object):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
+ def get_collection(self, name):
+ """Get this scope's variables."""
+ scope = self._name + "/" if self._name else ""
+ return ops.get_collection(name, scope)
+
+ def trainable_variables(self):
+ """Get this scope's trainable variables."""
+ return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
+
+ def global_variables(self):
+ """Get this scope's global variables."""
+ return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+
def get_variable(self,
var_store,
name,
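
A minimal sketch of the two `variable_scope.py` additions above, assuming a build with this change: the new `VariableScope` collection helpers, and `get_variable` accepting an `Initializer` class (rather than an instance), which the new type check instantiates with the variable's dtype. The variable names here are illustrative only.

```python
import tensorflow as tf

with tf.variable_scope("model") as scope:
    w = tf.get_variable("w", [])                   # trainable by default
    b = tf.get_variable("b", [], trainable=False)  # global but not trainable
    # Passing the initializer *class* now works; it is instantiated automatically.
    z = tf.get_variable("z", [2], initializer=tf.zeros_initializer)

print([v.name for v in scope.trainable_variables()])
# ['model/w:0', 'model/z:0']
print([v.name for v in scope.global_variables()])
# ['model/w:0', 'model/b:0', 'model/z:0']
# Both helpers are thin wrappers over scope.get_collection(<collection name>).
```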
diff --git a/tensorflow/python/training/basic_loops.py b/tensorflow/python/training/basic_loops.py
index af300db4e7..52b0f42106 100644
--- a/tensorflow/python/training/basic_loops.py
+++ b/tensorflow/python/training/basic_loops.py
@@ -34,7 +34,7 @@ def basic_train_loop(supervisor, train_step_fn, args=None,
typically runs one training step in the session.
Args:
- supervisor: `tf.Supervisor` to run the training services.
+ supervisor: `tf.train.Supervisor` to run the training services.
train_step_fn: Callable to execute one training step. Called
      repeatedly as `train_step_fn(session, *args, **kwargs)`.
args: Optional positional arguments passed to `train_step_fn`.
diff --git a/tensorflow/python/training/monitored_session.py b/tensorflow/python/training/monitored_session.py
index 8c4dc0cad8..fa5d2121ec 100644
--- a/tensorflow/python/training/monitored_session.py
+++ b/tensorflow/python/training/monitored_session.py
@@ -67,7 +67,7 @@ class Scaffold(object):
The following pieces are directly accessible as attributes of the `Scaffold`
object:
- * `saver`: A `tf.Saver` object taking care of saving the variables. Picked
+ * `saver`: A `tf.train.Saver` object taking care of saving the variables. Picked
from and stored into the `SAVERS` collection in the graph by default.
* `init_op`: An op to run to initialize the variables. Picked from and
stored into the `INIT_OP` collection in the graph by default.
@@ -124,7 +124,7 @@ class Scaffold(object):
local_init_op: Optional op to initialize local variables.
summary_op: Optional op to gather all summaries. Must return a scalar
string tensor containing a serialized `Summary` proto.
- saver: Optional `tf.Saver` object to use to save and restore variables.
+ saver: Optional `tf.train.Saver` object to use to save and restore variables.
"""
# NOTE(touts): modifying the init function to be passed the scaffold is a
diff --git a/tensorflow/stream_executor/stream.cc b/tensorflow/stream_executor/stream.cc
index f28f965f2c..76cbf0b1b6 100644
--- a/tensorflow/stream_executor/stream.cc
+++ b/tensorflow/stream_executor/stream.cc
@@ -163,6 +163,7 @@ string ToVlogString(dnn::DepthToSpaceLayout depth_to_space_layout) {
case dnn::DepthToSpaceLayout::DepthHeightWidth:
return "DepthToSpaceLayout::DepthHeightWidth";
}
+ return "unknown DepthToSpaceLayout";
}
// Used together with PARAM to VLOG calls made to the stream. Intended
diff --git a/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu b/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu
index 16ff229c59..79cf1844f2 100644
--- a/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu
+++ b/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu
@@ -5,7 +5,15 @@ MAINTAINER Jan Prach <jendap@google.com>
# Copy and run the install scripts.
COPY install/*.sh /install/
RUN /install/install_bootstrap_deb_packages.sh
-RUN echo "deb http://http.debian.net/debian jessie-backports main" | tee -a /etc/apt/sources.list
+RUN echo "deb http://http.debian.net/debian jessie-backports main" | \
+ tee -a /etc/apt/sources.list
+# Workaround bug in Jessie backport repository deb packages
+# http://serverfault.com/questions/830636/cannot-install-openjdk-8-jre-headless-on-debian-jessie
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends -t jessie-backports \
+ openjdk-8-jre-headless ca-certificates-java && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
RUN /install/install_deb_packages.sh
RUN /install/install_pip_packages.sh
RUN /install/install_bazel.sh
diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh
index c2ad03bc6f..7aadfd280e 100755
--- a/tensorflow/tools/ci_build/ci_parameterized_build.sh
+++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh
@@ -117,8 +117,7 @@ DOCKER_MAIN_CMD="${CI_BUILD_DIR}/ci_build.sh"
NO_DOCKER_MAIN_CMD="${CI_BUILD_DIR}/builds/configured"
# Additional option flags to apply when Docker is unavailable (e.g., on Mac)
-NO_DOCKER_OPT_FLAG="--linkopt=-headerpad_max_install_names "\
-"--genrule_strategy=standalone"
+NO_DOCKER_OPT_FLAG="--genrule_strategy=standalone"
DO_DOCKER=1
diff --git a/tensorflow/tools/ci_build/update_version.sh b/tensorflow/tools/ci_build/update_version.sh
index da8b956c2a..cde0ab7909 100755
--- a/tensorflow/tools/ci_build/update_version.sh
+++ b/tensorflow/tools/ci_build/update_version.sh
@@ -85,16 +85,6 @@ check_existence file "${SETUP_PY}"
sed -i -e "s/^\_VERSION = [\'\"].*[\'\"]/\_VERSION = \'${MAJOR}.${MINOR}.${PATCH}\'/g" "${SETUP_PY}"
-# Update os_setup.md
-OS_SETUP="${TF_SRC_DIR}/g3doc/get_started/os_setup.md"
-check_existence file "${OS_SETUP}"
-
-sed -i -r -e "s/(.*pip[0-9]* install .*tensorflow-)([0-9]+\.[0-9]+\.[[:alnum:]]+)(-.*\.whl)/\1${MAJOR}.${MINOR}.${PIP_PATCH}\3/g" "${OS_SETUP}"
-sed -i -r -e "s/(.*pip[0-9]* install .*tensorflow_gpu-)([0-9]+\.[0-9]+\.[[:alnum:]]+)(-.*\.whl)/\1${MAJOR}.${MINOR}.${PIP_PATCH}\3/g" "${OS_SETUP}"
-sed -i -r -e "s/(.*export TF_BINARY_URL.*tensorflow-)([0-9]+\.[0-9]+\.[[:alnum:]]+)(-.*\.whl)/\1${MAJOR}.${MINOR}.${PIP_PATCH}\3/g" "${OS_SETUP}"
-sed -i -r -e "s/(.*export TF_BINARY_URL.*tensorflow_gpu-)([0-9]+\.[0-9]+\.[[:alnum:]]+)(-.*\.whl)/\1${MAJOR}.${MINOR}.${PIP_PATCH}\3/g" "${OS_SETUP}"
-sed -i -r -e "s/(.*\`)([0-9]+\.[0-9]+\.[[:alnum:]-]+)(-gpu.*)/\1${MAJOR}.${MINOR}.${PATCH}\3/g" "${OS_SETUP}"
-
# Update README.md
README_MD="./README.md"
diff --git a/tensorflow/tools/ci_build/windows/libtensorflow_cpu.sh b/tensorflow/tools/ci_build/windows/libtensorflow_cpu.sh
index 7bab520f61..9d59a6eb51 100755
--- a/tensorflow/tools/ci_build/windows/libtensorflow_cpu.sh
+++ b/tensorflow/tools/ci_build/windows/libtensorflow_cpu.sh
@@ -76,14 +76,12 @@ zip -j ${DIR}/libtensorflow_jni-cpu-windows-$(uname -m).zip \
rm -f ${DIR}/tensorflow_jni.dll
# Zip up the .dll, LICENSE and include files for the C library.
+mkdir -p ${DIR}/include/tensorflow/c
+mkdir -p ${DIR}/lib
+cp bazel-bin/tensorflow/libtensorflow.so ${DIR}/lib/tensorflow.dll
+cp tensorflow/c/c_api.h ${DIR}/include/tensorflow/c
+cp bazel-genfiles/tensorflow/tools/lib_package/include/tensorflow/c/LICENSE ${DIR}/include/tensorflow/c
cd ${DIR}
-mkdir -p include/tensorflow/c
-mkdir -p lib
-cp bazel-bin/tensorflow/libtensorflow.so lib/tensorflow.dll
-cp tensorflow/c/c_api.h include/tensorflow/c
-cp ../bazel-genfiles/tensorflow/tools/lib_package/include/tensorflow/c/LICENSE include/tensorflow/c
-zip libtensorflow-cpu-windows-$(uname -m).zip lib/* include/*
-# Zip up the .dll, LICENSE and header files for the C library.
zip -j libtensorflow-cpu-windows-$(uname -m).zip \
lib/tensorflow.dll \
include/c/c_api.h \
diff --git a/tensorflow/tools/dist_test/local_test.sh b/tensorflow/tools/dist_test/local_test.sh
index f9f37ff0e1..f536beef54 100755
--- a/tensorflow/tools/dist_test/local_test.sh
+++ b/tensorflow/tools/dist_test/local_test.sh
@@ -151,6 +151,8 @@ rm -rf "${BUILD_DIR}"
# Run docker image for test.
docker run ${DOCKER_IMG_NAME} \
/var/tf_dist_test/scripts/dist_mnist_test.sh \
- --ps_hosts "localhost:2000,localhost:2001" \
- --worker_hosts "localhost:3000,localhost:3001" \
+ --ps_hosts $(seq -f "localhost:%g" -s "," \
+ 2000 $((2000 + ${NUM_PARAMETER_SERVERS} - 1))) \
+ --worker_hosts $(seq -f "localhost:%g" -s "," \
+ 3000 $((3000 + ${NUM_WORKERS} - 1))) \
--num_gpus 0 ${SYNC_REPLICAS_FLAG}
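
The hard-coded two-host lists are replaced by `seq`-generated ones driven by `NUM_PARAMETER_SERVERS` and `NUM_WORKERS`. For clarity, a hypothetical Python equivalent of what those `seq` invocations expand to (using 2 parameter servers and 2 workers as an example):

```python
# Illustration only; the script itself builds these strings with `seq`.
num_ps, num_workers = 2, 2
ps_hosts = ",".join("localhost:%d" % p for p in range(2000, 2000 + num_ps))
worker_hosts = ",".join("localhost:%d" % p for p in range(3000, 3000 + num_workers))
print(ps_hosts)       # localhost:2000,localhost:2001
print(worker_hosts)   # localhost:3000,localhost:3001
```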
diff --git a/tensorflow/tools/docker/README.md b/tensorflow/tools/docker/README.md
index 77fd8fc0d4..3e45ae362c 100644
--- a/tensorflow/tools/docker/README.md
+++ b/tensorflow/tools/docker/README.md
@@ -10,16 +10,16 @@ General installation instructions are
quick links here:
* [OSX](https://www.docker.com/products/docker#/mac)
-* [ubuntu](https://docs.docker.com/engine/installation/linux/ubuntulinux/)
+* [Ubuntu](https://docs.docker.com/engine/installation/linux/ubuntulinux/)
## Which containers exist?
-We currently maintain three Docker container images:
+We currently maintain two Docker container images:
* `gcr.io/tensorflow/tensorflow` - TensorFlow with all dependencies - CPU only!
* `gcr.io/tensorflow/tensorflow:latest-gpu` - TensorFlow with all dependencies
- and support for Nvidia Cuda
+ and support for NVidia CUDA
Note: We also publish the same containers into
[Docker Hub](https://hub.docker.com/r/tensorflow/tensorflow/tags/).
@@ -37,9 +37,9 @@ For GPU support install NVidia drivers (ideally latest) and
$ nvidia-docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow:latest-gpu
-Note: If you would have a problem running nvidia-docker you may try the old way
-we have used. But it is not recomended. If you find a bug in nvidia-docker report
-it there please and try using the nvidia-docker as described above.
+Note: If you have a problem running nvidia-docker, you may try the old method
+we have used, but it is not recommended. If you find a bug in nvidia-docker,
+please report it there and try using nvidia-docker as described above.
$ export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | xargs -I{} echo '-v {}:{}')
$ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
@@ -49,7 +49,7 @@ it there please and try using the nvidia-docker as described above.
## More containers
See all available [tags](https://hub.docker.com/r/tensorflow/tensorflow/tags/)
-for additional containers like release candidates or nighlty builds.
+for additional containers, such as release candidates or nightly builds.
## Rebuilding the containers
diff --git a/tensorflow/tools/docs/parser.py b/tensorflow/tools/docs/parser.py
index 51f6adac1e..901b3db22b 100644
--- a/tensorflow/tools/docs/parser.py
+++ b/tensorflow/tools/docs/parser.py
@@ -586,7 +586,7 @@ def _generate_signature(func, reverse_index):
'_ops.GraphKeys': 'tf.GraphKeys',
'init_ops.zeros_initializer': 'tf.zeros_initializer',
'init_ops.ones_initializer': 'tf.ones_initializer',
- 'saver_pb2.SaverDef': 'tf.SaverDef',
+ 'saver_pb2.SaverDef': 'tf.train.SaverDef',
}
full_name_re = '^%s(.%s)+' % (IDENTIFIER_RE, IDENTIFIER_RE)
match = re.match(full_name_re, default_text)
diff --git a/tensorflow/tools/pip_package/BUILD b/tensorflow/tools/pip_package/BUILD
index 6a3f66b0fa..a8b4fbf414 100644
--- a/tensorflow/tools/pip_package/BUILD
+++ b/tensorflow/tools/pip_package/BUILD
@@ -3,7 +3,11 @@
package(default_visibility = ["//visibility:private"])
-load("//tensorflow:tensorflow.bzl", "transitive_hdrs")
+load(
+ "//tensorflow:tensorflow.bzl",
+ "if_not_windows",
+ "transitive_hdrs",
+)
load("//third_party/mkl:build_defs.bzl", "if_mkl")
load("//tensorflow/core:platform/default/build_config_root.bzl", "tf_additional_license_deps")
@@ -93,13 +97,14 @@ filegroup(
"@libxsmm_archive//:LICENSE",
"@local_config_sycl//sycl:LICENSE.text",
"@nanopb_git//:LICENSE.txt",
- "@nccl_archive//:LICENSE.txt",
"@png_archive//:LICENSE",
"@protobuf//:LICENSE",
"@six_archive//:LICENSE",
"@org_pocoo_werkzeug//:LICENSE",
"@zlib_archive//:zlib.h",
- ] + tf_additional_license_deps(),
+ ] + if_not_windows([
+ "@nccl_archive//:LICENSE.txt",
+ ]) + tf_additional_license_deps(),
)
sh_binary(
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 9b229704d6..c7ca83f282 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -29,7 +29,7 @@ from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
-_VERSION = '1.0.0'
+_VERSION = '1.0.1'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 2fb415499d..1caaeb7015 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -231,11 +231,11 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
native.new_http_archive(
name = "gmock_archive",
urls = [
- "http://bazel-mirror.storage.googleapis.com/pkgs.fedoraproject.org/repo/pkgs/gmock/gmock-1.7.0.zip/073b984d8798ea1594f5e44d85b20d66/gmock-1.7.0.zip",
- "http://pkgs.fedoraproject.org/repo/pkgs/gmock/gmock-1.7.0.zip/073b984d8798ea1594f5e44d85b20d66/gmock-1.7.0.zip",
+ "http://bazel-mirror.storage.googleapis.com/github.com/google/googletest/archive/release-1.8.0.zip",
+ "https://github.com/google/googletest/archive/release-1.8.0.zip",
],
- sha256 = "26fcbb5925b74ad5fc8c26b0495dfc96353f4d553492eb97e85a8a6d2f43095b",
- strip_prefix = "gmock-1.7.0",
+ sha256 = "f3ed3b58511efd272eb074a3a6d6fb79d7c2e6a0e374323d1e6bcbcc1ef141bf",
+ strip_prefix = "googletest-release-1.8.0",
build_file = str(Label("//third_party:gmock.BUILD")),
)
@@ -286,6 +286,7 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
],
strip_prefix = "curl-7.49.1",
build_file = str(Label("//third_party:curl.BUILD")),
+ repository = tf_repo_name
)
# grpc expects //external:protobuf_clib and //external:protobuf_compiler
diff --git a/third_party/curl.BUILD b/third_party/curl.BUILD
index 557c2885eb..43f6599acc 100644
--- a/third_party/curl.BUILD
+++ b/third_party/curl.BUILD
@@ -204,13 +204,13 @@ cc_library(
"lib/wildcard.h",
"lib/x509asn1.h",
] + select({
- "@//tensorflow:darwin": [
+ "@%ws%//tensorflow:darwin": [
"lib/vtls/darwinssl.c",
],
- "@//tensorflow:ios": [
+ "@%ws%//tensorflow:ios": [
"lib/vtls/darwinssl.c",
],
- "@//tensorflow:windows": [
+ "@%ws%//tensorflow:windows": [
"lib/asyn-thread.c",
"lib/inet_ntop.c",
"lib/system_win32.c",
@@ -231,7 +231,7 @@ cc_library(
"include/curl/typecheck-gcc.h",
],
copts = select({
- "@//tensorflow:windows": [
+ "@%ws%//tensorflow:windows": [
"/I%prefix%/curl/lib",
"/DHAVE_CONFIG_H",
"/DCURL_DISABLE_FTP",
@@ -255,10 +255,10 @@ cc_library(
"-Wno-string-plus-int",
],
}) + select({
- "@//tensorflow:darwin": [
+ "@%ws%//tensorflow:darwin": [
"-fno-constant-cfstrings",
],
- "@//tensorflow:windows": [
+ "@%ws%//tensorflow:windows": [
# See curl.h for discussion of write size and Windows
"/DCURL_MAX_WRITE_SIZE=16384",
],
@@ -268,17 +268,17 @@ cc_library(
}),
includes = ["include"],
linkopts = select({
- "@//tensorflow:android": [
+ "@%ws%//tensorflow:android": [
"-pie",
],
- "@//tensorflow:darwin": [
+ "@%ws%//tensorflow:darwin": [
"-Wl,-framework",
"-Wl,CoreFoundation",
"-Wl,-framework",
"-Wl,Security",
],
- "@//tensorflow:ios": [],
- "@//tensorflow:windows": [
+ "@%ws%//tensorflow:ios": [],
+ "@%ws%//tensorflow:windows": [
"ws2_32.lib",
],
"//conditions:default": [
@@ -289,8 +289,8 @@ cc_library(
deps = [
"@zlib_archive//:zlib",
] + select({
- "@//tensorflow:ios": [],
- "@//tensorflow:windows": [],
+ "@%ws%//tensorflow:ios": [],
+ "@%ws%//tensorflow:windows": [],
"//conditions:default": [
"@boringssl//:ssl",
],
@@ -386,7 +386,7 @@ cc_binary(
"src/tool_xattr.h",
],
copts = select({
- "@//tensorflow:windows": [
+ "@%ws%//tensorflow:windows": [
"/I%prefix%/curl/lib",
"/DHAVE_CONFIG_H",
"/DCURL_DISABLE_LIBCURL_OPTION",
diff --git a/third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint b/third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint
index b0a73aac79..eb604d38b1 100644
--- a/third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint
+++ b/third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint
@@ -31,7 +31,7 @@
#include "src/FixedPoint/FixedPointTypes.h"
// Use optimized implementations whenever available
-#ifdef EIGEN_VECTORIZE_AVX512
+#if defined (EIGEN_VECTORIZE_AVX512DQ) || defined (EIGEN_VECTORIZE_AVX512BW)
#include "src/FixedPoint/PacketMathAVX512.h"
#include "src/FixedPoint/TypeCastingAVX512.h"
diff --git a/third_party/gmock.BUILD b/third_party/gmock.BUILD
index 501e322529..b800cac954 100644
--- a/third_party/gmock.BUILD
+++ b/third_party/gmock.BUILD
@@ -9,19 +9,19 @@ exports_files(["LICENSE"])
cc_library(
name = "gtest",
srcs = [
- "gtest/src/gtest-all.cc",
- "src/gmock-all.cc",
+ "googlemock/src/gmock-all.cc",
+ "googletest/src/gtest-all.cc",
],
hdrs = glob([
"**/*.h",
- "gtest/src/*.cc",
- "src/*.cc",
+ "googletest/src/*.cc",
+ "googlemock/src/*.cc",
]),
includes = [
- ".",
- "gtest",
- "gtest/include",
- "include",
+ "googlemock",
+ "googlemock/include",
+ "googletest",
+ "googletest/include",
],
linkopts = ["-pthread"],
visibility = ["//visibility:public"],
@@ -29,7 +29,7 @@ cc_library(
cc_library(
name = "gtest_main",
- srcs = ["src/gmock_main.cc"],
+ srcs = ["googlemock/src/gmock_main.cc"],
linkopts = ["-pthread"],
visibility = ["//visibility:public"],
deps = [":gtest"],