aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
-rw-r--r--README.md8
-rw-r--r--RELEASE.md4
-rwxr-xr-xconfigure7
-rw-r--r--tensorflow/BUILD6
-rw-r--r--tensorflow/contrib/cmake/README.md30
-rw-r--r--tensorflow/contrib/cmake/setup.py2
-rw-r--r--tensorflow/contrib/cmake/tf_core_kernels.cmake2
-rw-r--r--tensorflow/contrib/cmake/tf_core_ops.cmake2
-rw-r--r--tensorflow/contrib/cmake/tf_python.cmake22
-rw-r--r--tensorflow/contrib/cmake/tf_tests.cmake4
-rw-r--r--tensorflow/contrib/layers/python/layers/layers_test.py14
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/estimator_test.py1
-rw-r--r--tensorflow/contrib/learn/python/learn/export_strategy.py3
-rw-r--r--tensorflow/contrib/learn/python/learn/utils/checkpoints.py51
-rw-r--r--tensorflow/contrib/makefile/tf_op_files.txt1
-rw-r--r--tensorflow/contrib/metrics/BUILD2
-rw-r--r--tensorflow/contrib/slim/README.md41
-rw-r--r--tensorflow/contrib/slim/python/slim/nets/inception_v3_test.py2
-rw-r--r--tensorflow/core/BUILD1
-rw-r--r--tensorflow/core/common_runtime/sycl/sycl_allocator.cc6
-rw-r--r--tensorflow/core/common_runtime/sycl/sycl_allocator.h1
-rw-r--r--tensorflow/core/common_runtime/sycl/sycl_device.cc31
-rw-r--r--tensorflow/core/common_runtime/sycl/sycl_device.h7
-rw-r--r--tensorflow/core/common_runtime/sycl/sycl_device_context.cc1
-rw-r--r--tensorflow/core/distributed_runtime/master_session.cc2
-rw-r--r--tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc2
-rw-r--r--tensorflow/core/framework/tensor.proto7
-rw-r--r--tensorflow/core/kernels/argmax_op.cc16
-rw-r--r--tensorflow/core/kernels/constant_op.cc37
-rw-r--r--tensorflow/core/kernels/cwise_op_digamma.cc2
-rw-r--r--tensorflow/core/kernels/cwise_op_div.cc1
-rw-r--r--tensorflow/core/kernels/cwise_op_erf.cc2
-rw-r--r--tensorflow/core/kernels/cwise_op_erfc.cc2
-rw-r--r--tensorflow/core/kernels/cwise_op_igammas.cc2
-rw-r--r--tensorflow/core/kernels/cwise_op_lgamma.cc2
-rw-r--r--tensorflow/core/kernels/cwise_op_zeta.cc2
-rw-r--r--tensorflow/core/kernels/cwise_ops_sycl_common.h1
-rw-r--r--tensorflow/core/kernels/dense_update_ops.cc3
-rw-r--r--tensorflow/core/kernels/dynamic_stitch_op.cc14
-rw-r--r--tensorflow/core/kernels/fill_functor.cc13
-rw-r--r--tensorflow/core/kernels/fill_functor.h9
-rw-r--r--tensorflow/core/kernels/function_ops.cc8
-rw-r--r--tensorflow/core/kernels/identity_op.cc23
-rw-r--r--tensorflow/core/kernels/reshape_op.cc21
-rw-r--r--tensorflow/core/kernels/sequence_ops.cc6
-rw-r--r--tensorflow/core/kernels/shape_ops.cc62
-rw-r--r--tensorflow/core/kernels/training_ops.cc19
-rw-r--r--tensorflow/core/ops/math_grad_test.cc3
-rw-r--r--tensorflow/core/ops/math_ops.cc11
-rw-r--r--tensorflow/core/ops/math_ops_test.cc8
-rw-r--r--tensorflow/core/platform/cpu_info.h4
-rw-r--r--tensorflow/core/platform/windows/cpu_info.h (renamed from tensorflow/python/lib/core/status_helper.i)25
-rw-r--r--tensorflow/examples/tutorials/monitors/iris_monitors.py21
-rw-r--r--tensorflow/g3doc/api_docs/cc/ClassEnv.md4
-rw-r--r--tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md2
-rw-r--r--tensorflow/g3doc/api_docs/leftnav_files1
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md244
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.all_variables.md2
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md17
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md49
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md237
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md521
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md22
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md4
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md26
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md27
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md4
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md4
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md207
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.summary.merge.md2
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md37
-rw-r--r--tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md4
-rw-r--r--tensorflow/g3doc/api_docs/python/state_ops.md2
-rw-r--r--tensorflow/g3doc/api_docs/python/summary.md483
-rw-r--r--tensorflow/g3doc/api_docs/python/test.md521
-rw-r--r--tensorflow/g3doc/get_started/os_setup.md103
-rw-r--r--tensorflow/g3doc/how_tos/tool_developers/index.md8
-rw-r--r--tensorflow/g3doc/resources/bib.md16
-rw-r--r--tensorflow/g3doc/resources/index.md7
-rw-r--r--tensorflow/g3doc/tutorials/estimators/index.md8
-rw-r--r--tensorflow/g3doc/tutorials/mnist/tf/index.md2
-rw-r--r--tensorflow/python/BUILD1
-rw-r--r--tensorflow/python/client/device_lib.i1
-rw-r--r--tensorflow/python/debug/BUILD1
-rw-r--r--tensorflow/python/framework/dtypes.py6
-rw-r--r--tensorflow/python/kernel_tests/argmax_op_test.py2
-rw-r--r--tensorflow/python/kernel_tests/atrous_convolution_test.py4
-rw-r--r--tensorflow/python/kernel_tests/bias_op_test.py6
-rw-r--r--tensorflow/python/kernel_tests/conv2d_transpose_test.py12
-rw-r--r--tensorflow/python/kernel_tests/pool_test.py4
-rw-r--r--tensorflow/python/kernel_tests/seq2seq_test.py770
-rw-r--r--tensorflow/python/kernel_tests/variable_scope_test.py37
-rw-r--r--tensorflow/python/kernel_tests/variables_test.py8
-rw-r--r--tensorflow/python/ops/init_ops.py3
-rw-r--r--tensorflow/python/ops/nn_fused_batchnorm_test.py24
-rw-r--r--tensorflow/python/ops/variable_scope.py47
-rw-r--r--tensorflow/python/ops/variables.py42
-rw-r--r--tensorflow/python/saved_model/BUILD2
-rw-r--r--tensorflow/python/summary/summary.py2
-rw-r--r--tensorflow/python/tools/BUILD2
-rw-r--r--tensorflow/python/training/momentum.py4
-rw-r--r--tensorflow/stream_executor/cuda/cuda_gpu_executor.cc5
-rw-r--r--tensorflow/tensorflow.bzl24
-rwxr-xr-xtensorflow/tools/ci_build/builds/test_tutorials.sh20
-rwxr-xr-xtensorflow/tools/ci_build/linux/libtensorflow_gpu.sh22
-rw-r--r--tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh201
-rw-r--r--tensorflow/tools/ci_build/windows/bazel/common_env.sh57
-rw-r--r--tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh104
-rw-r--r--tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat2
-rw-r--r--tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh40
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.bat1
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh60
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh70
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/pip/run.bat1
-rwxr-xr-xtensorflow/tools/docker/parameterized_docker_build.sh9
-rw-r--r--tensorflow/tools/pip_package/setup.py2
-rwxr-xr-xthird_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl12
-rw-r--r--third_party/gpus/cuda_configure.bzl13
-rwxr-xr-xthird_party/sycl/crosstool/computecpp.tpl42
-rw-r--r--third_party/sycl/sycl_configure.bzl11
-rw-r--r--tools/bazel.rc.template3
121 files changed, 2586 insertions, 2232 deletions
diff --git a/README.md b/README.md
index 88c1ad1f94..f8f5de8e0e 100644
--- a/README.md
+++ b/README.md
@@ -33,10 +33,10 @@ and discussion.**
People who are a little more adventurous can also try our nightly binaries:
-* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
-* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
-* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
-* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
+* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
+* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
+* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
+* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
* [Android](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/lastSuccessfulBuild/artifact/bazel-out/local_linux/bin/tensorflow/examples/android/tensorflow_demo.apk) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/))
#### *Try your first TensorFlow program*
diff --git a/RELEASE.md b/RELEASE.md
index 16d3b76502..931c66398e 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -61,9 +61,7 @@
acceleration. Known limitations include: It is not currently possible to load
a custom op library. The GCS and HDFS file systems are not currently
supported. The following ops are not currently implemented:
- DepthwiseConv2dNative, DepthwiseConv2dNativeBackpropFilter,
- DepthwiseConv2dNativeBackpropInput, Dequantize, Digamma, Erf, Erfc, Igamma,
- Igammac, Lgamma, Polygamma, QuantizeAndDequantize, QuantizedAvgPool,
+ Dequantize, QuantizeAndDequantize, QuantizedAvgPool,
QuantizedBatchNomWithGlobalNormalization, QuantizedBiasAdd, QuantizedConcat,
QuantizedConv2D, QuantizedMatmul, QuantizedMaxPool,
QuantizeDownAndShrinkRange, QuantizedRelu, QuantizedRelu6, QuantizedReshape,
diff --git a/configure b/configure
index 65a11ec582..d6d3e19afa 100755
--- a/configure
+++ b/configure
@@ -24,7 +24,8 @@ function bazel_clean_and_fetch() {
if ! is_windows; then
bazel clean --expunge
fi
- bazel fetch //tensorflow/...
+ # TODO(https://github.com/bazelbuild/bazel/issues/2220) Remove the nested `bazel query`.
+ bazel fetch $(bazel query "//tensorflow/... -//tensorflow/examples/android/...")
}
## Set up python-related environment settings
@@ -279,7 +280,7 @@ while true; do
TF_CUDNN_VERSION=${BASH_REMATCH[1]}
echo "libcudnn.so resolves to libcudnn${TF_CUDNN_EXT}"
elif [[ "$REALVAL" =~ ([0-9]*).dylib ]]; then
- TF_CUDNN_EXT=${BASH_REMATCH[1]}".dylib"
+ TF_CUDNN_EXT="."${BASH_REMATCH[1]}".dylib"
TF_CUDNN_VERSION=${BASH_REMATCH[1]}
echo "libcudnn.dylib resolves to libcudnn${TF_CUDNN_EXT}"
fi
@@ -435,7 +436,7 @@ while true; do
# Point to ComputeCpp root
if [ -z "$COMPUTECPP_TOOLKIT_PATH" ]; then
default_computecpp_toolkit_path=/usr/local/computecpp
- read -p "Please specify the location where ComputeCpp $TF_OPENCL_VERSION is installed. Refer to README.md for more details. [Default is $default_computecpp_toolkit_path]: " COMPUTECPP_TOOLKIT_PATH
+ read -p "Please specify the location where ComputeCpp for SYCL $TF_OPENCL_VERSION is installed. [Default is $default_computecpp_toolkit_path]: " COMPUTECPP_TOOLKIT_PATH
fromuser="1"
if [ -z "$COMPUTECPP_TOOLKIT_PATH" ]; then
COMPUTECPP_TOOLKIT_PATH=$default_computecpp_toolkit_path
diff --git a/tensorflow/BUILD b/tensorflow/BUILD
index 6eb088a5f0..c324209805 100644
--- a/tensorflow/BUILD
+++ b/tensorflow/BUILD
@@ -51,6 +51,12 @@ config_setting(
)
config_setting(
+ name = "no_tensorflow_py_deps",
+ values = {"define": "no_tensorflow_py_deps=true"},
+ visibility = ["//visibility:public"],
+)
+
+config_setting(
name = "ios",
values = {
"crosstool_top": "//tools/osx/crosstool:crosstool",
diff --git a/tensorflow/contrib/cmake/README.md b/tensorflow/contrib/cmake/README.md
index 252a9e16bd..1383e0d983 100644
--- a/tensorflow/contrib/cmake/README.md
+++ b/tensorflow/contrib/cmake/README.md
@@ -13,12 +13,36 @@ Linux.
Current Status
--------------
+CMake can be used to build TensorFlow on Windows. See the [getting started documentation](https://www.tensorflow.org/get_started/os_setup.html#pip-installation-on-windows)
+for instructions on how to install a pre-built TensorFlow package on Windows.
+
+### Current known limitations
+* It is not possible to load a custom Op library.
+* GCS and HDFS file systems are not supported.
+* The following Ops are not currently implemented:
+ - Dequantize
+ - QuantizeAndDequantize
+ - QuantizedAvgPool
+ - QuantizedBatchNomWithGlobalNormalization
+ - QuantizedBiasAdd
+ - QuantizedConcat
+ - QuantizedConv2D
+ - QuantizedMatmul
+ - QuantizedMaxPoo
+ - QuantizeDownAndShrinkRange
+ - QuantizedRelu
+ - QuantizedRelu6
+ - QuantizedReshape
+ - QuantizeV2
+ - RequantizationRange
+ - Requantize
+
+## Building with CMake
+
The CMake files in this directory can build the core TensorFlow runtime, an
example C++ binary, and a PIP package containing the runtime and Python
bindings.
-Note: Windows support is in an **alpha** state, and we welcome your feedback.
-
### Pre-requisites
* CMake version 3.5 up to 3.6
@@ -46,6 +70,8 @@ Note: Windows support is in an **alpha** state, and we welcome your feedback.
- [swigwin-3.0.10](http://www.swig.org/download.html)
- [NVidia CUDA Toolkit 8.0] (https://developer.nvidia.com/cuda-downloads)
- [NVidia CUDNN 5.1] (https://developer.nvidia.com/cudnn)
+ - [CMake 3.6](https://cmake.org/files/v3.6/cmake-3.6.3-win64-x64.msi)
+
* Ubuntu 14.04
- Makefile generator
- Docker 1.9.1 (for automated testing)
diff --git a/tensorflow/contrib/cmake/setup.py b/tensorflow/contrib/cmake/setup.py
index 48adeb26e3..8d66e192ec 100644
--- a/tensorflow/contrib/cmake/setup.py
+++ b/tensorflow/contrib/cmake/setup.py
@@ -26,7 +26,7 @@ from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
-_VERSION = '0.12.0-rc0-cmake-experimental'
+_VERSION = '0.12.0-rc1-cmake-experimental'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
diff --git a/tensorflow/contrib/cmake/tf_core_kernels.cmake b/tensorflow/contrib/cmake/tf_core_kernels.cmake
index 0275cbe2d4..ed4ba37147 100644
--- a/tensorflow/contrib/cmake/tf_core_kernels.cmake
+++ b/tensorflow/contrib/cmake/tf_core_kernels.cmake
@@ -62,7 +62,7 @@ if(tensorflow_BUILD_CONTRIB_KERNELS)
"${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/stochastic_hard_routing_gradient_op.cc"
"${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/unpack_path_op.cc"
"${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/hybrid/core/ops/utils.cc"
- )
+ )
list(APPEND tf_core_kernels_srcs ${tf_contrib_kernels_srcs})
endif(tensorflow_BUILD_CONTRIB_KERNELS)
diff --git a/tensorflow/contrib/cmake/tf_core_ops.cmake b/tensorflow/contrib/cmake/tf_core_ops.cmake
index a9791cdeb7..14b6b17e61 100644
--- a/tensorflow/contrib/cmake/tf_core_ops.cmake
+++ b/tensorflow/contrib/cmake/tf_core_ops.cmake
@@ -48,7 +48,7 @@ GENERATE_CONTRIB_OP_LIBRARY(cudnn_rnn "${tensorflow_source_dir}/tensorflow/contr
GENERATE_CONTRIB_OP_LIBRARY(factorization_clustering "${tensorflow_source_dir}/tensorflow/contrib/factorization/ops/clustering_ops.cc")
GENERATE_CONTRIB_OP_LIBRARY(factorization_factorization "${tensorflow_source_dir}/tensorflow/contrib/factorization/ops/factorization_ops.cc")
GENERATE_CONTRIB_OP_LIBRARY(framework_variable "${tensorflow_source_dir}/tensorflow/contrib/framework/ops/variable_ops.cc")
-
+GENERATE_CONTRIB_OP_LIBRARY(tensor_forest "${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/ops/tensor_forest_ops.cc")
########################################################
# tf_user_ops library
diff --git a/tensorflow/contrib/cmake/tf_python.cmake b/tensorflow/contrib/cmake/tf_python.cmake
index ce305a4b1e..8672e17443 100644
--- a/tensorflow/contrib/cmake/tf_python.cmake
+++ b/tensorflow/contrib/cmake/tf_python.cmake
@@ -188,6 +188,7 @@ add_python_module("tensorflow/python/lib")
add_python_module("tensorflow/python/lib/core")
add_python_module("tensorflow/python/lib/io")
add_python_module("tensorflow/python/ops")
+add_python_module("tensorflow/python/ops/losses")
add_python_module("tensorflow/python/platform")
add_python_module("tensorflow/python/platform/default")
add_python_module("tensorflow/python/platform/summary")
@@ -220,6 +221,7 @@ add_python_module("tensorflow/contrib/bayesflow/examples/reinforce_simple")
add_python_module("tensorflow/contrib/bayesflow/python")
add_python_module("tensorflow/contrib/bayesflow/python/kernel_tests")
add_python_module("tensorflow/contrib/bayesflow/python/ops")
+add_python_module("tensorflow/contrib/compiler")
add_python_module("tensorflow/contrib/copy_graph")
add_python_module("tensorflow/contrib/copy_graph/python")
add_python_module("tensorflow/contrib/copy_graph/python/util")
@@ -261,6 +263,12 @@ add_python_module("tensorflow/contrib/grid_rnn")
add_python_module("tensorflow/contrib/grid_rnn/python")
add_python_module("tensorflow/contrib/grid_rnn/python/kernel_tests")
add_python_module("tensorflow/contrib/grid_rnn/python/ops")
+add_python_module("tensorflow/contrib/image")
+add_python_module("tensorflow/contrib/image/python")
+add_python_module("tensorflow/contrib/image/python/ops")
+add_python_module("tensorflow/contrib/input_pipeline")
+add_python_module("tensorflow/contrib/input_pipeline/python")
+add_python_module("tensorflow/contrib/input_pipeline/python/ops")
add_python_module("tensorflow/contrib/integrate")
add_python_module("tensorflow/contrib/integrate/python")
add_python_module("tensorflow/contrib/integrate/python/ops")
@@ -301,6 +309,7 @@ add_python_module("tensorflow/contrib/learn/python/learn/preprocessing/tests")
add_python_module("tensorflow/contrib/learn/python/learn/tests")
add_python_module("tensorflow/contrib/learn/python/learn/tests/dataframe")
add_python_module("tensorflow/contrib/learn/python/learn/utils")
+add_python_module("tensorflow/contrib/legacy_seq2seq")
add_python_module("tensorflow/contrib/linalg")
add_python_module("tensorflow/contrib/linalg/python")
add_python_module("tensorflow/contrib/linalg/python/ops")
@@ -392,6 +401,7 @@ add_python_module("tensorflow/contrib/training/python")
add_python_module("tensorflow/contrib/training/python/training")
add_python_module("tensorflow/contrib/util")
+
# Additional directories with no Python sources.
add_custom_command(TARGET tf_python_touchup_modules PRE_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tensorboard/dist")
@@ -423,6 +433,7 @@ set(tf_python_op_lib_names
)
function(GENERATE_PYTHON_OP_LIB tf_python_op_lib_name)
+ set(options SHAPE_FUNCTIONS_NOT_REQUIRED)
set(oneValueArgs DESTINATION)
set(multiValueArgs ADDITIONAL_LIBRARIES)
cmake_parse_arguments(GENERATE_PYTHON_OP_LIB
@@ -432,7 +443,12 @@ function(GENERATE_PYTHON_OP_LIB tf_python_op_lib_name)
set(GENERATE_PYTHON_OP_LIB_DESTINATION
"${python_ops_target_dir}/gen_${tf_python_op_lib_name}.py")
endif()
-
+ if(GENERATE_PYTHON_OP_LIB_SHAPE_FUNCTIONS_NOT_REQUIRED)
+ set(require_shape_fn 0)
+ else()
+ set(require_shape_fn 1)
+ endif()
+
# Create a C++ executable that links in the appropriate op
# registrations and generates Python wrapper code based on the
# registered ops.
@@ -453,7 +469,7 @@ function(GENERATE_PYTHON_OP_LIB tf_python_op_lib_name)
# containing the wrappers.
add_custom_command(
OUTPUT ${GENERATE_PYTHON_OP_LIB_DESTINATION}
- COMMAND ${tf_python_op_lib_name}_gen_python @${tensorflow_source_dir}/tensorflow/python/ops/hidden_ops.txt 1 > ${GENERATE_PYTHON_OP_LIB_DESTINATION}
+ COMMAND ${tf_python_op_lib_name}_gen_python @${tensorflow_source_dir}/tensorflow/python/ops/hidden_ops.txt ${require_shape_fn} > ${GENERATE_PYTHON_OP_LIB_DESTINATION}
DEPENDS ${tf_python_op_lib_name}_gen_python
)
@@ -496,6 +512,8 @@ GENERATE_PYTHON_OP_LIB("contrib_factorization_factorization_ops"
DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/contrib/factorization/python/ops/gen_factorization_ops.py)
GENERATE_PYTHON_OP_LIB("contrib_framework_variable_ops"
DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/contrib/framework/python/ops/gen_variable_ops.py)
+GENERATE_PYTHON_OP_LIB("contrib_tensor_forest_ops"
+ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/contrib/tensor_forest/python/ops/gen_tensor_forest_ops.py)
add_custom_target(tf_python_ops SOURCES ${tf_python_ops_generated_files} ${PYTHON_PROTO_GENFILES})
add_dependencies(tf_python_ops tf_python_op_gen_main)
diff --git a/tensorflow/contrib/cmake/tf_tests.cmake b/tensorflow/contrib/cmake/tf_tests.cmake
index 8608d3ff8f..e073148797 100644
--- a/tensorflow/contrib/cmake/tf_tests.cmake
+++ b/tensorflow/contrib/cmake/tf_tests.cmake
@@ -126,8 +126,9 @@ if (tensorflow_BUILD_PYTHON_TESTS)
set(tf_test_src_py_exclude
# generally not working
"${tensorflow_source_dir}/tensorflow/python/kernel_tests/__init__.py"
- "${tensorflow_source_dir}/tensorflow/python/kernel_tests/benchmark_test.py"
+ "${tensorflow_source_dir}/tensorflow/python/kernel_tests/benchmark_test.py"
"${tensorflow_source_dir}/tensorflow/python/kernel_tests/resource_variable_ops_test.py"
+ "${tensorflow_source_dir}/tensorflow/python/saved_model/saved_model_test.py"
)
if (WIN32)
set(tf_test_src_py_exclude
@@ -144,7 +145,6 @@ if (tensorflow_BUILD_PYTHON_TESTS)
"${tensorflow_source_dir}/tensorflow/python/kernel_tests/string_to_number_op_test.py"
"${tensorflow_source_dir}/tensorflow/python/kernel_tests/clip_ops_test.py"
# misc
- "${tensorflow_source_dir}/tensorflow/python/kernel_tests/cwise_ops_test.py"
"${tensorflow_source_dir}/tensorflow/python/kernel_tests/variable_scope_test.py"
"${tensorflow_source_dir}/tensorflow/python/kernel_tests/reshape_op_test.py"
"${tensorflow_source_dir}/tensorflow/tensorboard/backend/server_test.py"
diff --git a/tensorflow/contrib/layers/python/layers/layers_test.py b/tensorflow/contrib/layers/python/layers/layers_test.py
index 9b8d157bda..d7ff9492e5 100644
--- a/tensorflow/contrib/layers/python/layers/layers_test.py
+++ b/tensorflow/contrib/layers/python/layers/layers_test.py
@@ -532,7 +532,7 @@ class ConvolutionTest(tf.test.TestCase):
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
num_filters = 32
input_size = [5, 3, 9, 11]
expected_size = [None, num_filters, None, None]
@@ -624,7 +624,7 @@ class Convolution2dTransposeTests(tf.test.TestCase):
def testOutputSizeWithStrideOneSamePaddingNCHW(self):
# `NCHW` data fomat is only supported for `GPU` device.
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 10, 12]
@@ -1782,7 +1782,7 @@ class BatchNormTest(tf.test.TestCase):
self._testNoneUpdatesCollections(False, data_format='NCHW')
def testNoneUpdatesCollectionsFusedNCHW(self):
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._testNoneUpdatesCollections(True, data_format='NCHW')
def testNoneUpdatesCollectionsFusedNHWC(self):
@@ -1849,7 +1849,7 @@ class BatchNormTest(tf.test.TestCase):
self._testDelayedUpdateMovingVars(False, data_format='NCHW')
def testDelayedUpdateMovingVarsFusedNCHW(self):
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._testDelayedUpdateMovingVars(True, data_format='NCHW')
def testDelayedUpdateMovingVarsFusedNHWC(self):
@@ -2037,7 +2037,7 @@ class BatchNormTest(tf.test.TestCase):
self._testIsTrainingVariable(False, data_format='NCHW')
def testIsTrainingVariableFusedNCHW(self):
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._testIsTrainingVariable(True, data_format='NCHW')
def testIsTrainingVariableFusedNHWC(self):
@@ -2176,7 +2176,7 @@ class BatchNormTest(tf.test.TestCase):
self._testNoneUpdatesCollectionIsTrainingVariable(False, data_format='NCHW')
def testNoneUpdatesCollectionIsTrainingVariableFusedNCHW(self):
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._testNoneUpdatesCollectionIsTrainingVariable(
True, data_format='NCHW')
@@ -2255,7 +2255,7 @@ class BatchNormTest(tf.test.TestCase):
self._testTrainMovingVars(False, data_format='NCHW')
def testTrainMovingVarsFusedNCHW(self):
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._testTrainMovingVars(True, data_format='NCHW')
def testTrainMovingVarsFusedNHWC(self):
diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
index 3405005327..c339a89faa 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
@@ -90,7 +90,6 @@ def boston_eval_fn():
return tf.concat_v2([features, features], 0), tf.concat_v2([labels, labels],
0)
-
def extract(data, key):
if isinstance(data, dict):
assert key in data
diff --git a/tensorflow/contrib/learn/python/learn/export_strategy.py b/tensorflow/contrib/learn/python/learn/export_strategy.py
index ea41e60f43..44ec41afa2 100644
--- a/tensorflow/contrib/learn/python/learn/export_strategy.py
+++ b/tensorflow/contrib/learn/python/learn/export_strategy.py
@@ -13,7 +13,8 @@
# limitations under the License.
# ==============================================================================
-"""Experiment class collecting information needed for a single training run."""
+"""ExportStrategy class that provides strategies to export a model so it can
+later be used for TensorFlow Serving."""
from __future__ import absolute_import
from __future__ import division
diff --git a/tensorflow/contrib/learn/python/learn/utils/checkpoints.py b/tensorflow/contrib/learn/python/learn/utils/checkpoints.py
deleted file mode 100644
index b0908173d6..0000000000
--- a/tensorflow/contrib/learn/python/learn/utils/checkpoints.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Tools to work with checkpoints."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.framework import deprecated
-from tensorflow.contrib.framework.python.framework import checkpoint_utils
-
-
-@deprecated('2016-08-22', 'Please use tf.contrib.framework.load_checkpoint '
- 'instead')
-def load_checkpoint(filepattern):
- """See `tf.contrib.framework.load_checkpoint`."""
- return checkpoint_utils.load_checkpoint(filepattern)
-
-
-@deprecated('2016-08-22', 'Please use tf.contrib.framework.load_variable '
- 'instead')
-def load_variable(checkpoint_dir, name):
- """See `tf.contrib.framework.load_variable`."""
- return checkpoint_utils.load_variable(checkpoint_dir, name)
-
-
-@deprecated('2016-08-22', 'Please use tf.contrib.framework.list_variables '
- 'instead')
-def list_variables(checkpoint_dir):
- """See `tf.contrib.framework.list_variables`."""
- return checkpoint_utils.list_variables(checkpoint_dir)
-
-
-@deprecated('2016-08-22', 'Please use tf.contrib.framework.init_from_checkpoint'
- ' instead')
-def init_from_checkpoint(checkpoint_dir, assignment_map):
- """See `tf.contrib.framework.init_from_checkpoint`."""
- checkpoint_utils.init_from_checkpoint(checkpoint_dir, assignment_map)
diff --git a/tensorflow/contrib/makefile/tf_op_files.txt b/tensorflow/contrib/makefile/tf_op_files.txt
index 4886f23f6b..e6da887bb5 100644
--- a/tensorflow/contrib/makefile/tf_op_files.txt
+++ b/tensorflow/contrib/makefile/tf_op_files.txt
@@ -106,6 +106,7 @@ tensorflow/core/kernels/deep_conv2d.cc
tensorflow/core/kernels/xsmm_conv2d.cc
tensorflow/core/kernels/cwise_ops_common.cc
tensorflow/core/kernels/cwise_op_tanh.cc
+tensorflow/core/kernels/cwise_op_pow.cc
tensorflow/core/kernels/cwise_op_sub.cc
tensorflow/core/kernels/cwise_op_squared_difference.cc
tensorflow/core/kernels/cwise_op_square.cc
diff --git a/tensorflow/contrib/metrics/BUILD b/tensorflow/contrib/metrics/BUILD
index a84b671998..bfeeee530b 100644
--- a/tensorflow/contrib/metrics/BUILD
+++ b/tensorflow/contrib/metrics/BUILD
@@ -8,8 +8,6 @@ exports_files(["LICENSE"])
package(default_visibility = ["//tensorflow:__subpackages__"])
-load("//tensorflow:tensorflow.bzl", "tf_py_test")
-
py_library(
name = "metrics_py",
srcs = [
diff --git a/tensorflow/contrib/slim/README.md b/tensorflow/contrib/slim/README.md
index 7c89dab7d1..1c192076ce 100644
--- a/tensorflow/contrib/slim/README.md
+++ b/tensorflow/contrib/slim/README.md
@@ -99,7 +99,7 @@ normal distribution, regularize it with an `l2_loss` and place it on the `CPU`,
one need only declare the following:
```python
-weights = variables.variable('weights',
+weights = slim.variable('weights',
shape=[10, 10, 3 , 3],
initializer=tf.truncated_normal_initializer(stddev=0.1),
regularizer=slim.l2_regularizer(0.05),
@@ -361,11 +361,11 @@ One can also nest `arg_scopes` and use multiple operations in the same scope.
For example:
```python
- with slim.arg_scope([slim.conv2d, slim.fully_connected],
+with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
- with arg_scope([slim.conv2d], stride=1, padding='SAME'):
+ with slim.arg_scope([slim.conv2d], stride=1, padding='SAME'):
net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = slim.conv2d(net, 256, [5, 5],
weights_initializer=tf.truncated_normal_initializer(stddev=0.03),
@@ -450,7 +450,7 @@ images, labels = ...
predictions = vgg.vgg16(images)
# Define the loss functions and get the total loss.
-loss = losses.softmax_cross_entropy(predictions, labels)
+loss = slim.losses.softmax_cross_entropy(predictions, labels)
```
In this example, we start by creating the model (using TF-Slim's VGG
@@ -477,7 +477,7 @@ total_loss = slim.losses.get_total_loss(add_regularization_losses=False)
In this example, we have two losses which we add by calling
`slim.losses.softmax_cross_entropy` and `slim.losses.sum_of_squares`. We can
obtain the total loss by adding them together (`total_loss`) or by calling
-`slim.losses.GetTotalLoss()`. How did this work?
+`slim.losses.get_total_loss()`. How did this work?
When you create a loss function via TF-Slim, TF-Slim adds the loss to a
special TensorFlow collection of loss functions. This enables you to either
manage the total loss manually, or allow TF-Slim to manage them for you.
@@ -566,11 +566,10 @@ vgg = tf.contrib.slim.nets.vgg
...
train_log_dir = ...
-if not gfile.Exists(train_log_dir):
- gfile.MakeDirs(train_log_dir)
+if not tf.gfile.Exists(train_log_dir):
+ tf.gfile.MakeDirs(train_log_dir)
-g = tf.Graph()
-with g.as_default():
+with tf.Graph().as_default():
# Set up the data loading:
images, labels = ...
@@ -581,7 +580,7 @@ with g.as_default():
slim.losses.softmax_cross_entropy(predictions, labels)
total_loss = slim.losses.get_total_loss()
- tf.summary.scalar('losses/total loss', total_loss)
+ tf.summary.scalar('losses/total_loss', total_loss)
# Specify the optimization scheme:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=.001)
@@ -638,8 +637,8 @@ helper functions to select a subset of variables to restore:
```python
# Create some variables.
-v1 = slim.variables.variable(name="v1", ...)
-v2 = slim.variables.variable(name="nested/v2", ...)
+v1 = slim.variable(name="v1", ...)
+v2 = slim.variable(name="nested/v2", ...)
...
# Get list of variables to restore (which contains only 'v2'). These are all
@@ -748,7 +747,7 @@ We define a metric to be a performance measure that is not a loss function
(losses are directly optimized during training), but which we are still
interested in for the purpose of evaluating our model.
For example, we might want to minimize log loss, but our metrics of interest
-might be F1 score, or Intersection Over Union score (which are not
+might be F1 score (test accuracy), or Intersection Over Union score (which are not
differentiable, and therefore cannot be used as losses).
TF-Slim provides a set of metric operations that makes evaluating models
@@ -775,8 +774,8 @@ set (upon which the loss is computed), we'll assume we're using test data:
images, labels = LoadTestData(...)
predictions = MyModel(images)
-mae_value_op, mae_update_op = slim.metrics.mean_absolute_error(predictions, labels)
-mre_value_op, mre_update_op = slim.metrics.mean_relative_error(predictions, labels, labels)
+mae_value_op, mae_update_op = slim.metrics.streaming_mean_absolute_error(predictions, labels)
+mre_value_op, mre_update_op = slim.metrics.streaming_mean_relative_error(predictions, labels, labels)
pl_value_op, pl_update_op = slim.metrics.percentage_less(mean_relative_errors, 0.3)
```
@@ -793,13 +792,13 @@ this, TF-Slim provides two convenience functions:
# Aggregates the value and update ops in two lists:
value_ops, update_ops = slim.metrics.aggregate_metrics(
- slim.metrics.mean_absolute_error(predictions, labels),
- slim.metrics.mean_squared_error(predictions, labels))
+ slim.metrics.streaming_mean_absolute_error(predictions, labels),
+ slim.metrics.streaming_mean_squared_error(predictions, labels))
# Aggregates the value and update ops in two dictionaries:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
- "eval/mean_absolute_error": slim.metrics.mean_absolute_error(predictions, labels),
- "eval/mean_squared_error": slim.metrics.mean_squared_error(predictions, labels),
+ "eval/mean_absolute_error": slim.metrics.streaming_mean_absolute_error(predictions, labels),
+ "eval/mean_squared_error": slim.metrics.streaming_mean_squared_error(predictions, labels),
})
```
@@ -823,8 +822,8 @@ predictions = vgg.vgg_16(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
- "eval/mean_absolute_error": slim.metrics.mean_absolute_error(predictions, labels),
- "eval/mean_squared_error": slim.metrics.mean_squared_error(predictions, labels),
+ "eval/mean_absolute_error": slim.metrics.streaming_mean_absolute_error(predictions, labels),
+ "eval/mean_squared_error": slim.metrics.streaming_mean_squared_error(predictions, labels),
})
# Evaluate the model using 1000 batches of data:
diff --git a/tensorflow/contrib/slim/python/slim/nets/inception_v3_test.py b/tensorflow/contrib/slim/python/slim/nets/inception_v3_test.py
index 3321f967ca..5c8ae1401f 100644
--- a/tensorflow/contrib/slim/python/slim/nets/inception_v3_test.py
+++ b/tensorflow/contrib/slim/python/slim/nets/inception_v3_test.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Tests for nets.inception_v1."""
+"""Tests for nets.inception_v3."""
from __future__ import absolute_import
from __future__ import division
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index 4d6f8cf857..011c17fd1a 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -2352,6 +2352,7 @@ filegroup(
# GIF data with optimization
"lib/gif/testdata/optimized.gif",
],
+ visibility = ["//visibility:public"],
)
filegroup(
diff --git a/tensorflow/core/common_runtime/sycl/sycl_allocator.cc b/tensorflow/core/common_runtime/sycl/sycl_allocator.cc
index 699b54f345..0d238276f4 100644
--- a/tensorflow/core/common_runtime/sycl/sycl_allocator.cc
+++ b/tensorflow/core/common_runtime/sycl/sycl_allocator.cc
@@ -36,8 +36,10 @@ void SYCLAllocator::DeallocateRaw(void *ptr) {
}
void SYCLAllocator::EnterLameDuckMode() {
- device_->deallocate_all();
- device_ = nullptr;
+ if (device_) {
+ device_->deallocate_all();
+ device_ = nullptr;
+ }
}
} // namespace tensorflow
diff --git a/tensorflow/core/common_runtime/sycl/sycl_allocator.h b/tensorflow/core/common_runtime/sycl/sycl_allocator.h
index 8558b6c873..c896f7f603 100644
--- a/tensorflow/core/common_runtime/sycl/sycl_allocator.h
+++ b/tensorflow/core/common_runtime/sycl/sycl_allocator.h
@@ -22,7 +22,6 @@ limitations under the License.
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/types.h"
-#define EIGEN_USE_SYCL
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
namespace tensorflow {
diff --git a/tensorflow/core/common_runtime/sycl/sycl_device.cc b/tensorflow/core/common_runtime/sycl/sycl_device.cc
index e5fe85bcf5..2936b4c5c8 100644
--- a/tensorflow/core/common_runtime/sycl/sycl_device.cc
+++ b/tensorflow/core/common_runtime/sycl/sycl_device.cc
@@ -23,11 +23,38 @@ limitations under the License.
namespace tensorflow {
+static std::unordered_set<SYCLDevice*> live_devices;
+
+void ShutdownSycl() {
+ for (auto device : live_devices) {
+ device->EnterLameDuckMode();
+ }
+ live_devices.clear();
+}
+bool first_time = true;
+
+void SYCLDevice::RegisterDevice() {
+ if (first_time) {
+ first_time = false;
+ atexit(ShutdownSycl);
+ }
+ live_devices.insert(this);
+}
+
SYCLDevice::~SYCLDevice() {
device_context_->Unref();
sycl_allocator_->EnterLameDuckMode();
delete sycl_device_;
delete sycl_queue_;
+ live_devices.erase(this);
+}
+
+void SYCLDevice::EnterLameDuckMode() {
+ sycl_allocator_->EnterLameDuckMode();
+ delete sycl_device_;
+ sycl_device_ = nullptr;
+ delete sycl_queue_;
+ sycl_queue_ = nullptr;
}
void SYCLDevice::Compute(OpKernel *op_kernel, OpKernelContext *context) {
@@ -63,8 +90,8 @@ Status SYCLDevice::MakeTensorFromProto(const TensorProto &tensor_proto,
Tensor copy(GetAllocator(alloc_attrs), parsed.dtype(), parsed.shape());
device_context_->CopyCPUTensorToDevice(&parsed, this, &copy,
[&status](const Status &s) {
- status = s;
- });
+ status = s;
+ });
*tensor = copy;
}
return status;
diff --git a/tensorflow/core/common_runtime/sycl/sycl_device.h b/tensorflow/core/common_runtime/sycl/sycl_device.h
index 2759053df5..db208984f6 100644
--- a/tensorflow/core/common_runtime/sycl/sycl_device.h
+++ b/tensorflow/core/common_runtime/sycl/sycl_device.h
@@ -20,8 +20,6 @@ limitations under the License.
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_SYCL_SYCL_DEVICE_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_SYCL_SYCL_DEVICE_H_
-#define EIGEN_USE_SYCL
-
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/common_runtime/sycl/sycl_allocator.h"
#include "tensorflow/core/common_runtime/sycl/sycl_device_context.h"
@@ -45,10 +43,13 @@ public:
sycl_allocator_(new SYCLAllocator(sycl_queue_)),
device_context_(new SYCLDeviceContext()) {
set_eigen_sycl_device(sycl_device_);
+ RegisterDevice();
}
~SYCLDevice() override;
+ void EnterLameDuckMode();
+
void Compute(OpKernel *op_kernel, OpKernelContext *context) override;
Allocator *GetAllocator(AllocatorAttributes attr) override;
Status MakeTensorFromProto(const TensorProto &tensor_proto,
@@ -65,6 +66,8 @@ public:
}
private:
+ void RegisterDevice();
+
Allocator *cpu_allocator_; // owned
Eigen::QueueInterface* sycl_queue_; // owned
Eigen::SyclDevice* sycl_device_; // owned
diff --git a/tensorflow/core/common_runtime/sycl/sycl_device_context.cc b/tensorflow/core/common_runtime/sycl/sycl_device_context.cc
index b487d24c20..b49420b1b5 100644
--- a/tensorflow/core/common_runtime/sycl/sycl_device_context.cc
+++ b/tensorflow/core/common_runtime/sycl/sycl_device_context.cc
@@ -15,7 +15,6 @@ limitations under the License.
#if TENSORFLOW_USE_SYCL
-#define EIGEN_USE_SYCL
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/common_runtime/sycl/sycl_device_context.h"
diff --git a/tensorflow/core/distributed_runtime/master_session.cc b/tensorflow/core/distributed_runtime/master_session.cc
index 1918eae875..0baff5c190 100644
--- a/tensorflow/core/distributed_runtime/master_session.cc
+++ b/tensorflow/core/distributed_runtime/master_session.cc
@@ -438,7 +438,7 @@ static bool CopyIfNeeded(TensorProto* in, TensorProto* out) {
} else {
Tensor t(in->dtype());
if (!t.FromProto(cpu_allocator(), *in)) return false;
- t.AsProtoField(out);
+ t.AsProtoTensorContent(out);
}
return true;
}
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
index 3bee20623b..13aeb9f9c7 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
@@ -377,7 +377,7 @@ class GrpcWorkerService : public AsyncServiceInterface {
recv->set_key(key);
// TODO(zhifengc): Deal with gpu -> cpu copy.
TensorProto* proto = recv->mutable_val();
- val.AsProtoField(proto);
+ val.AsProtoTensorContent(proto);
}
}
delete collector;
diff --git a/tensorflow/core/framework/tensor.proto b/tensorflow/core/framework/tensor.proto
index 86c5b88153..5d383bcc66 100644
--- a/tensorflow/core/framework/tensor.proto
+++ b/tensorflow/core/framework/tensor.proto
@@ -28,8 +28,11 @@ message TensorProto {
// to represent a constant Tensor with a single value.
int32 version_number = 3;
- // Serialized content from Tensor::AsProtoTensorContent(). This representation
- // can be used for all tensor types.
+ // Serialized raw tensor content from either Tensor::AsProtoTensorContent or
+ // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation
+ // can be used for all tensor types. The purpose of this representation is to
+ // reduce serialization overhead during RPC calls by avoiding serialization of
+ // many repeated small items.
bytes tensor_content = 4;
// Type specific representations that make it easy to create tensor protos in
diff --git a/tensorflow/core/kernels/argmax_op.cc b/tensorflow/core/kernels/argmax_op.cc
index d78f6a7ff1..071d0e684a 100644
--- a/tensorflow/core/kernels/argmax_op.cc
+++ b/tensorflow/core/kernels/argmax_op.cc
@@ -57,19 +57,21 @@ class ArgOp : public OpKernel {
const int32 dim = internal::SubtleMustCopy(dimension.scalar<int32>()());
const int input_dims = input.dims();
- OP_REQUIRES(context, dim >= 0, errors::InvalidArgument("dim must be >= 0"));
- OP_REQUIRES(context, dim < input_dims,
- errors::InvalidArgument("Minimum tensor rank: ", dim + 1,
- " but got: ", input_dims));
+ int axis = dim < 0 ? dim + input_dims : dim;
+
+ OP_REQUIRES(context, axis >= 0 && axis < input_dims,
+ errors::InvalidArgument(
+ "Expected dimension in the range [", -input_dims, ", ",
+ input_dims, "), but got ", dim));
OP_REQUIRES(
- context, input.dim_size(dim) > 0,
+ context, input.dim_size(axis) > 0,
errors::InvalidArgument("Reduction axis ", dim, " is empty in shape ",
input.shape().DebugString()));
TensorShape output_shape;
const TensorShape& input_shape = input.shape();
for (int d = 0; d < input_dims - 1; ++d) {
- output_shape.AddDim(input_shape.dim_size((d < dim) ? d : d + 1));
+ output_shape.AddDim(input_shape.dim_size((d < axis) ? d : d + 1));
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
@@ -77,7 +79,7 @@ class ArgOp : public OpKernel {
#define HANDLE_DIM(NDIM) \
case NDIM: \
ArgFunctor::Reduce##NDIM(context->eigen_device<Device>(), \
- input.tensor<T, NDIM>(), dim, \
+ input.tensor<T, NDIM>(), axis, \
output->tensor<int64, NDIM - 1>()); \
break;
diff --git a/tensorflow/core/kernels/constant_op.cc b/tensorflow/core/kernels/constant_op.cc
index 362abd4a1f..1ae290ec4b 100644
--- a/tensorflow/core/kernels/constant_op.cc
+++ b/tensorflow/core/kernels/constant_op.cc
@@ -16,9 +16,6 @@ limitations under the License.
// See docs in ../ops/array_ops.cc.
#define EIGEN_USE_THREADS
-#if TENSORFLOW_USE_SYCL
-#define EIGEN_USE_SYCL
-#endif
#include "tensorflow/core/kernels/constant_op.h"
@@ -116,6 +113,9 @@ REGISTER_KERNEL_BUILDER(Name("Const")
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif  // TENSORFLOW_USE_SYCL
namespace functor {
@@ -128,6 +128,17 @@ struct FillFunctor<CPUDevice, T> {
}
};
+#ifdef TENSORFLOW_USE_SYCL
+// Partial specialization of FillFunctor<Device=SYCLDevice, T>.
+template <typename T>
+struct FillFunctor<SYCLDevice, T> {
+ void operator()(const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstScalar in) {
+ To32Bit(out).device(d) = To32Bit(out).constant(in());
+ }
+};
+#endif // TENSORFLOW_USE_SYCL
+
} // end namespace functor
template <typename Device, typename T>
@@ -172,6 +183,17 @@ TF_CALL_ALL_TYPES(REGISTER_CPU_KERNEL);
REGISTER_KERNEL(CPU, quint8);
#undef REGISTER_CPU_KERNEL
+#ifdef TENSORFLOW_USE_SYCL
+REGISTER_KERNEL(SYCL, float)
+REGISTER_KERNEL_BUILDER(Name("Fill")
+ .Device(DEVICE_SYCL)
+ .TypeConstraint<int32>("T")
+ .HostMemory("dims")
+ .HostMemory("value")
+ .HostMemory("output"),
+ FillOp<CPUDevice, int32>);
+#endif // TENSORFLOW_USE_SYCL
+
#if GOOGLE_CUDA
REGISTER_KERNEL(GPU, Eigen::half);
REGISTER_KERNEL(GPU, float);
@@ -220,6 +242,15 @@ class ZerosLikeOp : public OpKernel {
TF_CALL_POD_STRING_TYPES(REGISTER_CPU);
#undef REGISTER_CPU
+#ifdef TENSORFLOW_USE_SYCL
+REGISTER_KERNEL(float, SYCL);
+REGISTER_KERNEL_BUILDER(Name("ZerosLike")
+ .Device(DEVICE_SYCL)
+ .TypeConstraint<int32>("T")
+ .HostMemory("y"),
+ ZerosLikeOp<CPUDevice, int32>);
+#endif // TENSORFLOW_USE_SYCL
+
#if GOOGLE_CUDA
REGISTER_KERNEL(bool, GPU);
REGISTER_KERNEL(Eigen::half, GPU);
diff --git a/tensorflow/core/kernels/cwise_op_digamma.cc b/tensorflow/core/kernels/cwise_op_digamma.cc
index 8b7b5d4b6a..1536478d21 100644
--- a/tensorflow/core/kernels/cwise_op_digamma.cc
+++ b/tensorflow/core/kernels/cwise_op_digamma.cc
@@ -16,12 +16,10 @@ limitations under the License.
#include "tensorflow/core/kernels/cwise_ops_common.h"
namespace tensorflow {
-#if EIGEN_HAS_C99_MATH
REGISTER3(UnaryOp, CPU, "Digamma", functor::digamma, float, Eigen::half,
double);
#if GOOGLE_CUDA
REGISTER3(UnaryOp, GPU, "Digamma", functor::digamma, float, Eigen::half,
double);
#endif // GOOGLE_CUDA
-#endif // EIGEN_HAS_C99_MATH
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/cwise_op_div.cc b/tensorflow/core/kernels/cwise_op_div.cc
index ef8c477e48..74d8faedb5 100644
--- a/tensorflow/core/kernels/cwise_op_div.cc
+++ b/tensorflow/core/kernels/cwise_op_div.cc
@@ -37,6 +37,7 @@ REGISTER5(BinaryOp, CPU, "RealDiv", functor::div, float, Eigen::half, double,
.TypeConstraint<TYPE>("T"), \
BinaryOp<SYCLDevice, functor::div<TYPE>>);
REGISTER_SYCL_KERNEL(float)
+REGISTER_SYCL_KERNEL(int32)
#undef REGISTER_SYCL_KERNEL
#endif // TENSORFLOW_USE_SYCL
#if GOOGLE_CUDA
diff --git a/tensorflow/core/kernels/cwise_op_erf.cc b/tensorflow/core/kernels/cwise_op_erf.cc
index 5095285fbc..524ca13e67 100644
--- a/tensorflow/core/kernels/cwise_op_erf.cc
+++ b/tensorflow/core/kernels/cwise_op_erf.cc
@@ -16,10 +16,8 @@ limitations under the License.
#include "tensorflow/core/kernels/cwise_ops_common.h"
namespace tensorflow {
-#if EIGEN_HAS_C99_MATH
REGISTER3(UnaryOp, CPU, "Erf", functor::erf, float, Eigen::half, double);
#if GOOGLE_CUDA
REGISTER3(UnaryOp, GPU, "Erf", functor::erf, float, Eigen::half, double);
#endif // GOOGLE_CUDA
-#endif // EIGEN_HAS_C99_MATH
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/cwise_op_erfc.cc b/tensorflow/core/kernels/cwise_op_erfc.cc
index ffc401352b..f3256dc1f5 100644
--- a/tensorflow/core/kernels/cwise_op_erfc.cc
+++ b/tensorflow/core/kernels/cwise_op_erfc.cc
@@ -16,10 +16,8 @@ limitations under the License.
#include "tensorflow/core/kernels/cwise_ops_common.h"
namespace tensorflow {
-#if EIGEN_HAS_C99_MATH
REGISTER3(UnaryOp, CPU, "Erfc", functor::erfc, float, Eigen::half, double);
#if GOOGLE_CUDA
REGISTER3(UnaryOp, GPU, "Erfc", functor::erfc, float, Eigen::half, double);
#endif // GOOGLE_CUDA
-#endif // EIGEN_HAS_C99_MATH
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/cwise_op_igammas.cc b/tensorflow/core/kernels/cwise_op_igammas.cc
index b1ea921ffd..a1d7f4dad4 100644
--- a/tensorflow/core/kernels/cwise_op_igammas.cc
+++ b/tensorflow/core/kernels/cwise_op_igammas.cc
@@ -16,8 +16,6 @@ limitations under the License.
#include "tensorflow/core/kernels/cwise_ops_common.h"
namespace tensorflow {
-#if EIGEN_HAS_C99_MATH
REGISTER2(BinaryOp, CPU, "Igamma", functor::igamma, float, double);
REGISTER2(BinaryOp, CPU, "Igammac", functor::igammac, float, double);
-#endif // EIGEN_HAS_C99_MATH
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/cwise_op_lgamma.cc b/tensorflow/core/kernels/cwise_op_lgamma.cc
index 5c19a942ac..737c10e723 100644
--- a/tensorflow/core/kernels/cwise_op_lgamma.cc
+++ b/tensorflow/core/kernels/cwise_op_lgamma.cc
@@ -16,11 +16,9 @@ limitations under the License.
#include "tensorflow/core/kernels/cwise_ops_common.h"
namespace tensorflow {
-#if EIGEN_HAS_C99_MATH
REGISTER3(UnaryOp, CPU, "Lgamma", functor::lgamma, float, Eigen::half, double);
#if GOOGLE_CUDA
REGISTER3(UnaryOp, GPU, "Lgamma", functor::lgamma, float, Eigen::half, double);
#endif
-#endif // EIGEN_HAS_C99_MATH
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/cwise_op_zeta.cc b/tensorflow/core/kernels/cwise_op_zeta.cc
index 6ccb54b680..2c5538534c 100644
--- a/tensorflow/core/kernels/cwise_op_zeta.cc
+++ b/tensorflow/core/kernels/cwise_op_zeta.cc
@@ -17,7 +17,5 @@ limitations under the License.
namespace tensorflow {
REGISTER2(BinaryOp, CPU, "Zeta", functor::zeta, float, double);
-#if EIGEN_HAS_C99_MATH
REGISTER2(BinaryOp, CPU, "Polygamma", functor::polygamma, float, double);
-#endif // EIGEN_HAS_C99_MATH
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/cwise_ops_sycl_common.h b/tensorflow/core/kernels/cwise_ops_sycl_common.h
index 3fcf0759d4..a0decbce87 100644
--- a/tensorflow/core/kernels/cwise_ops_sycl_common.h
+++ b/tensorflow/core/kernels/cwise_ops_sycl_common.h
@@ -20,7 +20,6 @@ limitations under the License.
#ifndef TENSORFLOW_CORE_KERNELS_CWISE_OPS_SYCL_COMMON_H_
#define TENSORFLOW_CORE_KERNELS_CWISE_OPS_SYCL_COMMON_H_
-#define EIGEN_USE_SYCL
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/register_types.h"
diff --git a/tensorflow/core/kernels/dense_update_ops.cc b/tensorflow/core/kernels/dense_update_ops.cc
index 5216a4b5d0..b32d99e45c 100644
--- a/tensorflow/core/kernels/dense_update_ops.cc
+++ b/tensorflow/core/kernels/dense_update_ops.cc
@@ -14,9 +14,6 @@ limitations under the License.
==============================================================================*/
#define EIGEN_USE_THREADS
-#if TENSORFLOW_USE_SYCL
-#define EIGEN_USE_SYCL
-#endif
#include "tensorflow/core/kernels/dense_update_ops.h"
#include "tensorflow/core/framework/op_kernel.h"
diff --git a/tensorflow/core/kernels/dynamic_stitch_op.cc b/tensorflow/core/kernels/dynamic_stitch_op.cc
index ae883ea535..bff1914682 100644
--- a/tensorflow/core/kernels/dynamic_stitch_op.cc
+++ b/tensorflow/core/kernels/dynamic_stitch_op.cc
@@ -165,6 +165,20 @@ class DynamicStitchOp : public OpKernel {
TF_CALL_POD_STRING_TYPES(REGISTER_DYNAMIC_STITCH);
#undef REGISTER_DYNAMIC_STITCH
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_DYNAMIC_STITCH_SYCL(type) \
+ REGISTER_KERNEL_BUILDER(Name("DynamicStitch") \
+ .Device(DEVICE_SYCL) \
+ .TypeConstraint<type>("T") \
+ .HostMemory("indices") \
+ .HostMemory("data") \
+ .HostMemory("merged"), \
+ DynamicStitchOp<type>)
+
+TF_CALL_ALL_TYPES(REGISTER_DYNAMIC_STITCH_SYCL);
+#undef REGISTER_DYNAMIC_STITCH_SYCL
+#endif // TENSORFLOW_USE_SYCL
+
#if GOOGLE_CUDA
#define REGISTER_DYNAMIC_STITCH_GPU(type) \
REGISTER_KERNEL_BUILDER(Name("DynamicStitch") \
diff --git a/tensorflow/core/kernels/fill_functor.cc b/tensorflow/core/kernels/fill_functor.cc
index 0d10a080db..94cc91bba3 100644
--- a/tensorflow/core/kernels/fill_functor.cc
+++ b/tensorflow/core/kernels/fill_functor.cc
@@ -52,5 +52,18 @@ DEFINE_SETZERO_CPU(complex64);
DEFINE_SETZERO_CPU(complex128);
#undef DEFINE_SETZERO_CPU
+#ifdef TENSORFLOW_USE_SYCL
+template <typename T>
+void SetZeroFunctor<Eigen::SyclDevice, T>::operator()(
+ const Eigen::SyclDevice& d, typename TTypes<T>::Flat out) {
+ out.device(d) = out.constant(T(0));
+}
+
+#define DEFINE_SETZERO_SYCL(T) \
+ template struct SetZeroFunctor<Eigen::SyclDevice, T>;
+DEFINE_SETZERO_SYCL(float);
+#undef DEFINE_SETZERO_SYCL
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace functor
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/fill_functor.h b/tensorflow/core/kernels/fill_functor.h
index 1032570fca..d6814cb477 100644
--- a/tensorflow/core/kernels/fill_functor.h
+++ b/tensorflow/core/kernels/fill_functor.h
@@ -45,6 +45,15 @@ struct SetZeroFunctor<Eigen::ThreadPoolDevice, T> {
typename TTypes<T>::Flat out);
};
+#ifdef TENSORFLOW_USE_SYCL
+// Partial specialization of SetZeroFunctor<Device=Eigen::SyclDevice, T>.
+template <typename T>
+struct SetZeroFunctor<Eigen::SyclDevice, T> {
+ void operator()(const Eigen::SyclDevice& d,
+ typename TTypes<T>::Flat out);
+};
+#endif // TENSORFLOW_USE_SYCL
+
template <>
struct SetZeroFunctor<Eigen::ThreadPoolDevice, string> {
void operator()(const Eigen::ThreadPoolDevice& d,
diff --git a/tensorflow/core/kernels/function_ops.cc b/tensorflow/core/kernels/function_ops.cc
index 504d9eeab4..b1e6c90ff2 100644
--- a/tensorflow/core/kernels/function_ops.cc
+++ b/tensorflow/core/kernels/function_ops.cc
@@ -93,7 +93,7 @@ REGISTER_KERNEL_BUILDER(Name("_Retval").Device(DEVICE_CPU), RetvalOp);
Name("_Arg").Device(DEVICE_SYCL).TypeConstraint<type>("T"), ArgOp);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER)
TF_CALL_bool(REGISTER) REGISTER_KERNEL_BUILDER(Name("_Arg")
- .Device(DEVICE_GPU)
+ .Device(DEVICE_SYCL)
.HostMemory("output")
.TypeConstraint<int32>("T"),
ArgOp);
@@ -104,7 +104,7 @@ TF_CALL_bool(REGISTER) REGISTER_KERNEL_BUILDER(Name("_Arg")
RetvalOp);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER)
TF_CALL_bool(REGISTER) REGISTER_KERNEL_BUILDER(Name("_Retval")
- .Device(DEVICE_GPU)
+ .Device(DEVICE_SYCL)
.HostMemory("input")
.TypeConstraint<int32>("T"),
RetvalOp);
@@ -238,5 +238,9 @@ REGISTER_KERNEL_BUILDER(Name(kGradientOp).Device(DEVICE_CPU),
SymbolicGradientOp);
REGISTER_KERNEL_BUILDER(Name(kGradientOp).Device(DEVICE_GPU),
SymbolicGradientOp);
+#if TENSORFLOW_USE_SYCL
+REGISTER_KERNEL_BUILDER(Name(kGradientOp).Device(DEVICE_SYCL),
+ SymbolicGradientOp);
+#endif // TENSORFLOW_USE_SYCL
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/identity_op.cc b/tensorflow/core/kernels/identity_op.cc
index b482099c69..0e56a27c84 100644
--- a/tensorflow/core/kernels/identity_op.cc
+++ b/tensorflow/core/kernels/identity_op.cc
@@ -54,10 +54,29 @@ REGISTER_KERNEL_BUILDER(Name("RefIdentity").Device(DEVICE_CPU), IdentityOp);
IdentityOp)
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
-REGISTER_SYCL_KERNEL(bfloat16);
#undef REGISTER_SYCL_KERNEL
-#endif
+
+#define REGISTER_SYCL_HOST_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("Identity") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("input") \
+ .HostMemory("output") \
+ .TypeConstraint<type>("T"), \
+ IdentityOp); \
+ REGISTER_KERNEL_BUILDER(Name("RefIdentity") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("input") \
+ .HostMemory("output") \
+ .TypeConstraint<type>("T"), \
+ IdentityOp)
+
+REGISTER_SYCL_HOST_KERNEL(int32);
+REGISTER_SYCL_HOST_KERNEL(bool);
+
+#undef REGISTER_SYCL_HOST_KERNEL
+
+#endif // TENSORFLOW_USE_SYCL
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
diff --git a/tensorflow/core/kernels/reshape_op.cc b/tensorflow/core/kernels/reshape_op.cc
index cd6875eeb2..245b324a38 100644
--- a/tensorflow/core/kernels/reshape_op.cc
+++ b/tensorflow/core/kernels/reshape_op.cc
@@ -31,6 +31,27 @@ REGISTER_KERNEL_BUILDER(Name("Reshape").Device(DEVICE_CPU).HostMemory("shape"),
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("Reshape") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("shape") \
+ .TypeConstraint<type>("T") \
+ .TypeConstraint<int32>("Tshape"), \
+ ReshapeOp);
+TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
+#undef REGISTER_SYCL_KERNEL
+
+REGISTER_KERNEL_BUILDER(Name("Reshape")
+ .Device(DEVICE_SYCL)
+ .HostMemory("tensor")
+ .HostMemory("shape")
+ .HostMemory("output")
+ .TypeConstraint<int32>("T")
+ .TypeConstraint<int32>("Tshape"),
+ ReshapeOp);
+#endif // TENSORFLOW_USE_SYCL
+
#if GOOGLE_CUDA
// A special GPU kernel for int32.
// TODO(b/25387198): Also enable int32 in device memory. This kernel
diff --git a/tensorflow/core/kernels/sequence_ops.cc b/tensorflow/core/kernels/sequence_ops.cc
index e2577620d9..f8e6c22ab6 100644
--- a/tensorflow/core/kernels/sequence_ops.cc
+++ b/tensorflow/core/kernels/sequence_ops.cc
@@ -89,6 +89,12 @@ class RangeOp : public OpKernel {
#define REGISTER_CPU_KERNEL(T) REGISTER_KERNEL(DEVICE_CPU, T)
#define REGISTER_GPU_KERNEL(T) REGISTER_KERNEL(DEVICE_GPU, T)
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNEL(T) REGISTER_KERNEL(DEVICE_SYCL, T)
+TF_CALL_float(REGISTER_SYCL_KERNEL);
+TF_CALL_int32(REGISTER_SYCL_KERNEL);
+TF_CALL_int64(REGISTER_SYCL_KERNEL);
+#endif // TENSORFLOW_USE_SYCL
TF_CALL_float(REGISTER_CPU_KERNEL);
TF_CALL_double(REGISTER_CPU_KERNEL);
diff --git a/tensorflow/core/kernels/shape_ops.cc b/tensorflow/core/kernels/shape_ops.cc
index 165fc64a84..cce5c3adb0 100644
--- a/tensorflow/core/kernels/shape_ops.cc
+++ b/tensorflow/core/kernels/shape_ops.cc
@@ -63,6 +63,40 @@ REGISTER_KERNEL_BUILDER(Name("Shape")
.TypeConstraint<int64>("out_type"),
ShapeOp<int64>);
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("Shape") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("output") \
+ .TypeConstraint<int32>("out_type") \
+ .TypeConstraint<type>("T"), \
+ ShapeOp<int32>); \
+ REGISTER_KERNEL_BUILDER(Name("Shape") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("output") \
+ .TypeConstraint<int64>("out_type") \
+ .TypeConstraint<type>("T"), \
+ ShapeOp<int64>);
+
+TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
+#undef REGISTER_SYCL_KERNEL
+
+REGISTER_KERNEL_BUILDER(Name("Shape")
+ .Device(DEVICE_SYCL)
+ .HostMemory("input")
+ .HostMemory("output")
+ .TypeConstraint<int32>("T")
+ .TypeConstraint<int32>("out_type"),
+ ShapeOp<int32>);
+REGISTER_KERNEL_BUILDER(Name("Shape")
+ .Device(DEVICE_SYCL)
+ .HostMemory("input")
+ .HostMemory("output")
+ .TypeConstraint<int32>("T")
+ .TypeConstraint<int64>("out_type"),
+ ShapeOp<int64>);
+#endif // TENSORFLOW_USE_SYCL
+
#if GOOGLE_CUDA
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Shape") \
@@ -193,6 +227,34 @@ class RankOp : public OpKernel {
REGISTER_KERNEL_BUILDER(Name("Rank").Device(DEVICE_CPU).HostMemory("output"),
RankOp);
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("Rank") \
+ .Device(DEVICE_SYCL) \
+ .TypeConstraint<type>("T") \
+ .HostMemory("output"), \
+ RankOp);
+REGISTER_SYCL_KERNEL(float);
+#undef REGISTER_SYCL_KERNEL
+
+// A special GPU kernel for int32 and bool.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+REGISTER_KERNEL_BUILDER(Name("Rank")
+ .Device(DEVICE_SYCL)
+ .TypeConstraint<int32>("T")
+ .HostMemory("input")
+ .HostMemory("output"),
+ RankOp);
+
+REGISTER_KERNEL_BUILDER(Name("Rank")
+ .Device(DEVICE_SYCL)
+ .TypeConstraint<bool>("T")
+ .HostMemory("input")
+ .HostMemory("output"),
+ RankOp);
+#endif // TENSORFLOW_USE_SYCL
+
#if GOOGLE_CUDA
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Rank") \
diff --git a/tensorflow/core/kernels/training_ops.cc b/tensorflow/core/kernels/training_ops.cc
index 600c54bdd2..4c2caa435f 100644
--- a/tensorflow/core/kernels/training_ops.cc
+++ b/tensorflow/core/kernels/training_ops.cc
@@ -25,6 +25,7 @@ namespace tensorflow {
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
+using SYCLDevice = Eigen::SyclDevice;
namespace {
template <class T>
@@ -220,9 +221,9 @@ struct ApplyMomentum<CPUDevice, T> {
}
};
-template <typename T>
-struct ApplyAdam<CPUDevice, T> {
- void operator()(const CPUDevice& d, typename TTypes<T>::Flat var,
+template <typename Device, typename T>
+struct ApplyAdamNonCuda {
+ void operator()(const Device& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat m, typename TTypes<T>::Flat v,
typename TTypes<T>::ConstScalar beta1_power,
typename TTypes<T>::ConstScalar beta2_power,
@@ -240,6 +241,12 @@ struct ApplyAdam<CPUDevice, T> {
};
template <typename T>
+struct ApplyAdam<CPUDevice, T> : ApplyAdamNonCuda<CPUDevice, T> {};
+template <typename T>
+struct ApplyAdam<SYCLDevice, T> : ApplyAdamNonCuda<SYCLDevice, T> {};
+
+
+template <typename T>
struct ApplyRMSProp<CPUDevice, T> {
void operator()(const CPUDevice& d, typename TTypes<T>::Flat var,
typename TTypes<T>::Flat ms, typename TTypes<T>::Flat mom,
@@ -2139,6 +2146,12 @@ TF_CALL_half(REGISTER_CPU_KERNELS);
TF_CALL_float(REGISTER_CPU_KERNELS);
TF_CALL_double(REGISTER_CPU_KERNELS);
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNELS(T) REGISTER_KERNELS(SYCL, T);
+
+TF_CALL_float(REGISTER_SYCL_KERNELS);
+#endif
+
#if GOOGLE_CUDA
// Forward declarations of the functor specializations for GPU.
namespace functor {
diff --git a/tensorflow/core/ops/math_grad_test.cc b/tensorflow/core/ops/math_grad_test.cc
index b3b523001b..1fc5a1b128 100644
--- a/tensorflow/core/ops/math_grad_test.cc
+++ b/tensorflow/core/ops/math_grad_test.cc
@@ -388,6 +388,9 @@ class TestOp : public OpKernel {
void Compute(OpKernelContext* ctx) override { ctx->set_output(0, Tensor()); }
};
REGISTER_KERNEL_BUILDER(Name("TestOpWithNoGrad").Device(DEVICE_CPU), TestOp);
+#ifdef TENSORFLOW_USE_SYCL
+REGISTER_KERNEL_BUILDER(Name("TestOpWithNoGrad").Device(DEVICE_SYCL), TestOp);
+#endif // TENSORFLOW_USE_SYCL
TEST_F(MathGradTest, Error_Reporting) {
auto x = test::AsTensor<float>({-3.f});
diff --git a/tensorflow/core/ops/math_ops.cc b/tensorflow/core/ops/math_ops.cc
index 6a1c285dbf..4031521d46 100644
--- a/tensorflow/core/ops/math_ops.cc
+++ b/tensorflow/core/ops/math_ops.cc
@@ -1268,17 +1268,18 @@ Status ArgOpShape(shape_inference::InferenceContext* c) {
dimension_val = dim_t->scalar<int64>()();
}
- if (dimension_val < 0 || dimension_val >= input_rank) {
+ int64 axis = dimension_val < 0 ? dimension_val + input_rank : dimension_val;
+ if (axis < 0 || axis >= input_rank) {
return errors::InvalidArgument("Dimension (", dimension_val,
- ") must be in the range [0, ", input_rank,
- "), where ", input_rank, " is the ",
- "number of dimensions in the input.");
+ ") must be in the range [", -input_rank,
+ ", ", input_rank, "), where ", input_rank,
+ " is the number of dimensions in the input.");
}
// Return the input shape without the dimension being reduced.
std::vector<DimensionHandle> dims;
for (int i = 0; i < input_rank; ++i) {
- if (dimension_val != i) {
+ if (axis != i) {
dims.emplace_back(c->Dim(input_shape, i));
}
}
diff --git a/tensorflow/core/ops/math_ops_test.cc b/tensorflow/core/ops/math_ops_test.cc
index 6a1cc8e7eb..84264f13dc 100644
--- a/tensorflow/core/ops/math_ops_test.cc
+++ b/tensorflow/core/ops/math_ops_test.cc
@@ -450,11 +450,15 @@ TEST(MathOpsTest, ArgOps_ShapeFn) {
// Dimension value out of bounds
dimension = test::AsScalar(10);
op.input_tensors[1] = &dimension;
- INFER_ERROR("must be in the range [0, 3)", op, "[2,3,4];[]");
+ INFER_ERROR("must be in the range [-3, 3)", op, "[2,3,4];[]");
dimension = test::AsScalar(-10);
op.input_tensors[1] = &dimension;
- INFER_ERROR("must be in the range [0, 3)", op, "[2,3,4];[]");
+ INFER_ERROR("must be in the range [-3, 3)", op, "[2,3,4];[]");
+
+ dimension = test::AsScalar(-1);
+ op.input_tensors[1] = &dimension;
+ INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_1]");
}
TEST(MathOpsTest, Betainc_ShapeFn) {
diff --git a/tensorflow/core/platform/cpu_info.h b/tensorflow/core/platform/cpu_info.h
index 706dc4dcc5..13e89e7b7b 100644
--- a/tensorflow/core/platform/cpu_info.h
+++ b/tensorflow/core/platform/cpu_info.h
@@ -16,6 +16,10 @@ limitations under the License.
#ifndef TENSORFLOW_PLATFORM_CPU_INFO_H_
#define TENSORFLOW_PLATFORM_CPU_INFO_H_
+#if defined(PLATFORM_WINDOWS)
+#include "tensorflow/core/platform/windows/cpu_info.h"
+#endif
+
namespace tensorflow {
namespace port {
diff --git a/tensorflow/python/lib/core/status_helper.i b/tensorflow/core/platform/windows/cpu_info.h
index d15b53dc99..44c0059d63 100644
--- a/tensorflow/python/lib/core/status_helper.i
+++ b/tensorflow/core/platform/windows/cpu_info.h
@@ -1,4 +1,4 @@
-/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,19 +13,14 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// SWIG test helper for lib::tensorflow::Status
+#ifndef TENSORFLOW_PLATFORM_WINDOWS_CPU_INFO_H_
+#define TENSORFLOW_PLATFORM_WINDOWS_CPU_INFO_H_
-%include "tensorflow/python/platform/base.i"
-%import(module="tensorflow.python.pywrap_tensorflow") "tensorflow/python/lib/core/status.i"
+// Byte order defines provided by gcc. MSVC doesn't define those so
+// we define them here.
+// We assume that all windows platform out there are little endian.
+#define __ORDER_LITTLE_ENDIAN__ 0x4d2
+#define __ORDER_BIG_ENDIAN__ 0x10e1
+#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
-%inline %{
-#include "tensorflow/core/lib/core/status.h"
-
-tensorflow::Status NotOkay() {
- return tensorflow::Status(tensorflow::error::INVALID_ARGUMENT, "Testing 1 2 3");
-}
-
-tensorflow::Status Okay() {
- return tensorflow::Status();
-}
-%}
+#endif // TENSORFLOW_PLATFORM_WINDOWS_CPU_INFO_H_
diff --git a/tensorflow/examples/tutorials/monitors/iris_monitors.py b/tensorflow/examples/tutorials/monitors/iris_monitors.py
index e2a46baf48..041592b9b0 100644
--- a/tensorflow/examples/tutorials/monitors/iris_monitors.py
+++ b/tensorflow/examples/tutorials/monitors/iris_monitors.py
@@ -21,6 +21,7 @@ import os
import numpy as np
import tensorflow as tf
+from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
tf.logging.set_verbosity(tf.logging.INFO)
@@ -65,6 +66,26 @@ def main(unused_argv):
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
+ validation_metrics = {
+ "accuracy": MetricSpec(
+ metric_fn=tf.contrib.metrics.streaming_accuracy,
+ prediction_key="classes"),
+ "recall": MetricSpec(
+ metric_fn=tf.contrib.metrics.streaming_recall,
+ prediction_key="classes"),
+ "precision": MetricSpec(
+ metric_fn=tf.contrib.metrics.streaming_precision,
+ prediction_key="classes")
+ }
+ validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
+ test_set.data,
+ test_set.target,
+ every_n_steps=50,
+ metrics=validation_metrics,
+ early_stopping_metric="loss",
+ early_stopping_metric_minimize=True,
+ early_stopping_rounds=200)
+
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
diff --git a/tensorflow/g3doc/api_docs/cc/ClassEnv.md b/tensorflow/g3doc/api_docs/cc/ClassEnv.md
index 236ffdeeb2..9f554aa3aa 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassEnv.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassEnv.md
@@ -186,13 +186,13 @@ Renames file src to target. If target already exists, it will be replaced.
#### `virtual uint64 tensorflow::Env::NowMicros()=0` {#virtual_uint64_tensorflow_Env_NowMicros}
-Returns the number of micro-seconds since some fixed point in time. Only useful for computing deltas of time.
+Returns the number of micro-seconds since the Unix epoch.
#### `virtual uint64 tensorflow::Env::NowSeconds()` {#virtual_uint64_tensorflow_Env_NowSeconds}
-Returns the number of seconds since some fixed point in time. Only useful for computing deltas of time.
+Returns the number of seconds since the Unix epoch.
diff --git a/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md b/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md
index 70462b7eb8..e367f5f042 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md
@@ -50,7 +50,7 @@ Returns true if the path matches the given pattern. The wildcards allowed in pat
#### `uint64 tensorflow::EnvWrapper::NowMicros() override` {#uint64_tensorflow_EnvWrapper_NowMicros}
-Returns the number of micro-seconds since some fixed point in time. Only useful for computing deltas of time.
+Returns the number of micro-seconds since the Unix epoch.
diff --git a/tensorflow/g3doc/api_docs/leftnav_files b/tensorflow/g3doc/api_docs/leftnav_files
index ac84f79dad..b1cf343c0b 100644
--- a/tensorflow/g3doc/api_docs/leftnav_files
+++ b/tensorflow/g3doc/api_docs/leftnav_files
@@ -14,7 +14,6 @@ python/sparse_ops.md
python/io_ops.md
python/python_io.md
python/nn.md
-python/rnn_cell.md
python/client.md
python/train.md
python/histogram_ops.md
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md
index 788d2066ad..8dc62c4c18 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md
@@ -1,8 +1,252 @@
- - -
+#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata}
+
+Magic attribute generated for "run_metadata" proto field.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag}
+
+Magic attribute generated for "tag" proto field.
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.all_variables.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.all_variables.md
index a64640478f..1badf0e5c5 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.all_variables.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.all_variables.md
@@ -2,7 +2,7 @@
See `tf.global_variables`. (deprecated)
-THIS FUNCTION IS DEPRECATED. It will be removed after 2016-03-02.
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
Instructions for updating:
Please use tf.global_variables instead.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md
new file mode 100644
index 0000000000..bf17320a5a
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md
@@ -0,0 +1,17 @@
+### `tf.merge_all_summaries(*args, **kwargs)` {#merge_all_summaries}
+
+Merges all summaries collected in the default graph. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.merge_all.
+
+ Args:
+ key: `GraphKey` used to collect the summaries. Defaults to
+ `GraphKeys.SUMMARIES`.
+
+ Returns:
+ If no summaries were collected, returns None. Otherwise returns a scalar
+ `Tensor` of type `string` containing the serialized `Summary` protocol
+ buffer resulting from the merging.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md
new file mode 100644
index 0000000000..6220d3641b
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md
@@ -0,0 +1,49 @@
+### `tf.image_summary(*args, **kwargs)` {#image_summary}
+
+Outputs a `Summary` protocol buffer with images. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.image. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, the max_images argument was renamed to max_outputs.
+
+ The summary has up to `max_images` summary values containing images. The
+ images are built from `tensor` which must be 4-D with shape `[batch_size,
+ height, width, channels]` and where `channels` can be:
+
+ * 1: `tensor` is interpreted as Grayscale.
+ * 3: `tensor` is interpreted as RGB.
+ * 4: `tensor` is interpreted as RGBA.
+
+ The images have the same number of channels as the input tensor. For float
+ input, the values are normalized one image at a time to fit in the range
+ `[0, 255]`. `uint8` values are unchanged. The op uses two different
+ normalization algorithms:
+
+ * If the input values are all positive, they are rescaled so the largest one
+ is 255.
+
+ * If any input value is negative, the values are shifted so input value 0.0
+ is at 127. They are then rescaled so that either the smallest value is 0,
+ or the largest one is 255.
+
+ The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+ build the `tag` of the summary values:
+
+ * If `max_images` is 1, the summary value tag is '*tag*/image'.
+ * If `max_images` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+
+ Args:
+ tag: A scalar `Tensor` of type `string`. Used to build the `tag`
+ of the summary values.
+ tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
+ width, channels]` where `channels` is 1, 3, or 4.
+ max_images: Max number of batch elements to generate images for.
+ collections: Optional list of ops.GraphKeys. The collections to add the
+ summary to. Defaults to [ops.GraphKeys.SUMMARIES]
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md
index 19532f7cc3..bce704ef4f 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md
@@ -1,8 +1,245 @@
- - -
+#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint}
+
+Magic attribute generated for "type_hint" proto field.
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md
index ae38c9fe0a..598827ea70 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md
@@ -175,125 +175,6 @@ Checks that for all elements of farray1 and farray2
- - -
-#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween}
-
-Asserts that value is between minv and maxv (inclusive).
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails}
-
-Asserts a shell command fails and the error matches a regex in a list.
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: the list of regular expression strings.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds}
-
-Asserts that a shell command succeeds (i.e. exits with code 0).
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: List of regular expression byte strings that match success.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence}
-
-Assert that "container" contains "subsequence" as an exact subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, and without other elements interspersed. For example, [1, 2, 3] is an
-exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be an exact subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder}
-
-Asserts that the strings provided are found in the target in order.
-
-This may be useful for checking HTML output.
-
-##### Args:
-
-
-* <b>`strings`</b>: A list of strings, such as [ 'fox', 'dog' ]
-* <b>`target`</b>: A target string in which to look for the strings, such as
- 'The quick brown fox jumped over the lazy dog'.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence}
-
-Assert that "container" contains "subsequence" as a subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, but possibly with other elements interspersed. For example, [1, 2, 3]
-is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be a subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset}
-
-Checks whether actual iterable is a superset of expected iterable.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual}
-
-An unordered sequence specific comparison.
-
-Equivalent to assertItemsEqual(). This method is a compatibility layer
-for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
-assertCountEqual() calls.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual}
Asserts that the two given devices are the same.
@@ -314,49 +195,10 @@ Checks whether actual is a superset of expected.
- - -
-#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual}
+#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual}
-Raises AssertionError if a and b are not equal dictionaries.
-
-##### Args:
-
-
-* <b>`a`</b>: A dict, the expected value.
-* <b>`b`</b>: A dict, the actual value.
-* <b>`msg`</b>: An optional str, the associated message.
-
-##### Raises:
-
-
-* <b>`AssertionError`</b>: if the dictionaries are not equal.
-
-
-- - -
-
-#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty}
-
-Assert that an object has zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-- - -
-
-#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith}
-
-Assert that actual.endswith(expected_end) is True.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`expected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
- - -
@@ -440,11 +282,10 @@ Included for symmetry with assertIsNone.
- - -
-#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual}
-
-An unordered sequence specific comparison.
+#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual}
-It asserts that actual_seq and expected_seq have the same element counts.
+An unordered sequence specific comparison. It asserts that
+actual_seq and expected_seq have the same element counts.
Equivalent to::
self.assertEqual(Counter(iter(actual_seq)),
@@ -457,30 +298,6 @@ Asserts that each element has the same count in both sequences.
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual}
-
-Asserts that the JSON objects defined in two strings are equal.
-
-A summary of the differences will be included in the failure message
-using assertSameStructure.
-
-##### Args:
-
-
-* <b>`first`</b>: A string contining JSON to decode and compare to second.
-* <b>`second`</b>: A string contining JSON to decode and compare to first.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
- - -
@@ -552,13 +369,6 @@ if not.
- - -
-#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements}
-
-Checks whether actual iterable and expected iterable are disjoint.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)` {#TestCase.assertNotAlmostEqual}
Fail if the two objects are equal as determined by their
@@ -589,33 +399,6 @@ Objects that are equal automatically fail.
- - -
-#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty}
-
-Assert that an object has non-zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith}
-
-Assert that actual.endswith(unexpected_end) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual}
Fail if the two objects are equal as determined by the '!='
@@ -653,20 +436,6 @@ Fail the test if the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith}
-
-Assert that actual.startswith(unexpected_start) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_start`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals}
Asserts that message is same as parsed expected_message_ascii.
@@ -741,38 +510,6 @@ Asserts that the message in a raised exception matches a regexp.
- - -
-#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch}
-
-Asserts that the message in a raised exception equals the given string.
-
-Unlike assertRaisesRegexp, this method takes a literal string, not
-a regular expression.
-
-with self.assertRaisesWithLiteralMatch(ExType, 'message'):
- DoSomething()
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_exception_message`</b>: String message expected in the raised
- exception. For a raise exception e, expected_exception_message must
- equal str(e).
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra kwargs.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
- self.failureException if callable_obj does not raise a macthing exception.
-
-
-- - -
-
#### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch}
Returns a context manager to enclose code expected to raise an exception.
@@ -797,71 +534,6 @@ predicate search.
- - -
-#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch}
-
-Asserts that the message in a raised exception matches the given regexp.
-
-This is just a wrapper around assertRaisesRegexp. Please use
-assertRaisesRegexp instead of assertRaisesWithRegexpMatch.
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_regexp`</b>: Regexp (re pattern object or string) expected to be
- found in error message.
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra keyword args.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
- self.failureException if callable_obj does not raise a macthing exception.
-
-
-- - -
-
-#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch}
-
-Asserts that at least one regex in regexes matches str.
-
- If possible you should use assertRegexpMatches, which is a simpler
- version of this method. assertRegexpMatches takes a single regular
- expression (a string or re compiled object) instead of a list.
-
- Notes:
- 1. This function uses substring matching, i.e. the matching
- succeeds if *any* substring of the error message matches *any*
- regex in the list. This is more convenient for the user than
- full-string matching.
-
- 2. If regexes is the empty list, the matching will always fail.
-
- 3. Use regexes=[''] for a regex that will always pass.
-
- 4. '.' matches any single character *except* the newline. To
- match any character, use '(.|
-)'.
-
- 5. '^' matches the beginning of each line, not just the beginning
- of the string. Similarly, '$' matches the end of each line.
-
- 6. An exception will be thrown if regexes contains an invalid
- regex.
-
- Args:
- actual_str: The string we try to match with the items in regexes.
- regexes: The regular expressions we want to match against str.
- See "Notes" above for detailed notes on how this is interpreted.
- message: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches}
Fail the test unless the text matches the regular expression.
@@ -869,79 +541,6 @@ Fail the test unless the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements}
-
-Assert that two sequences have the same elements (in any order).
-
-This method, unlike assertItemsEqual, doesn't care about any
-duplicates in the expected and actual sequences.
-
- >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
- # Doesn't raise an AssertionError
-
-If possible, you should use assertItemsEqual instead of
-assertSameElements.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure}
-
-Asserts that two values contain the same structural content.
-
-The two arguments should be data trees consisting of trees of dicts and
-lists. They will be deeply compared by walking into the contents of dicts
-and lists; other items will be compared using the == operator.
-If the two structures differ in content, the failure message will indicate
-the location within the structures where the first difference is found.
-This may be helpful when comparing large structures.
-
-##### Args:
-
-
-* <b>`a`</b>: The first structure to compare.
-* <b>`b`</b>: The second structure to compare.
-* <b>`aname`</b>: Variable name to use for the first structure in assertion messages.
-* <b>`bname`</b>: Variable name to use for the second structure.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual}
-
-An approximate equality assertion for ordered sequences.
-
-Fail if the two sequences are unequal as determined by their value
-differences rounded to the given number of decimal places (default 7) and
-comparing to zero, or by comparing that the difference between each value
-in the two sequences is more than the given delta.
-
-Note that decimal places (from zero) are usually not the same as significant
-digits (measured from the most signficant digit).
-
-If the two sequences compare equal then they will automatically compare
-almost equal.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`places`</b>: The number of decimal places to compare.
-* <b>`msg`</b>: The message to be printed if the test fails.
-* <b>`delta`</b>: The OK difference between compared values.
-
-
-- - -
-
#### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual}
An equality assertion for ordered sequences (like lists and tuples).
@@ -962,26 +561,6 @@ which can be indexed, has a length, and has an equality operator.
- - -
-#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith}
-
-An equality assertion for the beginning of ordered sequences.
-
-If prefix is an empty sequence, it will raise an error unless whole is also
-an empty sequence.
-
-If prefix is not a sequence, it will raise an error if the first element of
-whole does not match.
-
-##### Args:
-
-
-* <b>`prefix`</b>: A sequence expected at the beginning of the whole parameter.
-* <b>`whole`</b>: The sequence in which to look for prefix.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual}
A set-specific equality assertion.
@@ -1033,51 +612,6 @@ Assert that actual.startswith(expected_start) is True.
- - -
-#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered}
-
-Asserts that total ordering has been implemented correctly.
-
-For example, say you have a class A that compares only on its attribute x.
-Comparators other than __lt__ are omitted for brevity.
-
-class A(object):
- def __init__(self, x, y):
- self.x = x
- self.y = y
-
- def __hash__(self):
- return hash(self.x)
-
- def __lt__(self, other):
- try:
- return self.x < other.x
- except AttributeError:
- return NotImplemented
-
-assertTotallyOrdered will check that instances can be ordered correctly.
-For example,
-
-self.assertTotallyOrdered(
- [None], # None should come before everything else.
- [1], # Integers sort earlier.
- [A(1, 'a')],
- [A(2, 'b')], # 2 is after 1.
- [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
- [A(4, 'z')],
- ['foo']) # Strings sort last.
-
-##### Args:
-
-
-* <b>`*groups`</b>: A list of groups of elements. Each group of elements is a list
- of objects that are equal. The elements in each group must be less than
- the elements in the group after it. For example, these groups are
- totally ordered: [None], [1], [2, 2], [3].
-* <b>`**kwargs`</b>: optional msg keyword argument can be passed.
-
-
-- - -
-
#### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue}
Check that the expression is true.
@@ -1100,13 +634,6 @@ A tuple-specific equality assertion.
- - -
-#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual}
-
-Asserts that urls are equal, ignoring ordering of query params.
-
-
-- - -
-
#### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_}
Check that the expression is true.
@@ -1166,9 +693,9 @@ tearDown.
- - -
-#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail}
+#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail}
-Fail immediately with the given message, optionally prefixed.
+Fail immediately, with the given message.
- - -
@@ -1222,13 +749,6 @@ Fail immediately with the given message, optionally prefixed.
- - -
-#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties}
-
-Return any properties that the user has recorded.
-
-
-- - -
-
#### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir}
@@ -1243,20 +763,6 @@ Return any properties that the user has recorded.
- - -
-#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty}
-
-Record an arbitrary property for later use.
-
-##### Args:
-
-
-* <b>`property_name`</b>: str, name of property to record; must be a valid XML
- attribute name
-* <b>`property_value`</b>: value of property; must be valid XML attribute value
-
-
-- - -
-
#### `tf.test.TestCase.run(result=None)` {#TestCase.run}
@@ -1280,18 +786,11 @@ Hook method for setting up class fixture before running tests in the class.
#### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription}
-Format both the test method name and the first line of its docstring.
-
-If no docstring is given, only returns the method name.
-
-This method overrides unittest.TestCase.shortDescription(), which
-only returns the first line of the docstring, obscuring the name
-of the test upon failure.
-
-##### Returns:
-
+Returns a one-line description of the test, or None if no
+description has been provided.
-* <b>`desc`</b>: A short description of a test method.
+The default implementation of this method returns the first line of
+the specified test method's docstring.
- - -
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md
new file mode 100644
index 0000000000..3ffd9260c7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md
@@ -0,0 +1,22 @@
+### `tf.scalar_summary(*args, **kwargs)` {#scalar_summary}
+
+Outputs a `Summary` protocol buffer with scalar values. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
+
+ The input `tags` and `values` must have the same shape. The generated
+ summary has a summary value for each tag-value pair in `tags` and `values`.
+
+ Args:
+ tags: A `string` `Tensor`. Tags for the summaries.
+ values: A real numeric Tensor. Values for the summaries.
+ collections: Optional list of graph collections keys. The new summary op is
+ added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md
new file mode 100644
index 0000000000..3cfd7103d7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md
@@ -0,0 +1,4 @@
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md
new file mode 100644
index 0000000000..570d7b712c
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md
@@ -0,0 +1,26 @@
+### `tf.histogram_summary(*args, **kwargs)` {#histogram_summary}
+
+Outputs a `Summary` protocol buffer with a histogram. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
+
+ The generated
+ [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+ has one summary value containing a histogram for `values`.
+
+ This op reports an `InvalidArgument` error if any value is not finite.
+
+ Args:
+ tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
+ values: A real numeric `Tensor`. Any shape. Values to use to
+ build the histogram.
+ collections: Optional list of graph collections keys. The new summary op is
+ added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md
new file mode 100644
index 0000000000..ccb984f5ab
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md
@@ -0,0 +1,27 @@
+### `tf.merge_summary(*args, **kwargs)` {#merge_summary}
+
+Merges summaries. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.merge.
+
+ This op creates a
+ [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+ protocol buffer that contains the union of all the values in the input
+ summaries.
+
+ When the Op is run, it reports an `InvalidArgument` error if multiple values
+ in the summaries to merge use the same tag.
+
+ Args:
+ inputs: A list of `string` `Tensor` objects containing serialized `Summary`
+ protocol buffers.
+ collections: Optional list of graph collections keys. The new summary op is
+ added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer resulting from the merging.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md
new file mode 100644
index 0000000000..24a3b3f10c
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md
@@ -0,0 +1,4 @@
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md
new file mode 100644
index 0000000000..f2d0c042d7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md
@@ -0,0 +1,4 @@
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md
new file mode 100644
index 0000000000..e9bdda200f
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md
@@ -0,0 +1,207 @@
+
+- - -
+
+#### `tf.train.SummaryWriter.__init__(*args, **kwargs)` {#SummaryWriter.__init__}
+
+Creates a `SummaryWriter` and an event file. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.FileWriter. The interface and behavior is the same; this is just a rename.
+
+ This class is deprecated, and should be replaced with tf.summary.FileWriter.
+
+ On construction the summary writer creates a new event file in `logdir`.
+ This event file will contain `Event` protocol buffers constructed when you
+ call one of the following functions: `add_summary()`, `add_session_log()`,
+ `add_event()`, or `add_graph()`.
+
+ If you pass a `Graph` to the constructor it is added to
+ the event file. (This is equivalent to calling `add_graph()` later).
+
+ TensorBoard will pick the graph from the file and display it graphically so
+ you can interactively explore the graph you built. You will usually pass
+ the graph from the session in which you launched it:
+
+ ```python
+ ...create a graph...
+ # Launch the graph in a session.
+ sess = tf.Session()
+ # Create a summary writer, add the 'graph' to the event file.
+ writer = tf.train.SummaryWriter(<some-directory>, sess.graph)
+ ```
+
+ The other arguments to the constructor control the asynchronous writes to
+ the event file:
+
+ * `flush_secs`: How often, in seconds, to flush the added summaries
+ and events to disk.
+ * `max_queue`: Maximum number of summaries or events pending to be
+    written to disk before one of the 'add' calls blocks.
+
+ Args:
+ logdir: A string. Directory where event file will be written.
+ graph: A `Graph` object, such as `sess.graph`.
+ max_queue: Integer. Size of the queue for pending events and summaries.
+ flush_secs: Number. How often, in seconds, to flush the
+ pending events and summaries to disk.
+ graph_def: DEPRECATED: Use the `graph` argument instead.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_event(event)` {#SummaryWriter.add_event}
+
+Adds an event to the event file.
+
+##### Args:
+
+
+* <b>`event`</b>: An `Event` protocol buffer.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_graph(graph, global_step=None, graph_def=None)` {#SummaryWriter.add_graph}
+
+Adds a `Graph` to the event file.
+
+The graph described by the protocol buffer will be displayed by
+TensorBoard. Most users pass a graph in the constructor instead.
+
+##### Args:
+
+
+* <b>`graph`</b>: A `Graph` object, such as `sess.graph`.
+* <b>`global_step`</b>: Number. Optional global step counter to record with the
+ graph.
+* <b>`graph_def`</b>: DEPRECATED. Use the `graph` parameter instead.
+
+##### Raises:
+
+
+* <b>`ValueError`</b>: If both graph and graph_def are passed to the method.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_meta_graph(meta_graph_def, global_step=None)` {#SummaryWriter.add_meta_graph}
+
+Adds a `MetaGraphDef` to the event file.
+
+The `MetaGraphDef` allows running the given graph via
+`saver.import_meta_graph()`.
+
+##### Args:
+
+
+* <b>`meta_graph_def`</b>: A `MetaGraphDef` object, often as returned by
+ `saver.export_meta_graph()`.
+* <b>`global_step`</b>: Number. Optional global step counter to record with the
+ graph.
+
+##### Raises:
+
+
+* <b>`TypeError`</b>: If `meta_graph_def` is not an instance of `MetaGraphDef`.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_run_metadata(run_metadata, tag, global_step=None)` {#SummaryWriter.add_run_metadata}
+
+Adds metadata information for a single session.run() call.
+
+##### Args:
+
+
+* <b>`run_metadata`</b>: A `RunMetadata` protobuf object.
+* <b>`tag`</b>: The tag name for this metadata.
+* <b>`global_step`</b>: Number. Optional global step counter to record with the
+ StepStats.
+
+##### Raises:
+
+
+* <b>`ValueError`</b>: If the provided tag was already used for this type of event.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_session_log(session_log, global_step=None)` {#SummaryWriter.add_session_log}
+
+Adds a `SessionLog` protocol buffer to the event file.
+
+This method wraps the provided session in an `Event` protocol buffer
+and adds it to the event file.
+
+##### Args:
+
+
+* <b>`session_log`</b>: A `SessionLog` protocol buffer.
+* <b>`global_step`</b>: Number. Optional global step value to record with the
+ summary.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_summary(summary, global_step=None)` {#SummaryWriter.add_summary}
+
+Adds a `Summary` protocol buffer to the event file.
+
+This method wraps the provided summary in an `Event` protocol buffer
+and adds it to the event file.
+
+You can pass the result of evaluating any summary op, using
+[`Session.run()`](client.md#Session.run) or
+[`Tensor.eval()`](framework.md#Tensor.eval), to this
+function. Alternatively, you can pass a `tf.Summary` protocol
+buffer that you populate with your own data. The latter is
+commonly done to report evaluation results in event files.
+
+##### Args:
+
+
+* <b>`summary`</b>: A `Summary` protocol buffer, optionally serialized as a string.
+* <b>`global_step`</b>: Number. Optional global step value to record with the
+ summary.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.close()` {#SummaryWriter.close}
+
+Flushes the event file to disk and close the file.
+
+Call this method when you do not need the summary writer anymore.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.flush()` {#SummaryWriter.flush}
+
+Flushes the event file to disk.
+
+Call this method to make sure that all pending events have been written to
+disk.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.get_logdir()` {#SummaryWriter.get_logdir}
+
+Returns the directory where event file will be written.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.reopen()` {#SummaryWriter.reopen}
+
+Reopens the EventFileWriter.
+
+Can be called after `close()` to add more events in the same directory.
+The events will go into a new events file.
+
+Does nothing if the EventFileWriter was not closed.
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.summary.merge.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.summary.merge.md
index 17ecbd0654..5a7bd8a0f5 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.summary.merge.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.summary.merge.md
@@ -16,7 +16,7 @@ in the summaries to merge use the same tag.
* <b>`inputs`</b>: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
* <b>`collections`</b>: Optional list of graph collections keys. The new summary op is
- added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ added to these collections. Defaults to `[]`.
* <b>`name`</b>: A name for the operation (optional).
##### Returns:
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md
new file mode 100644
index 0000000000..c5830ab550
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md
@@ -0,0 +1,37 @@
+### `tf.audio_summary(*args, **kwargs)` {#audio_summary}
+
+Outputs a `Summary` protocol buffer with audio. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.audio. Note that tf.summary.audio uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in.
+
+ The summary has up to `max_outputs` summary values containing audio. The
+ audio is built from `tensor` which must be 3-D with shape `[batch_size,
+ frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+ assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
+ `sample_rate`.
+
+ The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+ build the `tag` of the summary values:
+
+ * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+ * If `max_outputs` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+
+ Args:
+ tag: A scalar `Tensor` of type `string`. Used to build the `tag`
+ of the summary values.
+ tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
+ or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
+ sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
+ signal in hertz.
+ max_outputs: Max number of batch elements to generate audio for.
+ collections: Optional list of ops.GraphKeys. The collections to add the
+ summary to. Defaults to [ops.GraphKeys.SUMMARIES]
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md
new file mode 100644
index 0000000000..613f4ebd73
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md
@@ -0,0 +1,4 @@
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/state_ops.md b/tensorflow/g3doc/api_docs/python/state_ops.md
index 2c4ba9fa27..109f15494e 100644
--- a/tensorflow/g3doc/api_docs/python/state_ops.md
+++ b/tensorflow/g3doc/api_docs/python/state_ops.md
@@ -3444,7 +3444,7 @@ device assignments have not changed.
See `tf.global_variables`. (deprecated)
-THIS FUNCTION IS DEPRECATED. It will be removed after 2016-03-02.
+THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02.
Instructions for updating:
Please use tf.global_variables instead.
diff --git a/tensorflow/g3doc/api_docs/python/summary.md b/tensorflow/g3doc/api_docs/python/summary.md
index 208153b3c2..7fb3485ded 100644
--- a/tensorflow/g3doc/api_docs/python/summary.md
+++ b/tensorflow/g3doc/api_docs/python/summary.md
@@ -424,7 +424,7 @@ in the summaries to merge use the same tag.
* <b>`inputs`</b>: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
* <b>`collections`</b>: Optional list of graph collections keys. The new summary op is
- added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ added to these collections. Defaults to `[]`.
* <b>`name`</b>: A name for the operation (optional).
##### Returns:
@@ -487,11 +487,248 @@ metadata is stored in its NodeDef. This method retrieves the description.
- - -
+#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint}
+
+Magic attribute generated for "type_hint" proto field.
+
+
- - -
@@ -500,9 +737,253 @@ Support the pickle protocol.
- - -
+#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata}
+
+Magic attribute generated for "run_metadata" proto field.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag}
+
+Magic attribute generated for "tag" proto field.
+
+
diff --git a/tensorflow/g3doc/api_docs/python/test.md b/tensorflow/g3doc/api_docs/python/test.md
index ef6aa29d87..81fd594cf1 100644
--- a/tensorflow/g3doc/api_docs/python/test.md
+++ b/tensorflow/g3doc/api_docs/python/test.md
@@ -215,125 +215,6 @@ Checks that for all elements of farray1 and farray2
- - -
-#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween}
-
-Asserts that value is between minv and maxv (inclusive).
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails}
-
-Asserts a shell command fails and the error matches a regex in a list.
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: the list of regular expression strings.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds}
-
-Asserts that a shell command succeeds (i.e. exits with code 0).
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: List of regular expression byte strings that match success.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence}
-
-Assert that "container" contains "subsequence" as an exact subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, and without other elements interspersed. For example, [1, 2, 3] is an
-exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be an exact subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder}
-
-Asserts that the strings provided are found in the target in order.
-
-This may be useful for checking HTML output.
-
-##### Args:
-
-
-* <b>`strings`</b>: A list of strings, such as [ 'fox', 'dog' ]
-* <b>`target`</b>: A target string in which to look for the strings, such as
- 'The quick brown fox jumped over the lazy dog'.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence}
-
-Assert that "container" contains "subsequence" as a subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, but possibly with other elements interspersed. For example, [1, 2, 3]
-is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be a subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset}
-
-Checks whether actual iterable is a superset of expected iterable.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual}
-
-An unordered sequence specific comparison.
-
-Equivalent to assertItemsEqual(). This method is a compatibility layer
-for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
-assertCountEqual() calls.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual}
Asserts that the two given devices are the same.
@@ -354,49 +235,10 @@ Checks whether actual is a superset of expected.
- - -
-#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual}
+#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual}
-Raises AssertionError if a and b are not equal dictionaries.
-
-##### Args:
-
-
-* <b>`a`</b>: A dict, the expected value.
-* <b>`b`</b>: A dict, the actual value.
-* <b>`msg`</b>: An optional str, the associated message.
-
-##### Raises:
-
-
-* <b>`AssertionError`</b>: if the dictionaries are not equal.
-
-
-- - -
-
-#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty}
-
-Assert that an object has zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-- - -
-
-#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith}
-
-Assert that actual.endswith(expected_end) is True.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`expected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
- - -
@@ -480,11 +322,10 @@ Included for symmetry with assertIsNone.
- - -
-#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual}
-
-An unordered sequence specific comparison.
+#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual}
-It asserts that actual_seq and expected_seq have the same element counts.
+An unordered sequence specific comparison. It asserts that
+actual_seq and expected_seq have the same element counts.
Equivalent to::
self.assertEqual(Counter(iter(actual_seq)),
@@ -497,30 +338,6 @@ Asserts that each element has the same count in both sequences.
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual}
-
-Asserts that the JSON objects defined in two strings are equal.
-
-A summary of the differences will be included in the failure message
-using assertSameStructure.
-
-##### Args:
-
-
-* <b>`first`</b>: A string contining JSON to decode and compare to second.
-* <b>`second`</b>: A string contining JSON to decode and compare to first.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
- - -
@@ -592,13 +409,6 @@ if not.
- - -
-#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements}
-
-Checks whether actual iterable and expected iterable are disjoint.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)` {#TestCase.assertNotAlmostEqual}
Fail if the two objects are equal as determined by their
@@ -629,33 +439,6 @@ Objects that are equal automatically fail.
- - -
-#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty}
-
-Assert that an object has non-zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith}
-
-Assert that actual.endswith(unexpected_end) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual}
Fail if the two objects are equal as determined by the '!='
@@ -693,20 +476,6 @@ Fail the test if the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith}
-
-Assert that actual.startswith(unexpected_start) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_start`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals}
Asserts that message is same as parsed expected_message_ascii.
@@ -781,38 +550,6 @@ Asserts that the message in a raised exception matches a regexp.
- - -
-#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch}
-
-Asserts that the message in a raised exception equals the given string.
-
-Unlike assertRaisesRegexp, this method takes a literal string, not
-a regular expression.
-
-with self.assertRaisesWithLiteralMatch(ExType, 'message'):
- DoSomething()
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_exception_message`</b>: String message expected in the raised
- exception. For a raise exception e, expected_exception_message must
- equal str(e).
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra kwargs.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
- self.failureException if callable_obj does not raise a macthing exception.
-
-
-- - -
-
#### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch}
Returns a context manager to enclose code expected to raise an exception.
@@ -837,71 +574,6 @@ predicate search.
- - -
-#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch}
-
-Asserts that the message in a raised exception matches the given regexp.
-
-This is just a wrapper around assertRaisesRegexp. Please use
-assertRaisesRegexp instead of assertRaisesWithRegexpMatch.
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_regexp`</b>: Regexp (re pattern object or string) expected to be
- found in error message.
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra keyword args.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
- self.failureException if callable_obj does not raise a macthing exception.
-
-
-- - -
-
-#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch}
-
-Asserts that at least one regex in regexes matches str.
-
- If possible you should use assertRegexpMatches, which is a simpler
- version of this method. assertRegexpMatches takes a single regular
- expression (a string or re compiled object) instead of a list.
-
- Notes:
- 1. This function uses substring matching, i.e. the matching
- succeeds if *any* substring of the error message matches *any*
- regex in the list. This is more convenient for the user than
- full-string matching.
-
- 2. If regexes is the empty list, the matching will always fail.
-
- 3. Use regexes=[''] for a regex that will always pass.
-
- 4. '.' matches any single character *except* the newline. To
- match any character, use '(.|
-)'.
-
- 5. '^' matches the beginning of each line, not just the beginning
- of the string. Similarly, '$' matches the end of each line.
-
- 6. An exception will be thrown if regexes contains an invalid
- regex.
-
- Args:
- actual_str: The string we try to match with the items in regexes.
- regexes: The regular expressions we want to match against str.
- See "Notes" above for detailed notes on how this is interpreted.
- message: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches}
Fail the test unless the text matches the regular expression.
@@ -909,79 +581,6 @@ Fail the test unless the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements}
-
-Assert that two sequences have the same elements (in any order).
-
-This method, unlike assertItemsEqual, doesn't care about any
-duplicates in the expected and actual sequences.
-
- >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
- # Doesn't raise an AssertionError
-
-If possible, you should use assertItemsEqual instead of
-assertSameElements.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure}
-
-Asserts that two values contain the same structural content.
-
-The two arguments should be data trees consisting of trees of dicts and
-lists. They will be deeply compared by walking into the contents of dicts
-and lists; other items will be compared using the == operator.
-If the two structures differ in content, the failure message will indicate
-the location within the structures where the first difference is found.
-This may be helpful when comparing large structures.
-
-##### Args:
-
-
-* <b>`a`</b>: The first structure to compare.
-* <b>`b`</b>: The second structure to compare.
-* <b>`aname`</b>: Variable name to use for the first structure in assertion messages.
-* <b>`bname`</b>: Variable name to use for the second structure.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual}
-
-An approximate equality assertion for ordered sequences.
-
-Fail if the two sequences are unequal as determined by their value
-differences rounded to the given number of decimal places (default 7) and
-comparing to zero, or by comparing that the difference between each value
-in the two sequences is more than the given delta.
-
-Note that decimal places (from zero) are usually not the same as significant
-digits (measured from the most signficant digit).
-
-If the two sequences compare equal then they will automatically compare
-almost equal.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`places`</b>: The number of decimal places to compare.
-* <b>`msg`</b>: The message to be printed if the test fails.
-* <b>`delta`</b>: The OK difference between compared values.
-
-
-- - -
-
#### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual}
An equality assertion for ordered sequences (like lists and tuples).
@@ -1002,26 +601,6 @@ which can be indexed, has a length, and has an equality operator.
- - -
-#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith}
-
-An equality assertion for the beginning of ordered sequences.
-
-If prefix is an empty sequence, it will raise an error unless whole is also
-an empty sequence.
-
-If prefix is not a sequence, it will raise an error if the first element of
-whole does not match.
-
-##### Args:
-
-
-* <b>`prefix`</b>: A sequence expected at the beginning of the whole parameter.
-* <b>`whole`</b>: The sequence in which to look for prefix.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual}
A set-specific equality assertion.
@@ -1073,51 +652,6 @@ Assert that actual.startswith(expected_start) is True.
- - -
-#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered}
-
-Asserts that total ordering has been implemented correctly.
-
-For example, say you have a class A that compares only on its attribute x.
-Comparators other than __lt__ are omitted for brevity.
-
-class A(object):
- def __init__(self, x, y):
- self.x = x
- self.y = y
-
- def __hash__(self):
- return hash(self.x)
-
- def __lt__(self, other):
- try:
- return self.x < other.x
- except AttributeError:
- return NotImplemented
-
-assertTotallyOrdered will check that instances can be ordered correctly.
-For example,
-
-self.assertTotallyOrdered(
- [None], # None should come before everything else.
- [1], # Integers sort earlier.
- [A(1, 'a')],
- [A(2, 'b')], # 2 is after 1.
- [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
- [A(4, 'z')],
- ['foo']) # Strings sort last.
-
-##### Args:
-
-
-* <b>`*groups`</b>: A list of groups of elements. Each group of elements is a list
- of objects that are equal. The elements in each group must be less than
- the elements in the group after it. For example, these groups are
- totally ordered: [None], [1], [2, 2], [3].
-* <b>`**kwargs`</b>: optional msg keyword argument can be passed.
-
-
-- - -
-
#### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue}
Check that the expression is true.
@@ -1140,13 +674,6 @@ A tuple-specific equality assertion.
- - -
-#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual}
-
-Asserts that urls are equal, ignoring ordering of query params.
-
-
-- - -
-
#### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_}
Check that the expression is true.
@@ -1206,9 +733,9 @@ tearDown.
- - -
-#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail}
+#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail}
-Fail immediately with the given message, optionally prefixed.
+Fail immediately, with the given message.
- - -
@@ -1262,13 +789,6 @@ Fail immediately with the given message, optionally prefixed.
- - -
-#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties}
-
-Return any properties that the user has recorded.
-
-
-- - -
-
#### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir}
@@ -1283,20 +803,6 @@ Return any properties that the user has recorded.
- - -
-#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty}
-
-Record an arbitrary property for later use.
-
-##### Args:
-
-
-* <b>`property_name`</b>: str, name of property to record; must be a valid XML
- attribute name
-* <b>`property_value`</b>: value of property; must be valid XML attribute value
-
-
-- - -
-
#### `tf.test.TestCase.run(result=None)` {#TestCase.run}
@@ -1320,18 +826,11 @@ Hook method for setting up class fixture before running tests in the class.
#### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription}
-Format both the test method name and the first line of its docstring.
-
-If no docstring is given, only returns the method name.
-
-This method overrides unittest.TestCase.shortDescription(), which
-only returns the first line of the docstring, obscuring the name
-of the test upon failure.
-
-##### Returns:
-
+Returns a one-line description of the test, or None if no
+description has been provided.
-* <b>`desc`</b>: A short description of a test method.
+The default implementation of this method returns the first line of
+the specified test method's docstring.
- - -
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index eb7503dbb6..a95c9853bc 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -82,37 +82,37 @@ of the binary on Linux or Mac, you can follow these instructions:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc0-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc0-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc0-py2-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc0-py2-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc0-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc0-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc0-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc0-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc0-py3-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc0-py3-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py3-none-any.whl
```
Install TensorFlow:
@@ -136,24 +136,32 @@ You can now [test your installation](#test-the-tensorflow-installation).
### Pip installation on Windows
-TensorFlow supports only 64-bit Python 3.5 on Windows. We have tested
-the pip packages with the following distributions of Python:
+TensorFlow supports only 64-bit Python 3.5 on Windows. We have tested the pip packages
+with the following distributions of Python:
-* [Python 3.5 from python.org](https://www.python.org/downloads/release/python-352/)
* [Python 3.5 from Anaconda](https://www.continuum.io/downloads#windows)
+* [Python 3.5 from python.org](https://www.python.org/downloads/release/python-352/).
+
+ NOTE: TensorFlow requires `MSVCP140.DLL`, which may not be installed on your system.
+ If, when you `import tensorflow as tf`, you see an error about `No module named
+ "_pywrap_tensorflow"` and/or `DLL load failed`, check whether `MSVCP140.DLL` is in
+ your `%PATH%` and, if not, you should install the [Visual C++ 2015
+ redistributable](https://www.microsoft.com/en-us/download/details.aspx?id=53587)
+ (x64 version).
+
Both distributions include pip. To install the CPU-only version of
TensorFlow, enter the following command at a command prompt:
```bat
-C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-0.12.0rc0-cp35-cp35m-win_amd64.whl
+C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-0.12.0rc1-cp35-cp35m-win_amd64.whl
```
To install the GPU version of TensorFlow, enter the following command
at a command prompt:
```bat
-C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-0.12.0rc0-cp35-cp35m-win_amd64.whl
+C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-0.12.0rc1-cp35-cp35m-win_amd64.whl
```
You can now [test your installation](#test-the-tensorflow-installation).
@@ -208,37 +216,37 @@ Now, install TensorFlow just as you would for a regular Pip installation. First
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py3-none-any.whl
```
Finally install TensorFlow:
@@ -360,37 +368,37 @@ select the correct binary to install:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py3-none-any.whl
```
Finally install TensorFlow:
@@ -458,7 +466,7 @@ code.
code.
We also have tags with `latest` replaced by a released version (e.g.,
-`0.12.0-rc0-gpu`).
+`0.12.0-rc1-gpu`).
With Docker the installation is as follows:
@@ -860,7 +868,7 @@ $ bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_pack
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
# The name of the .whl file will depend on your platform.
-$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.12.0rc0-py2-none-any.whl
+$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.12.0rc1-py2-none-any.whl
```
## Optimizing CPU performance
@@ -1083,6 +1091,27 @@ Exception:
Solution: Add an `--ignore-installed` flag to the pip command.
+#### Cannot remove entries from nonexistent file: easy-install.pth
+
+If during a `pip` installation using an Anaconda Python distribution you encounter the error:
+
+```
+Cannot remove entries from nonexistent file <path-to-anaconda-installation>/anaconda[version]/lib/site-packages/easy-install.pth
+```
+
+1. Upgrade setuptools:
+`pip install --upgrade -I setuptools`
+
+2. Install TensorFlow again adding `--ignore-installed` flag:
+`pip install --ignore-installed --upgrade <tensorflow_url>`
+
+
+
+Step #1 might already solve the problem; however, if it still persists, execute step #2.
+
+This issue occurs with new Anaconda installations when `pip` tries to remove `easy-install.pth`.
+This file is not included in Anaconda packages, which causes the `pip` installation to fail.
+
### Linux issues
diff --git a/tensorflow/g3doc/how_tos/tool_developers/index.md b/tensorflow/g3doc/how_tos/tool_developers/index.md
index 0573af7a05..bcbd21b68c 100644
--- a/tensorflow/g3doc/how_tos/tool_developers/index.md
+++ b/tensorflow/g3doc/how_tos/tool_developers/index.md
@@ -34,11 +34,7 @@ definitions. If you see a standalone TensorFlow file representing a model, it's
likely to contain a serialized version of one of these `GraphDef` objects
saved out by the protobuf code.
-This generated code is used to save and load the GraphDef files from disk. A
-good example to look at as we dig into this is
-[graph_metrics.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/graph_metrics.py). This Python script takes a saved graph
-definition, and analyzes the model to estimate performance and resource
-statistics. The code that actually loads the model looks like this:
+This generated code is used to save and load the GraphDef files from disk. The code that actually loads the model looks like this:
```python
graph_def = graph_pb2.GraphDef()
@@ -67,7 +63,7 @@ There are actually two different formats that a ProtoBuf can be saved in.
TextFormat is a human-readable form, which makes it nice for debugging and
editing, but can get large when there's numerical data like weights stored in
it. You can see a small example of that in
-[graph_run_run2.pbtxt](https://github.com/tensorflow/tensorflow/blob/ae3c8479f88da1cd5636b974f653f27755cb0034/tensorflow/tensorboard/components/tf_tensorboard/test/data/graph_run_run2.pbtxt).
+[graph_run_run2.pbtxt](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tensorboard/components/tf_tensorboard/test/data/graph_run_run2.pbtxt).
Binary format files are a lot smaller than their text equivalents, even though
they're not as readable for us. In this script, we ask the user to supply a
diff --git a/tensorflow/g3doc/resources/bib.md b/tensorflow/g3doc/resources/bib.md
index acfdd18767..907f06161e 100644
--- a/tensorflow/g3doc/resources/bib.md
+++ b/tensorflow/g3doc/resources/bib.md
@@ -69,3 +69,19 @@ Yuan Yu, and Xiaoqiang Zheng.
TensorFlow: Large-scale machine learning on heterogeneous systems,
2015. Software available from tensorflow.org.
```
+
+If you use [TF.Learn](https://www.tensorflow.org/tutorials/tflearn/) in your research and would like to cite it, we suggest you cite the [whitepaper](https://arxiv.org/abs/1612.04251):
+
+```
+@article{tang2016tflearn,
+ title={TF.Learn: TensorFlow's High-level Module for Distributed Machine Learning},
+ author={Tang, Yuan},
+ journal={arXiv preprint arXiv:1612.04251},
+ year={2016}
+}
+```
+
+In textual form:
+```
+Tang, Yuan. "TF.Learn: TensorFlow's High-level Module for Distributed Machine Learning." arXiv preprint arXiv:1612.04251 (2016).
+``` \ No newline at end of file
diff --git a/tensorflow/g3doc/resources/index.md b/tensorflow/g3doc/resources/index.md
index b4dc63bb38..c56998523b 100644
--- a/tensorflow/g3doc/resources/index.md
+++ b/tensorflow/g3doc/resources/index.md
@@ -1,12 +1,17 @@
# Additional Resources
-## TensorFlow WhitePaper
+## TensorFlow WhitePapers
Additional details about the TensorFlow programming model and the underlying
implementation can be found in our white paper:
* [TensorFlow: Large-scale machine learning on heterogeneous systems](http://download.tensorflow.org/paper/whitepaper2015.pdf)
+
+A white paper is also available for [TF.Learn](https://www.tensorflow.org/tutorials/tflearn/):
+
+* [TF.Learn: TensorFlow's High-level Module for Distributed Machine Learning](https://arxiv.org/abs/1612.04251)
+
### Citation
If you use TensorFlow in your research and would like to cite the TensorFlow
diff --git a/tensorflow/g3doc/tutorials/estimators/index.md b/tensorflow/g3doc/tutorials/estimators/index.md
index 2fd1a8795c..46a0cf87a1 100644
--- a/tensorflow/g3doc/tutorials/estimators/index.md
+++ b/tensorflow/g3doc/tutorials/estimators/index.md
@@ -152,6 +152,8 @@ def maybe_download():
print("Training data is downloaded to %s" % train_file_name)
if FLAGS.test_data:
+ test_file_name = FLAGS.test_data
+ else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.urlretrieve("http://download.tensorflow.org/data/abalone_test.csv", test_file.name)
test_file_name = test_file.name
@@ -379,7 +381,7 @@ tf.contrib.layers provides the following convenience functions for constructing
fully connected layers:
* `relu(inputs, num_outputs)`. Create a layer of `num_outputs` nodes fully
- connected to the previous layer `inputs` with a [ReLu activation
+ connected to the previous layer `inputs` with a [ReLU activation
function](https://en.wikipedia.org/wiki/Rectifier_\(neural_networks\))
([tf.nn.relu](../../api_docs/python/nn.md#relu)):
@@ -388,7 +390,7 @@ fully connected layers:
```
* `relu6(inputs, num_outputs)`. Create a layer of `num_outputs` nodes fully
- connected to the previous layer `hidden_layer` with a ReLu 6 activation
+ connected to the previous layer `hidden_layer` with a ReLU 6 activation
function ([tf.nn.relu6](../../api_docs/python/nn.md#relu6)):
```python
@@ -448,7 +450,7 @@ def model_fn(features, targets, mode, params):
Here, because you'll be passing the abalone `Datasets` directly to `fit()`,
`evaluate()`, and `predict()` via `x` and `y` arguments, the input layer is the
`features` `Tensor` passed to the `model_fn`. The network contains two hidden
-layers, each with 10 nodes and a ReLu activation function. The output layer
+layers, each with 10 nodes and a ReLU activation function. The output layer
contains no activation function, and is
[reshaped](../../api_docs/python/array_ops.md#reshape) to a one-dimensional
tensor to capture the model's predictions, which are stored in
diff --git a/tensorflow/g3doc/tutorials/mnist/tf/index.md b/tensorflow/g3doc/tutorials/mnist/tf/index.md
index 4f376c3e3d..3c7a4ba697 100644
--- a/tensorflow/g3doc/tutorials/mnist/tf/index.md
+++ b/tensorflow/g3doc/tutorials/mnist/tf/index.md
@@ -98,7 +98,7 @@ The `inference()` function builds the graph as far as needed to
return the tensor that would contain the output predictions.
It takes the images placeholder as input and builds on top
-of it a pair of fully connected layers with [ReLu](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)) activation followed by a ten
+of it a pair of fully connected layers with [ReLU](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)) activation followed by a ten
node linear layer specifying the output logits.
Each layer is created beneath a unique [`tf.name_scope`](../../../api_docs/python/framework.md#name_scope)
diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD
index 3f8af14118..98666c2e62 100644
--- a/tensorflow/python/BUILD
+++ b/tensorflow/python/BUILD
@@ -13,6 +13,7 @@ exports_files(["LICENSE"])
load("//tensorflow:tensorflow.bzl", "if_not_windows")
load("//tensorflow:tensorflow.bzl", "tf_cuda_library")
load("//tensorflow:tensorflow.bzl", "tf_gen_op_wrapper_py")
+load("//tensorflow:tensorflow.bzl", "py_test")
load("//tensorflow:tensorflow.bzl", "tf_py_test")
load("//tensorflow:tensorflow.bzl", "py_tests")
load("//tensorflow:tensorflow.bzl", "tf_py_wrap_cc")
diff --git a/tensorflow/python/client/device_lib.i b/tensorflow/python/client/device_lib.i
index ee3230eb1f..51c04584a5 100644
--- a/tensorflow/python/client/device_lib.i
+++ b/tensorflow/python/client/device_lib.i
@@ -37,7 +37,6 @@ static std::vector<string> ListDevices(TF_Status* out_status) {
std::vector<std::unique_ptr<Device>> device_holder(devices.begin(), devices.end());
-
for (const Device* device : devices) {
const DeviceAttributes& attr = device->attributes();
string attr_serialized;
diff --git a/tensorflow/python/debug/BUILD b/tensorflow/python/debug/BUILD
index 5b9945a2fd..54e5e5072b 100644
--- a/tensorflow/python/debug/BUILD
+++ b/tensorflow/python/debug/BUILD
@@ -14,6 +14,7 @@ licenses(["notice"]) # Apache 2.0
exports_files(["LICENSE"])
load("//tensorflow:tensorflow.bzl", "cuda_py_test")
+load("//tensorflow:tensorflow.bzl", "py_test")
py_library(
name = "debug_py",
diff --git a/tensorflow/python/framework/dtypes.py b/tensorflow/python/framework/dtypes.py
index a91223749d..78a77d38ed 100644
--- a/tensorflow/python/framework/dtypes.py
+++ b/tensorflow/python/framework/dtypes.py
@@ -59,6 +59,7 @@ class DType(object):
@@name
@@base_dtype
@@real_dtype
+ @@is_bool
@@is_floating
@@is_complex
@@is_integer
@@ -142,6 +143,11 @@ class DType(object):
return self._type_enum
@property
+ def is_bool(self):
+    """Returns whether this is a boolean data type."""
+ return self.base_dtype == bool
+
+ @property
def is_integer(self):
"""Returns whether this is a (non-quantized) integer type."""
return (self.is_numpy_compatible and not self.is_quantized and
diff --git a/tensorflow/python/kernel_tests/argmax_op_test.py b/tensorflow/python/kernel_tests/argmax_op_test.py
index c24078a35a..1ad3dba2ae 100644
--- a/tensorflow/python/kernel_tests/argmax_op_test.py
+++ b/tensorflow/python/kernel_tests/argmax_op_test.py
@@ -54,7 +54,7 @@ class ArgMaxTest(tf.test.TestCase):
x = np.asarray(100*np.random.randn(3, 2, 4, 5, 6), dtype=dtype)
# Check that argmin and argmax match numpy along all dimensions
- for dim in range(5):
+ for dim in range(-5, 5):
self._testBothArg(tf.argmax, x, dim, x.argmax(dim))
self._testBothArg(tf.argmin, x, dim, x.argmin(dim))
diff --git a/tensorflow/python/kernel_tests/atrous_convolution_test.py b/tensorflow/python/kernel_tests/atrous_convolution_test.py
index 3bd076b5bb..b52b648f79 100644
--- a/tensorflow/python/kernel_tests/atrous_convolution_test.py
+++ b/tensorflow/python/kernel_tests/atrous_convolution_test.py
@@ -100,8 +100,8 @@ class AtrousConvolutionTest(tf.test.TestCase):
dilation_rate=[rate])
def testAtrousConvolutionNC(self):
- if tf.test.is_gpu_available():
- # "NCW" and "NCHW" formats are not currently supported on CPU.
+ if tf.test.is_gpu_available(cuda_only=True):
+ # "NCW" and "NCHW" formats are currently supported only on CUDA.
with self.test_session(use_gpu=True):
for padding in ["SAME", "VALID"]:
self._test_atrous_convolution(
diff --git a/tensorflow/python/kernel_tests/bias_op_test.py b/tensorflow/python/kernel_tests/bias_op_test.py
index 862948610d..4c8ff58af0 100644
--- a/tensorflow/python/kernel_tests/bias_op_test.py
+++ b/tensorflow/python/kernel_tests/bias_op_test.py
@@ -29,8 +29,8 @@ def GetTestConfigs():
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
- if tf.test.is_gpu_available():
- # "NCHW" format is not currently supported on CPU.
+ if tf.test.is_gpu_available(cuda_only=True):
+ # "NCHW" format is currently only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs
@@ -89,7 +89,7 @@ class BiasAddTest(tf.test.TestCase):
self._testBias(np_inputs, np_bias, use_gpu=False)
if np_inputs.dtype in [np.float16, np.float32, np.float64]:
self._testBias(np_inputs, np_bias, use_gpu=True)
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)
def testInputDims(self):
diff --git a/tensorflow/python/kernel_tests/conv2d_transpose_test.py b/tensorflow/python/kernel_tests/conv2d_transpose_test.py
index 77f783d915..d260660e10 100644
--- a/tensorflow/python/kernel_tests/conv2d_transpose_test.py
+++ b/tensorflow/python/kernel_tests/conv2d_transpose_test.py
@@ -159,8 +159,8 @@ class Conv2DTransposeTest(tf.test.TestCase):
self.assertLess(err, err_tolerance)
def testConv2DTransposeSingleStrideNCHW(self):
- # `NCHW` data fomat is only supported for `GPU` device.
- if tf.test.is_gpu_available():
+    # `NCHW` data format is only supported for CUDA device.
+ if tf.test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 1, 1]
@@ -192,8 +192,8 @@ class Conv2DTransposeTest(tf.test.TestCase):
self.assertAllClose(target, value[n, k, h, w])
def testConv2DTransposeSameNCHW(self):
- # `NCHW` data fomat is only supported for `GPU` device.
- if tf.test.is_gpu_available():
+    # `NCHW` data format is only supported for CUDA device.
+ if tf.test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]
@@ -226,8 +226,8 @@ class Conv2DTransposeTest(tf.test.TestCase):
self.assertAllClose(target, value[n, k, h, w])
def testConv2DTransposeValidNCHW(self):
- # `NCHW` data fomat is only supported for `GPU` device.
- if tf.test.is_gpu_available():
+    # `NCHW` data format is only supported for CUDA device.
+ if tf.test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]
diff --git a/tensorflow/python/kernel_tests/pool_test.py b/tensorflow/python/kernel_tests/pool_test.py
index 8136efe936..79450c3f70 100644
--- a/tensorflow/python/kernel_tests/pool_test.py
+++ b/tensorflow/python/kernel_tests/pool_test.py
@@ -232,8 +232,8 @@ class PoolingTest(tf.test.TestCase):
strides=strides)
def testPoolNC(self):
- if tf.test.is_gpu_available():
- # "NC*" format is not currently supported on CPU.
+ if tf.test.is_gpu_available(cuda_only=True):
+ # "NC*" format is currently only supported on CUDA.
with self.test_session(use_gpu=True):
for padding in ["SAME", "VALID"]:
self._test(input_shape=[2, 2, 9],
diff --git a/tensorflow/python/kernel_tests/seq2seq_test.py b/tensorflow/python/kernel_tests/seq2seq_test.py
deleted file mode 100644
index 03b5f68659..0000000000
--- a/tensorflow/python/kernel_tests/seq2seq_test.py
+++ /dev/null
@@ -1,770 +0,0 @@
-# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Tests for functional style sequence-to-sequence models."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import math
-import random
-
-import numpy as np
-import tensorflow as tf
-
-
-class Seq2SeqTest(tf.test.TestCase):
-
- def testRNNDecoder(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- inp = [tf.constant(0.5, shape=[2, 2])] * 2
- _, enc_state = tf.nn.rnn(
- tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
- dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
- cell = tf.nn.rnn_cell.OutputProjectionWrapper(
- tf.nn.rnn_cell.GRUCell(2), 4)
- dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 4), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].shape)
-
- def testBasicRNNSeq2Seq(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- inp = [tf.constant(0.5, shape=[2, 2])] * 2
- dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
- cell = tf.nn.rnn_cell.OutputProjectionWrapper(
- tf.nn.rnn_cell.GRUCell(2), 4)
- dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 4), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].shape)
-
- def testTiedRNNSeq2Seq(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- inp = [tf.constant(0.5, shape=[2, 2])] * 2
- dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
- cell = tf.nn.rnn_cell.OutputProjectionWrapper(
- tf.nn.rnn_cell.GRUCell(2), 4)
- dec, mem = tf.nn.seq2seq.tied_rnn_seq2seq(inp, dec_inp, cell)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 4), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual(1, len(res))
- self.assertEqual((2, 2), res[0].shape)
-
- def testEmbeddingRNNDecoder(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- inp = [tf.constant(0.5, shape=[2, 2])] * 2
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- _, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
- dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
- dec, mem = tf.nn.seq2seq.embedding_rnn_decoder(
- dec_inp, enc_state, cell, num_symbols=4, embedding_size=2)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 2), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual(1, len(res))
- self.assertEqual((2, 2), res[0].c.shape)
- self.assertEqual((2, 2), res[0].h.shape)
-
- def testEmbeddingRNNSeq2Seq(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
- dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 5), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].c.shape)
- self.assertEqual((2, 2), res[0].h.shape)
-
- # Test with state_is_tuple=False.
- with tf.variable_scope("no_tuple"):
- cell1 = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
- dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
- enc_inp, dec_inp, cell1, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 5), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 4), res[0].shape)
-
- # Test externally provided output projection.
- w = tf.get_variable("proj_w", [2, 5])
- b = tf.get_variable("proj_b", [5])
- with tf.variable_scope("proj_seq2seq"):
- dec, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 2), res[0].shape)
-
- # Test that previous-feeding model ignores inputs after the first.
- dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
- with tf.variable_scope("other"):
- d3, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
- enc_inp, dec_inp2, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2,
- feed_previous=tf.constant(True))
- sess.run([tf.global_variables_initializer()])
- tf.get_variable_scope().reuse_variables()
- d1, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2, feed_previous=True)
- d2, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
- enc_inp, dec_inp2, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2, feed_previous=True)
- res1 = sess.run(d1)
- res2 = sess.run(d2)
- res3 = sess.run(d3)
- self.assertAllClose(res1, res2)
- self.assertAllClose(res1, res3)
-
- def testEmbeddingTiedRNNSeq2Seq(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
- dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 5), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].c.shape)
- self.assertEqual((2, 2), res[0].h.shape)
-
- # Test when num_decoder_symbols is provided, the size of decoder output
- # is num_decoder_symbols.
- with tf.variable_scope("decoder_symbols_seq2seq"):
- dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_symbols=5, num_decoder_symbols=3,
- embedding_size=2)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 3), res[0].shape)
-
- # Test externally provided output projection.
- w = tf.get_variable("proj_w", [2, 5])
- b = tf.get_variable("proj_b", [5])
- with tf.variable_scope("proj_seq2seq"):
- dec, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
- output_projection=(w, b))
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 2), res[0].shape)
-
- # Test that previous-feeding model ignores inputs after the first.
- dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3
- with tf.variable_scope("other"):
- d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
- enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
- feed_previous=tf.constant(True))
- sess.run([tf.global_variables_initializer()])
- tf.get_variable_scope().reuse_variables()
- d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
- feed_previous=True)
- d2, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
- enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
- feed_previous=True)
- res1 = sess.run(d1)
- res2 = sess.run(d2)
- res3 = sess.run(d3)
- self.assertAllClose(res1, res2)
- self.assertAllClose(res1, res3)
-
- def testAttentionDecoder1(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- cell = tf.nn.rnn_cell.GRUCell(2)
- inp = [tf.constant(0.5, shape=[2, 2])] * 2
- enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
- attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
- for e in enc_outputs])
- dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
- dec, mem = tf.nn.seq2seq.attention_decoder(
- dec_inp, enc_state,
- attn_states, cell, output_size=4)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 4), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].shape)
-
- def testAttentionDecoder2(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- cell = tf.nn.rnn_cell.GRUCell(2)
- inp = [tf.constant(0.5, shape=[2, 2])] * 2
- enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
- attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
- for e in enc_outputs])
- dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
- dec, mem = tf.nn.seq2seq.attention_decoder(
- dec_inp, enc_state,
- attn_states, cell, output_size=4,
- num_heads=2)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 4), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].shape)
-
- def testDynamicAttentionDecoder1(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- cell = tf.nn.rnn_cell.GRUCell(2)
- inp = tf.constant(0.5, shape=[2, 2, 2])
- enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32)
- attn_states = enc_outputs
- dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
- dec, mem = tf.nn.seq2seq.attention_decoder(
- dec_inp, enc_state,
- attn_states, cell, output_size=4)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 4), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].shape)
-
- def testDynamicAttentionDecoder2(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- cell = tf.nn.rnn_cell.GRUCell(2)
- inp = tf.constant(0.5, shape=[2, 2, 2])
- enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32)
- attn_states = enc_outputs
- dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
- dec, mem = tf.nn.seq2seq.attention_decoder(
- dec_inp, enc_state,
- attn_states, cell, output_size=4,
- num_heads=2)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 4), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].shape)
-
- def testAttentionDecoderStateIsTuple(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
- state_is_tuple=True)
- inp = [tf.constant(0.5, shape=[2, 2])] * 2
- enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
- attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
- for e in enc_outputs])
- dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
- dec, mem = tf.nn.seq2seq.attention_decoder(
- dec_inp, enc_state,
- attn_states, cell, output_size=4)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 4), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual(2, len(res[0]))
- self.assertEqual((2, 2), res[0][0].c.shape)
- self.assertEqual((2, 2), res[0][0].h.shape)
- self.assertEqual((2, 2), res[0][1].c.shape)
- self.assertEqual((2, 2), res[0][1].h.shape)
-
- # pylint: disable=unused-variable,invalid-name
- def testDynamicAttentionDecoderStateIsTuple(self):
- with self.test_session() as sess:
- with tf.variable_scope(
- "root", initializer=tf.constant_initializer(0.5)):
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
- state_is_tuple=True)
- inp = tf.constant(0.5, shape=[2, 2, 2])
- enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
- attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
- for e in enc_outputs])
- dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
- dec, mem = tf.nn.seq2seq.attention_decoder(
- dec_inp, enc_state,
- attn_states, cell, output_size=4)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 4), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual(2, len(res[0]))
- self.assertEqual((2, 2), res[0][0].c.shape)
- self.assertEqual((2, 2), res[0][0].h.shape)
- self.assertEqual((2, 2), res[0][1].c.shape)
- self.assertEqual((2, 2), res[0][1].h.shape)
-
- def testEmbeddingAttentionDecoder(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- inp = [tf.constant(0.5, shape=[2, 2])] * 2
- cell = tf.nn.rnn_cell.GRUCell(2)
- enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
- attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
- for e in enc_outputs])
- dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
- dec, mem = tf.nn.seq2seq.embedding_attention_decoder(
- dec_inp, enc_state, attn_states, cell, num_symbols=4,
- embedding_size=2, output_size=3)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 3), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].shape)
-
- def testEmbeddingAttentionSeq2Seq(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
- dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 5), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 2), res[0].c.shape)
- self.assertEqual((2, 2), res[0].h.shape)
-
- # Test with state_is_tuple=False.
- with tf.variable_scope("no_tuple"):
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
- dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2)
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 5), res[0].shape)
-
- res = sess.run([mem])
- self.assertEqual((2, 4), res[0].shape)
-
- # Test externally provided output projection.
- w = tf.get_variable("proj_w", [2, 5])
- b = tf.get_variable("proj_b", [5])
- with tf.variable_scope("proj_seq2seq"):
- dec, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
- sess.run([tf.global_variables_initializer()])
- res = sess.run(dec)
- self.assertEqual(3, len(res))
- self.assertEqual((2, 2), res[0].shape)
-
- # Test that previous-feeding model ignores inputs after the first.
- dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
- with tf.variable_scope("other"):
- d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp2, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2,
- feed_previous=tf.constant(True))
- sess.run([tf.global_variables_initializer()])
- tf.get_variable_scope().reuse_variables()
- d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2, feed_previous=True)
- d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp2, cell, num_encoder_symbols=2,
- num_decoder_symbols=5, embedding_size=2, feed_previous=True)
- res1 = sess.run(d1)
- res2 = sess.run(d2)
- res3 = sess.run(d3)
- self.assertAllClose(res1, res2)
- self.assertAllClose(res1, res3)
-
- def testOne2ManyRNNSeq2Seq(self):
- with self.test_session() as sess:
- with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
- enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
- dec_inp_dict = {}
- dec_inp_dict["0"] = [
- tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
- dec_inp_dict["1"] = [
- tf.constant(i, tf.int32, shape=[2]) for i in range(4)]
- dec_symbols_dict = {"0": 5, "1": 6}
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- outputs_dict, state_dict = tf.nn.seq2seq.one2many_rnn_seq2seq(
- enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2)
-
- sess.run([tf.global_variables_initializer()])
- res = sess.run(outputs_dict["0"])
- self.assertEqual(3, len(res))
- self.assertEqual((2, 5), res[0].shape)
- res = sess.run(outputs_dict["1"])
- self.assertEqual(4, len(res))
- self.assertEqual((2, 6), res[0].shape)
- res = sess.run([state_dict["0"]])
- self.assertEqual((2, 2), res[0].c.shape)
- self.assertEqual((2, 2), res[0].h.shape)
- res = sess.run([state_dict["1"]])
- self.assertEqual((2, 2), res[0].c.shape)
- self.assertEqual((2, 2), res[0].h.shape)
-
- # Test that previous-feeding model ignores inputs after the first, i.e.
- # dec_inp_dict2 has different inputs from dec_inp_dict after the first
- # time-step.
- dec_inp_dict2 = {}
- dec_inp_dict2["0"] = [
- tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
- dec_inp_dict2["1"] = [
- tf.constant(0, tf.int32, shape=[2]) for _ in range(4)]
- with tf.variable_scope("other"):
- outputs_dict3, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
- enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
- embedding_size=2, feed_previous=tf.constant(True))
- sess.run([tf.global_variables_initializer()])
- tf.get_variable_scope().reuse_variables()
- outputs_dict1, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
- enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict,
- embedding_size=2, feed_previous=True)
- outputs_dict2, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
- enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
- embedding_size=2, feed_previous=True)
- res1 = sess.run(outputs_dict1["0"])
- res2 = sess.run(outputs_dict2["0"])
- res3 = sess.run(outputs_dict3["0"])
- self.assertAllClose(res1, res2)
- self.assertAllClose(res1, res3)
-
- def testSequenceLoss(self):
- with self.test_session() as sess:
- logits = [tf.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
- targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
- weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
-
- average_loss_per_example = tf.nn.seq2seq.sequence_loss(
- logits, targets, weights,
- average_across_timesteps=True,
- average_across_batch=True)
- res = sess.run(average_loss_per_example)
- self.assertAllClose(1.60944, res)
-
- average_loss_per_sequence = tf.nn.seq2seq.sequence_loss(
- logits, targets, weights,
- average_across_timesteps=False,
- average_across_batch=True)
- res = sess.run(average_loss_per_sequence)
- self.assertAllClose(4.828314, res)
-
- total_loss = tf.nn.seq2seq.sequence_loss(
- logits, targets, weights,
- average_across_timesteps=False,
- average_across_batch=False)
- res = sess.run(total_loss)
- self.assertAllClose(9.656628, res)
-
- def testSequenceLossByExample(self):
- with self.test_session() as sess:
- output_classes = 5
- logits = [tf.constant(i + 0.5, shape=[2, output_classes])
- for i in range(3)]
- targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
- weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
-
- average_loss_per_example = tf.nn.seq2seq.sequence_loss_by_example(
- logits, targets, weights,
- average_across_timesteps=True)
- res = sess.run(average_loss_per_example)
- self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
-
- loss_per_sequence = tf.nn.seq2seq.sequence_loss_by_example(
- logits, targets, weights,
- average_across_timesteps=False)
- res = sess.run(loss_per_sequence)
- self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
-
- def testModelWithBucketsScopeAndLoss(self):
- """Test that variable scope reuse is not reset after model_with_buckets."""
- classes = 10
- buckets = [(4, 4), (8, 8)]
-
- with self.test_session():
- # Here comes a sample Seq2Seq model using GRU cells.
- def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
- """Example sequence-to-sequence model that uses GRU cells."""
- def GRUSeq2Seq(enc_inp, dec_inp):
- cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
- state_is_tuple=True)
- return tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols=classes,
- num_decoder_symbols=classes, embedding_size=24)
- targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
- return tf.nn.seq2seq.model_with_buckets(
- enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
- per_example_loss=per_example_loss)
-
- # Now we construct the copy model.
- inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
- out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
- weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
- with tf.variable_scope("root"):
- _, losses1 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=False)
- # Now check that we did not accidentally set reuse.
- self.assertEqual(False, tf.get_variable_scope().reuse)
- # Construct one more model with per-example loss.
- tf.get_variable_scope().reuse_variables()
- _, losses2 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=True)
- # First loss is scalar, the second one is a 1-dimensinal tensor.
- self.assertEqual([], losses1[0].get_shape().as_list())
- self.assertEqual([None], losses2[0].get_shape().as_list())
-
- def testModelWithBuckets(self):
- """Larger tests that does full sequence-to-sequence model training."""
- # We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
- classes = 10
- buckets = [(4, 4), (8, 8)]
- perplexities = [[], []] # Results for each bucket.
- tf.set_random_seed(111)
- random.seed(111)
- np.random.seed(111)
-
- with self.test_session() as sess:
- # We use sampled softmax so we keep output projection separate.
- w = tf.get_variable("proj_w", [24, classes])
- w_t = tf.transpose(w)
- b = tf.get_variable("proj_b", [classes])
- # Here comes a sample Seq2Seq model using GRU cells.
- def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
- """Example sequence-to-sequence model that uses GRU cells."""
- def GRUSeq2Seq(enc_inp, dec_inp):
- cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
- state_is_tuple=True)
- return tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols=classes,
- num_decoder_symbols=classes, embedding_size=24,
- output_projection=(w, b))
- targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
- def SampledLoss(labels, inputs):
- labels = tf.reshape(labels, [-1, 1])
- return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes)
- return tf.nn.seq2seq.model_with_buckets(
- enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
- softmax_loss_function=SampledLoss)
-
- # Now we construct the copy model.
- batch_size = 8
- inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
- out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
- weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
- with tf.variable_scope("root"):
- _, losses = SampleGRUSeq2Seq(inp, out, weights)
- updates = []
- params = tf.global_variables()
- optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
- for i in range(len(buckets)):
- full_grads = tf.gradients(losses[i], params)
- grads, _ = tf.clip_by_global_norm(full_grads, 30.0)
- update = optimizer.apply_gradients(zip(grads, params))
- updates.append(update)
- sess.run([tf.global_variables_initializer()])
- steps = 6
- for _ in range(steps):
- bucket = random.choice(np.arange(len(buckets)))
- length = buckets[bucket][0]
- i = [np.array([np.random.randint(9) + 1 for _ in range(batch_size)],
- dtype=np.int32) for _ in range(length)]
- # 0 is our "GO" symbol here.
- o = [np.array([0] * batch_size, dtype=np.int32)] + i
- feed = {}
- for i1, i2, o1, o2 in zip(inp[:length], i[:length],
- out[:length], o[:length]):
- feed[i1.name] = i2
- feed[o1.name] = o2
- if length < 8: # For the 4-bucket, we need the 5th as target.
- feed[out[length].name] = o[length]
- res = sess.run([updates[bucket], losses[bucket]], feed)
- perplexities[bucket].append(math.exp(float(res[1])))
- for bucket in range(len(buckets)):
- if len(perplexities[bucket]) > 1: # Assert that perplexity went down.
- self.assertLess(perplexities[bucket][-1], perplexities[bucket][0])
-
- def testModelWithBooleanFeedPrevious(self):
- """Test the model behavior when feed_previous is True.
-
- For example, the following two cases have the same effect:
- - Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
- a `embedding_rnn_decoder` with `feed_previous=True` and
- `update_embedding_for_previous=True`. The decoder is fed with "<Go>"
- and outputs "A, B, C".
- - Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
- is fed with "<Go>, A, B".
- """
- num_encoder_symbols = 3
- num_decoder_symbols = 5
- batch_size = 2
- num_enc_timesteps = 2
- num_dec_timesteps = 3
-
- def TestModel(seq2seq):
- with self.test_session(graph=tf.Graph()) as sess:
- tf.set_random_seed(111)
- random.seed(111)
- np.random.seed(111)
-
- enc_inp = [tf.constant(i + 1, tf.int32, shape=[batch_size])
- for i in range(num_enc_timesteps)]
- dec_inp_fp_true = [tf.constant(i, tf.int32, shape=[batch_size])
- for i in range(num_dec_timesteps)]
- dec_inp_holder_fp_false = [tf.placeholder(tf.int32, shape=[batch_size])
- for _ in range(num_dec_timesteps)]
- targets = [tf.constant(i + 1, tf.int32, shape=[batch_size])
- for i in range(num_dec_timesteps)]
- weights = [tf.constant(1.0, shape=[batch_size])
- for i in range(num_dec_timesteps)]
-
- def ForwardBackward(enc_inp, dec_inp, feed_previous):
- scope_name = "fp_{}".format(feed_previous)
- with tf.variable_scope(scope_name):
- dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
- net_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
- scope_name)
- optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
- update_op = optimizer.minimize(
- tf.nn.seq2seq.sequence_loss(dec_op, targets, weights),
- var_list=net_variables)
- return dec_op, update_op, net_variables
-
- dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
- enc_inp, dec_inp_fp_true, feed_previous=True)
- _, update_fp_false, variables_fp_false = ForwardBackward(
- enc_inp, dec_inp_holder_fp_false, feed_previous=False)
-
- sess.run(tf.global_variables_initializer())
-
- # We only check consistencies between the variables existing in both
- # the models with True and False feed_previous. Variables created by
- # the loop_function in the model with True feed_previous are ignored.
- v_false_name_dict = {v.name.split("/", 1)[-1]: v
- for v in variables_fp_false}
- matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]])
- for v in variables_fp_true]
- for v_true, v_false in matched_variables:
- sess.run(tf.assign(v_false, v_true))
-
- # Take the symbols generated by the decoder with feed_previous=True as
- # the true input symbols for the decoder with feed_previous=False.
- dec_fp_true = sess.run(dec_op_fp_true)
- output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
- dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
- output_symbols_fp_true[:-1]))
- sess.run(update_fp_true)
- sess.run(update_fp_false,
- {holder: inp for holder, inp in zip(dec_inp_holder_fp_false,
- dec_inp_fp_false)})
-
- for v_true, v_false in matched_variables:
- self.assertAllClose(v_true.eval(), v_false.eval())
-
- def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- return tf.nn.seq2seq.embedding_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols,
- num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
-
- def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
- return tf.nn.seq2seq.embedding_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols,
- num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
-
- def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
- feed_previous=feed_previous)
-
- def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
- return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
- enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
- feed_previous=feed_previous)
-
- def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
- return tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols,
- num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
-
- def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
- cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
- return tf.nn.seq2seq.embedding_attention_seq2seq(
- enc_inp, dec_inp, cell, num_encoder_symbols,
- num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
-
- for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
- EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
- EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
- TestModel(model)
-
-
-if __name__ == "__main__":
- tf.test.main()
diff --git a/tensorflow/python/kernel_tests/variable_scope_test.py b/tensorflow/python/kernel_tests/variable_scope_test.py
index 0c524a7f80..58772d9a23 100644
--- a/tensorflow/python/kernel_tests/variable_scope_test.py
+++ b/tensorflow/python/kernel_tests/variable_scope_test.py
@@ -22,6 +22,7 @@ import numpy
import tensorflow as tf
from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import init_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
@@ -95,6 +96,21 @@ class VariableScopeTest(tf.test.TestCase):
with self.assertRaises(TypeError):
tf.get_variable("x", initializer={})
+ def testInitFromNonInitializer(self):
+ with self.test_session() as sess:
+ # Test various dtypes with zeros initializer as follows:
+ types = [tf.int8, tf.uint8, tf.int16, tf.uint16, tf.int32, tf.int64,
+ tf.bool]
+
+ # Use different variable_name to distinguish various dtypes
+ for (i, dtype) in enumerate(types):
+ x = tf.get_variable(name='x%d' % i, shape=(3, 4), dtype=dtype)
+ y = tf.get_variable(name='y%d' % i, shape=(3, 4), dtype=dtype,
+ initializer=init_ops.zeros_initializer(dtype=dtype))
+
+ tf.global_variables_initializer().run()
+ self.assertAllEqual(x.eval(), y.eval())
+
def testVarScopeCachingDevice(self):
with self.test_session():
caching_device = "/job:moo"
@@ -672,6 +688,27 @@ def axis0_into3_partitioner(shape=None, **unused_kwargs):
class VariableScopeWithPartitioningTest(tf.test.TestCase):
+ def testInitFromNonInitializer(self):
+ with self.test_session() as sess:
+ # Test various dtypes with zeros initializer as follows:
+ types = [tf.int8, tf.uint8, tf.int16, tf.uint16, tf.int32, tf.int64,
+ tf.bool]
+
+ # Use different variable_name to distinguish various dtypes
+ for (i, dtype) in enumerate(types):
+ x = tf.get_variable(name='x%d' % i, shape=(3, 4), dtype=dtype,
+ partitioner=axis0_into2_partitioner)
+ y = tf.get_variable(name='y%d' % i, shape=(6, 4), dtype=dtype,
+ partitioner=axis0_into2_partitioner,
+ initializer=init_ops.zeros_initializer(dtype=dtype))
+
+ tf.global_variables_initializer().run()
+ # x and y would become var list after partition
+ val_x = sess.run(list(x))
+ val_y = sess.run(list(y))
+
+ self.assertAllEqual(val_x, val_y)
+
def testResultNameMatchesRequested(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
v = tf.get_variable("name0", shape=(3, 1, 1))
diff --git a/tensorflow/python/kernel_tests/variables_test.py b/tensorflow/python/kernel_tests/variables_test.py
index fa7071d042..38c84cdef1 100644
--- a/tensorflow/python/kernel_tests/variables_test.py
+++ b/tensorflow/python/kernel_tests/variables_test.py
@@ -366,6 +366,14 @@ class VariablesTestCase(tf.test.TestCase):
for i in v2.initializer.inputs:
self.assertEqual(expected_group_v2, i.op.colocation_groups())
+ def testLoad(self):
+ with self.test_session():
+ var = tf.Variable(np.zeros((5,5), np.float32))
+ tf.global_variables_initializer().run()
+ var.load(np.ones((5, 5), np.float32))
+
+ self.assertAllClose(np.ones((5, 5), np.float32), var.eval())
+
class IsInitializedTest(tf.test.TestCase):
diff --git a/tensorflow/python/ops/init_ops.py b/tensorflow/python/ops/init_ops.py
index b18cf1376f..fc8adf6421 100644
--- a/tensorflow/python/ops/init_ops.py
+++ b/tensorflow/python/ops/init_ops.py
@@ -66,7 +66,8 @@ def zeros_initializer(dtype=dtypes.float32):
"""Returns an initializer that generates tensors initialized to 0."""
def _initializer(shape, dtype=dtype, partition_info=None):
- return constant_op.constant(0, dtype=dtype, shape=shape)
+ return constant_op.constant(False if dtype is dtypes.bool else 0,
+ dtype=dtype, shape=shape)
return _initializer
diff --git a/tensorflow/python/ops/nn_fused_batchnorm_test.py b/tensorflow/python/ops/nn_fused_batchnorm_test.py
index 35b4ec3134..e366c76770 100644
--- a/tensorflow/python/ops/nn_fused_batchnorm_test.py
+++ b/tensorflow/python/ops/nn_fused_batchnorm_test.py
@@ -139,66 +139,66 @@ class BatchNormalizationTest(tf.test.TestCase):
def testInference(self):
x_shape = [1, 1, 6, 1]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_inference(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [131], use_gpu=True, data_format='NCHW')
self._test_inference(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [6], use_gpu=False, data_format='NHWC')
def testTraining(self):
x_shape = [1, 1, 6, 1]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_training(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [131], use_gpu=True, data_format='NCHW')
self._test_training(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [6], use_gpu=False, data_format='NHWC')
def testBatchNormGrad(self):
x_shape = [1, 1, 6, 1]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_gradient(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_gradient(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_gradient(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_gradient(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_gradient(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_gradient(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [7, 9, 13, 6]
- if tf.test.is_gpu_available():
+ if tf.test.is_gpu_available(cuda_only=True):
self._test_gradient(x_shape, [9], use_gpu=True, data_format='NCHW')
self._test_gradient(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_gradient(x_shape, [6], use_gpu=False, data_format='NHWC')
diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py
index 6f46fd633e..fa7ac4eaef 100644
--- a/tensorflow/python/ops/variable_scope.py
+++ b/tensorflow/python/ops/variable_scope.py
@@ -526,9 +526,14 @@ class _VariableStore(object):
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
+ # Create the tensor to initialize the variable with default value.
if initializer is None:
- init = init_ops.uniform_unit_scaling_initializer()
- init_shape = var_shape
+ init, initializing_from_value = self._get_default_initializer(
+ name=name, shape=shape, dtype=dtype)
+ if initializing_from_value:
+ init_shape = None
+ else:
+ init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
@@ -653,9 +658,10 @@ class _VariableStore(object):
raise ValueError("Shape of a new variable (%s) must be fully defined, "
"but instead was %s." % (name, shape))
- # Create the tensor to initialize the variable.
+ # Create the tensor to initialize the variable with default value.
if initializer is None:
- initializer = init_ops.uniform_unit_scaling_initializer()
+ initializer, initializing_from_value = self._get_default_initializer(
+ name=name, shape=shape, dtype=dtype)
# Clear control dependencies while creating the initializer.
with ops.control_dependencies(None):
if initializing_from_value:
@@ -692,6 +698,39 @@ class _VariableStore(object):
return v
+ # Initialize variable when no initializer provided
+ def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
+ """Provide a default initializer and a corresponding value.
+
+ Args:
+ name: see get_variable.
+ shape: see get_variable.
+ dtype: see get_variable.
+
+ Returns:
+ initializer and initializing_from_value. See get_variable above.
+
+ Raises:
+ ValueError: When giving unsupported dtype.
+ """
+ # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
+ if dtype.is_floating:
+ initializer = init_ops.uniform_unit_scaling_initializer()
+ initializing_from_value = False
+ # If dtype is DT_INT/DT_UINT, provide a default value `zero`
+ # If dtype is DT_BOOL, provide a default value `FALSE`
+ elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
+ initializer = init_ops.zeros_initializer()(
+ shape=shape, dtype=dtype.base_dtype)
+ initializing_from_value = True
+ # NOTE: Do we need to support handling DT_STRING and DT_COMPLEX here?
+ else:
+ raise ValueError("An initializer for variable %s of %s is required"
+ % (name, dtype.base_dtype))
+
+ return initializer, initializing_from_value
+
+
# To stop regularization, use this regularizer
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
diff --git a/tensorflow/python/ops/variables.py b/tensorflow/python/ops/variables.py
index 89a19f377d..32545e4eb3 100644
--- a/tensorflow/python/ops/variables.py
+++ b/tensorflow/python/ops/variables.py
@@ -650,6 +650,46 @@ class Variable(object):
"""
return state_ops.count_up_to(self._variable, limit=limit)
+ def load(self, value, session=None):
+ """Load new value into this variable.
+
+ Writes new value to variable's memory. Doesn't add ops to the graph.
+
+ This convenience method requires a session where the graph containing this
+ variable has been launched. If no session is passed, the default session is
+ used. See the [Session class](../../api_docs/python/client.md#Session) for
+ more information on launching a graph and on sessions.
+
+ ```python
+ v = tf.Variable([1, 2])
+ init = tf.global_variables_initializer()
+
+ with tf.Session() as sess:
+ sess.run(init)
+ # Usage passing the session explicitly.
+ v.load([2, 3], sess)
+ print(v.eval(sess)) # prints [2 3]
+ # Usage with the default session. The 'with' block
+ # above makes 'sess' the default session.
+ v.load([3, 4], sess)
+ print(v.eval()) # prints [3 4]
+ ```
+
+ Args:
+ value: New variable value
+ session: The session to use to evaluate this variable. If
+ none, the default session is used.
+
+ Raises:
+ ValueError: Session is not passed and no default session
+ """
+ session = session or ops.get_default_session()
+ if session is None:
+ raise ValueError(
+ "Either session argument should be provided or default session "
+ "should be established")
+ session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})
+
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
@@ -1070,7 +1110,7 @@ def global_variables():
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
-@deprecated("2016-03-02", "Please use tf.global_variables instead.")
+@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
"""See `tf.global_variables`."""
return global_variables()
diff --git a/tensorflow/python/saved_model/BUILD b/tensorflow/python/saved_model/BUILD
index 40b495d97a..9854cd2c04 100644
--- a/tensorflow/python/saved_model/BUILD
+++ b/tensorflow/python/saved_model/BUILD
@@ -9,6 +9,8 @@ licenses(["notice"]) # Apache 2.0
exports_files(["LICENSE"])
+load("//tensorflow:tensorflow.bzl", "py_test")
+
py_library(
name = "constants",
srcs = ["constants.py"],
diff --git a/tensorflow/python/summary/summary.py b/tensorflow/python/summary/summary.py
index 2e653106f4..8b83ced693 100644
--- a/tensorflow/python/summary/summary.py
+++ b/tensorflow/python/summary/summary.py
@@ -273,7 +273,7 @@ def merge(inputs, collections=None, name=None):
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
- added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ added to these collections. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
diff --git a/tensorflow/python/tools/BUILD b/tensorflow/python/tools/BUILD
index 6ddba9e510..2548bdb67f 100644
--- a/tensorflow/python/tools/BUILD
+++ b/tensorflow/python/tools/BUILD
@@ -7,6 +7,8 @@ licenses(["notice"]) # Apache 2.0
exports_files(["LICENSE"])
+load("//tensorflow:tensorflow.bzl", "py_test")
+
py_library(
name = "freeze_graph_lib",
srcs = ["freeze_graph.py"],
diff --git a/tensorflow/python/training/momentum.py b/tensorflow/python/training/momentum.py
index 62f8028ce6..bd9e124cdf 100644
--- a/tensorflow/python/training/momentum.py
+++ b/tensorflow/python/training/momentum.py
@@ -40,6 +40,10 @@ class MomentumOptimizer(optimizer.Optimizer):
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Momentum".
+ use_nesterov: If `True` use Nesterov Momentum.
+ See [Sutskever et. al., 2013](
+ http://jmlr.org/proceedings/papers/v28/sutskever13.pdf)
+
"""
super(MomentumOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
diff --git a/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc b/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc
index 6b31325694..b2da109bf0 100644
--- a/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc
+++ b/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc
@@ -873,6 +873,9 @@ static int TryToReadNumaNode(const string &pci_bus_id, int device_ordinal) {
#if defined(__APPLE__)
LOG(INFO) << "OS X does not support NUMA - returning NUMA node zero";
return 0;
+#elif defined(PLATFORM_WINDOWS)
+ // Windows support for NUMA is not currently implemented. Return node 0.
+ return 0;
#else
VLOG(2) << "trying to read NUMA node for device ordinal: " << device_ordinal;
static const int kUnknownNumaNode = -1;
@@ -890,10 +893,8 @@ static int TryToReadNumaNode(const string &pci_bus_id, int device_ordinal) {
// could use the file::* utilities).
FILE *file = fopen(filename.c_str(), "r");
if (file == nullptr) {
-#if !defined(PLATFORM_WINDOWS)
LOG(ERROR) << "could not open file to read NUMA node: " << filename
<< "\nYour kernel may have been built without NUMA support.";
-#endif
return kUnknownNumaNode;
}
diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl
index 9e26b4c654..c3deecdff6 100644
--- a/tensorflow/tensorflow.bzl
+++ b/tensorflow/tensorflow.bzl
@@ -539,6 +539,7 @@ def _py_wrap_cc_impl(ctx):
for dep in ctx.attr.deps:
inputs += dep.cc.transitive_headers
inputs += ctx.files._swiglib
+ inputs += ctx.files.toolchain_deps
swig_include_dirs = set(_get_repository_roots(ctx, inputs))
swig_include_dirs += sorted([f.dirname for f in ctx.files._swiglib])
args = ["-c++",
@@ -573,6 +574,9 @@ _py_wrap_cc = rule(
allow_files = True,
providers = ["cc"],
),
+ "toolchain_deps": attr.label_list(
+ allow_files = True,
+ ),
"module_name": attr.string(mandatory = True),
"py_module_name": attr.string(mandatory = True),
"_swig": attr.label(
@@ -763,6 +767,7 @@ def tf_py_wrap_cc(name, srcs, swig_includes=[], deps=[], copts=[], **kwargs):
srcs=srcs,
swig_includes=swig_includes,
deps=deps + extra_deps,
+ toolchain_deps=["//tools/defaults:crosstool"],
module_name=module_name,
py_module_name=name)
extra_linkopts = select({
@@ -812,6 +817,14 @@ def tf_py_wrap_cc(name, srcs, swig_includes=[], deps=[], copts=[], **kwargs):
"//conditions:default": [":" + cc_library_name],
}))
+def py_test(deps=[], **kwargs):
+ native.py_test(
+ deps=select({
+ "//conditions:default" : deps,
+ "//tensorflow:no_tensorflow_py_deps" : []
+ }),
+ **kwargs)
+
def tf_py_test(name, srcs, size="medium", data=[], main=None, args=[],
tags=[], shard_count=1, additional_deps=[], flaky=0):
native.py_test(
@@ -824,10 +837,13 @@ def tf_py_test(name, srcs, size="medium", data=[], main=None, args=[],
visibility=["//tensorflow:internal"],
shard_count=shard_count,
data=data,
- deps=[
- "//tensorflow/python:extra_py_tests_deps",
- "//tensorflow/python:gradient_checker",
- ] + additional_deps,
+ deps=select({
+ "//conditions:default" : [
+ "//tensorflow/python:extra_py_tests_deps",
+ "//tensorflow/python:gradient_checker",
+ ] + additional_deps,
+ "//tensorflow:no_tensorflow_py_deps" : []
+ }),
flaky=flaky,
srcs_version="PY2AND3")
diff --git a/tensorflow/tools/ci_build/builds/test_tutorials.sh b/tensorflow/tools/ci_build/builds/test_tutorials.sh
index 47c8fb2e8b..dc89919d73 100755
--- a/tensorflow/tools/ci_build/builds/test_tutorials.sh
+++ b/tensorflow/tools/ci_build/builds/test_tutorials.sh
@@ -33,8 +33,7 @@
#
# List of all tutorial tests to run, separated by spaces
-TUT_TESTS="mnist_softmax mnist_with_summaries cifar10_train "\
-"word2vec_test word2vec_optimized_test ptb_word_lm translate_test"
+TUT_TESTS="mnist_softmax mnist_with_summaries word2vec estimator_abalone"
if [[ -z "${TUT_TESTS_BLACKLIST}" ]]; then
TF_BUILD_TUT_TEST_BLACKLIST=""
@@ -109,6 +108,7 @@ if [[ ! -d "${TF_INSTALL_PATH}/examples/tutorials/mnist" ]]; then
"${TF_INSTALL_PATH}/examples/tutorials/mnist"
fi
+
# -----------------------------------------------------------
# mnist_softmax
test_mnist_softmax() {
@@ -180,7 +180,7 @@ test_cifar10_train() {
fi
run_in_directory "${TEST_DIR}" "${LOG_FILE}" \
- tensorflow_models/tutorials/image/cifar10/cifar10_train.py \
+ ${TF_MODELS_DIR}/tutorials/image/cifar10/cifar10_train.py \
--data_dir="${TUT_TEST_DATA_DIR}/cifar10" --max_steps=50 \
--train_dir="${TUT_TEST_ROOT}/cifar10_train"
@@ -204,21 +204,21 @@ test_cifar10_train() {
# -----------------------------------------------------------
# word2vec_test
-test_word2vec_test() {
+test_word2vec() {
LOG_FILE=$1
run_in_directory "${TEST_DIR}" "${LOG_FILE}" \
- tensorflow_models/tutorials/embedding/word2vec_test.py
+ tensorflow/examples/tutorials/word2vec/word2vec_basic.py
}
# -----------------------------------------------------------
-# word2vec_optimized_test
-test_word2vec_optimized_test() {
+# Estimator: abalone
+test_estimator_abalone() {
LOG_FILE=$1
run_in_directory "${TEST_DIR}" "${LOG_FILE}" \
- tensorflow_models/tutorials/embedding/word2vec_optimized_test.py
+ "tensorflow/examples/tutorials/estimators/abalone.py"
}
@@ -251,7 +251,7 @@ test_ptb_word_lm() {
fi
run_in_directory "${TEST_DIR}" "${LOG_FILE}" \
- tensorflow_models/tutorials/rnn/ptb/ptb_word_lm.py \
+ "${TF_MODELS_DIR}/tutorials/rnn/ptb/ptb_word_lm.py" \
--data_path="${DATA_DIR}/simple-examples/data" --model test
if [[ $? != 0 ]]; then
@@ -282,7 +282,7 @@ test_translate_test() {
LOG_FILE=$1
run_in_directory "${TEST_DIR}" "${LOG_FILE}" \
- tensorflow_models/tutorials/rnn/translate/translate.py --self_test=True
+ "${TF_MODELS_DIR}/tutorials/rnn/translate/translate.py" --self_test=True
}
diff --git a/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh b/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh
deleted file mode 100755
index 1879b13b0f..0000000000
--- a/tensorflow/tools/ci_build/linux/libtensorflow_gpu.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-#
-# Script to build a binary release tarball for the TensorFlow C-library for
-# machines with GPUs.
-set -ex
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-export TF_NEED_CUDA=1
-"${SCRIPT_DIR}/libtensorflow_docker.sh"
diff --git a/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
new file mode 100644
index 0000000000..c31d6f92b3
--- /dev/null
+++ b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
@@ -0,0 +1,201 @@
+#!/bin/bash
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# C++ tests
+failing_cpu_cc_tests="\
+ //tensorflow/core/kernels:control_flow_ops_test + \
+ //tensorflow/core:example_example_parser_configuration_test + \
+ //tensorflow/core:lib_core_status_test + \
+ //tensorflow/core:lib_monitoring_collection_registry_test + \
+ //tensorflow/core:lib_strings_numbers_test + \
+ //tensorflow/core:lib_strings_str_util_test + \
+ //tensorflow/core/platform/hadoop:hadoop_file_system_test + \
+ //tensorflow/core:platform_file_system_test + \
+ //tensorflow/core:platform_logging_test + \
+ //tensorflow/core:util_sparse_sparse_tensor_test + \
+ //tensorflow/cc:framework_gradient_checker_test + \
+ //tensorflow/cc:framework_gradients_test + \
+ //tensorflow/cc:gradients_array_grad_test + \
+ //tensorflow/cc:gradients_math_grad_test + \
+ //tensorflow/cc:gradients_nn_grad_test + \
+ //tensorflow/cc/saved_model:loader_test \
+"
+
+broken_cpu_cc_tests="\
+ //tensorflow/core/kernels/hexagon:graph_transferer_test + \
+ //tensorflow/cc:framework_cc_ops_test + \
+ //tensorflow/core/platform/cloud:time_util_test + \
+ //tensorflow/core/platform/cloud:oauth_client_test + \
+ //tensorflow/core/platform/cloud:http_request_test + \
+ //tensorflow/core/platform/cloud:google_auth_provider_test + \
+ //tensorflow/core/platform/cloud:gcs_file_system_test + \
+ //tensorflow/core/kernels/cloud:bigquery_table_accessor_test + \
+ //tensorflow/core/kernels/hexagon:quantized_matmul_op_for_hexagon_test + \
+ //tensorflow/core/kernels:requantize_op_test + \
+ //tensorflow/core/kernels:requantization_range_op_test + \
+ //tensorflow/core/kernels:quantized_reshape_op_test + \
+ //tensorflow/core/kernels:quantized_pooling_ops_test + \
+ //tensorflow/core/kernels:quantized_matmul_op_test + \
+ //tensorflow/core/kernels:quantized_conv_ops_test + \
+ //tensorflow/core/kernels:quantized_concat_op_test + \
+ //tensorflow/core/kernels:quantized_bias_add_op_test + \
+ //tensorflow/core/kernels:quantized_batch_norm_op_test + \
+ //tensorflow/core/kernels:quantized_activation_ops_test + \
+ //tensorflow/core/kernels:quantize_op_test + \
+ //tensorflow/core/kernels:quantize_down_and_shrink_range_op_test + \
+ //tensorflow/core/kernels:quantize_and_dequantize_op_test_gpu + \
+ //tensorflow/core/kernels:quantize_and_dequantize_op_test + \
+ //tensorflow/core/kernels:quantization_utils_test + \
+ //tensorflow/core/kernels:debug_ops_test + \
+ //tensorflow/core/distributed_runtime/rpc:rpc_rendezvous_mgr_test_gpu + \
+ //tensorflow/core/distributed_runtime/rpc:rpc_rendezvous_mgr_test + \
+ //tensorflow/core/distributed_runtime/rpc:grpc_tensor_coding_test + \
+ //tensorflow/core/distributed_runtime/rpc:grpc_session_test_gpu + \
+ //tensorflow/core/distributed_runtime/rpc:grpc_session_test + \
+ //tensorflow/core/distributed_runtime/rpc:grpc_channel_test_gpu + \
+ //tensorflow/core/distributed_runtime/rpc:grpc_channel_test + \
+ //tensorflow/core/distributed_runtime:remote_device_test_gpu + \
+ //tensorflow/core/distributed_runtime:remote_device_test + \
+ //tensorflow/core/distributed_runtime:executor_test_gpu + \
+ //tensorflow/core/distributed_runtime:executor_test + \
+ //tensorflow/core/debug:debug_gateway_test + \
+ //tensorflow/core/debug:debug_grpc_io_utils_test + \
+ //tensorflow/core:util_reporter_test + \
+ //tensorflow/core:util_memmapped_file_system_test + \
+ //tensorflow/core:platform_subprocess_test + \
+ //tensorflow/core:platform_profile_utils_cpu_utils_test + \
+ //tensorflow/core:lib_jpeg_jpeg_mem_unittest + \
+ //tensorflow/core/debug:debug_io_utils_test \
+"
+
+# lib_core_threadpool_test times out, but it passes when run alone
+extra_failing_gpu_cc_tests="\
+ //tensorflow/core:lib_core_threadpool_test + \
+ //tensorflow/core:cuda_libdevice_path_test + \
+ //tensorflow/core:common_runtime_direct_session_test + \
+ //tensorflow/core:common_runtime_direct_session_with_tracking_alloc_test + \
+ //tensorflow/core:gpu_tracer_test + \
+ //tensorflow/core:ops_math_grad_test \
+"
+
+exclude_cpu_cc_tests="${failing_cpu_cc_tests} + ${broken_cpu_cc_tests}"
+
+exclude_gpu_cc_tests="${extra_failing_gpu_cc_tests} + ${exclude_cpu_cc_tests}"
+
+# Python tests
+# The first argument is the name of the python test directory
+function get_failing_cpu_py_tests() {
+ echo "
+ //$1/tensorflow/python/kernel_tests:rnn_test + \
+ //$1/tensorflow/python/kernel_tests:sets_test + \
+ //$1/tensorflow/python/debug:cli_shared_test + \
+ //$1/tensorflow/python/debug:command_parser_test + \
+ //$1/tensorflow/python/debug:debug_data_test + \
+ //$1/tensorflow/python/debug:debug_utils_test + \
+ //$1/tensorflow/python/debug:debugger_cli_common_test + \
+ //$1/tensorflow/python/debug:framework_test + \
+ //$1/tensorflow/python/debug:local_cli_wrapper_test + \
+ //$1/tensorflow/python/debug:tensor_format_test + \
+ //$1/tensorflow/python:saver_large_variable_test + \
+ //$1/tensorflow/python:session_test + \
+ //$1/tensorflow/python:basic_session_run_hooks_test + \
+ //$1/tensorflow/python:contrib_test + \
+ //$1/tensorflow/python/debug:analyzer_cli_test + \
+ //$1/tensorflow/python/debug:curses_ui_test + \
+ //$1/tensorflow/python/debug:session_debug_file_test + \
+ //$1/tensorflow/python/debug:stepper_test + \
+ //$1/tensorflow/python:dequantize_op_test + \
+ //$1/tensorflow/python:directory_watcher_test + \
+ //$1/tensorflow/python:event_multiplexer_test + \
+ //$1/tensorflow/python:file_io_test + \
+ //$1/tensorflow/python:framework_meta_graph_test + \
+ //$1/tensorflow/python:framework_ops_test + \
+ //$1/tensorflow/python:framework_tensor_util_test + \
+ //$1/tensorflow/python:framework_test_util_test + \
+ //$1/tensorflow/python:image_ops_test + \
+ //$1/tensorflow/python/kernel_tests:as_string_op_test + \
+ //$1/tensorflow/python/kernel_tests:benchmark_test + \
+ //$1/tensorflow/python/kernel_tests:cast_op_test + \
+ //$1/tensorflow/python/kernel_tests:clip_ops_test + \
+ //$1/tensorflow/python/kernel_tests:conv_ops_test + \
+ //$1/tensorflow/python/kernel_tests:decode_image_op_test + \
+ //$1/tensorflow/python/kernel_tests:depthwise_conv_op_test + \
+ //$1/tensorflow/python/kernel_tests:functional_ops_test + \
+ //$1/tensorflow/python/kernel_tests:py_func_test + \
+ //$1/tensorflow/python/kernel_tests:sparse_matmul_op_test + \
+ //$1/tensorflow/python/kernel_tests:string_to_number_op_test + \
+ //$1/tensorflow/python/kernel_tests:summary_ops_test + \
+ //$1/tensorflow/python/kernel_tests:variable_scope_test + \
+ //$1/tensorflow/python:monitored_session_test + \
+ //$1/tensorflow/python:nn_batchnorm_test + \
+ //$1/tensorflow/python:protobuf_compare_test + \
+ //$1/tensorflow/python:quantized_conv_ops_test + \
+ //$1/tensorflow/python:saver_test + \
+ //$1/tensorflow/python:file_system_test \
+ "
+}
+
+function get_failing_gpu_py_tests() {
+ echo "
+ //$1/tensorflow/python/kernel_tests:rnn_test + \
+ //$1/tensorflow/python/kernel_tests:sets_test + \
+ //$1/tensorflow/python/kernel_tests:diag_op_test + \
+ //$1/tensorflow/python/kernel_tests:one_hot_op_test + \
+ //$1/tensorflow/python/kernel_tests:trace_op_test + \
+ $(get_failing_cpu_py_tests $1)
+ "
+}
+
+function clean_output_base() {
+ # TODO(pcloudy): bazel clean --expunge doesn't work on Windows yet.
+ # Clean the output base manually to ensure build correctness
+ bazel clean
+ output_base=$(bazel info output_base)
+ bazel shutdown
+ # Sleep 5s to wait for jvm shutdown completely
+ # otherwise rm will fail with device or resource busy error
+ sleep 5
+ rm -rf ${output_base}
+}
+
+function run_configure_for_cpu_build {
+ export TF_NEED_CUDA=0
+ echo "" | ./configure
+}
+
+function run_configure_for_gpu_build {
+ # Due to a bug in Bazel: https://github.com/bazelbuild/bazel/issues/2182
+ # yes "" | ./configure doesn't work on Windows, so we set all the
+ # environment variables in advance to avoid interacting with the script.
+ export TF_NEED_CUDA=1
+ export TF_CUDA_VERSION=8.0
+ export CUDA_TOOLKIT_PATH="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v8.0"
+ export TF_CUDNN_VERSION=5
+ export CUDNN_INSTALL_PATH="C:/tools/cuda"
+ export TF_CUDA_COMPUTE_CAPABILITIES="3.5,5.2"
+ echo "" | ./configure
+}
+
+function create_python_test_dir() {
+ rm -rf "$1"
+ mkdir -p "$1"
+ cmd /c "mklink /J $1\\tensorflow .\\tensorflow"
+}
+
+function reinstall_tensorflow_pip() {
+ echo "y" | pip uninstall tensorflow -q || true
+ pip install ${1}
+}
diff --git a/tensorflow/tools/ci_build/windows/bazel/common_env.sh b/tensorflow/tools/ci_build/windows/bazel/common_env.sh
new file mode 100644
index 0000000000..662de93c16
--- /dev/null
+++ b/tensorflow/tools/ci_build/windows/bazel/common_env.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# This script assumes the standard setup on tensorflow Jenkins windows machines.
+# It is NOT guaranteed to work on any other machine. Use at your own risk!
+#
+# REQUIREMENTS:
+# * All installed in standard locations:
+# - JDK8, and JAVA_HOME set.
+# - Microsoft Visual Studio 2015 Community Edition
+# - Msys2
+# - Anaconda3
+# * Bazel windows executable copied as "bazel.exe" and included in PATH.
+
+# Use a temporary directory with a short name.
+export TMPDIR="C:/tmp"
+mkdir -p "$TMPDIR"
+
+# Set bash path
+export BAZEL_SH="C:/tools/msys64/usr/bin/bash"
+
+# Set Python path for ./configure
+export PYTHON_BIN_PATH="C:/Program Files/Anaconda3/python"
+
+# Set Python path for cc_configure.bzl
+export BAZEL_PYTHON="C:/Program Files/Anaconda3/python"
+
+# Set Visual Studio path
+export BAZEL_VS="C:/Program Files (x86)/Microsoft Visual Studio 14.0"
+
+# Add python into PATH, it's needed because gen_git_source.py uses
+# '/usr/bin/env python' as a shebang
+export PATH="/c/Program Files/Anaconda3:$PATH"
+
+# Make sure we have pip in PATH
+export PATH="/c/Program Files/Anaconda3/Scripts:$PATH"
+
+# Add Cuda and Cudnn dll directories into PATH
+export PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v8.0/bin:$PATH"
+export PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v8.0/extras/CUPTI/libx64:$PATH"
+export PATH="/c/tools/cuda/bin:$PATH"
+
+# Set the common build options on Windows
+export BUILD_OPTS='--cpu=x64_windows_msvc --host_cpu=x64_windows_msvc --copt=/w --verbose_failures --experimental_ui'
diff --git a/tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh b/tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh
index 3e882656a9..8c419347d6 100644
--- a/tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh
+++ b/tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh
@@ -35,106 +35,24 @@ script_dir=$(dirname $0)
cd ${script_dir%%tensorflow/tools/ci_build/windows/cpu/bazel}.
# Setting up the environment variables Bazel and ./configure needs
-source "tensorflow/tools/ci_build/windows/cpu/bazel/common_env.sh" \
+source "tensorflow/tools/ci_build/windows/bazel/common_env.sh" \
|| { echo "Failed to source common_env.sh" >&2; exit 1; }
-# bazel clean --expunge doesn't work on Windows yet.
-# Clean the output base manually to ensure build correctness
-bazel clean
-output_base=$(bazel info output_base)
-bazel shutdown
-# Sleep 5s to wait for jvm shutdown completely
-# otherwise rm will fail with device or resource busy error
-sleep 5
-rm -rf ${output_base}
+# load bazel_test_lib.sh
+source "tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh" \
+ || { echo "Failed to source bazel_test_lib.sh" >&2; exit 1; }
-export TF_NEED_CUDA=0
-echo "" | ./configure
+clean_output_base
-failing_tests="\
- //tensorflow/core:example_example_parser_configuration_test + \
- //tensorflow/core/kernels:sparse_dense_binary_op_shared_test + \
- //tensorflow/core/kernels:sparse_reduce_sum_op_test + \
- //tensorflow/core:lib_core_status_test + \
- //tensorflow/core:lib_monitoring_collection_registry_test + \
- //tensorflow/core:lib_strings_numbers_test + \
- //tensorflow/core:lib_strings_str_util_test + \
- //tensorflow/core/platform/hadoop:hadoop_file_system_test + \
- //tensorflow/core:platform_file_system_test + \
- //tensorflow/core:platform_logging_test + \
- //tensorflow/core:util_sparse_sparse_tensor_test + \
- //tensorflow/cc:framework_gradient_checker_test + \
- //tensorflow/cc:framework_gradients_test + \
- //tensorflow/cc:gradients_array_grad_test + \
- //tensorflow/cc:gradients_math_grad_test + \
- //tensorflow/cc:gradients_nn_grad_test + \
- //tensorflow/cc/saved_model:loader_test
-"
+run_configure_for_cpu_build
-broken_tests="\
- //tensorflow/cc:framework_cc_ops_test + \
- //tensorflow/core/platform/cloud:time_util_test + \
- //tensorflow/core/platform/cloud:oauth_client_test + \
- //tensorflow/core/platform/cloud:http_request_test + \
- //tensorflow/core/platform/cloud:google_auth_provider_test + \
- //tensorflow/core/platform/cloud:gcs_file_system_test + \
- //tensorflow/core/kernels/cloud:bigquery_table_accessor_test + \
- //tensorflow/core/kernels/hexagon:quantized_matmul_op_for_hexagon_test + \
- //tensorflow/core/kernels:sparse_add_op_test + \
- //tensorflow/core/kernels:spacetobatch_benchmark_test_gpu + \
- //tensorflow/core/kernels:spacetobatch_benchmark_test + \
- //tensorflow/core/kernels:requantize_op_test + \
- //tensorflow/core/kernels:requantization_range_op_test + \
- //tensorflow/core/kernels:quantized_reshape_op_test + \
- //tensorflow/core/kernels:quantized_pooling_ops_test + \
- //tensorflow/core/kernels:quantized_matmul_op_test + \
- //tensorflow/core/kernels:quantized_conv_ops_test + \
- //tensorflow/core/kernels:quantized_concat_op_test + \
- //tensorflow/core/kernels:quantized_bias_add_op_test + \
- //tensorflow/core/kernels:quantized_batch_norm_op_test + \
- //tensorflow/core/kernels:quantized_activation_ops_test + \
- //tensorflow/core/kernels:quantize_op_test + \
- //tensorflow/core/kernels:quantize_down_and_shrink_range_op_test + \
- //tensorflow/core/kernels:quantize_and_dequantize_op_test_gpu + \
- //tensorflow/core/kernels:quantize_and_dequantize_op_test + \
- //tensorflow/core/kernels:quantization_utils_test + \
- //tensorflow/core/kernels:debug_ops_test + \
- //tensorflow/core/kernels:control_flow_ops_test + \
- //tensorflow/core/kernels:cast_op_test_gpu + \
- //tensorflow/core/kernels:cast_op_test + \
- //tensorflow/core/distributed_runtime/rpc:rpc_rendezvous_mgr_test_gpu + \
- //tensorflow/core/distributed_runtime/rpc:rpc_rendezvous_mgr_test + \
- //tensorflow/core/distributed_runtime/rpc:grpc_tensor_coding_test + \
- //tensorflow/core/distributed_runtime/rpc:grpc_session_test_gpu + \
- //tensorflow/core/distributed_runtime/rpc:grpc_session_test + \
- //tensorflow/core/distributed_runtime/rpc:grpc_channel_test_gpu + \
- //tensorflow/core/distributed_runtime/rpc:grpc_channel_test + \
- //tensorflow/core/distributed_runtime:remote_device_test_gpu + \
- //tensorflow/core/distributed_runtime:remote_device_test + \
- //tensorflow/core/distributed_runtime:executor_test_gpu + \
- //tensorflow/core/distributed_runtime:executor_test + \
- //tensorflow/core/debug:debug_gateway_test + \
- //tensorflow/core/debug:debug_grpc_io_utils_test + \
- //tensorflow/core:util_reporter_test + \
- //tensorflow/core:util_memmapped_file_system_test + \
- //tensorflow/core:platform_subprocess_test + \
- //tensorflow/core:platform_profile_utils_cpu_utils_test + \
- //tensorflow/core:platform_port_test + \
- //tensorflow/core:lib_strings_strcat_test + \
- //tensorflow/core:lib_jpeg_jpeg_mem_unittest + \
- //tensorflow/core:lib_core_notification_test + \
- //tensorflow/core:framework_partial_tensor_shape_test + \
- //tensorflow/core/debug:debug_io_utils_test \
-"
-
-exclude_tests="${failing_tests} + ${broken_tests}"
-
-BUILD_OPTS='-c opt --cpu=x64_windows_msvc --host_cpu=x64_windows_msvc --copt=/w --verbose_failures --experimental_ui'
+# Compiling the following test is extremely slow with -c opt
+slow_compiling_test="//tensorflow/core/kernels:eigen_backward_spatial_convolutions_test"
# Find all the passing cc_tests on Windows and store them in a variable
-passing_tests=$(bazel query "kind(cc_test, //tensorflow/cc/... + //tensorflow/core/...) - (${exclude_tests})" |
+passing_tests=$(bazel query "kind(cc_test, //tensorflow/cc/... + //tensorflow/core/...) - (${exclude_cpu_cc_tests}) - ($slow_compiling_test)" |
# We need to strip \r so that the result could be store into a variable under MSYS
tr '\r' ' ')
-bazel test $BUILD_OPTS -k $passing_tests
-
+bazel test $BUILD_OPTS -k $slow_compiling_test --test_output=errors
+bazel test -c opt $BUILD_OPTS -k $passing_tests --test_output=errors
diff --git a/tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat b/tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat
index 11178a5d14..9908762bca 100644
--- a/tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat
+++ b/tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat
@@ -43,4 +43,4 @@ if %errorlevel% neq 0 exit /b %errorlevel%
:: Run all python tests if the installation succeeded.
echo Running tests...
-ctest -C Release --output-on-failure
+ctest -C Release --output-on-failure -j 32
diff --git a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
index 28c7475184..34844e60c8 100644
--- a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
+++ b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
@@ -35,25 +35,35 @@ script_dir=$(dirname $0)
cd ${script_dir%%tensorflow/tools/ci_build/windows/cpu/pip}.
# Setting up the environment variables Bazel and ./configure needs
-source "tensorflow/tools/ci_build/windows/cpu/bazel/common_env.sh" \
+source "tensorflow/tools/ci_build/windows/bazel/common_env.sh" \
|| { echo "Failed to source common_env.sh" >&2; exit 1; }
-# bazel clean --expunge doesn't work on Windows yet.
-# Clean the output base manually to ensure build correctness
-bazel clean
-output_base=$(bazel info output_base)
-bazel shutdown
-# Sleep 5s to wait for jvm shutdown completely
-# otherwise rm will fail with device or resource busy error
-sleep 5
-rm -rf ${output_base}
+# load bazel_test_lib.sh
+source "tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh" \
+ || { echo "Failed to source bazel_test_lib.sh" >&2; exit 1; }
-export TF_NEED_CUDA=0
-echo "" | ./configure
+clean_output_base
-BUILD_OPTS='-c opt --cpu=x64_windows_msvc --host_cpu=x64_windows_msvc --copt=/w --verbose_failures --experimental_ui'
+run_configure_for_cpu_build
-bazel build $BUILD_OPTS tensorflow/tools/pip_package:build_pip_package || exit $?
+bazel build -c opt $BUILD_OPTS tensorflow/tools/pip_package:build_pip_package || exit $?
-./bazel-bin/tensorflow/tools/pip_package/build_pip_package $PWD
+# Create a python test directory to avoid package name conflict
+PY_TEST_DIR="py_test_dir"
+create_python_test_dir "${PY_TEST_DIR}"
+./bazel-bin/tensorflow/tools/pip_package/build_pip_package "$PWD/${PY_TEST_DIR}"
+
+# Running python tests on Windows needs pip package installed
+PIP_NAME=$(ls ${PY_TEST_DIR}/tensorflow-*.whl)
+reinstall_tensorflow_pip ${PIP_NAME}
+
+failing_cpu_py_tests=$(get_failing_cpu_py_tests ${PY_TEST_DIR})
+
+passing_tests=$(bazel query "kind(py_test, //${PY_TEST_DIR}/tensorflow/python/...) - (${failing_cpu_py_tests})" |
+  # We need to strip \r so that the result could be stored in a variable under MSYS
+ tr '\r' ' ')
+
+# Define no_tensorflow_py_deps=true so that every py_test has no deps anymore,
+# which will result in testing the system-installed tensorflow
+bazel test -c opt $BUILD_OPTS -k $passing_tests --define=no_tensorflow_py_deps=true --test_output=errors
diff --git a/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.bat b/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.bat
new file mode 100644
index 0000000000..14f2cef4bc
--- /dev/null
+++ b/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.bat
@@ -0,0 +1 @@
+c:\tools\msys64\usr\bin\bash -l %cd%/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh %*
diff --git a/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh b/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh
new file mode 100644
index 0000000000..3fd960deab
--- /dev/null
+++ b/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# This script assumes the standard setup on tensorflow Jenkins windows machines.
+# It is NOT guaranteed to work on any other machine. Use at your own risk!
+#
+# REQUIREMENTS:
+# * All installed in standard locations:
+# - JDK8, and JAVA_HOME set.
+# - Microsoft Visual Studio 2015 Community Edition
+# - Msys2
+# - Anaconda3
+# * Bazel windows executable copied as "bazel.exe" and included in PATH.
+
+# All commands shall pass, and all should be visible.
+set -x
+set -e
+
+# This script is under <repo_root>/tensorflow/tools/ci_build/windows/gpu/bazel
+# Change into repository root.
+script_dir=$(dirname $0)
+cd ${script_dir%%tensorflow/tools/ci_build/windows/gpu/bazel}.
+
+# Setting up the environment variables Bazel and ./configure needs
+source "tensorflow/tools/ci_build/windows/bazel/common_env.sh" \
+ || { echo "Failed to source common_env.sh" >&2; exit 1; }
+
+# load bazel_test_lib.sh
+source "tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh" \
+ || { echo "Failed to source bazel_test_lib.sh" >&2; exit 1; }
+
+clean_output_base
+
+run_configure_for_gpu_build
+
+# Compiling the following test is extremely slow with -c opt
+slow_compiling_test="//tensorflow/core/kernels:eigen_backward_spatial_convolutions_test"
+
+# Find all the passing cc_tests on Windows and store them in a variable
+passing_tests=$(bazel query "kind(cc_test, //tensorflow/cc/... + //tensorflow/core/...) - (${exclude_gpu_cc_tests}) - ($slow_compiling_test)" |
+  # We need to strip \r so that the result could be stored in a variable under MSYS
+ tr '\r' ' ')
+
+# TODO(pcloudy): There is a bug in Bazel preventing build with GPU support without -c opt
+# Re-enable this test after it is fixed.
+# bazel test --config=win-cuda $BUILD_OPTS -k $slow_compiling_test --test_output=errors
+bazel test -c opt --config=win-cuda $BUILD_OPTS -k $passing_tests --test_output=errors
diff --git a/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
new file mode 100644
index 0000000000..eaf9ef8158
--- /dev/null
+++ b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# This script assumes the standard setup on tensorflow Jenkins windows machines.
+# It is NOT guaranteed to work on any other machine. Use at your own risk!
+#
+# REQUIREMENTS:
+# * All installed in standard locations:
+# - JDK8, and JAVA_HOME set.
+# - Microsoft Visual Studio 2015 Community Edition
+# - Msys2
+# - Anaconda3
+# * Bazel windows executable copied as "bazel.exe" and included in PATH.
+
+# All commands shall pass, and all should be visible.
+set -x
+set -e
+
+# This script is under <repo_root>/tensorflow/tools/ci_build/windows/gpu/pip/
+# Change into repository root.
+script_dir=$(dirname $0)
+cd ${script_dir%%tensorflow/tools/ci_build/windows/gpu/pip}.
+
+# Setting up the environment variables Bazel and ./configure needs
+source "tensorflow/tools/ci_build/windows/bazel/common_env.sh" \
+ || { echo "Failed to source common_env.sh" >&2; exit 1; }
+
+# load bazel_test_lib.sh
+source "tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh" \
+ || { echo "Failed to source bazel_test_lib.sh" >&2; exit 1; }
+
+clean_output_base
+
+run_configure_for_gpu_build
+
+bazel build -c opt --config=win-cuda $BUILD_OPTS tensorflow/tools/pip_package:build_pip_package || exit $?
+
+# Create a python test directory to avoid package name conflict
+PY_TEST_DIR="py_test_dir"
+create_python_test_dir "${PY_TEST_DIR}"
+
+./bazel-bin/tensorflow/tools/pip_package/build_pip_package "$PWD/${PY_TEST_DIR}"
+
+# Running python tests on Windows needs pip package installed
+PIP_NAME=$(ls ${PY_TEST_DIR}/tensorflow-*.whl)
+reinstall_tensorflow_pip ${PIP_NAME}
+
+failing_gpu_py_tests=$(get_failing_gpu_py_tests ${PY_TEST_DIR})
+
+passing_tests=$(bazel query "kind(py_test, //${PY_TEST_DIR}/tensorflow/python/...) - (${failing_gpu_py_tests})" |
+  # We need to strip \r so that the result could be stored in a variable under MSYS
+ tr '\r' ' ')
+
+# Define no_tensorflow_py_deps=true so that every py_test has no deps anymore,
+# which will result in testing the system-installed tensorflow
+# GPU tests are very flaky when running concurrently, so set local_test_jobs=5
+bazel test -c opt --config=win-cuda $BUILD_OPTS -k $passing_tests --define=no_tensorflow_py_deps=true --test_output=errors --local_test_jobs=5
diff --git a/tensorflow/tools/ci_build/windows/gpu/pip/run.bat b/tensorflow/tools/ci_build/windows/gpu/pip/run.bat
new file mode 100644
index 0000000000..a504b7eb7c
--- /dev/null
+++ b/tensorflow/tools/ci_build/windows/gpu/pip/run.bat
@@ -0,0 +1 @@
+c:\tools\msys64\usr\bin\bash -l %cd%/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh %*
diff --git a/tensorflow/tools/docker/parameterized_docker_build.sh b/tensorflow/tools/docker/parameterized_docker_build.sh
index b0c56c8965..35c1218470 100755
--- a/tensorflow/tools/docker/parameterized_docker_build.sh
+++ b/tensorflow/tools/docker/parameterized_docker_build.sh
@@ -327,15 +327,6 @@ if [[ "${TF_DOCKER_BUILD_IS_DEVEL}" == "no" ]]; then
sleep 1
"${DOCKER_BINARY}" stop --time=0 ${CONTAINER_ID}
-else
- "${DOCKER_BINARY}" run --rm -p ${CONTAINER_PORT}:${CONTAINER_PORT} \
- -v ${TMP_DIR}/notebooks:/root/notebooks "${IMG}" \
- bash -c \
- "cd /tensorflow; tensorflow/tools/ci_build/builds/test_tutorials.sh"
- if [[ $? != "0" ]]; then
- CHECK_FAILED=1
- fi
-
fi
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 688681572e..e6cfd4a9d7 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -29,7 +29,7 @@ from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
-_VERSION = '0.12.0-rc0'
+_VERSION = '0.12.0-rc1'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
diff --git a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl
index d3bb93c036..b7d6cc61dd 100755
--- a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl
+++ b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl
@@ -53,6 +53,7 @@ CURRENT_DIR = os.path.dirname(sys.argv[0])
NVCC_PATH = CURRENT_DIR + '/../../../cuda/bin/nvcc'
LLVM_HOST_COMPILER_PATH = ('/usr/bin/gcc')
PREFIX_DIR = os.path.dirname(GCC_HOST_COMPILER_PATH)
+NVCC_VERSION = '%{cuda_version}'
def Log(s):
print('gpus/crosstool: {0}'.format(s))
@@ -114,6 +115,14 @@ def GetHostCompilerOptions(argv):
return opts
+def _update_options(nvcc_options):
+ if NVCC_VERSION in ("7.0",):
+ return nvcc_options
+
+ update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
+ return [ update_options[opt] if opt in update_options else opt
+ for opt in nvcc_options ]
+
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
@@ -130,7 +139,8 @@ def GetNvccOptions(argv):
args, _ = parser.parse_known_args(argv)
if args.nvcc_options:
- return ' '.join(['--'+a for a in sum(args.nvcc_options, [])])
+ options = _update_options(sum(args.nvcc_options, []))
+ return ' '.join(['--'+a for a in options])
return ''
diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl
index 06694d902c..1db24435e9 100644
--- a/third_party/gpus/cuda_configure.bzl
+++ b/third_party/gpus/cuda_configure.bzl
@@ -584,6 +584,18 @@ def _create_dummy_repository(repository_ctx):
"%{curand_lib}": _lib_name("curand", cpu_value),
"%{cupti_lib}": _lib_name("cupti", cpu_value),
})
+ _tpl(repository_ctx, "cuda:BUILD",
+ {
+ "%{cudart_static_lib}": _lib_name("cudart_static", cpu_value,
+ static=True),
+ "%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
+ "%{cudart_lib}": _lib_name("cudart", cpu_value),
+ "%{cublas_lib}": _lib_name("cublas", cpu_value),
+ "%{cudnn_lib}": _lib_name("cudnn", cpu_value),
+ "%{cufft_lib}": _lib_name("cufft", cpu_value),
+ "%{curand_lib}": _lib_name("curand", cpu_value),
+ "%{cupti_lib}": _lib_name("cupti", cpu_value),
+ })
_tpl(repository_ctx, "cuda:platform.bzl",
{
"%{cuda_version}": _DEFAULT_CUDA_VERSION,
@@ -701,6 +713,7 @@ def _create_cuda_repository(repository_ctx):
"crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc",
{
"%{cpu_compiler}": str(cc),
+ "%{cuda_version}": cuda_config.cuda_version,
"%{gcc_host_compiler_path}": str(cc),
"%{cuda_compute_capabilities}": ", ".join(
["\"%s\"" % c for c in cuda_config.compute_capabilities]),
diff --git a/third_party/sycl/crosstool/computecpp.tpl b/third_party/sycl/crosstool/computecpp.tpl
index 0c7611d298..e90d51bf87 100755
--- a/third_party/sycl/crosstool/computecpp.tpl
+++ b/third_party/sycl/crosstool/computecpp.tpl
@@ -13,22 +13,28 @@ COMPUTECPP_DRIVER= COMPUTECPP_ROOT + 'bin/compute++'
COMPUTECPP_INCLUDE = COMPUTECPP_ROOT + 'include'
def main():
- computecpp_compiler_flags = [flag for flag in sys.argv[1:]]
+ compiler_flags = []
- output_file_index = computecpp_compiler_flags.index('-o') + 1
- output_file_name = computecpp_compiler_flags[output_file_index]
+  # remove -fsanitize-coverage from string
+ if CPU_CXX_COMPILER.find("g++") != -1:
+ compiler_flags = [flag for flag in sys.argv[1:] if not flag.startswith(('-Wl,--no-undefined', '-fsanitize-coverage', '-Wno-unused-but-set-variable', '-Wignored-attributes'))]
+ else:
+ compiler_flags = [flag for flag in sys.argv[1:] if not flag.startswith(('-Wl,--no-undefined', '-Wno-unused-but-set-variable', '-Wignored-attributes'))]
+
+ output_file_index = compiler_flags.index('-o') + 1
+ output_file_name = compiler_flags[output_file_index]
if(output_file_index == 1):
# we are linking
- return subprocess.call([CPU_CXX_COMPILER] + computecpp_compiler_flags)
+ return subprocess.call([CPU_CXX_COMPILER] + compiler_flags)
- computecpp_compiler_flags = computecpp_compiler_flags + ['-D_GLIBCXX_USE_CXX11_ABI=0']
+ compiler_flags = compiler_flags + ['-D_GLIBCXX_USE_CXX11_ABI=0', '-DEIGEN_USE_SYCL=1']
# find what we compile
compiling_cpp = 0
- if('-c' in computecpp_compiler_flags):
- compiled_file_index = computecpp_compiler_flags.index('-c') + 1
- compited_file_name = computecpp_compiler_flags[compiled_file_index]
+ if('-c' in compiler_flags):
+ compiled_file_index = compiler_flags.index('-c') + 1
+ compited_file_name = compiler_flags[compiled_file_index]
if(compited_file_name.endswith(('.cc', '.c++', '.cpp', '.CPP', '.C', '.cxx'))):
compiling_cpp = 1;
@@ -36,22 +42,24 @@ def main():
filename, file_extension = os.path.splitext(output_file_name)
bc_out = filename + '.sycl'
- computecpp_compiler_flags = ['--sycl-no-diags', '-sycl-compress-name', '-DTENSORFLOW_USE_SYCL', '-Wno-unused-variable', '-I', COMPUTECPP_INCLUDE, '-isystem',
- COMPUTECPP_INCLUDE, '-std=c++11', '-sycl', '-emit-llvm', '-no-serial-memop'] + computecpp_compiler_flags
+ # strip asan for the device
+ computecpp_device_compiler_flags = [flag for flag in compiler_flags if not flag.startswith(('-fsanitize'))]
+ computecpp_device_compiler_flags = ['-sycl-compress-name', '-DTENSORFLOW_USE_SYCL', '-Wno-unused-variable', '-I', COMPUTECPP_INCLUDE, '-isystem',
+ COMPUTECPP_INCLUDE, '-std=c++11', '-sycl', '-emit-llvm', '-no-serial-memop'] + computecpp_device_compiler_flags
- # dont want that in case of compiling with computecpp first
- host_compiler_flags = [flag for flag in sys.argv[1:]
- if not flag.startswith(('-MF', '-MD',))
- if not '.d' in flag]
-
- x = subprocess.call([COMPUTECPP_DRIVER] + computecpp_compiler_flags )
+ x = subprocess.call([COMPUTECPP_DRIVER] + computecpp_device_compiler_flags )
if(x == 0):
+    # don't want that in case of compiling with computecpp first
+ host_compiler_flags = [flag for flag in compiler_flags
+ if not flag.startswith(('-MF', '-MD',))
+ if not '.d' in flag]
+
host_compiler_flags = ['-D_GLIBCXX_USE_CXX11_ABI=0', '-DTENSORFLOW_USE_SYCL', '-Wno-unused-variable', '-I', COMPUTECPP_INCLUDE, '--include', bc_out] + host_compiler_flags
x = subprocess.call([CPU_CXX_COMPILER] + host_compiler_flags)
return x
else:
# compile for C
- return subprocess.call([CPU_C_COMPILER] + computecpp_compiler_flags)
+ return subprocess.call([CPU_C_COMPILER] + compiler_flags)
if __name__ == '__main__':
sys.exit(main())
diff --git a/third_party/sycl/sycl_configure.bzl b/third_party/sycl/sycl_configure.bzl
index 38bd7759de..6ad498487f 100644
--- a/third_party/sycl/sycl_configure.bzl
+++ b/third_party/sycl/sycl_configure.bzl
@@ -102,7 +102,7 @@ def _tpl(repository_ctx, tpl, substitutions={}, out=None):
def _file(repository_ctx, label):
repository_ctx.template(
label.replace(":", "/"),
- Label("//third_party/sycl/%s.tpl" % label),
+ Label("//third_party/sycl/%s" % label),
{})
_DUMMY_CROSSTOOL_BZL_FILE = """
@@ -133,9 +133,9 @@ error_sycl_disabled()
def _create_dummy_repository(repository_ctx):
# Set up BUILD file for sycl/.
- _file(repository_ctx, "sycl:build_defs.bzl")
+ _tpl(repository_ctx, "sycl:build_defs.bzl")
_tpl(repository_ctx, "sycl:BUILD")
- _tpl(repository_ctx, "sycl:LICENSE.text")
+ _file(repository_ctx, "sycl:LICENSE.text")
_tpl(repository_ctx, "sycl:platform.bzl")
# Create dummy files for the SYCL toolkit since they are still required by
@@ -157,10 +157,11 @@ def _sycl_autoconf_imp(repository_ctx):
_create_dummy_repository(repository_ctx)
else:
# copy template files
- _file(repository_ctx, "sycl:build_defs.bzl")
+ _tpl(repository_ctx, "sycl:build_defs.bzl")
_tpl(repository_ctx, "sycl:BUILD")
_tpl(repository_ctx, "sycl:platform.bzl")
- _file(repository_ctx, "crosstool:BUILD")
+ _tpl(repository_ctx, "crosstool:BUILD")
+ _file(repository_ctx, "sycl:LICENSE.text")
_tpl(repository_ctx, "crosstool:computecpp",
{
"%{host_cxx_compiler}" : find_cc(repository_ctx),
diff --git a/tools/bazel.rc.template b/tools/bazel.rc.template
index 8f99bf02fa..a1fb55632c 100644
--- a/tools/bazel.rc.template
+++ b/tools/bazel.rc.template
@@ -5,6 +5,9 @@ build:win-cuda --define=using_cuda=true --define=using_cuda_nvcc=true
build:sycl --crosstool_top=@local_config_sycl//crosstool:toolchain
build:sycl --define=using_sycl=true
+build:sycl_asan --crosstool_top=@local_config_sycl//crosstool:toolchain
+build:sycl_asan --define=using_sycl=true --copt -fno-omit-frame-pointer --copt -fsanitize-coverage=3 --copt -fsanitize=address --copt -DGPR_NO_DIRECT_SYSCALLS --linkopt -fPIC --linkopt -lasan
+
build --force_python=py$PYTHON_MAJOR_VERSION
build --host_force_python=py$PYTHON_MAJOR_VERSION
build --python$PYTHON_MAJOR_VERSION_path=$PYTHON_BINARY