author      Jonathan Hseu <jhseu@google.com>                 2016-12-22 15:38:30 -0800
committer   TensorFlower Gardener <gardener@tensorflow.org>  2016-12-22 15:48:41 -0800
commit      bed8383c27a0a7225e6fc7ff59a2cd6388fb4d09 (patch)
tree        b70cfc88f95f318195f8610ffb960e98604348d1
parent      1e5bd8cdd62033d1f7ea928fcbec521bb48bb1f5 (diff)
Merge changes from github.
Change: 142805270
-rw-r--r--  README.md | 8
-rwxr-xr-x  configure | 14
-rw-r--r--  tensorflow/c/c_api.cc | 6
-rw-r--r--  tensorflow/c/c_api.h | 5
-rw-r--r--  tensorflow/c/c_api_test.cc | 6
-rw-r--r--  tensorflow/cc/framework/scope.cc | 3
-rw-r--r--  tensorflow/contrib/cmake/CMakeLists.txt | 1
-rw-r--r--  tensorflow/contrib/cmake/setup.py | 2
-rw-r--r--  tensorflow/contrib/cmake/tf_cc_ops.cmake | 2
-rw-r--r--  tensorflow/contrib/cmake/tf_core_cpu.cmake | 4
-rw-r--r--  tensorflow/contrib/cmake/tf_core_ops.cmake | 1
-rw-r--r--  tensorflow/contrib/cmake/tf_models.cmake | 21
-rw-r--r--  tensorflow/contrib/cmake/tf_python.cmake | 3
-rw-r--r--  tensorflow/contrib/distributions/python/kernel_tests/student_t_test.py | 26
-rw-r--r--  tensorflow/contrib/distributions/python/ops/student_t.py | 12
-rw-r--r--  tensorflow/contrib/framework/python/ops/arg_scope.py | 2
-rw-r--r--  tensorflow/contrib/ios_examples/README.md | 9
-rw-r--r--  tensorflow/contrib/learn/python/learn/README.md | 11
-rw-r--r--  tensorflow/contrib/learn/python/learn/datasets/BUILD | 13
-rw-r--r--  tensorflow/contrib/learn/python/learn/datasets/__init__.py | 41
-rw-r--r--  tensorflow/contrib/learn/python/learn/datasets/synthetic.py | 202
-rw-r--r--  tensorflow/contrib/learn/python/learn/datasets/synthetic_test.py | 127
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/run_config.py | 2
-rw-r--r--  tensorflow/contrib/losses/python/losses/loss_ops.py | 4
-rw-r--r--  tensorflow/core/common_runtime/sycl/sycl_device.cc | 32
-rw-r--r--  tensorflow/core/common_runtime/sycl/sycl_device_context.cc | 2
-rw-r--r--  tensorflow/core/kernels/aggregate_ops.cc | 18
-rw-r--r--  tensorflow/core/kernels/aggregate_ops_cpu.h | 113
-rw-r--r--  tensorflow/core/kernels/constant_op.cc | 5
-rw-r--r--  tensorflow/core/kernels/control_flow_ops.cc | 112
-rw-r--r--  tensorflow/core/kernels/cwise_op_exp.cc | 1
-rw-r--r--  tensorflow/core/kernels/cwise_op_floor_div.cc | 2
-rw-r--r--  tensorflow/core/kernels/cwise_op_neg.cc | 12
-rw-r--r--  tensorflow/core/kernels/cwise_op_sub.cc | 12
-rw-r--r--  tensorflow/core/kernels/cwise_ops_sycl_common.h | 24
-rw-r--r--  tensorflow/core/kernels/debug_ops.h | 1
-rw-r--r--  tensorflow/core/kernels/pack_op.cc | 25
-rw-r--r--  tensorflow/core/kernels/reduction_ops_common.h | 29
-rw-r--r--  tensorflow/core/kernels/reduction_ops_sum.cc | 27
-rw-r--r--  tensorflow/core/kernels/scatter_functor.h | 19
-rw-r--r--  tensorflow/core/kernels/scatter_op.cc | 17
-rw-r--r--  tensorflow/core/kernels/session_ops.cc | 31
-rw-r--r--  tensorflow/core/kernels/shape_ops.cc | 37
-rw-r--r--  tensorflow/core/kernels/slice_op.cc | 57
-rw-r--r--  tensorflow/core/kernels/slice_op_cpu_impl.h | 12
-rw-r--r--  tensorflow/core/kernels/split_lib.h | 11
-rw-r--r--  tensorflow/core/kernels/split_lib_cpu.cc | 19
-rw-r--r--  tensorflow/core/kernels/split_op.cc | 85
-rw-r--r--  tensorflow/core/kernels/tile_ops.cc | 25
-rw-r--r--  tensorflow/core/kernels/tile_ops_cpu_impl.h | 24
-rw-r--r--  tensorflow/core/kernels/training_ops.cc | 17
-rw-r--r--  tensorflow/core/kernels/transpose_functor_cpu.cc | 24
-rw-r--r--  tensorflow/core/kernels/unpack_op.cc | 25
-rw-r--r--  tensorflow/core/ops/array_ops.cc | 47
-rw-r--r--  tensorflow/core/platform/default/logging.cc | 42
-rw-r--r--  tensorflow/core/platform/default/logging.h | 16
-rw-r--r--  tensorflow/examples/how_tos/__init__.py | 3
-rw-r--r--  tensorflow/examples/udacity/README.md | 3
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md | 244
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md | 17
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md | 49
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md | 237
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md | 521
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md | 22
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md | 4
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md | 26
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md | 27
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md | 4
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md | 4
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md | 207
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md | 37
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md | 4
-rw-r--r--  tensorflow/g3doc/api_docs/python/summary.md | 481
-rw-r--r--  tensorflow/g3doc/api_docs/python/test.md | 521
-rw-r--r--  tensorflow/g3doc/get_started/os_setup.md | 99
-rw-r--r--  tensorflow/g3doc/how_tos/embedding_viz/index.md | 2
-rw-r--r--  tensorflow/g3doc/how_tos/hadoop/index.md | 30
-rw-r--r--  tensorflow/g3doc/resources/index.md | 1
-rw-r--r--  tensorflow/g3doc/tutorials/image_recognition/index.md | 4
-rw-r--r--  tensorflow/g3doc/tutorials/recurrent/index.md | 2
-rw-r--r--  tensorflow/java/src/main/java/org/tensorflow/Session.java | 2
-rw-r--r--  tensorflow/python/kernel_tests/control_flow_ops_py_test.py | 19
-rw-r--r--  tensorflow/python/kernel_tests/conv_ops_test.py | 4
-rw-r--r--  tensorflow/python/kernel_tests/decode_image_op_test.py | 2
-rw-r--r--  tensorflow/python/kernel_tests/pooling_ops_test.py | 16
-rw-r--r--  tensorflow/python/kernel_tests/session_ops_test.py | 6
-rw-r--r--  tensorflow/python/kernel_tests/split_op_test.py | 8
-rw-r--r--  tensorflow/python/ops/array_ops.py | 4
-rw-r--r--  tensorflow/python/ops/losses/losses.py | 4
-rw-r--r--  tensorflow/python/ops/math_ops.py | 6
-rw-r--r--  tensorflow/python/ops/nn.py | 15
-rw-r--r--  tensorflow/python/ops/rnn_cell_impl.py | 1
-rw-r--r--  tensorflow/python/platform/test.py | 3
-rw-r--r--  tensorflow/python/training/saver_test.py | 7
-rwxr-xr-x  tensorflow/tools/ci_build/builds/test_tutorials.sh | 4
-rwxr-xr-x  tensorflow/tools/ci_build/protobuf/protobuf_optimized_pip.sh | 3
-rw-r--r--  tensorflow/tools/pip_package/setup.py | 2
-rw-r--r--  tensorflow/workspace.bzl | 8
-rw-r--r--  third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint | 9
-rw-r--r--  third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/PacketMathAVX2.h | 2
-rw-r--r--  third_party/gpus/cuda_configure.bzl | 4
-rwxr-xr-x  third_party/sycl/crosstool/BUILD.tpl | 2
-rwxr-xr-x  third_party/sycl/crosstool/CROSSTOOL.tpl | 2
-rwxr-xr-x  third_party/sycl/crosstool/computecpp.tpl | 2
104 files changed, 2873 insertions, 1259 deletions
diff --git a/README.md b/README.md
index f8f5de8e0e..fd61605b0e 100644
--- a/README.md
+++ b/README.md
@@ -33,10 +33,10 @@ and discussion.**
People who are a little more adventurous can also try our nightly binaries:
-* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
-* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
-* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0rc1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
-* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0rc1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
+* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
+* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
+* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
+* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
* [Android](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/lastSuccessfulBuild/artifact/bazel-out/local_linux/bin/tensorflow/examples/android/tensorflow_demo.apk) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/))
#### *Try your first TensorFlow program*
diff --git a/configure b/configure
index d6d3e19afa..c2401252dd 100755
--- a/configure
+++ b/configure
@@ -23,9 +23,13 @@ function bazel_clean_and_fetch() {
# TODO(pcloudy): Re-enable it after bazel clean --expunge is fixed.
if ! is_windows; then
bazel clean --expunge
+ # TODO(https://github.com/bazelbuild/bazel/issues/2220) Remove the nested `bazel query`.
+ bazel fetch $(bazel query "//tensorflow/... -//tensorflow/examples/android/...")
+ else
+ # TODO(pcloudy): Also filter out //tensorflow/examples/android/... on Windows after
+ # https://github.com/bazelbuild/bazel/issues/2248 is fixed.
+ bazel fetch //tensorflow/...
fi
- # TODO(https://github.com/bazelbuild/bazel/issues/2220) Remove the nested `bazel query`.
- bazel fetch $(bazel query "//tensorflow/... -//tensorflow/examples/android/...")
}
## Set up python-related environment settings
@@ -280,7 +284,7 @@ while true; do
TF_CUDNN_VERSION=${BASH_REMATCH[1]}
echo "libcudnn.so resolves to libcudnn${TF_CUDNN_EXT}"
elif [[ "$REALVAL" =~ ([0-9]*).dylib ]]; then
- TF_CUDNN_EXT="."${BASH_REMATCH[1]}".dylib"
+ TF_CUDNN_EXT=${BASH_REMATCH[1]}".dylib"
TF_CUDNN_VERSION=${BASH_REMATCH[1]}
echo "libcudnn.dylib resolves to libcudnn${TF_CUDNN_EXT}"
fi
@@ -387,7 +391,7 @@ if [ "$TF_NEED_OPENCL" == "1" ]; then
while true; do
fromuser=""
if [ -z "$HOST_CXX_COMPILER" ]; then
- default_cxx_host_compiler=$(which g++|| true)
+ default_cxx_host_compiler=$(which clang++-3.6 || true)
read -p "Please specify which C++ compiler should be used as the host C++ compiler. [Default is $default_cxx_host_compiler]: " HOST_CXX_COMPILER
fromuser="1"
if [ -z "$HOST_CXX_COMPILER" ]; then
@@ -410,7 +414,7 @@ done
while true; do
fromuser=""
if [ -z "$HOST_C_COMPILER" ]; then
- default_c_host_compiler=$(which gcc|| true)
+ default_c_host_compiler=$(which clang-3.6 || true)
read -p "Please specify which C compiler should be used as the host C compiler. [Default is $default_c_host_compiler]: " HOST_C_COMPILER
fromuser="1"
if [ -z "$HOST_C_COMPILER" ]; then
diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc
index a1138d5783..9fc399a771 100644
--- a/tensorflow/c/c_api.cc
+++ b/tensorflow/c/c_api.cc
@@ -82,6 +82,12 @@ extern "C" {
const char* TF_Version() { return TF_VERSION_STRING; }
// --------------------------------------------------------------------------
+size_t TF_DataTypeSize(TF_DataType dt) {
+ return static_cast<size_t>(
+ tensorflow::DataTypeSize(static_cast<DataType>(dt)));
+}
+
+// --------------------------------------------------------------------------
struct TF_Status {
Status status;
};
diff --git a/tensorflow/c/c_api.h b/tensorflow/c/c_api.h
index 7e411e9622..3ea2d31699 100644
--- a/tensorflow/c/c_api.h
+++ b/tensorflow/c/c_api.h
@@ -100,6 +100,11 @@ typedef enum {
TF_RESOURCE = 20,
} TF_DataType;
+// TF_DataTypeSize returns the sizeof() for the underlying type corresponding
+// to the given TF_DataType enum value. Returns 0 for variable length types
+// (eg. TF_STRING) or on failure.
+extern size_t TF_DataTypeSize(TF_DataType dt);
+
// --------------------------------------------------------------------------
// TF_Code holds an error code. The enum values here are identical to
// corresponding values in error_codes.proto.
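A minimal sketch (not part of this change) of exercising the new TF_DataTypeSize entry point from Python through ctypes; the shared-library name and the enum constants spelled out below (TF_FLOAT = 1, TF_DOUBLE = 2, TF_STRING = 7) are assumptions based on the TF_DataType enum in this header, and a built libtensorflow is presumed available.

# Hypothetical ctypes check of TF_DataTypeSize; library name is an assumption.
import ctypes

lib = ctypes.CDLL("libtensorflow.so")
lib.TF_DataTypeSize.restype = ctypes.c_size_t
lib.TF_DataTypeSize.argtypes = [ctypes.c_int]

TF_FLOAT, TF_DOUBLE, TF_STRING = 1, 2, 7  # assumed values from the TF_DataType enum

print(lib.TF_DataTypeSize(TF_FLOAT))   # 4
print(lib.TF_DataTypeSize(TF_DOUBLE))  # 8
print(lib.TF_DataTypeSize(TF_STRING))  # 0: variable-length type, as documented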
diff --git a/tensorflow/c/c_api_test.cc b/tensorflow/c/c_api_test.cc
index 4b43b74ce6..4ea53ab230 100644
--- a/tensorflow/c/c_api_test.cc
+++ b/tensorflow/c/c_api_test.cc
@@ -211,6 +211,12 @@ TEST(CAPI, DataTypeEnum) {
EXPECT_EQ(TF_UINT16, static_cast<TF_DataType>(tensorflow::DT_UINT16));
EXPECT_EQ(TF_COMPLEX128, static_cast<TF_DataType>(tensorflow::DT_COMPLEX128));
EXPECT_EQ(TF_HALF, static_cast<TF_DataType>(tensorflow::DT_HALF));
+ EXPECT_EQ(TF_DataTypeSize(TF_DOUBLE),
+ tensorflow::DataTypeSize(tensorflow::DT_DOUBLE));
+ EXPECT_EQ(TF_DataTypeSize(TF_STRING),
+ tensorflow::DataTypeSize(tensorflow::DT_STRING));
+ // Test with invalid type; should always return 0 as documented
+ EXPECT_EQ(TF_DataTypeSize(static_cast<TF_DataType>(0)), 0);
}
TEST(CAPI, StatusEnum) {
diff --git a/tensorflow/cc/framework/scope.cc b/tensorflow/cc/framework/scope.cc
index 63ffeaef2a..2bce24f2fc 100644
--- a/tensorflow/cc/framework/scope.cc
+++ b/tensorflow/cc/framework/scope.cc
@@ -29,7 +29,8 @@ Scope::Scope(Graph* graph, Status* status, Scope::NameMap* name_map,
status_(status),
name_map_(name_map),
refiner_(refiner),
- scope_used_(nullptr) {}
+ scope_used_(nullptr),
+ colocation_constraints_() {}
Scope Scope::NewRootScope() {
Graph* graph = new Graph(OpRegistry::Global());
diff --git a/tensorflow/contrib/cmake/CMakeLists.txt b/tensorflow/contrib/cmake/CMakeLists.txt
index ec6be97151..691c87457c 100644
--- a/tensorflow/contrib/cmake/CMakeLists.txt
+++ b/tensorflow/contrib/cmake/CMakeLists.txt
@@ -197,7 +197,6 @@ if (tensorflow_ENABLE_GPU)
endif()
include(tf_core_cpu.cmake)
-include(tf_models.cmake)
include(tf_core_ops.cmake)
include(tf_core_direct_session.cmake)
include(tf_core_kernels.cmake)
diff --git a/tensorflow/contrib/cmake/setup.py b/tensorflow/contrib/cmake/setup.py
index 8d66e192ec..12dea2ea4f 100644
--- a/tensorflow/contrib/cmake/setup.py
+++ b/tensorflow/contrib/cmake/setup.py
@@ -26,7 +26,7 @@ from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
-_VERSION = '0.12.0-rc1-cmake-experimental'
+_VERSION = '0.12.0-cmake-experimental'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
diff --git a/tensorflow/contrib/cmake/tf_cc_ops.cmake b/tensorflow/contrib/cmake/tf_cc_ops.cmake
index b33f318ed3..1eee73b7e4 100644
--- a/tensorflow/contrib/cmake/tf_cc_ops.cmake
+++ b/tensorflow/contrib/cmake/tf_cc_ops.cmake
@@ -93,8 +93,6 @@ file(GLOB_RECURSE tf_cc_srcs
"${tensorflow_source_dir}/tensorflow/cc/client/*.cc"
"${tensorflow_source_dir}/tensorflow/cc/gradients/*.h"
"${tensorflow_source_dir}/tensorflow/cc/gradients/*.cc"
- "${tensorflow_source_dir}/tensorflow/cc/saved_model/*.h"
- "${tensorflow_source_dir}/tensorflow/cc/saved_model/*.cc"
"${tensorflow_source_dir}/tensorflow/cc/training/*.h"
"${tensorflow_source_dir}/tensorflow/cc/training/*.cc"
)
diff --git a/tensorflow/contrib/cmake/tf_core_cpu.cmake b/tensorflow/contrib/cmake/tf_core_cpu.cmake
index f850c405d0..320dfbf68c 100644
--- a/tensorflow/contrib/cmake/tf_core_cpu.cmake
+++ b/tensorflow/contrib/cmake/tf_core_cpu.cmake
@@ -2,6 +2,8 @@
# tf_core_cpu library
########################################################
file(GLOB_RECURSE tf_core_cpu_srcs
+ "${tensorflow_source_dir}/tensorflow/cc/saved_model/*.h"
+ "${tensorflow_source_dir}/tensorflow/cc/saved_model/*.cc"
"${tensorflow_source_dir}/tensorflow/core/common_runtime/*.h"
"${tensorflow_source_dir}/tensorflow/core/common_runtime/*.cc"
"${tensorflow_source_dir}/tensorflow/core/graph/*.h"
@@ -10,6 +12,8 @@ file(GLOB_RECURSE tf_core_cpu_srcs
)
file(GLOB_RECURSE tf_core_cpu_exclude_srcs
+ "${tensorflow_source_dir}/tensorflow/cc/saved_model/*test*.h"
+ "${tensorflow_source_dir}/tensorflow/cc/saved_model/*test*.cc"
"${tensorflow_source_dir}/tensorflow/core/*test*.h"
"${tensorflow_source_dir}/tensorflow/core/*test*.cc"
"${tensorflow_source_dir}/tensorflow/core/*main.cc"
diff --git a/tensorflow/contrib/cmake/tf_core_ops.cmake b/tensorflow/contrib/cmake/tf_core_ops.cmake
index 14b6b17e61..0e61bfc5cf 100644
--- a/tensorflow/contrib/cmake/tf_core_ops.cmake
+++ b/tensorflow/contrib/cmake/tf_core_ops.cmake
@@ -9,7 +9,6 @@ set(tf_op_lib_names
"io_ops"
"linalg_ops"
"logging_ops"
- "losses"
"math_ops"
"nn_ops"
"no_op"
diff --git a/tensorflow/contrib/cmake/tf_models.cmake b/tensorflow/contrib/cmake/tf_models.cmake
deleted file mode 100644
index 4349f7d09b..0000000000
--- a/tensorflow/contrib/cmake/tf_models.cmake
+++ /dev/null
@@ -1,21 +0,0 @@
-########################################################
-# tf_models_word2vec_ops library
-########################################################
-file(GLOB tf_models_word2vec_ops_srcs
- "${tensorflow_source_dir}/tensorflow_models/tutorials/embedding/word2vec_ops.cc"
-)
-
-add_library(tf_models_word2vec_ops OBJECT ${tf_models_word2vec_ops_srcs})
-
-add_dependencies(tf_models_word2vec_ops tf_core_framework)
-
-########################################################
-# tf_models_word2vec_kernels library
-########################################################
-file(GLOB tf_models_word2vec_kernels_srcs
- "${tensorflow_source_dir}/tensorflow_models/tutorials/embedding/word2vec_kernels.cc"
-)
-
-add_library(tf_models_word2vec_kernels OBJECT ${tf_models_word2vec_kernels_srcs})
-
-add_dependencies(tf_models_word2vec_kernels tf_core_cpu)
diff --git a/tensorflow/contrib/cmake/tf_python.cmake b/tensorflow/contrib/cmake/tf_python.cmake
index 8672e17443..1ba98cd147 100644
--- a/tensorflow/contrib/cmake/tf_python.cmake
+++ b/tensorflow/contrib/cmake/tf_python.cmake
@@ -310,6 +310,8 @@ add_python_module("tensorflow/contrib/learn/python/learn/tests")
add_python_module("tensorflow/contrib/learn/python/learn/tests/dataframe")
add_python_module("tensorflow/contrib/learn/python/learn/utils")
add_python_module("tensorflow/contrib/legacy_seq2seq")
+add_python_module("tensorflow/contrib/legacy_seq2seq/python")
+add_python_module("tensorflow/contrib/legacy_seq2seq/python/ops")
add_python_module("tensorflow/contrib/linalg")
add_python_module("tensorflow/contrib/linalg/python")
add_python_module("tensorflow/contrib/linalg/python/ops")
@@ -489,7 +491,6 @@ GENERATE_PYTHON_OP_LIB("image_ops")
GENERATE_PYTHON_OP_LIB("io_ops")
GENERATE_PYTHON_OP_LIB("linalg_ops")
GENERATE_PYTHON_OP_LIB("logging_ops")
-GENERATE_PYTHON_OP_LIB("losses")
GENERATE_PYTHON_OP_LIB("nn_ops")
GENERATE_PYTHON_OP_LIB("parsing_ops")
GENERATE_PYTHON_OP_LIB("random_ops")
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/student_t_test.py b/tensorflow/contrib/distributions/python/kernel_tests/student_t_test.py
index 050a71d9c8..116d4e1a41 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/student_t_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/student_t_test.py
@@ -79,6 +79,32 @@ class StudentTTest(tf.test.TestCase):
self.assertAllClose(expected_pdf, pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
+ def testStudentCDFAndLogCDF(self):
+ with self.test_session():
+ batch_size = 6
+ df = tf.constant([3.] * batch_size)
+ mu = tf.constant([7.] * batch_size)
+ sigma = tf.constant([8.] * batch_size)
+ df_v = 3.
+ mu_v = 7.
+ sigma_v = 8.
+ t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
+ student = tf.contrib.distributions.StudentT(df, mu=mu, sigma=sigma)
+
+ log_cdf = student.log_cdf(t)
+ self.assertEquals(log_cdf.get_shape(), (6,))
+ log_cdf_values = log_cdf.eval()
+ cdf = student.cdf(t)
+ self.assertEquals(cdf.get_shape(), (6,))
+ cdf_values = cdf.eval()
+
+ expected_log_cdf = stats.t.logcdf(t, df_v, loc=mu_v, scale=sigma_v)
+ expected_cdf = stats.t.cdf(t, df_v, loc=mu_v, scale=sigma_v)
+ self.assertAllClose(expected_log_cdf, log_cdf_values, atol=0., rtol=1e-5)
+ self.assertAllClose(np.log(expected_cdf), log_cdf_values, atol=0., rtol=1e-5)
+ self.assertAllClose(expected_cdf, cdf_values, atol=0., rtol=1e-5)
+ self.assertAllClose(np.exp(expected_log_cdf), cdf_values, atol=0., rtol=1e-5)
+
def testStudentEntropy(self):
df_v = np.array([[2., 3., 7.]]) # 1x3
mu_v = np.array([[1., -1, 0]]) # 1x3
diff --git a/tensorflow/contrib/distributions/python/ops/student_t.py b/tensorflow/contrib/distributions/python/ops/student_t.py
index e894bd0f42..dbf270bf44 100644
--- a/tensorflow/contrib/distributions/python/ops/student_t.py
+++ b/tensorflow/contrib/distributions/python/ops/student_t.py
@@ -21,6 +21,9 @@ from __future__ import print_function
import math
import numpy as np
+import tensorflow as tf
+
+from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
@@ -217,6 +220,15 @@ class StudentT(distribution.Distribution):
(math_ops.sqrt(self.df) * math.sqrt(math.pi) * self.sigma) *
math_ops.pow(1. + math_ops.square(y) / self.df, -(0.5 + half_df)))
+ def _cdf(self, x):
+ # We use the same notation here as the Wikipedia article on Student's t-distribution.
+ t = (x - self.mu)/self.sigma
+ x_t = self.df / (math_ops.square(t) + self.df)
+ # The cdf is defined differently for positive and negative t
+ positive_cdf = 1. - 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
+ negative_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
+ return tf.where(tf.less(t, 0), negative_cdf, positive_cdf)
+
def _entropy(self):
u = array_ops.expand_dims(self.df * self._ones(), -1)
v = array_ops.expand_dims(self._ones(), -1)
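The _cdf above expresses the Student's t CDF through the regularized incomplete beta function; the short NumPy/SciPy sketch below (an illustration, not part of the commit) checks that formulation against scipy.stats.t.cdf at the same points used in the test above.

# Verify the betainc-based Student's t CDF formulation against SciPy.
import numpy as np
from scipy import special, stats

df, mu, sigma = 3.0, 7.0, 8.0
x = np.array([-2.5, 2.5, 8.0, 0.0, -1.0, 2.0])

t = (x - mu) / sigma
x_t = df / (t ** 2 + df)
negative_cdf = 0.5 * special.betainc(0.5 * df, 0.5, x_t)        # t < 0 branch
positive_cdf = 1.0 - 0.5 * special.betainc(0.5 * df, 0.5, x_t)  # t >= 0 branch
cdf = np.where(t < 0, negative_cdf, positive_cdf)

np.testing.assert_allclose(cdf, stats.t.cdf(x, df, loc=mu, scale=sigma), rtol=1e-5)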
diff --git a/tensorflow/contrib/framework/python/ops/arg_scope.py b/tensorflow/contrib/framework/python/ops/arg_scope.py
index e65ed8c233..b7ec9ba936 100644
--- a/tensorflow/contrib/framework/python/ops/arg_scope.py
+++ b/tensorflow/contrib/framework/python/ops/arg_scope.py
@@ -33,7 +33,7 @@
The first call to conv2d will use predefined args:
layers.conv2d(inputs, 64, [11, 11], 4, padding='VALID', ..., scope='conv1')
- The second call to Conv will overwrite padding:
+ The second call to conv2d will overwrite padding:
layers.conv2d(inputs, 256, [5, 5], padding='SAME', ..., scope='conv2')
Example of how to reuse an arg_scope:
diff --git a/tensorflow/contrib/ios_examples/README.md b/tensorflow/contrib/ios_examples/README.md
index 996f4be2b2..a802007c5d 100644
--- a/tensorflow/contrib/ios_examples/README.md
+++ b/tensorflow/contrib/ios_examples/README.md
@@ -41,10 +41,9 @@ cp ~/graphs/inception5h/* tensorflow/contrib/ios_examples/simple/data/
If you're hitting problems, here's a checklist of common things to investigate:
- - Make sure that you've run the `download_dependencies.sh` and
- `compile_ios_protobuf.sh` scripts before you run `compile_ios_tensorflow`.
- (These should be called by `build_all_ios.sh` if you are using it, but check
- if they have run successful.)
+ - Make sure that you've run the `build_all_ios.sh` script.
+ This will run `download_dependencies.sh`, `compile_ios_protobuf.sh`, and `compile_ios_tensorflow.sh`.
+ (Check that each one ran successfully.)
- Check that you have version 7.3 of Xcode.
@@ -138,4 +137,4 @@ After that, you can manually look at modifying the list of kernels
included in tensorflow/contrib/makefile/tf_op_files.txt to reduce the number of
implementations to the ones you're actually using in your own model. We're
hoping to automate this step in the future, but for now manually removing them
-is the best approach. \ No newline at end of file
+is the best approach.
diff --git a/tensorflow/contrib/learn/python/learn/README.md b/tensorflow/contrib/learn/python/learn/README.md
index 6afcfbf376..c7e0dc816e 100644
--- a/tensorflow/contrib/learn/python/learn/README.md
+++ b/tensorflow/contrib/learn/python/learn/README.md
@@ -34,7 +34,7 @@ Optionally you can install [scikit-learn](http://scikit-learn.org/stable/) and [
### Usage
-Below are a few simple examples of the API. For more examples, please see [examples](https://www.tensorflow.org/code/tensorflow/examples/skflow).
+Below are a few simple examples of the API. For more examples, please see [examples](https://www.tensorflow.org/code/tensorflow/examples/learn).
General tips:
@@ -210,10 +210,9 @@ and then load the reported URL.
## More examples
-See the [examples folder](https://www.tensorflow.org/code/tensorflow/examples/skflow) for:
+See the [examples folder](https://www.tensorflow.org/code/tensorflow/examples/learn) for:
-- An easy way to handle [categorical variables](https://www.tensorflow.org/code/tensorflow/examples/skflow/text_classification.py) (words are just an example of a categorical variable)
-- Text Classification: see examples for [RNN](https://www.tensorflow.org/code/tensorflow/examples/skflow/text_classification_character_rnn.py) and [CNN](https://www.tensorflow.org/code/tensorflow/examples/skflow/text_classification_character_cnn.py) on characters
-- [Language modeling and text sequence to sequence](https://www.tensorflow.org/code/tensorflow/examples/skflow/language_model.py)
-- [Digit recognition using a CNN](https://www.tensorflow.org/code/tensorflow/examples/skflow/digits.py)
+- An easy way to handle [categorical variables](https://www.tensorflow.org/code/tensorflow/examples/learn/text_classification.py) (words are just an example of a categorical variable)
+- Text Classification: see examples for [RNN](https://www.tensorflow.org/code/tensorflow/examples/learn/text_classification_character_rnn.py) and [CNN](https://www.tensorflow.org/code/tensorflow/examples/learn/text_classification_character_cnn.py) on characters
+- [Digit recognition using a CNN](https://www.tensorflow.org/code/tensorflow/examples/learn/mnist.py)
- And much more!
diff --git a/tensorflow/contrib/learn/python/learn/datasets/BUILD b/tensorflow/contrib/learn/python/learn/datasets/BUILD
index 68760e2653..3ece16170a 100644
--- a/tensorflow/contrib/learn/python/learn/datasets/BUILD
+++ b/tensorflow/contrib/learn/python/learn/datasets/BUILD
@@ -18,6 +18,7 @@ py_library(
"base.py",
"mnist.py",
"produce_small_datasets.py",
+ "synthetic.py",
"text_datasets.py",
],
data = [":data_csv"],
@@ -76,3 +77,15 @@ py_test(
"//tensorflow/python:framework_test_lib",
],
)
+
+py_test(
+ name = "synthetic_test",
+ size = "small",
+ srcs = ["synthetic_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/learn",
+ "//tensorflow/python:framework_test_lib",
+ ],
+)
diff --git a/tensorflow/contrib/learn/python/learn/datasets/__init__.py b/tensorflow/contrib/learn/python/learn/datasets/__init__.py
index 80a0af5f52..a3521b4109 100644
--- a/tensorflow/contrib/learn/python/learn/datasets/__init__.py
+++ b/tensorflow/contrib/learn/python/learn/datasets/__init__.py
@@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================
-"""Module includes reference datasets and utilities to load datasets."""
+"""Dataset utilities and synthetic/reference datasets."""
from __future__ import absolute_import
from __future__ import division
@@ -26,6 +26,7 @@ import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.datasets import mnist
+from tensorflow.contrib.learn.python.learn.datasets import synthetic
from tensorflow.contrib.learn.python.learn.datasets import text_datasets
# Export load_iris and load_boston.
@@ -43,6 +44,12 @@ DATASETS = {
'dbpedia': text_datasets.load_dbpedia,
}
+# List of all synthetic datasets
+SYNTHETIC = {
+ # All of these will return ['data', 'target'] -> base.Dataset
+ 'circles': synthetic.circles,
+ 'spirals': synthetic.spirals
+}
def load_dataset(name, size='small', test_with_fake_data=False):
"""Loads dataset by name.
@@ -64,3 +71,35 @@ def load_dataset(name, size='small', test_with_fake_data=False):
return DATASETS[name](size, test_with_fake_data)
else:
return DATASETS[name]()
+
+
+def make_dataset(name, n_samples=100, noise=None, seed=42, *args, **kwargs):
+ """Creates binary synthetic datasets
+
+ Args:
+ name: str, name of the dataset to generate
+ n_samples: int, number of datapoints to generate
+ noise: float or None, standard deviation of the Gaussian noise added
+ seed: int or None, seed for noise
+
+ Returns:
+ Shuffled features and labels for the given synthetic dataset, of type `base.Dataset`
+
+ Raises:
+ ValueError: Raised if `name` not found
+
+ Note:
+ - This is a generic synthetic data generator; individual generators might have more parameters.
+ See the documentation of the individual generators for details.
+ - The `noise` parameter uses `numpy.random.normal` and therefore depends on `numpy`'s seed.
+
+ TODO:
+ - Support multiclass datasets
+ - Need a shuffling routine. Currently synthetic datasets are reshuffled to avoid train/test correlation,
+ but that hurts reproducibility.
+ """
+ # seed = kwargs.pop('seed', None)
+ if name not in SYNTHETIC:
+ raise ValueError('Synthetic dataset not found or not implemented: %s' % name)
+ else:
+ return SYNTHETIC[name](n_samples=n_samples, noise=noise, seed=seed, *args, **kwargs)
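A brief usage sketch for the new make_dataset wrapper (illustrative, not part of the commit); it relies only on the 'circles' generator registered in SYNTHETIC above and on the base.Dataset return type.

# Generate a reproducible two-class 'circles' dataset through the wrapper.
from tensorflow.contrib.learn.python.learn import datasets

circles = datasets.make_dataset('circles', n_samples=200, noise=0.05, seed=42)
print(circles.data.shape)    # (200, 2) features
print(circles.target.shape)  # (200,) integer class labels in {0, 1}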
diff --git a/tensorflow/contrib/learn/python/learn/datasets/synthetic.py b/tensorflow/contrib/learn/python/learn/datasets/synthetic.py
new file mode 100644
index 0000000000..907dc0f3df
--- /dev/null
+++ b/tensorflow/contrib/learn/python/learn/datasets/synthetic.py
@@ -0,0 +1,202 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Synthetic dataset generators."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.learn.python.learn.datasets.base import Dataset
+
+def circles(n_samples=100, noise=None, seed=None, factor=0.8, n_classes=2, *args, **kwargs):
+ """Create circles separated by some value
+
+ Args:
+ n_samples: int, number of datapoints to generate
+ noise: float or None, standard deviation of the Gaussian noise added
+ seed: int or None, seed for the noise
+ factor: float, size factor of the inner circles with respect to the outer ones
+ n_classes: int, number of classes to generate
+
+ Returns:
+ Shuffled features and labels for the 'circles' synthetic dataset, of type `base.Dataset`
+
+ Note:
+ The multi-class support might not work as expected if `noise` is enabled
+
+ TODO:
+ - Generation of unbalanced data
+
+ Credit goes to (under BSD 3 clause):
+ B. Thirion,
+ G. Varoquaux,
+ A. Gramfort,
+ V. Michel,
+ O. Grisel,
+ G. Louppe,
+ J. Nothman
+ """
+ if seed is not None:
+ np.random.seed(seed)
+ # Algorithm: 1) generate the initial circle; 2) for every class, generate a smaller-radius circle
+ linspace = np.linspace(0, 2*np.pi, n_samples // n_classes)
+ circ_x = np.empty(0, dtype=np.int32)
+ circ_y = np.empty(0, dtype=np.int32)
+ base_cos = np.cos(linspace)
+ base_sin = np.sin(linspace)
+
+ y = np.empty(0, dtype=np.int32)
+ for label in range(n_classes):
+ circ_x = np.append(circ_x, base_cos)
+ circ_y = np.append(circ_y, base_sin)
+ base_cos *= factor
+ base_sin *= factor
+ y = np.append(y, label*np.ones(n_samples // n_classes, dtype=np.int32))
+
+ # Add more points if n_samples is not divisible by n_classes (unbalanced!)
+ extras = n_samples % n_classes
+ circ_x = np.append(circ_x, np.cos(np.random.rand(extras)*2*np.pi))
+ circ_y = np.append(circ_y, np.sin(np.random.rand(extras)*2*np.pi))
+ y = np.append(y, np.zeros(extras, dtype=np.int32))
+
+ # Reshape the features/labels
+ X = np.vstack((circ_x, circ_y)).T
+ y = np.hstack(y)
+
+ # Shuffle the data
+ indices = np.random.permutation(range(n_samples))
+ if noise is not None:
+ X += np.random.normal(scale=noise, size=X.shape)
+ return Dataset(data=X[indices], target=y[indices])
+
+
+def spirals(n_samples=100, noise=None, seed=None,
+ mode = 'archimedes',
+ n_loops = 2,
+ *args, **kwargs):
+ """Create spirals
+
+ Currently only binary classification is supported for spiral generation
+
+ Args:
+ n_samples: int, number of datapoints to generate
+ noise: float or None, standard deviation of the Gaussian noise added
+ seed: int or None, seed for the noise
+ n_loops: int, number of spiral loops, doesn't play well with 'bernoulli'
+ mode: str, how the spiral should be generated. Current implementations:
+ 'archimedes': a spiral with equal distances between branches
+ 'bernoulli': logarithmic spiral with branch distances increasing
+ 'fermat': a spiral with branch distances decreasing (sqrt)
+
+ Returns:
+ Shuffled features and labels for the 'spirals' synthetic dataset, of type `base.Dataset`
+
+ Raises:
+ ValueError: If the generation `mode` is not valid
+
+ TODO:
+ - Generation of unbalanced data
+ """
+ n_classes = 2 # Only binary spirals are supported for now
+
+ _modes = {
+ 'archimedes': _archimedes_spiral,
+ 'bernoulli': _bernoulli_spiral,
+ 'fermat': _fermat_spiral
+ }
+
+ if mode is None or mode not in _modes:
+ raise ValueError("Cannot generate spiral with mode %s"%mode)
+
+ if seed is not None:
+ np.random.seed(seed)
+ linspace = np.linspace(0, 2*n_loops*np.pi, n_samples // n_classes)
+ spir_x = np.empty(0, dtype=np.int32)
+ spir_y = np.empty(0, dtype=np.int32)
+
+ y = np.empty(0, dtype=np.int32)
+ for label in range(n_classes):
+ base_cos, base_sin = _modes[mode](linspace, label*np.pi, *args, **kwargs)
+ spir_x = np.append(spir_x, base_cos)
+ spir_y = np.append(spir_y, base_sin)
+ y = np.append(y, label*np.ones(n_samples // n_classes, dtype=np.int32))
+
+ # Add more points if n_samples is not divisible by n_classes (unbalanced!)
+ extras = n_samples % n_classes
+ if extras > 0:
+ x_extra, y_extra = _modes[mode](np.random.rand(extras)*2*np.pi, *args, **kwargs)
+ spir_x = np.append(spir_x, x_extra)
+ spir_y = np.append(spir_y, y_extra)
+ y = np.append(y, np.zeros(extras, dtype=np.int32))
+
+ # Reshape the features/labels
+ X = np.vstack((spir_x, spir_y)).T
+ y = np.hstack(y)
+
+ # Shuffle the data
+ indices = np.random.permutation(range(n_samples))
+ if noise is not None:
+ X += np.random.normal(scale=noise, size=X.shape)
+ return Dataset(data=X[indices], target=y[indices])
+
+
+def _archimedes_spiral(theta, theta_offset=0., *args, **kwargs):
+ """Return Archimedes spiral
+
+ Args:
+ theta: array-like, angles from polar coordinates to be converted
+ theta_offset: float, angle offset in radians (2*pi = 0)
+ """
+ x, y = theta*np.cos(theta + theta_offset), theta*np.sin(theta + theta_offset)
+ x_norm = np.max(np.abs(x))
+ y_norm = np.max(np.abs(y))
+ x, y = x / x_norm, y / y_norm
+ return x, y
+
+
+def _bernoulli_spiral(theta, theta_offset=0., *args, **kwargs):
+ """Return Equiangular (Bernoulli's) spiral
+
+ Args:
+ theta: array-like, angles from polar coordinates to be converted
+ theta_offset: float, angle offset in radians (2*pi = 0)
+
+ Kwargs:
+ exp_scale: growth rate of the exponential
+ """
+ exp_scale = kwargs.pop('exp_scale', 0.1)
+
+ x, y = np.exp(exp_scale*theta)*np.cos(theta + theta_offset), np.exp(exp_scale*theta)*np.sin(theta + theta_offset)
+ x_norm = np.max(np.abs(x))
+ y_norm = np.max(np.abs(y))
+ x, y = x / x_norm, y / y_norm
+ return x, y
+
+
+def _fermat_spiral(theta, theta_offset=0., *args, **kwargs):
+ """Return Parabolic (Fermat's) spiral
+
+ Args:
+ theta: array-like, angles from polar coordinates to be converted
+ theta_offset: float, angle offset in radians (2*pi = 0)
+ """
+ x, y = np.sqrt(theta)*np.cos(theta + theta_offset), np.sqrt(theta)*np.sin(theta + theta_offset)
+ x_norm = np.max(np.abs(x))
+ y_norm = np.max(np.abs(y))
+ x, y = x / x_norm, y / y_norm
+ return x, y
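An illustrative sketch (not in the commit) of calling the spirals generator directly with each supported mode; the shape checks mirror what synthetic_test.py below asserts.

# Sample each spiral mode and sanity-check the returned Dataset.
from tensorflow.contrib.learn.python.learn.datasets import synthetic

for mode in ('archimedes', 'bernoulli', 'fermat'):
    spir = synthetic.spirals(n_samples=120, noise=0.02, seed=7, mode=mode)
    assert spir.data.shape == (120, 2)
    assert set(spir.target) == {0, 1}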
diff --git a/tensorflow/contrib/learn/python/learn/datasets/synthetic_test.py b/tensorflow/contrib/learn/python/learn/datasets/synthetic_test.py
new file mode 100644
index 0000000000..5340afab46
--- /dev/null
+++ b/tensorflow/contrib/learn/python/learn/datasets/synthetic_test.py
@@ -0,0 +1,127 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import six
+
+import numpy as np
+from tensorflow.python.platform import test
+from tensorflow.contrib.learn.python.learn import datasets
+from tensorflow.contrib.learn.python.learn.datasets import synthetic
+
+class SyntheticTest(test.TestCase):
+ """Test synthetic dataset generation"""
+
+ def test_make_dataset(self):
+ """Test if the synthetic routine wrapper complains about the name"""
+ self.assertRaises(ValueError, datasets.make_dataset, name='_non_existing_name')
+
+ def test_all_datasets_callable(self):
+ """Test if all methods inside the `SYNTHETIC` are callable"""
+ self.assertIsInstance(datasets.SYNTHETIC, dict)
+ if len(datasets.SYNTHETIC) > 0:
+ for name, method in six.iteritems(datasets.SYNTHETIC):
+ self.assertTrue(callable(method))
+
+ def test_circles(self):
+ """Test if the circles are generated correctly
+
+ Tests:
+ - return type is `Dataset`
+ - returned `data` shape is (n_samples, n_features)
+ - returned `target` shape is (n_samples,)
+ - set of unique classes range is [0, n_classes)
+
+ TODO:
+ - all points have the same radius, if no `noise` specified
+ """
+ n_samples = 100
+ n_classes = 2
+ circ = synthetic.circles(n_samples = n_samples, noise = None, n_classes = n_classes)
+ self.assertIsInstance(circ, datasets.base.Dataset)
+ self.assertTupleEqual(circ.data.shape, (n_samples,2))
+ self.assertTupleEqual(circ.target.shape, (n_samples,))
+ self.assertSetEqual(set(circ.target), set(range(n_classes)))
+
+ def test_circles_replicable(self):
+ """Test if the data generation is replicable with a specified `seed`
+
+ Tests:
+ - return the same values if called with the same seed
+ - return different values if noise or seed is different
+ """
+ seed = 42
+ noise = 0.1
+ circ0 = synthetic.circles(n_samples = 100, noise = noise, n_classes = 2, seed = seed)
+ circ1 = synthetic.circles(n_samples = 100, noise = noise, n_classes = 2, seed = seed)
+ np.testing.assert_array_equal(circ0.data, circ1.data)
+ np.testing.assert_array_equal(circ0.target, circ1.target)
+
+ circ1 = synthetic.circles(n_samples = 100, noise = noise, n_classes = 2, seed = seed+1)
+ self.assertRaises(AssertionError, np.testing.assert_array_equal, circ0.data, circ1.data)
+ self.assertRaises(AssertionError, np.testing.assert_array_equal, circ0.target, circ1.target)
+
+ circ1 = synthetic.circles(n_samples = 100, noise = noise/2., n_classes = 2, seed = seed)
+ self.assertRaises(AssertionError, np.testing.assert_array_equal, circ0.data, circ1.data)
+
+ def test_spirals(self):
+ """Test if the circles are generated correctly
+
+ Tests:
+ - if mode is unknown, ValueError is raised
+ - return type is `Dataset`
+ - returned `data` shape is (n_samples, n_features)
+ - returned `target` shape is (n_samples,)
+ - set of unique classes range is [0, n_classes)
+ """
+ self.assertRaises(ValueError, synthetic.spirals, mode='_unknown_mode_spiral_')
+ n_samples = 100
+ modes = ('archimedes', 'bernoulli', 'fermat')
+ for mode in modes:
+ spir = synthetic.spirals(n_samples = n_samples, noise = None, mode = mode)
+ self.assertIsInstance(spir, datasets.base.Dataset)
+ self.assertTupleEqual(spir.data.shape, (n_samples,2))
+ self.assertTupleEqual(spir.target.shape, (n_samples,))
+ self.assertSetEqual(set(spir.target), set(range(2)))
+
+ def test_spirals_replicable(self):
+ """Test if the data generation is replicable with a specified `seed`
+
+ Tests:
+ - return the same values if called with the same seed
+ - return different values if noise or seed is different
+ """
+ seed = 42
+ noise = 0.1
+ modes = ('archimedes', 'bernoulli', 'fermat')
+ for mode in modes:
+ spir0 = synthetic.spirals(n_samples = 1000, noise = noise, seed = seed)
+ spir1 = synthetic.spirals(n_samples = 1000, noise = noise, seed = seed)
+ np.testing.assert_array_equal(spir0.data, spir1.data)
+ np.testing.assert_array_equal(spir0.target, spir1.target)
+
+ spir1 = synthetic.spirals(n_samples = 1000, noise = noise, seed = seed+1)
+ self.assertRaises(AssertionError, np.testing.assert_array_equal, spir0.data, spir1.data)
+ self.assertRaises(AssertionError, np.testing.assert_array_equal, spir0.target, spir1.target)
+
+ spir1 = synthetic.spirals(n_samples = 1000, noise = noise/2., seed = seed)
+ self.assertRaises(AssertionError, np.testing.assert_array_equal, spir0.data, spir1.data)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/learn/python/learn/estimators/run_config.py b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
index c5f8c110e6..c07736e701 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/run_config.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
@@ -84,7 +84,7 @@ class ClusterConfig(object):
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps({
{'cluster': cluster,
- 'task_id': {'type': 'worker', 'index': 1}}})
+ 'task': {'type': 'worker', 'index': 1}}})
config = ClusterConfig()
assert config.master == 'host4:2222'
assert config.task_id == 1
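The docstring fix above replaces the 'task_id' key with 'task'; a minimal standalone sketch of the corrected TF_CONFIG setup follows (the import path is an assumption based on this file's location).

# Corrected TF_CONFIG layout: 'task' rather than 'task_id'.
import json
import os

from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig

cluster = {'ps': ['host1:2222', 'host2:2222'],
           'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps({'cluster': cluster,
                                      'task': {'type': 'worker', 'index': 1}})

config = ClusterConfig()
assert config.master == 'host4:2222'
assert config.task_id == 1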
diff --git a/tensorflow/contrib/losses/python/losses/loss_ops.py b/tensorflow/contrib/losses/python/losses/loss_ops.py
index 15956554f7..ed4469773b 100644
--- a/tensorflow/contrib/losses/python/losses/loss_ops.py
+++ b/tensorflow/contrib/losses/python/losses/loss_ops.py
@@ -368,7 +368,7 @@ def softmax_cross_entropy(
scope: the scope for the operations performed in computing the loss.
Returns:
- A scalar `Tensor` representing the loss value.
+ A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
@@ -410,7 +410,7 @@ def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
scope: the scope for the operations performed in computing the loss.
Returns:
- A scalar `Tensor` representing the loss value.
+ A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shapes of `logits`, `labels`, and `weights` are
diff --git a/tensorflow/core/common_runtime/sycl/sycl_device.cc b/tensorflow/core/common_runtime/sycl/sycl_device.cc
index 19d39056ff..0abe25c373 100644
--- a/tensorflow/core/common_runtime/sycl/sycl_device.cc
+++ b/tensorflow/core/common_runtime/sycl/sycl_device.cc
@@ -23,7 +23,8 @@ limitations under the License.
namespace tensorflow {
-static std::unordered_set<SYCLDevice *> live_devices;
+static std::unordered_set<SYCLDevice*> live_devices;
+static bool first_time = true;
void ShutdownSycl() {
for (auto device : live_devices) {
@@ -31,7 +32,6 @@ void ShutdownSycl() {
}
live_devices.clear();
}
-bool first_time = true;
void SYCLDevice::RegisterDevice() {
if (first_time) {
@@ -44,17 +44,27 @@ void SYCLDevice::RegisterDevice() {
SYCLDevice::~SYCLDevice() {
device_context_->Unref();
sycl_allocator_->EnterLameDuckMode();
- delete sycl_device_;
- delete sycl_queue_;
+ if (sycl_device_) {
+ sycl_device_->synchronize();
+ delete sycl_device_;
+ }
+ if (sycl_queue_) {
+ delete sycl_queue_;
+ }
live_devices.erase(this);
}
void SYCLDevice::EnterLameDuckMode() {
sycl_allocator_->EnterLameDuckMode();
- delete sycl_device_;
- sycl_device_ = nullptr;
- delete sycl_queue_;
- sycl_queue_ = nullptr;
+ if (sycl_device_) {
+ sycl_device_->synchronize();
+ delete sycl_device_;
+ sycl_device_ = nullptr;
+ }
+ if (sycl_queue_) {
+ delete sycl_queue_;
+ sycl_queue_ = nullptr;
+ }
}
void SYCLDevice::Compute(OpKernel *op_kernel, OpKernelContext *context) {
@@ -110,7 +120,11 @@ Status SYCLDevice::FillContextMap(const Graph *graph,
Status SYCLDevice::Sync() {
sycl_device_->synchronize();
- return Status::OK();
+ if (sycl_device_->ok()) {
+ return Status::OK();
+ } else {
+ return errors::Internal("Unknown error detected on device ", name());
+ }
}
} // namespace tensorflow
diff --git a/tensorflow/core/common_runtime/sycl/sycl_device_context.cc b/tensorflow/core/common_runtime/sycl/sycl_device_context.cc
index b49420b1b5..a6be9195d4 100644
--- a/tensorflow/core/common_runtime/sycl/sycl_device_context.cc
+++ b/tensorflow/core/common_runtime/sycl/sycl_device_context.cc
@@ -95,6 +95,7 @@ void SYCLDeviceContext::CopyCPUTensorToDevice(const Tensor *cpu_tensor,
assert(false && "unsupported type");
}
}
+ device->eigen_sycl_device()->synchronize();
done(Status::OK());
}
@@ -172,6 +173,7 @@ void SYCLDeviceContext::CopyDeviceTensorToCPU(const Tensor *device_tensor,
assert(false && "unsupported type");
}
}
+ device->eigen_sycl_device()->synchronize();
done(Status::OK());
}
diff --git a/tensorflow/core/kernels/aggregate_ops.cc b/tensorflow/core/kernels/aggregate_ops.cc
index b41e438b2b..50d0cc1727 100644
--- a/tensorflow/core/kernels/aggregate_ops.cc
+++ b/tensorflow/core/kernels/aggregate_ops.cc
@@ -28,6 +28,9 @@ namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
template <typename Device, typename T>
class AddNOp : public OpKernel {
@@ -152,6 +155,21 @@ REGISTER_KERNEL_BUILDER(Name("AddN")
AddNOp<CPUDevice, int32>);
#endif // GOOGLE_CUDA
+#ifdef TENSORFLOW_USE_SYCL
+REGISTER_ADDN(float, SYCL);
+REGISTER_ADDN(double, SYCL);
+
+// A special SYCL kernel for int32.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+REGISTER_KERNEL_BUILDER(Name("AddN")
+ .Device(DEVICE_SYCL)
+ .TypeConstraint<int32>("T")
+ .HostMemory("inputs")
+ .HostMemory("sum"),
+ AddNOp<CPUDevice, int32>);
+#endif // TENSORFLOW_USE_SYCL
+
#undef REGISTER_ADDN
} // namespace tensorflow
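These registrations back the Python-level tf.add_n; the sketch below is illustrative only, assuming a SYCL-enabled build and a '/device:SYCL:0' device string, with soft placement as the fallback on other builds.

# tf.add_n dispatches to the AddN kernel registered above.
import tensorflow as tf

with tf.device('/device:SYCL:0'):  # device string is an assumption
    a = tf.constant([1.0, 2.0])
    b = tf.constant([3.0, 4.0])
    c = tf.constant([5.0, 6.0])
    total = tf.add_n([a, b, c])

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    print(sess.run(total))  # [ 9. 12.]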
diff --git a/tensorflow/core/kernels/aggregate_ops_cpu.h b/tensorflow/core/kernels/aggregate_ops_cpu.h
index ba5ebb7f0f..dfa3fe585e 100644
--- a/tensorflow/core/kernels/aggregate_ops_cpu.h
+++ b/tensorflow/core/kernels/aggregate_ops_cpu.h
@@ -23,6 +23,10 @@ limitations under the License.
typedef Eigen::ThreadPoolDevice CPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
+
namespace tensorflow {
// Partial specializations for a CPUDevice, that uses the Eigen implementation
@@ -133,6 +137,115 @@ struct Add9Functor<CPUDevice, T> {
}
};
+#ifdef TENSORFLOW_USE_SYCL
+// Partial specializations for a SYCLDevice, that uses the Eigen implementation
+// from AddNEigenImpl.
+template <typename T>
+struct Add2Functor<SYCLDevice, T> {
+ void operator()(const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstFlat in1,
+ typename TTypes<T>::ConstFlat in2) {
+ Add2EigenImpl<SYCLDevice, T>::Compute(d, out, in1, in2);
+ }
+};
+template <typename T>
+struct Add3Functor<SYCLDevice, T> {
+ void operator()(const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstFlat in1,
+ typename TTypes<T>::ConstFlat in2,
+ typename TTypes<T>::ConstFlat in3) {
+ Add3EigenImpl<SYCLDevice, T>::Compute(d, out, in1, in2, in3);
+ }
+};
+template <typename T>
+struct Add4Functor<SYCLDevice, T> {
+ void operator()(const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstFlat in1,
+ typename TTypes<T>::ConstFlat in2,
+ typename TTypes<T>::ConstFlat in3,
+ typename TTypes<T>::ConstFlat in4) {
+ Add4EigenImpl<SYCLDevice, T>::Compute(d, out, in1, in2, in3, in4);
+ }
+};
+template <typename T>
+struct Add5Functor<SYCLDevice, T> {
+ void operator()(const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstFlat in1,
+ typename TTypes<T>::ConstFlat in2,
+ typename TTypes<T>::ConstFlat in3,
+ typename TTypes<T>::ConstFlat in4,
+ typename TTypes<T>::ConstFlat in5) {
+ Add5EigenImpl<SYCLDevice, T>::Compute(d, out, in1, in2, in3, in4, in5);
+ }
+};
+template <typename T>
+struct Add6Functor<SYCLDevice, T> {
+ void operator()(const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstFlat in1,
+ typename TTypes<T>::ConstFlat in2,
+ typename TTypes<T>::ConstFlat in3,
+ typename TTypes<T>::ConstFlat in4,
+ typename TTypes<T>::ConstFlat in5,
+ typename TTypes<T>::ConstFlat in6) {
+ Add6EigenImpl<SYCLDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6);
+ }
+};
+template <typename T>
+struct Add7Functor<SYCLDevice, T> {
+ void operator()(const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstFlat in1,
+ typename TTypes<T>::ConstFlat in2,
+ typename TTypes<T>::ConstFlat in3,
+ typename TTypes<T>::ConstFlat in4,
+ typename TTypes<T>::ConstFlat in5,
+ typename TTypes<T>::ConstFlat in6,
+ typename TTypes<T>::ConstFlat in7) {
+ Add7EigenImpl<SYCLDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
+ in7);
+ }
+};
+
+template <typename T>
+struct Add8Functor<SYCLDevice, T> {
+ void operator()(
+ const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstFlat in1, typename TTypes<T>::ConstFlat in2,
+ typename TTypes<T>::ConstFlat in3, typename TTypes<T>::ConstFlat in4,
+ typename TTypes<T>::ConstFlat in5, typename TTypes<T>::ConstFlat in6,
+ typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8) {
+ Add8EigenImpl<SYCLDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
+ in7, in8);
+ }
+};
+
+template <typename T>
+struct Add8pFunctor<SYCLDevice, T> {
+ void operator()(
+ const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstFlat in1, typename TTypes<T>::ConstFlat in2,
+ typename TTypes<T>::ConstFlat in3, typename TTypes<T>::ConstFlat in4,
+ typename TTypes<T>::ConstFlat in5, typename TTypes<T>::ConstFlat in6,
+ typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8) {
+ Add8pEigenImpl<SYCLDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
+ in7, in8);
+ }
+};
+
+template <typename T>
+struct Add9Functor<SYCLDevice, T> {
+ void operator()(
+ const SYCLDevice& d, typename TTypes<T>::Flat out,
+ typename TTypes<T>::ConstFlat in1, typename TTypes<T>::ConstFlat in2,
+ typename TTypes<T>::ConstFlat in3, typename TTypes<T>::ConstFlat in4,
+ typename TTypes<T>::ConstFlat in5, typename TTypes<T>::ConstFlat in6,
+ typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8,
+ typename TTypes<T>::ConstFlat in9) {
+ Add9EigenImpl<SYCLDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
+ in7, in8, in9);
+ }
+};
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace functor
} // namespace tensorflow
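
Note on the hunk above (illustration only, not part of the change): each AddKFunctor<SYCLDevice, T> simply forwards to the matching AddKEigenImpl, whose body is not shown here; it evaluates an element-wise sum of its inputs through Eigen tensor expressions. A minimal host-only sketch of that idea, using plain Eigen with hypothetical values rather than a SYCL device:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      // Two flat inputs and one flat output, mirroring TTypes<T>::Flat.
      Eigen::Tensor<float, 1> in1(4), in2(4), out(4);
      in1.setValues({1.f, 2.f, 3.f, 4.f});
      in2.setValues({10.f, 20.f, 30.f, 40.f});

      // What the AddNEigenImpl-style Compute calls boil down to: a lazily
      // evaluated element-wise sum assigned to the output (on a device the
      // assignment would go through out.device(d) = ...).
      out = in1 + in2;

      for (int i = 0; i < 4; ++i) std::cout << out(i) << " ";  // 11 22 33 44
      std::cout << "\n";
      return 0;
    }
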
diff --git a/tensorflow/core/kernels/constant_op.cc b/tensorflow/core/kernels/constant_op.cc
index e479b97109..3f8717f77f 100644
--- a/tensorflow/core/kernels/constant_op.cc
+++ b/tensorflow/core/kernels/constant_op.cc
@@ -305,4 +305,9 @@ REGISTER_KERNEL_BUILDER(Name("Placeholder").Device(DEVICE_GPU), PlaceholderOp);
REGISTER_KERNEL_BUILDER(Name("PlaceholderV2").Device(DEVICE_GPU),
PlaceholderOp);
+#if TENSORFLOW_USE_SYCL
+REGISTER_KERNEL_BUILDER(Name("Placeholder").Device(DEVICE_SYCL), PlaceholderOp);
+REGISTER_KERNEL_BUILDER(Name("PlaceholderV2").Device(DEVICE_SYCL),
+ PlaceholderOp);
+#endif // TENSORFLOW_USE_SYCL
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/control_flow_ops.cc b/tensorflow/core/kernels/control_flow_ops.cc
index b01263f288..5241a4d916 100644
--- a/tensorflow/core/kernels/control_flow_ops.cc
+++ b/tensorflow/core/kernels/control_flow_ops.cc
@@ -121,8 +121,20 @@ REGISTER_GPU_HOST_REF_KERNEL(string);
SwitchOp)
REGISTER_SYCL_KERNEL(bool);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
+
+#define REGISTER_SYCL_REF_SWITCH(type) \
+ REGISTER_KERNEL_BUILDER(Name("RefSwitch") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("pred") \
+ .TypeConstraint<type>("T"), \
+ SwitchOp)
+REGISTER_SYCL_REF_SWITCH(bool);
+TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_SWITCH);
+
#undef REGISTER_SYCL_KERNEL
-#endif
+#undef REGISTER_SYCL_REF_SWITCH
+
+#endif // TENSORFLOW_USE_SYCL
class RefSelectOp : public OpKernel {
public:
@@ -230,8 +242,18 @@ REGISTER_GPU_REF_KERNEL(bool);
MergeOp)
REGISTER_SYCL_KERNEL(bool);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
+
+#define REGISTER_SYCL_REF_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("RefMerge") \
+ .Device(DEVICE_SYCL) \
+ .TypeConstraint<type>("T") \
+ .HostMemory("value_index"), \
+ MergeOp)
+REGISTER_SYCL_REF_KERNEL(bool);
+TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_KERNEL);
#undef REGISTER_SYCL_KERNEL
-#endif
+#undef REGISTER_SYCL_REF_KERNEL
+#endif // TENSORFLOW_USE_SYCL
// Special GPU kernels for int32 and string.
// TODO(b/25387198): Also enable int32 in device memory. This kernel
@@ -289,7 +311,15 @@ REGISTER_GPU_REF_KERNEL(bool);
Name("Enter").Device(DEVICE_SYCL).TypeConstraint<type>("T"), EnterOp)
REGISTER_SYCL_KERNEL(bool);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
+
+#define REGISTER_SYCL_REF_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER( \
+ Name("RefEnter").Device(DEVICE_SYCL).TypeConstraint<type>("T"), EnterOp)
+REGISTER_SYCL_REF_KERNEL(bool);
+TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_KERNEL);
+
#undef REGISTER_SYCL_KERNEL
+#undef REGISTER_SYCL_REF_KERNEL
#endif
// Special GPU kernels for int32 and string.
@@ -349,8 +379,37 @@ REGISTER_GPU_KERNEL(bool);
Name("Exit").Device(DEVICE_SYCL).TypeConstraint<type>("T"), ExitOp)
REGISTER_SYCL_KERNEL(bool);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
+
+#define REGISTER_SYCL_REF_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER( \
+ Name("RefExit").Device(DEVICE_SYCL).TypeConstraint<type>("T"), ExitOp)
+REGISTER_SYCL_REF_KERNEL(bool);
+TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_KERNEL);
+
#undef REGISTER_SYCL_KERNEL
-#endif
+#undef REGISTER_SYCL_REF_KERNEL
+
+// Special SYCL kernels for int32 and string.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+#define REGISTER_SYCL_HOST_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("Exit") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("data") \
+ .HostMemory("output") \
+ .TypeConstraint<type>("T"), \
+ ExitOp); \
+ REGISTER_KERNEL_BUILDER(Name("RefExit") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("data") \
+ .HostMemory("output") \
+ .TypeConstraint<type>("T"), \
+ ExitOp)
+
+REGISTER_SYCL_HOST_KERNEL(int32);
+REGISTER_SYCL_HOST_KERNEL(string);
+#undef REGISTER_SYCL_HOST_KERNEL
+#endif // TENSORFLOW_USE_SYCL
// Special GPU kernels for int32 and string.
// TODO(b/25387198): Also enable int32 in device memory. This kernel
@@ -432,8 +491,39 @@ REGISTER_GPU_HOST_KERNEL(string);
NextIterationOp)
REGISTER_SYCL_KERNEL(bool);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
+#define REGISTER_SYCL_REF_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("RefNextIteration") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("data") \
+ .HostMemory("output") \
+ .TypeConstraint<type>("T"), \
+ NextIterationOp)
+ REGISTER_SYCL_REF_KERNEL(bool);
+ TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_KERNEL);
#undef REGISTER_SYCL_KERNEL
-#endif
+#undef REGISTER_SYCL_REF_KERNEL
+
+// Special SYCL kernels for int32 and string.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+#define REGISTER_SYCL_HOST_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("NextIteration") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("data") \
+ .HostMemory("output") \
+ .TypeConstraint<type>("T"), \
+ NextIterationOp); \
+ REGISTER_KERNEL_BUILDER(Name("RefNextIteration") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("data") \
+ .HostMemory("output") \
+ .TypeConstraint<type>("T"), \
+ NextIterationOp)
+
+REGISTER_SYCL_HOST_KERNEL(int32);
+REGISTER_SYCL_HOST_KERNEL(string);
+#undef REGISTER_SYCL_HOST_KERNEL
+#endif // TENSORFLOW_USE_SYCL
// A LoopCond op has one input and one output. The input is a boolean
// scalar representing the taken branches of the "pivot" Switch that
@@ -461,6 +551,14 @@ REGISTER_KERNEL_BUILDER(Name("LoopCond")
.HostMemory("output"),
LoopCondOp);
+#ifdef TENSORFLOW_USE_SYCL
+REGISTER_KERNEL_BUILDER(Name("LoopCond")
+ .Device(DEVICE_SYCL)
+ .HostMemory("input")
+ .HostMemory("output"),
+ LoopCondOp);
+#endif // TENSORFLOW_USE_SYCL
+
// ControlTrigger kernels
REGISTER_KERNEL_BUILDER(Name("ControlTrigger").Device(DEVICE_CPU),
ControlTriggerOp);
@@ -468,6 +566,11 @@ REGISTER_KERNEL_BUILDER(Name("ControlTrigger").Device(DEVICE_CPU),
REGISTER_KERNEL_BUILDER(Name("ControlTrigger").Device(DEVICE_GPU),
ControlTriggerOp);
+#ifdef TENSORFLOW_USE_SYCL
+REGISTER_KERNEL_BUILDER(Name("ControlTrigger").Device(DEVICE_SYCL),
+ ControlTriggerOp);
+#endif // TENSORFLOW_USE_SYCL
+
// When called, abort op will abort the current process. This can be used to
// abort remote PSs when needed.
class AbortOp : public OpKernel {
@@ -493,4 +596,5 @@ class AbortOp : public OpKernel {
};
REGISTER_KERNEL_BUILDER(Name("Abort").Device(DEVICE_CPU), AbortOp);
+
} // namespace tensorflow
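
Note on the hunk above (illustration only, not part of the change): every registration block here follows the same two-level macro pattern: a per-type REGISTER_* macro wraps a single REGISTER_KERNEL_BUILDER call, a TF_CALL_*-style list macro stamps it out for each supported type, and the per-type macro is then #undef'd. A self-contained toy version of that pattern, with made-up names standing in for the TensorFlow macros:

    #include <iostream>
    #include <string>

    // Stand-in for REGISTER_KERNEL_BUILDER: just records the registration.
    void RegisterKernel(const std::string& op, const std::string& device,
                        const std::string& type) {
      std::cout << "registered " << op << " on " << device << " for " << type
                << "\n";
    }

    // Per-type macro: one registration per type.
    #define REGISTER_SYCL_KERNEL(type) RegisterKernel("Exit", "SYCL", #type)
    // List macro: applies the per-type macro to every type in the list,
    // playing the role of TF_CALL_NUMBER_TYPES_NO_INT32 here.
    #define CALL_NUMBER_TYPES(m) m(float); m(double); m(int64_t)

    int main() {
      CALL_NUMBER_TYPES(REGISTER_SYCL_KERNEL);
      return 0;
    }

    #undef CALL_NUMBER_TYPES
    #undef REGISTER_SYCL_KERNEL
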
diff --git a/tensorflow/core/kernels/cwise_op_exp.cc b/tensorflow/core/kernels/cwise_op_exp.cc
index 0ee47f7dee..2e3a60cf79 100644
--- a/tensorflow/core/kernels/cwise_op_exp.cc
+++ b/tensorflow/core/kernels/cwise_op_exp.cc
@@ -27,6 +27,7 @@ REGISTER5(UnaryOp, CPU, "Exp", functor::exp, float, Eigen::half, double,
.TypeConstraint<TYPE>("T"), \
UnaryOp<SYCLDevice, functor::exp<TYPE>>);
REGISTER_SYCL_KERNEL(float);
+REGISTER_SYCL_KERNEL(double);
#undef REGISTER_SYCL_KERNEL
#endif // TENSORFLOW_USE_SYCL
diff --git a/tensorflow/core/kernels/cwise_op_floor_div.cc b/tensorflow/core/kernels/cwise_op_floor_div.cc
index 69dbb70b83..8a600f8f95 100644
--- a/tensorflow/core/kernels/cwise_op_floor_div.cc
+++ b/tensorflow/core/kernels/cwise_op_floor_div.cc
@@ -27,7 +27,7 @@ REGISTER3(BinaryOp, CPU, "FloorDiv", functor::floor_div_real, float,
Name("FloorDiv") \
.Device(DEVICE_SYCL) \
.TypeConstraint<TYPE>("T"), \
- BinaryOp<SYCLDevice, functor::floor_div<TYPE>>);
+ BinaryOp<SYCLDevice, functor::floor_div_real<TYPE>>);
REGISTER_SYCL_KERNEL(float)
#undef REGISTER_SYCL_KERNEL
#endif // TENSORFLOW_USE_SYCL
diff --git a/tensorflow/core/kernels/cwise_op_neg.cc b/tensorflow/core/kernels/cwise_op_neg.cc
index 4221fc0710..c4a9b22883 100644
--- a/tensorflow/core/kernels/cwise_op_neg.cc
+++ b/tensorflow/core/kernels/cwise_op_neg.cc
@@ -27,6 +27,18 @@ REGISTER7(UnaryOp, CPU, "Neg", functor::neg, float, Eigen::half, double, int32,
.TypeConstraint<TYPE>("T"), \
UnaryOp<SYCLDevice, functor::neg<TYPE>>);
REGISTER_SYCL_KERNEL(float);
+REGISTER_SYCL_KERNEL(double);
+
+// A special SYCL kernel for int32.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+REGISTER_KERNEL_BUILDER(Name("Neg")
+ .Device(DEVICE_SYCL)
+ .HostMemory("x")
+ .HostMemory("y")
+ .TypeConstraint<int32>("T"),
+ UnaryOp<CPUDevice, functor::neg<int32>>);
+
#undef REGISTER_SYCL_KERNEL
#endif // TENSORFLOW_USE_SYCL
diff --git a/tensorflow/core/kernels/cwise_op_sub.cc b/tensorflow/core/kernels/cwise_op_sub.cc
index e1326dbed1..eab1e2a09c 100644
--- a/tensorflow/core/kernels/cwise_op_sub.cc
+++ b/tensorflow/core/kernels/cwise_op_sub.cc
@@ -32,6 +32,18 @@ REGISTER(BinaryOp, CPU, "Sub", functor::sub, int32);
.TypeConstraint<TYPE>("T"), \
BinaryOp<SYCLDevice, functor::sub<TYPE>>);
REGISTER_SYCL_KERNEL(float);
+ REGISTER_SYCL_KERNEL(double);
+
+// A special SYCL kernel for int32.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+REGISTER_KERNEL_BUILDER(Name("Sub")
+ .Device(DEVICE_SYCL)
+ .HostMemory("x")
+ .HostMemory("y")
+ .HostMemory("z")
+ .TypeConstraint<int32>("T"),
+ BinaryOp<CPUDevice, functor::sub<int32>>);
#undef REGISTER_SYCL_KERNEL
#endif // TENSORFLOW_USE_SYCL
#if GOOGLE_CUDA
diff --git a/tensorflow/core/kernels/cwise_ops_sycl_common.h b/tensorflow/core/kernels/cwise_ops_sycl_common.h
index a0decbce87..3f6ff7303d 100644
--- a/tensorflow/core/kernels/cwise_ops_sycl_common.h
+++ b/tensorflow/core/kernels/cwise_ops_sycl_common.h
@@ -31,14 +31,6 @@ namespace functor {
typedef Eigen::SyclDevice SYCLDevice;
-template <typename Index, int N> Eigen::array<Index, N> GenerateArrayOfOnes() {
- Eigen::array<Index, N> result;
- for (int i = 0; i < N; ++i) {
- result[i] = 1;
- }
- return result;
-}
-
template <typename OUT, typename RHS>
void Assign(const SYCLDevice& d, OUT out, RHS rhs) {
out.device(d) = rhs;
@@ -67,11 +59,9 @@ struct BinaryFunctor<SYCLDevice, Functor, NDIMS, has_errors> {
typename Functor::tin_type in, bool* error) {
typedef typename Functor::func Binary;
constexpr int NumDims = Functor::tin_type::NumDimensions;
- typedef typename Functor::tin_type::Scalar T;
- typedef typename Functor::tin_type::Index Index;
- Eigen::array<Index, NumDims> scalar_dim = GenerateArrayOfOnes<Index, NumDims>();
- Eigen::TensorMap<Eigen::Tensor<T, NumDims, Eigen::RowMajor>> tmp(scalar.data(), scalar_dim);
- out.device(d) = tmp.broadcast(in.dimensions()).binaryExpr(in, Binary());
+ static_assert(NumDims == 1, "Unexpected size");
+ Eigen::Sizes<1> scalar_dim;
+ out.device(d) = scalar.reshape(scalar_dim).broadcast(in.dimensions()).binaryExpr(in, Binary());
}
void Right(const SYCLDevice& d, typename Functor::tout_type out,
@@ -79,11 +69,9 @@ struct BinaryFunctor<SYCLDevice, Functor, NDIMS, has_errors> {
typename Functor::tscalar_type scalar, bool* error) {
typedef typename Functor::func Binary;
constexpr int NumDims = Functor::tin_type::NumDimensions;
- typedef typename Functor::tin_type::Scalar T;
- typedef typename Functor::tin_type::Index Index;
- Eigen::array<Index, NumDims> scalar_dim = GenerateArrayOfOnes<Index, NumDims>();
- Eigen::TensorMap<Eigen::Tensor<T, NumDims, Eigen::RowMajor>> tmp(scalar.data(), scalar_dim);
- out.device(d) = in.binaryExpr(tmp.broadcast(in.dimensions()), Binary());
+ static_assert(NumDims == 1, "Unexpected size");
+ Eigen::Sizes<1> scalar_dim;
+ out.device(d) = in.binaryExpr(scalar.reshape(scalar_dim).broadcast(in.dimensions()), Binary());
}
void BCast(const SYCLDevice& d,
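
Note on the hunk above (illustration only, not part of the change): the removed code built an Eigen::TensorMap over the scalar's buffer with a hand-rolled all-ones dimension array; the replacement reshapes the scalar operand to an explicit rank-1, size-1 shape and broadcasts it to the input's length before applying the binary functor. A small host-only Eigen sketch of the same expression, with hypothetical values and no SYCL device:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    // Binary functor standing in for Functor::func (e.g. functor::sub).
    struct Sub {
      float operator()(float a, float b) const { return a - b; }
    };

    int main() {
      Eigen::Tensor<float, 1> scalar(1);   // the "scalar" side, one element
      scalar(0) = 10.0f;
      Eigen::Tensor<float, 1> in(4);       // the tensor side
      in.setValues({1.f, 2.f, 3.f, 4.f});

      // Reshape to an explicit rank-1 shape of size 1, broadcast by the
      // input's dimensions (a broadcast factor of 4 here), then apply the
      // element-wise binary expression -- the Left() path above.
      Eigen::Sizes<1> scalar_dim;
      Eigen::Tensor<float, 1> out = scalar.reshape(scalar_dim)
                                          .broadcast(in.dimensions())
                                          .binaryExpr(in, Sub());

      for (int i = 0; i < 4; ++i) std::cout << out(i) << " ";  // 9 8 7 6
      std::cout << "\n";
      return 0;
    }
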
diff --git a/tensorflow/core/kernels/debug_ops.h b/tensorflow/core/kernels/debug_ops.h
index 848896a64a..aa47315f55 100644
--- a/tensorflow/core/kernels/debug_ops.h
+++ b/tensorflow/core/kernels/debug_ops.h
@@ -188,7 +188,6 @@ class DebugNumericSummaryOp : public OpKernel {
const T* input_flat = input.template flat<T>().data();
element_count = input_shape.num_elements();
- const double element_count_double = static_cast<double>(element_count);
for (int64 i = 0; i < element_count; ++i) {
T x = input_flat[i];
if (Eigen::numext::isnan(x)) {
diff --git a/tensorflow/core/kernels/pack_op.cc b/tensorflow/core/kernels/pack_op.cc
index e072eb36b3..4977ad1d7c 100644
--- a/tensorflow/core/kernels/pack_op.cc
+++ b/tensorflow/core/kernels/pack_op.cc
@@ -34,6 +34,9 @@ typedef Eigen::ThreadPoolDevice CPUDevice;
#if GOOGLE_CUDA
typedef Eigen::GpuDevice GPUDevice;
#endif // GOOGLE_CUDA
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
// --------------------------------------------------------------------------
template <typename Device, typename T>
@@ -156,4 +159,26 @@ REGISTER_KERNEL_BUILDER(Name("Pack")
#endif // GOOGLE_CUDA
+#ifdef TENSORFLOW_USE_SYCL
+
+#define REGISTER_SYCL(type) \
+ REGISTER_KERNEL_BUILDER( \
+ Name("Pack").Device(DEVICE_SYCL).TypeConstraint<type>("T"), \
+ PackOp<SYCLDevice, type>)
+
+REGISTER_SYCL(float);
+#undef REGISTER_SYCL
+
+// A special SYCL kernel for int32.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+REGISTER_KERNEL_BUILDER(Name("Pack")
+ .Device(DEVICE_SYCL)
+ .HostMemory("values")
+ .HostMemory("output")
+ .TypeConstraint<int32>("T"),
+ PackOp<CPUDevice, int32>);
+
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/reduction_ops_common.h b/tensorflow/core/kernels/reduction_ops_common.h
index 1bb1a9fc50..625cea4228 100644
--- a/tensorflow/core/kernels/reduction_ops_common.h
+++ b/tensorflow/core/kernels/reduction_ops_common.h
@@ -40,6 +40,9 @@ namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
template <typename Device>
struct Constants {
@@ -60,13 +63,16 @@ struct Constants {
};
#if defined(EIGEN_HAS_INDEX_LIST)
-template <>
-struct Constants<CPUDevice> {
+struct ConstantsBase {
const Eigen::IndexList<Eigen::type2index<0>> kZero;
const Eigen::IndexList<Eigen::type2index<1>> kOne;
const Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<2>> kZeroTwo;
};
-#endif
+template<> struct Constants<CPUDevice> : ConstantsBase{};
+#ifdef TENSORFLOW_USE_SYCL
+template<> struct Constants<SYCLDevice> : ConstantsBase{};
+#endif // TENSORFLOW_USE_SYCL
+#endif // EIGEN_HAS_INDEX_LIST
class ReductionHelper {
public:
@@ -239,22 +245,31 @@ class ReductionOp : public OpKernel {
namespace functor {
-template <typename Reducer>
-struct ReduceFunctor<CPUDevice, Reducer> {
+template <typename Device, typename Reducer>
+struct ReduceFunctorBase {
template <typename OUT_T, typename IN_T, typename ReductionAxes>
- static void Reduce(const CPUDevice& d, OUT_T out, IN_T in,
+ static void Reduce(const Device& d, OUT_T out, IN_T in,
const ReductionAxes& reduction_axes,
const Reducer& reducer) {
ReduceEigenImpl(d, out, in, reduction_axes, reducer);
}
template <typename OUT_T>
- static void FillIdentity(const CPUDevice& d, OUT_T out,
+ static void FillIdentity(const Device& d, OUT_T out,
const Reducer& reducer) {
FillIdentityEigenImpl(d, out, reducer);
}
};
+template <typename Reducer>
+struct ReduceFunctor<CPUDevice, Reducer>
+ : ReduceFunctorBase<CPUDevice, Reducer>{};
+#if TENSORFLOW_USE_SYCL
+template <typename Reducer>
+struct ReduceFunctor<SYCLDevice, Reducer>
+ : ReduceFunctorBase<SYCLDevice, Reducer>{};
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace functor
} // namespace tensorflow
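
Note on the hunk above (illustration only, not part of the change): Constants and ReduceFunctor are reworked into the usual "shared base plus empty per-device specialization" shape, so the CPU and SYCL variants inherit one implementation instead of duplicating it. A minimal stand-alone illustration of the pattern, with hypothetical names:

    #include <iostream>

    struct CpuDev {};
    struct SyclDev {};

    // Primary template: only the explicit specializations below are used.
    template <typename Device>
    struct ScaleFunctor;

    // Device-agnostic body shared by all devices.
    template <typename Device>
    struct ScaleFunctorBase {
      static float Scale(float x, float k) { return x * k; }
    };

    // Each device gets an empty subclass; adding a new device is one line.
    template <>
    struct ScaleFunctor<CpuDev> : ScaleFunctorBase<CpuDev> {};
    template <>
    struct ScaleFunctor<SyclDev> : ScaleFunctorBase<SyclDev> {};

    int main() {
      std::cout << ScaleFunctor<CpuDev>::Scale(3.0f, 2.0f) << "\n";   // 6
      std::cout << ScaleFunctor<SyclDev>::Scale(3.0f, 2.0f) << "\n";  // 6
      return 0;
    }
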
diff --git a/tensorflow/core/kernels/reduction_ops_sum.cc b/tensorflow/core/kernels/reduction_ops_sum.cc
index c7c7949fed..3aa38f418e 100644
--- a/tensorflow/core/kernels/reduction_ops_sum.cc
+++ b/tensorflow/core/kernels/reduction_ops_sum.cc
@@ -64,4 +64,31 @@ REGISTER_KERNEL_BUILDER(
#endif
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNELS(type) \
+ REGISTER_KERNEL_BUILDER( \
+ Name("Sum") \
+ .Device(DEVICE_SYCL) \
+ .TypeConstraint<type>("T") \
+ .TypeConstraint<int32>("Tidx") \
+ .HostMemory("reduction_indices"), \
+ ReductionOp<SYCLDevice, type, Eigen::internal::SumReducer<type>>);
+REGISTER_SYCL_KERNELS(float);
+REGISTER_SYCL_KERNELS(double);
+#undef REGISTER_SYCL_KERNELS
+
+// A special SYCL kernel for int32.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+REGISTER_KERNEL_BUILDER(
+ Name("Sum")
+ .Device(DEVICE_SYCL)
+ .TypeConstraint<int32>("T")
+ .TypeConstraint<int32>("Tidx")
+ .HostMemory("input")
+ .HostMemory("output")
+ .HostMemory("reduction_indices"),
+ ReductionOp<CPUDevice, int32, Eigen::internal::SumReducer<int32>>);
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/scatter_functor.h b/tensorflow/core/kernels/scatter_functor.h
index a84d89c296..a27cc83e4c 100644
--- a/tensorflow/core/kernels/scatter_functor.h
+++ b/tensorflow/core/kernels/scatter_functor.h
@@ -25,6 +25,9 @@ namespace tensorflow {
class OpKernelContext;
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
namespace scatter_op {
@@ -82,10 +85,9 @@ struct ScatterFunctor {
typename TTypes<Index>::ConstFlat indices);
};
-// Specializations of scatter functor for CPU.
-template <typename T, typename Index, scatter_op::UpdateOp op>
-struct ScatterFunctor<CPUDevice, T, Index, op> {
- Index operator()(OpKernelContext* c, const CPUDevice& d,
+template <typename Device, typename T, typename Index, scatter_op::UpdateOp op>
+struct ScatterFunctorBase {
+ Index operator()(OpKernelContext* c, const Device& d,
typename TTypes<T>::Matrix params,
typename TTypes<T>::ConstMatrix updates,
typename TTypes<Index>::ConstFlat indices) {
@@ -106,6 +108,15 @@ struct ScatterFunctor<CPUDevice, T, Index, op> {
}
};
+template <typename T, typename Index, scatter_op::UpdateOp op>
+struct ScatterFunctor<CPUDevice, T, Index, op>
+ : ScatterFunctorBase<CPUDevice, T, Index, op>{};
+#if TENSORFLOW_USE_SYCL
+template<typename T, typename Index, scatter_op::UpdateOp op>
+struct ScatterFunctor<SYCLDevice, T, Index, op>
+ : ScatterFunctorBase<SYCLDevice, T, Index, op>{};
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace functor
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/scatter_op.cc b/tensorflow/core/kernels/scatter_op.cc
index 604f753db1..827eb7dbca 100644
--- a/tensorflow/core/kernels/scatter_op.cc
+++ b/tensorflow/core/kernels/scatter_op.cc
@@ -27,6 +27,9 @@ namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
// Check whether updates.shape = indices.shape + params.shape[1:]
static bool ValidShapes(const Tensor& params, const Tensor& updates,
@@ -170,6 +173,20 @@ TF_CALL_GPU_NUMBER_TYPES_NO_HALF(REGISTER_SCATTER_UPDATE_GPU);
#endif // GOOGLE_CUDA
+// Registers SYCL kernels.
+#if TENSORFLOW_USE_SYCL
+#define REGISTER_SCATTER_ARITHEMTIC_SYCL(type) \
+ REGISTER_SCATTER_ARITHEMTIC(type, SYCL);
+
+#define REGISTER_SCATTER_UPDATE_SYCL(type) REGISTER_SCATTER_UPDATE(type, SYCL);
+
+REGISTER_SCATTER_ARITHEMTIC_SYCL(float);
+REGISTER_SCATTER_UPDATE_SYCL(float);
+
+#undef REGISTER_SCATTER_ARITHEMTIC_SYCL
+#undef REGISTER_SCATTER_UPDATE_SYCL
+#endif // TENSORFLOW_USE_SYCL
+
#undef REGISTER_SCATTER_ARITHEMTIC
#undef REGISTER_SCATTER_ARITHEMTIC_CPU
#undef REGISTER_SCATTER_ARITHEMTIC_GPU
diff --git a/tensorflow/core/kernels/session_ops.cc b/tensorflow/core/kernels/session_ops.cc
index 3f1538164c..4550115c19 100644
--- a/tensorflow/core/kernels/session_ops.cc
+++ b/tensorflow/core/kernels/session_ops.cc
@@ -67,6 +67,19 @@ TF_CALL_NUMBER_TYPES(REGISTER_GPU_KERNEL);
REGISTER_GPU_KERNEL(bool);
#undef REGISTER_GPU_KERNEL
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("GetSessionHandle") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("handle") \
+ .TypeConstraint<type>("T"), \
+ GetSessionHandleOp)
+
+TF_CALL_NUMBER_TYPES(REGISTER_SYCL_KERNEL);
+REGISTER_SYCL_KERNEL(bool);
+#undef REGISTER_SYCL_KERNEL
+#endif // TENSORFLOW_USE_SYCL
+
class GetSessionTensorOp : public OpKernel {
public:
explicit GetSessionTensorOp(OpKernelConstruction* context)
@@ -97,6 +110,19 @@ TF_CALL_NUMBER_TYPES(REGISTER_GPU_KERNEL);
REGISTER_GPU_KERNEL(bool);
#undef REGISTER_GPU_KERNEL
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("GetSessionTensor") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("handle") \
+ .TypeConstraint<type>("dtype"), \
+ GetSessionTensorOp)
+
+TF_CALL_NUMBER_TYPES(REGISTER_SYCL_KERNEL);
+REGISTER_SYCL_KERNEL(bool);
+#undef REGISTER_SYCL_KERNEL
+#endif // TENSORFLOW_USE_SYCL
+
class DeleteSessionTensorOp : public OpKernel {
public:
explicit DeleteSessionTensorOp(OpKernelConstruction* context)
@@ -117,4 +143,9 @@ REGISTER_KERNEL_BUILDER(
Name("DeleteSessionTensor").Device(DEVICE_GPU).HostMemory("handle"),
DeleteSessionTensorOp);
+#ifdef TENSORFLOW_USE_SYCL
+REGISTER_KERNEL_BUILDER(
+ Name("DeleteSessionTensor").Device(DEVICE_SYCL).HostMemory("handle"),
+ DeleteSessionTensorOp);
+#endif // TENSORFLOW_USE_SYCL
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/shape_ops.cc b/tensorflow/core/kernels/shape_ops.cc
index 7ff812cf27..496865de02 100644
--- a/tensorflow/core/kernels/shape_ops.cc
+++ b/tensorflow/core/kernels/shape_ops.cc
@@ -210,6 +210,43 @@ REGISTER_KERNEL_BUILDER(Name("ShapeN")
ShapeNOp<int64>);
#endif
+#if TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNEL(type) \
+ REGISTER_KERNEL_BUILDER(Name("ShapeN") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("output") \
+ .TypeConstraint<int32>("out_type") \
+ .TypeConstraint<type>("T"), \
+ ShapeNOp<int32>); \
+ REGISTER_KERNEL_BUILDER(Name("ShapeN") \
+ .Device(DEVICE_SYCL) \
+ .HostMemory("output") \
+ .TypeConstraint<int64>("out_type") \
+ .TypeConstraint<type>("T"), \
+ ShapeNOp<int64>)
+
+TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
+#undef REGISTER_SYCL_KERNEL
+
+// A special SYCL kernel for int32.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+REGISTER_KERNEL_BUILDER(Name("ShapeN")
+ .Device(DEVICE_SYCL)
+ .HostMemory("input")
+ .HostMemory("output")
+ .TypeConstraint<int32>("T")
+ .TypeConstraint<int32>("out_type"),
+ ShapeNOp<int32>);
+REGISTER_KERNEL_BUILDER(Name("ShapeN")
+ .Device(DEVICE_SYCL)
+ .HostMemory("input")
+ .HostMemory("output")
+ .TypeConstraint<int32>("T")
+ .TypeConstraint<int64>("out_type"),
+ ShapeNOp<int64>);
+#endif // TENSORFLOW_USE_SYCL
+
class RankOp : public OpKernel {
public:
explicit RankOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
diff --git a/tensorflow/core/kernels/slice_op.cc b/tensorflow/core/kernels/slice_op.cc
index dc33a25cec..e2978eccbd 100644
--- a/tensorflow/core/kernels/slice_op.cc
+++ b/tensorflow/core/kernels/slice_op.cc
@@ -56,6 +56,9 @@ gtl::InlinedVector<int64, 4> IntTensorToInt64Vec(const Tensor& tensor) {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
// Shared code that is not dependent on the type of T. We do this to reduce
// code size by not duplicating all this for all T (float, double, int32, etc.)
@@ -300,4 +303,58 @@ REGISTER_KERNEL_BUILDER(Name("Slice")
#endif // GOOGLE_CUDA
+#ifdef TENSORFLOW_USE_SYCL
+// Forward declarations of the functor specializations for SYCL.
+namespace functor {
+#define DECLARE_SYCL_SPEC(T, NDIM) \
+ template <> \
+ void Slice<SYCLDevice, T, NDIM>::operator()( \
+ const SYCLDevice& d, typename TTypes<T, NDIM>::Tensor output,\
+ typename TTypes<T, NDIM>::ConstTensor input, \
+ const Eigen::DSizes<Eigen::DenseIndex, NDIM>& indices, \
+ const Eigen::DSizes<Eigen::DenseIndex, NDIM>& sizes); \
+ extern template struct Slice<SYCLDevice, T, NDIM>;
+
+#define DECLARE_FOR_N(T) \
+ DECLARE_SYCL_SPEC(T, 1); \
+ DECLARE_SYCL_SPEC(T, 2); \
+ DECLARE_SYCL_SPEC(T, 3); \
+ DECLARE_SYCL_SPEC(T, 4); \
+ DECLARE_SYCL_SPEC(T, 5); \
+ DECLARE_SYCL_SPEC(T, 6);
+
+TF_CALL_GPU_NUMBER_TYPES(DECLARE_FOR_N);
+DECLARE_FOR_N(int32);
+
+#undef DECLARE_FOR_N
+#undef DECLARE_SYCL_SPEC
+} // namespace functor
+
+#define REGISTER_SYCL(type) \
+ REGISTER_KERNEL_BUILDER(Name("Slice") \
+ .Device(DEVICE_SYCL) \
+ .TypeConstraint<type>("T") \
+ .HostMemory("begin") \
+ .HostMemory("size") \
+ .TypeConstraint<int32>("Index"), \
+ SliceOp<SYCLDevice, type>)
+
+TF_CALL_GPU_NUMBER_TYPES(REGISTER_SYCL);
+
+// A special SYCL kernel for int32.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+REGISTER_KERNEL_BUILDER(Name("Slice")
+ .Device(DEVICE_SYCL)
+ .TypeConstraint<int32>("T")
+ .TypeConstraint<int32>("Index")
+ .HostMemory("input")
+ .HostMemory("begin")
+ .HostMemory("size")
+ .HostMemory("output"),
+ SliceOp<CPUDevice, int32>);
+
+#undef REGISTER_SYCL
+
+#endif // TENSORFLOW_USE_SYCL
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/slice_op_cpu_impl.h b/tensorflow/core/kernels/slice_op_cpu_impl.h
index 0b0700ec36..a70805658e 100644
--- a/tensorflow/core/kernels/slice_op_cpu_impl.h
+++ b/tensorflow/core/kernels/slice_op_cpu_impl.h
@@ -34,6 +34,18 @@ DEFINE_CPU_KERNELS(bfloat16);
#undef DEFINE_CPU_KERNELS
+#ifdef TENSORFLOW_USE_SYCL
+using SyclDevice = Eigen::SyclDevice;
+
+#define DEFINE_SYCL_KERNELS(T) \
+ template struct functor::Slice<SyclDevice, T, CPU_PROVIDED_IXDIM>;
+
+TF_CALL_GPU_NUMBER_TYPES(DEFINE_SYCL_KERNELS);
+DEFINE_SYCL_KERNELS(int32);
+
+#undef DEFINE_SYCL_KERNELS
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace tensorflow
#endif // THIRD_PARTY_TENSORFLOW_CORE_KERNELS_SLICE_OP_CPU_IMPL_H_
diff --git a/tensorflow/core/kernels/split_lib.h b/tensorflow/core/kernels/split_lib.h
index 240cce46e0..ff92ffeeb3 100644
--- a/tensorflow/core/kernels/split_lib.h
+++ b/tensorflow/core/kernels/split_lib.h
@@ -48,6 +48,17 @@ struct Split<Eigen::ThreadPoolDevice, T> {
const Eigen::DSizes<Eigen::DenseIndex, 3>& slice_sizes);
};
+#ifdef TENSORFLOW_USE_SYCL
+template <typename T>
+struct Split<Eigen::SyclDevice, T> {
+ void operator()(const Eigen::SyclDevice& d,
+ typename TTypes<T, 3>::Tensor output,
+ typename TTypes<T, 3>::ConstTensor input,
+ const Eigen::DSizes<Eigen::DenseIndex, 3>& slice_indices,
+ const Eigen::DSizes<Eigen::DenseIndex, 3>& slice_sizes);
+};
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace functor
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/split_lib_cpu.cc b/tensorflow/core/kernels/split_lib_cpu.cc
index 41b2d6f0f5..e377e4d97a 100644
--- a/tensorflow/core/kernels/split_lib_cpu.cc
+++ b/tensorflow/core/kernels/split_lib_cpu.cc
@@ -43,5 +43,24 @@ TF_CALL_ALL_TYPES(DEFINE_CPU_KERNELS)
DEFINE_CPU_KERNELS(quint8)
DEFINE_CPU_KERNELS(bfloat16)
+#ifdef TENSORFLOW_USE_SYCL
+template <typename T>
+void Split<Eigen::SyclDevice, T>::operator()(
+ const Eigen::SyclDevice& d, typename TTypes<T, 3>::Tensor output,
+ typename TTypes<T, 3>::ConstTensor input,
+ const Eigen::DSizes<Eigen::DenseIndex, 3>& slice_indices,
+ const Eigen::DSizes<Eigen::DenseIndex, 3>& slice_sizes) {
+ if (output.size() < 131072) {
+ output = input.slice(slice_indices, slice_sizes);
+ } else {
+ output.device(d) = input.slice(slice_indices, slice_sizes);
+ }
+}
+
+#define DEFINE_SYCL_KERNELS(T) template struct Split<Eigen::SyclDevice, T>;
+
+TF_CALL_GPU_NUMBER_TYPES(DEFINE_SYCL_KERNELS)
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace functor
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/split_op.cc b/tensorflow/core/kernels/split_op.cc
index 4b12e1f995..cca2fc41c2 100644
--- a/tensorflow/core/kernels/split_op.cc
+++ b/tensorflow/core/kernels/split_op.cc
@@ -36,6 +36,9 @@ namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
template <typename Device, typename T>
class SplitOpBase : public OpKernel {
@@ -243,6 +246,75 @@ class SplitOpGPU : public SplitOpBase<GPUDevice, T> {
};
#endif // GOOGLE_CUDA
+#ifdef TENSORFLOW_USE_SYCL
+
+template <typename T>
+class SplitOpSYCL : public SplitOpBase<SYCLDevice, T> {
+ public:
+ typedef SplitOpBase<SYCLDevice, T> Base;
+ explicit SplitOpSYCL(OpKernelConstruction* c) : Base(c) {}
+
+ void Compute(OpKernelContext* context) override {
+ bool done = false;
+ Base::ComputeEasyCases(context, &done);
+ if (!context->status().ok() || done) {
+ return;
+ }
+ const int32 split_dim = context->input(0).flat<int32>()(0);
+ const int32 num_split = Base::num_outputs();
+ const Tensor& input = context->input(1);
+ const TensorShape& input_shape = input.shape();
+
+ // Android also uses int32 indexing, so check here also.
+ OP_REQUIRES(
+ context, FastBoundsCheck(input.NumElements(),
+ std::numeric_limits<Eigen::DenseIndex>::max()),
+ errors::InvalidArgument("Split requires input size < ",
+ std::numeric_limits<Eigen::DenseIndex>::max()));
+
+ Eigen::DenseIndex prefix_dim_size;
+ Eigen::DenseIndex split_dim_size;
+ Eigen::DenseIndex suffix_dim_size;
+
+ std::tie(prefix_dim_size, split_dim_size, suffix_dim_size) =
+ Base::template SetDims<Eigen::DenseIndex>(input_shape, split_dim);
+ auto input_reshaped =
+ input.shaped<T, 3>({prefix_dim_size, split_dim_size, suffix_dim_size});
+
+ const int64 split_dim_output_size = split_dim_size / num_split;
+ TensorShape output_shape(input_shape);
+ output_shape.set_dim(split_dim, split_dim_output_size);
+
+ Eigen::DSizes<Eigen::DenseIndex, 3> indices{0, 0, 0};
+ Eigen::DSizes<Eigen::DenseIndex, 3> sizes{
+ prefix_dim_size, split_dim_output_size, suffix_dim_size};
+
+ for (int i = 0; i < num_split; ++i) {
+ Tensor* result = nullptr;
+ OP_REQUIRES_OK(context,
+ context->allocate_output(i, output_shape, &result));
+ if (prefix_dim_size * split_dim_output_size * suffix_dim_size > 0) {
+ Eigen::DSizes<Eigen::DenseIndex, 3> slice_indices;
+ Eigen::DSizes<Eigen::DenseIndex, 3> slice_sizes;
+ for (int j = 0; j < 3; ++j) {
+ slice_indices[j] = indices[j];
+ slice_sizes[j] = sizes[j];
+ }
+
+ auto result_shaped = result->shaped<T, 3>(
+ {prefix_dim_size, split_dim_output_size, suffix_dim_size});
+
+ functor::Split<SYCLDevice, T>()(context->eigen_device<SYCLDevice>(),
+ result_shaped, input_reshaped,
+ slice_indices, slice_sizes);
+ }
+ indices[1] += split_dim_output_size;
+ }
+ }
+};
+
+#endif // TENSORFLOW_USE_SYCL
+
#define REGISTER_SPLIT(type) \
REGISTER_KERNEL_BUILDER(Name("Split") \
.Device(DEVICE_CPU) \
@@ -269,4 +341,17 @@ TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU);
#endif // GOOGLE_CUDA
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL(type) \
+ REGISTER_KERNEL_BUILDER(Name("Split") \
+ .Device(DEVICE_SYCL) \
+ .TypeConstraint<type>("T") \
+ .HostMemory("split_dim"), \
+ SplitOpSYCL<type>)
+
+TF_CALL_GPU_NUMBER_TYPES(REGISTER_SYCL);
+#undef REGISTER_SYCL
+
+#endif // TENSORFLOW_USE_SYCL
+
} // end namespace tensorflow
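
Note on SplitOpSYCL::Compute above (illustration only, not part of the change): the input is viewed as a rank-3 tensor {prefix, split_dim_size, suffix}, each output has extent split_dim_size / num_split along the middle dimension, and the slice offset advances by that amount per output. A host-only Eigen sketch of that slicing bookkeeping, with hypothetical shapes and no SYCL device:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      // Think of an input of shape [2, 6] split into 3 along dim 1:
      // prefix_dim_size = 2, split_dim_size = 6, suffix_dim_size = 1,
      // so each output slice has extent 6 / 3 = 2 in the middle dimension.
      Eigen::Tensor<float, 3> input(2, 6, 1);
      for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 6; ++j) input(i, j, 0) = 10.0f * i + j;

      const int num_split = 3;
      const Eigen::DenseIndex split_size = 6 / num_split;
      Eigen::DSizes<Eigen::DenseIndex, 3> indices(0, 0, 0);
      Eigen::DSizes<Eigen::DenseIndex, 3> sizes(2, split_size, 1);

      for (int s = 0; s < num_split; ++s) {
        // functor::Split ultimately evaluates output = input.slice(...).
        Eigen::Tensor<float, 3> out = input.slice(indices, sizes);
        std::cout << "split " << s << ": " << out(0, 0, 0) << " .. "
                  << out(1, split_size - 1, 0) << "\n";
        indices[1] += split_size;  // advance along the split dimension
      }
      return 0;
    }
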
diff --git a/tensorflow/core/kernels/tile_ops.cc b/tensorflow/core/kernels/tile_ops.cc
index e49a319aed..36cabaaf7d 100644
--- a/tensorflow/core/kernels/tile_ops.cc
+++ b/tensorflow/core/kernels/tile_ops.cc
@@ -40,6 +40,9 @@ namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
// Forward declarations of functors that will be defined in
// tile_ops_cpu_impl*.cc and tile_ops_gpu.cu.cc.
@@ -225,6 +228,11 @@ inline void TileOp<Device>::HandleCase(
#define HANDLE_TYPE_NAME_GPU(T) \
HANDLE_CASE_DIM(GPUDevice, T, DataTypeToEnum<T>::value);
+#ifdef TENSORFLOW_USE_SYCL
+#define HANDLE_TYPE_NAME_SYCL(T) \
+ HANDLE_CASE_DIM(SYCLDevice, T, DataTypeToEnum<T>::value);
+#endif // TENSORFLOW_USE_SYCL
+
TF_CALL_bool(HANDLE_TYPE_NAME_CPU);
TF_CALL_float(HANDLE_TYPE_NAME_CPU);
TF_CALL_double(HANDLE_TYPE_NAME_CPU);
@@ -248,8 +256,15 @@ TF_CALL_complex64(HANDLE_TYPE_NAME_GPU);
TF_CALL_complex128(HANDLE_TYPE_NAME_GPU);
#endif // GOOGLE_CUDA
+#ifdef TENSORFLOW_USE_SYCL
+TF_CALL_float(HANDLE_TYPE_NAME_SYCL);
+#endif // TENSORFLOW_USE_SYCL
+
#undef HANDLE_TYPE_NAME_CPU
#undef HANDLE_TYPE_NAME_GPU
+#ifdef TENSORFLOW_USE_SYCL
+#undef HANDLE_TYPE_NAME_SYCL
+#endif // TENSORFLOW_USE_SYCL
#undef HANDLE_CASE_DIM
#undef HANDLE_CASE
@@ -578,4 +593,14 @@ REGISTER_KERNEL_BUILDER(Name("TileGrad")
TileGradientOp<GPUDevice>);
#endif // GOOGLE_CUDA
+
+#ifdef TENSORFLOW_USE_SYCL
+REGISTER_KERNEL_BUILDER(Name("Tile")
+ .Device(DEVICE_SYCL)
+ .TypeConstraint<float>("T")
+ .TypeConstraint<int32>("Tmultiples")
+ .HostMemory("multiples"),
+ TileOp<SYCLDevice>);
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/tile_ops_cpu_impl.h b/tensorflow/core/kernels/tile_ops_cpu_impl.h
index 9cdf69ad0b..650c739ed5 100644
--- a/tensorflow/core/kernels/tile_ops_cpu_impl.h
+++ b/tensorflow/core/kernels/tile_ops_cpu_impl.h
@@ -62,6 +62,30 @@ TF_CALL_complex128(DEFINE_TYPE);
#undef DEFINE_DIM
#undef DEFINE_TYPE
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+
+// Register functors used for TileOp.
+#define DEFINE_DIM(T, NDIM) template struct Tile<SYCLDevice, T, NDIM>;
+#define DEFINE_TYPE(T) DEFINE_DIM(T, CPU_PROVIDED_IXDIM)
+
+TF_CALL_float(DEFINE_TYPE);
+
+#undef DEFINE_DIM
+#undef DEFINE_TYPE
+
+// Register functors used for TileGradientOp.
+#define DEFINE_DIM(T, NDIM) \
+ template struct TileGrad<SYCLDevice, T, NDIM>; \
+ template struct ReduceAndReshape<SYCLDevice, T, NDIM, 1>;
+#define DEFINE_TYPE(T) DEFINE_DIM(T, CPU_PROVIDED_IXDIM)
+
+TF_CALL_float(DEFINE_TYPE);
+
+#undef DEFINE_DIM
+#undef DEFINE_TYPE
+#endif // TENSORFLOW_USE_SYCL
+
} // end namespace functor
} // end namespace tensorflow
diff --git a/tensorflow/core/kernels/training_ops.cc b/tensorflow/core/kernels/training_ops.cc
index 172449a998..641c991a7e 100644
--- a/tensorflow/core/kernels/training_ops.cc
+++ b/tensorflow/core/kernels/training_ops.cc
@@ -46,6 +46,17 @@ struct ApplyGradientDescent<CPUDevice, T> {
}
};
+#ifdef TENSORFLOW_USE_SYCL
+template <typename T>
+struct ApplyGradientDescent<SYCLDevice, T> {
+ void operator()(const SYCLDevice& d, typename TTypes<T>::Flat var,
+ typename TTypes<T>::ConstScalar lr,
+ typename TTypes<T>::ConstFlat grad) {
+ var.device(d) -= grad * lr();
+ }
+};
+#endif
+
template <typename T>
struct ApplyAdadelta<CPUDevice, T> {
void operator()(const CPUDevice& d, typename TTypes<T>::Flat var,
@@ -357,6 +368,12 @@ TF_CALL_half(REGISTER_CPU_KERNELS);
TF_CALL_float(REGISTER_CPU_KERNELS);
TF_CALL_double(REGISTER_CPU_KERNELS);
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL_KERNELS(T) REGISTER_KERNELS(SYCL, T);
+TF_CALL_float(REGISTER_SYCL_KERNELS);
+#undef REGISTER_SYCL_KERNELS
+#endif
+
#if GOOGLE_CUDA
// Forward declarations of the functor specializations for GPU.
namespace functor {
diff --git a/tensorflow/core/kernels/transpose_functor_cpu.cc b/tensorflow/core/kernels/transpose_functor_cpu.cc
index f8c87e7e2e..30b82f1843 100644
--- a/tensorflow/core/kernels/transpose_functor_cpu.cc
+++ b/tensorflow/core/kernels/transpose_functor_cpu.cc
@@ -114,4 +114,28 @@ Status DoTranspose<Device>(const Device& d, const Tensor& in,
return Status::OK();
}
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+
+template <>
+Status DoTranspose<SYCLDevice>(const SYCLDevice& d, const Tensor& in,
+ const gtl::ArraySlice<int32> perm, Tensor* out) {
+ CHECK_GE(in.dims(), 2);
+ CHECK_EQ(in.dims(), out->dims());
+ CHECK_EQ(in.dims(), perm.size());
+ CHECK_EQ(in.dtype(), out->dtype());
+ switch (in.dtype()) {
+
+ case DT_FLOAT:
+ case DT_INT32:
+ internal::Transpose<SYCLDevice, uint32>(d, in, perm, out);
+ break;
+
+ default:
+ return errors::Unimplemented("Unsupported dtype on SYCL: ", in.dtype());
+ }
+ return Status::OK();
+}
+#endif // TENSORFLOW_USE_SYCL
+
} // namespace tensorflow
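
Note on the hunk above (illustration only, not part of the change): both DT_FLOAT and DT_INT32 are routed through internal::Transpose<SYCLDevice, uint32>; a transpose only moves elements, so any 4-byte type can be permuted through its uint32 bit pattern without changing values. A tiny host-side illustration of why that is safe (an assumption about the intent, not TensorFlow code):

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    int main() {
      // Transpose a 2x3 float matrix by copying raw 4-byte payloads as uint32.
      float src[2][3] = {{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}};
      uint32_t dst_bits[3][2];
      for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 3; ++j)
          std::memcpy(&dst_bits[j][i], &src[i][j], sizeof(uint32_t));

      // Reinterpreting the moved bits as float recovers the original values.
      float check;
      std::memcpy(&check, &dst_bits[2][1], sizeof(float));
      std::cout << check << "\n";  // prints 6: src[1][2] ended up at dst[2][1]
      return 0;
    }
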
diff --git a/tensorflow/core/kernels/unpack_op.cc b/tensorflow/core/kernels/unpack_op.cc
index 29959cb187..2a14fa3265 100644
--- a/tensorflow/core/kernels/unpack_op.cc
+++ b/tensorflow/core/kernels/unpack_op.cc
@@ -32,6 +32,10 @@ namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+#endif // TENSORFLOW_USE_SYCL
+
template <typename Device, typename T>
class UnpackOp : public OpKernel {
public:
@@ -149,4 +153,25 @@ REGISTER_KERNEL_BUILDER(Name("Unpack")
#endif // GOOGLE_CUDA
+#ifdef TENSORFLOW_USE_SYCL
+#define REGISTER_SYCL(type) \
+ REGISTER_KERNEL_BUILDER( \
+ Name("Unpack").Device(DEVICE_SYCL).TypeConstraint<type>("T"), \
+ UnpackOp<SYCLDevice, type>)
+
+REGISTER_SYCL(float);
+#undef REGISTER_SYCL
+
+// A special SYCL kernel for int32.
+// TODO(b/25387198): Also enable int32 in device memory. This kernel
+// registration requires all int32 inputs and outputs to be in host memory.
+REGISTER_KERNEL_BUILDER(Name("Unpack")
+ .Device(DEVICE_SYCL)
+ .HostMemory("value")
+ .HostMemory("output")
+ .TypeConstraint<int32>("T"),
+ UnpackOp<CPUDevice, int32>);
+
+#endif // TENSORFLOW_USE_SYCL
+
} // end namespace tensorflow
diff --git a/tensorflow/core/ops/array_ops.cc b/tensorflow/core/ops/array_ops.cc
index a3b0512304..4741bc968a 100644
--- a/tensorflow/core/ops/array_ops.cc
+++ b/tensorflow/core/ops/array_ops.cc
@@ -416,24 +416,49 @@ REGISTER_OP("SplitV")
.Attr("T: type")
.Attr("Tlen: {int32, int64} = DT_INT64")
.SetShapeFn([](InferenceContext* c) {
- ShapeHandle unused;
+ DimensionHandle split_dimension;
+ TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(2, &split_dimension));
int32 num_outputs = c->num_outputs();
- // Return unknown shapes with the same rank as the input
- // or unknown rank if input's rank isn't known
- // can't determine exact shapes until runtime because
- // we don't know where the tensor containing the split sizes
- // is located
- int32 rank = c->Rank(c->input(0));
+ ShapeHandle input = c->input(0);
+ int32 rank = c->Rank(input);
ShapeHandle output_shape;
+ const Tensor* size_splits = c->input_tensor(1);
if (rank == InferenceContext::kUnknownRank) {
+      // If the rank of the input tensor is unknown, then return unknown shapes.
output_shape = c->UnknownShape();
+ for (int i = 0; i < num_outputs; ++i) {
+ c->set_output(i, output_shape);
+ }
} else if (rank == 0) {
+      // Return an error if the input is a scalar.
return errors::InvalidArgument("Can't split scalars");
- } else {
+ } else if (size_splits == nullptr || !c->ValueKnown(split_dimension)) {
+      // If the split dimension or the tensor containing the split sizes is
+      // unknown, then return unknown shapes of the same rank as the input.
output_shape = c->UnknownShapeOfRank(rank);
- }
- for (int i = 0; i < num_outputs; ++i) {
- c->set_output(i, output_shape);
+ for (int i = 0; i < num_outputs; ++i) {
+ c->set_output(i, output_shape);
+ }
+ } else {
+      // Determine the output shapes if the split dimension and split sizes are known.
+ int64 split_dim = c->Value(split_dimension);
+ TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, split_dim + 1, &input));
+ std::vector<int64> data;
+ if (size_splits->dtype() == DT_INT32) {
+ data = AsInt64<int32>(size_splits, size_splits->shape().dim_size(0));
+ } else {
+ data = AsInt64<int64>(size_splits, size_splits->shape().dim_size(0));
+ }
+ if (num_outputs != data.size()) {
+ return errors::InvalidArgument(
+ "Length of size_splits should be equal to num_outputs");
+ }
+ for (int i = 0; i < num_outputs; ++i) {
+ output_shape = c->UnknownShapeOfRank(rank);
+ TF_RETURN_IF_ERROR(
+ c->ReplaceDim(input, split_dim, c->MakeDim(data[i]), &output_shape));
+ c->set_output(i, output_shape);
+ }
}
return Status::OK();
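
Note on the new SplitV shape function above (illustration only, not part of the change), as a quick worked example with hypothetical shapes:

    // input shape: [8, 10, 3],  split_dim = 1,  size_splits = [2, 3, 5]
    // With both the split dimension and size_splits known at graph-construction
    // time, the three outputs get shapes [8, 2, 3], [8, 3, 3] and [8, 5, 3]:
    // each output copies the input rank and replaces dim 1 with its entry in
    // size_splits. If either value is unknown, every output falls back to
    // UnknownShapeOfRank(3), and an unknown input rank yields UnknownShape().
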
diff --git a/tensorflow/core/platform/default/logging.cc b/tensorflow/core/platform/default/logging.cc
index 1d03725c78..56985eec15 100644
--- a/tensorflow/core/platform/default/logging.cc
+++ b/tensorflow/core/platform/default/logging.cc
@@ -84,39 +84,49 @@ void LogMessage::GenerateLogMessage() {
namespace {
-int64 MinLogLevel() {
- const char* tf_env_var_val = getenv("TF_CPP_MIN_LOG_LEVEL");
+// Parse log level (int64) from environment variable (char*)
+int64 LogLevelStrToInt(const char* tf_env_var_val) {
if (tf_env_var_val == nullptr) {
return 0;
}
// Ideally we would use env_var / safe_strto64, but it is
// hard to use here without pulling in a lot of dependencies,
- // so we do a poor-man's parsing.
+  // so we use std::istringstream instead.
string min_log_level(tf_env_var_val);
- if (min_log_level == "1") {
- // Maps to WARNING
- return 1;
- } else if (min_log_level == "2") {
- // Maps to ERROR
- return 2;
- } else if (min_log_level == "3") {
- // Maps to FATAL
- return 3;
- } else {
- // Maps to INFO (the default).
- return 0;
+ std::istringstream ss(min_log_level);
+ int64 level;
+ if (!(ss >> level)) {
+    // Invalid log level setting; fall back to the default (0).
+ level = 0;
}
+
+ return level;
+}
+
+int64 MinLogLevelFromEnv() {
+ const char* tf_env_var_val = getenv("TF_CPP_MIN_LOG_LEVEL");
+ return LogLevelStrToInt(tf_env_var_val);
+}
+
+int64 MinVLogLevelFromEnv() {
+ const char* tf_env_var_val = getenv("TF_CPP_MIN_VLOG_LEVEL");
+ return LogLevelStrToInt(tf_env_var_val);
}
} // namespace
LogMessage::~LogMessage() {
// Read the min log level once during the first call to logging.
- static int64 min_log_level = MinLogLevel();
+ static int64 min_log_level = MinLogLevelFromEnv();
if (TF_PREDICT_TRUE(severity_ >= min_log_level)) GenerateLogMessage();
}
+int64 LogMessage::MinVLogLevel() {
+ static int64 min_vlog_level = MinVLogLevelFromEnv();
+ return min_vlog_level;
+}
+
LogMessageFatal::LogMessageFatal(const char* file, int line)
: LogMessage(file, line, FATAL) {}
LogMessageFatal::~LogMessageFatal() {
diff --git a/tensorflow/core/platform/default/logging.h b/tensorflow/core/platform/default/logging.h
index 961fb8b4ad..04ff9e12b6 100644
--- a/tensorflow/core/platform/default/logging.h
+++ b/tensorflow/core/platform/default/logging.h
@@ -41,6 +41,11 @@ class LogMessage : public std::basic_ostringstream<char> {
LogMessage(const char* fname, int line, int severity);
~LogMessage();
+ // Returns the minimum log level for VLOG statements.
+ // E.g., if MinVLogLevel() is 2, then VLOG(2) statements will produce output,
+ // but VLOG(3) will not. Defaults to 0.
+ static int64 MinVLogLevel();
+
protected:
void GenerateLogMessage();
@@ -71,11 +76,18 @@ class LogMessageFatal : public LogMessage {
#define LOG(severity) _TF_LOG_##severity
-// TODO(jeff): Define a proper implementation of VLOG_IS_ON
+#ifdef IS_MOBILE_PLATFORM
+// Turn VLOG off on mobile platforms to keep the binary size small.
#define VLOG_IS_ON(lvl) ((lvl) <= 0)
+#else
+// Otherwise, set the TF_CPP_MIN_VLOG_LEVEL environment variable to adjust the
+// minimum log level of VLOG.
+#define VLOG_IS_ON(lvl) \
+ ((lvl) <= ::tensorflow::internal::LogMessage::MinVLogLevel())
+#endif
#define VLOG(lvl) \
- if (VLOG_IS_ON(lvl)) \
+ if (TF_PREDICT_FALSE(VLOG_IS_ON(lvl))) \
::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::INFO)
// CHECK dies with a fatal error if condition is not true. It is *not*
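
Note on the two logging hunks above (illustration only, not part of the change): VLOG verbosity becomes runtime-configurable. VLOG_IS_ON(lvl) now compares lvl against a level parsed once from the TF_CPP_MIN_VLOG_LEVEL environment variable (defaulting to 0, and cached in a function-local static in the real code), except on mobile builds where VLOG stays compiled out. A stand-alone sketch of that gating logic, with made-up names rather than the TensorFlow macros:

    #include <cstdlib>
    #include <iostream>
    #include <sstream>

    // Same parsing strategy as LogLevelStrToInt above: missing or non-numeric
    // values fall back to 0, i.e. VLOG(1) and higher stay silent.
    long VLogLevelFromEnv() {
      const char* val = std::getenv("TF_CPP_MIN_VLOG_LEVEL");
      if (val == nullptr) return 0;
      std::istringstream ss(val);
      long level = 0;
      if (!(ss >> level)) level = 0;
      return level;
    }

    #define MY_VLOG_IS_ON(lvl) ((lvl) <= VLogLevelFromEnv())

    int main() {
      // With TF_CPP_MIN_VLOG_LEVEL=2 in the environment, levels 1 and 2 fire
      // and level 3 is skipped by the predicate.
      for (int lvl = 1; lvl <= 3; ++lvl) {
        if (MY_VLOG_IS_ON(lvl)) std::cout << "VLOG(" << lvl << ") fires\n";
      }
      return 0;
    }
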
diff --git a/tensorflow/examples/how_tos/__init__.py b/tensorflow/examples/how_tos/__init__.py
new file mode 100644
index 0000000000..878841c184
--- /dev/null
+++ b/tensorflow/examples/how_tos/__init__.py
@@ -0,0 +1,3 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
diff --git a/tensorflow/examples/udacity/README.md b/tensorflow/examples/udacity/README.md
index 2814e5c62a..6e40c3bae6 100644
--- a/tensorflow/examples/udacity/README.md
+++ b/tensorflow/examples/udacity/README.md
@@ -54,6 +54,9 @@ to get the ip of the new virtual machine. To switch from default virtual machine
Note that `docker-machine env tensorflow` outputs some environment variables such as `DOCKER_HOST`. Your docker client is then connected to the docker host in the virtual machine `tensorflow`.
+* **I'm getting the error - docker: Cannot connect to the Docker daemon. Is the docker daemon running on this host? - when I run 'docker run'.**
+
+This is a permissions issue; a popular answer for Linux and Mac OS X is provided [here](http://stackoverflow.com/questions/21871479/docker-cant-connect-to-docker-daemon) on Stack Overflow.
Notes for anyone needing to build their own containers (mostly instructors)
===========================================================================
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md
index 788d2066ad..8dc62c4c18 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md
@@ -1,8 +1,252 @@
- - -
+#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata}
+
+Magic attribute generated for "run_metadata" proto field.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag}
+
+Magic attribute generated for "tag" proto field.
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md
new file mode 100644
index 0000000000..bf17320a5a
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md
@@ -0,0 +1,17 @@
+### `tf.merge_all_summaries(*args, **kwargs)` {#merge_all_summaries}
+
+Merges all summaries collected in the default graph. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.merge_all.
+
+ Args:
+ key: `GraphKey` used to collect the summaries. Defaults to
+ `GraphKeys.SUMMARIES`.
+
+ Returns:
+ If no summaries were collected, returns None. Otherwise returns a scalar
+ `Tensor` of type `string` containing the serialized `Summary` protocol
+ buffer resulting from the merging.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md
new file mode 100644
index 0000000000..6220d3641b
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md
@@ -0,0 +1,49 @@
+### `tf.image_summary(*args, **kwargs)` {#image_summary}
+
+Outputs a `Summary` protocol buffer with images. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.image. Note that tf.summary.image uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, the max_images argument was renamed to max_outputs.
+
+ The summary has up to `max_images` summary values containing images. The
+ images are built from `tensor` which must be 4-D with shape `[batch_size,
+ height, width, channels]` and where `channels` can be:
+
+ * 1: `tensor` is interpreted as Grayscale.
+ * 3: `tensor` is interpreted as RGB.
+ * 4: `tensor` is interpreted as RGBA.
+
+ The images have the same number of channels as the input tensor. For float
+ input, the values are normalized one image at a time to fit in the range
+ `[0, 255]`. `uint8` values are unchanged. The op uses two different
+ normalization algorithms:
+
+ * If the input values are all positive, they are rescaled so the largest one
+ is 255.
+
+ * If any input value is negative, the values are shifted so input value 0.0
+ is at 127. They are then rescaled so that either the smallest value is 0,
+ or the largest one is 255.
+
+ The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+ build the `tag` of the summary values:
+
+ * If `max_images` is 1, the summary value tag is '*tag*/image'.
+ * If `max_images` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+
+ Args:
+ tag: A scalar `Tensor` of type `string`. Used to build the `tag`
+ of the summary values.
+ tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
+ width, channels]` where `channels` is 1, 3, or 4.
+ max_images: Max number of batch elements to generate images for.
+ collections: Optional list of ops.GraphKeys. The collections to add the
+ summary to. Defaults to [ops.GraphKeys.SUMMARIES]
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md
index 19532f7cc3..bce704ef4f 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md
@@ -1,8 +1,245 @@
- - -
+#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint}
+
+Magic attribute generated for "type_hint" proto field.
+
+
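+A short sketch of the serialize/parse round trip these standard protocol-buffer
+methods provide; the `type_hint` value below is an illustrative assumption:
+
+```python
+import tensorflow as tf
+
+desc = tf.summary.SummaryDescription(type_hint='scalar')
+data = desc.SerializeToString()  # bytes holding the serialized message
+
+restored = tf.summary.SummaryDescription.FromString(data)
+assert restored.type_hint == 'scalar'
+
+copy = tf.summary.SummaryDescription()
+copy.CopyFrom(restored)  # clears `copy`, then merges `restored` into it
+```
+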
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md
index c6dfad01d3..a73cf08bc6 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md
@@ -175,125 +175,6 @@ Checks that for all elements of farray1 and farray2
- - -
-#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween}
-
-Asserts that value is between minv and maxv (inclusive).
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails}
-
-Asserts a shell command fails and the error matches a regex in a list.
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: the list of regular expression strings.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds}
-
-Asserts that a shell command succeeds (i.e. exits with code 0).
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: List of regular expression byte strings that match success.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence}
-
-Assert that "container" contains "subsequence" as an exact subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, and without other elements interspersed. For example, [1, 2, 3] is an
-exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be an exact subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder}
-
-Asserts that the strings provided are found in the target in order.
-
-This may be useful for checking HTML output.
-
-##### Args:
-
-
-* <b>`strings`</b>: A list of strings, such as [ 'fox', 'dog' ]
-* <b>`target`</b>: A target string in which to look for the strings, such as
- 'The quick brown fox jumped over the lazy dog'.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence}
-
-Assert that "container" contains "subsequence" as a subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, but possibly with other elements interspersed. For example, [1, 2, 3]
-is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be a subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset}
-
-Checks whether actual iterable is a superset of expected iterable.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual}
-
-An unordered sequence specific comparison.
-
-Equivalent to assertItemsEqual(). This method is a compatibility layer
-for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
-assertCountEqual() calls.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual}
Asserts that the two given devices are the same.
@@ -314,49 +195,10 @@ Checks whether actual is a superset of expected.
- - -
-#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual}
+#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual}
-Raises AssertionError if a and b are not equal dictionaries.
-
-##### Args:
-
-
-* <b>`a`</b>: A dict, the expected value.
-* <b>`b`</b>: A dict, the actual value.
-* <b>`msg`</b>: An optional str, the associated message.
-
-##### Raises:
-
-
-* <b>`AssertionError`</b>: if the dictionaries are not equal.
-
-
-- - -
-
-#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty}
-
-Assert that an object has zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-- - -
-
-#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith}
-
-Assert that actual.endswith(expected_end) is True.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`expected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
- - -
@@ -440,11 +282,10 @@ Included for symmetry with assertIsNone.
- - -
-#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual}
-
-An unordered sequence specific comparison.
+#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual}
-It asserts that actual_seq and expected_seq have the same element counts.
+An unordered sequence specific comparison. It asserts that
+actual_seq and expected_seq have the same element counts.
Equivalent to::
self.assertEqual(Counter(iter(actual_seq)),
@@ -457,30 +298,6 @@ Asserts that each element has the same count in both sequences.
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual}
-
-Asserts that the JSON objects defined in two strings are equal.
-
-A summary of the differences will be included in the failure message
-using assertSameStructure.
-
-##### Args:
-
-
-* <b>`first`</b>: A string containing JSON to decode and compare to second.
-* <b>`second`</b>: A string containing JSON to decode and compare to first.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
- - -
@@ -552,13 +369,6 @@ if not.
- - -
-#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements}
-
-Checks whether actual iterable and expected iterable are disjoint.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)` {#TestCase.assertNotAlmostEqual}
Fail if the two objects are equal as determined by their
@@ -589,33 +399,6 @@ Objects that are equal automatically fail.
- - -
-#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty}
-
-Assert that an object has non-zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith}
-
-Assert that actual.endswith(unexpected_end) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual}
Fail if the two objects are equal as determined by the '!='
@@ -653,20 +436,6 @@ Fail the test if the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith}
-
-Assert that actual.startswith(unexpected_start) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_start`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals}
Asserts that message is same as parsed expected_message_ascii.
@@ -741,38 +510,6 @@ Asserts that the message in a raised exception matches a regexp.
- - -
-#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch}
-
-Asserts that the message in a raised exception equals the given string.
-
-Unlike assertRaisesRegexp, this method takes a literal string, not
-a regular expression.
-
-with self.assertRaisesWithLiteralMatch(ExType, 'message'):
- DoSomething()
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_exception_message`</b>: String message expected in the raised
-    exception. For a raised exception e, expected_exception_message must
- equal str(e).
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra kwargs.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
-  self.failureException if callable_obj does not raise a matching exception.
-
-
-- - -
-
#### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch}
Returns a context manager to enclose code expected to raise an exception.
@@ -797,71 +534,6 @@ predicate search.
- - -
-#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch}
-
-Asserts that the message in a raised exception matches the given regexp.
-
-This is just a wrapper around assertRaisesRegexp. Please use
-assertRaisesRegexp instead of assertRaisesWithRegexpMatch.
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_regexp`</b>: Regexp (re pattern object or string) expected to be
- found in error message.
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra keyword args.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
-  self.failureException if callable_obj does not raise a matching exception.
-
-
-- - -
-
-#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch}
-
-Asserts that at least one regex in regexes matches str.
-
- If possible you should use assertRegexpMatches, which is a simpler
- version of this method. assertRegexpMatches takes a single regular
- expression (a string or re compiled object) instead of a list.
-
- Notes:
- 1. This function uses substring matching, i.e. the matching
- succeeds if *any* substring of the error message matches *any*
- regex in the list. This is more convenient for the user than
- full-string matching.
-
- 2. If regexes is the empty list, the matching will always fail.
-
- 3. Use regexes=[''] for a regex that will always pass.
-
- 4. '.' matches any single character *except* the newline. To
- match any character, use '(.|
-)'.
-
- 5. '^' matches the beginning of each line, not just the beginning
- of the string. Similarly, '$' matches the end of each line.
-
- 6. An exception will be thrown if regexes contains an invalid
- regex.
-
- Args:
- actual_str: The string we try to match with the items in regexes.
- regexes: The regular expressions we want to match against str.
- See "Notes" above for detailed notes on how this is interpreted.
- message: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches}
Fail the test unless the text matches the regular expression.
@@ -869,79 +541,6 @@ Fail the test unless the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements}
-
-Assert that two sequences have the same elements (in any order).
-
-This method, unlike assertItemsEqual, doesn't care about any
-duplicates in the expected and actual sequences.
-
- >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
- # Doesn't raise an AssertionError
-
-If possible, you should use assertItemsEqual instead of
-assertSameElements.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure}
-
-Asserts that two values contain the same structural content.
-
-The two arguments should be data trees consisting of trees of dicts and
-lists. They will be deeply compared by walking into the contents of dicts
-and lists; other items will be compared using the == operator.
-If the two structures differ in content, the failure message will indicate
-the location within the structures where the first difference is found.
-This may be helpful when comparing large structures.
-
-##### Args:
-
-
-* <b>`a`</b>: The first structure to compare.
-* <b>`b`</b>: The second structure to compare.
-* <b>`aname`</b>: Variable name to use for the first structure in assertion messages.
-* <b>`bname`</b>: Variable name to use for the second structure.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual}
-
-An approximate equality assertion for ordered sequences.
-
-Fail if the two sequences are unequal as determined by their value
-differences rounded to the given number of decimal places (default 7) and
-comparing to zero, or by comparing that the difference between each value
-in the two sequences is more than the given delta.
-
-Note that decimal places (from zero) are usually not the same as significant
-digits (measured from the most significant digit).
-
-If the two sequences compare equal then they will automatically compare
-almost equal.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`places`</b>: The number of decimal places to compare.
-* <b>`msg`</b>: The message to be printed if the test fails.
-* <b>`delta`</b>: The OK difference between compared values.
-
-
-- - -
-
#### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual}
An equality assertion for ordered sequences (like lists and tuples).
@@ -962,26 +561,6 @@ which can be indexed, has a length, and has an equality operator.
- - -
-#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith}
-
-An equality assertion for the beginning of ordered sequences.
-
-If prefix is an empty sequence, it will raise an error unless whole is also
-an empty sequence.
-
-If prefix is not a sequence, it will raise an error if the first element of
-whole does not match.
-
-##### Args:
-
-
-* <b>`prefix`</b>: A sequence expected at the beginning of the whole parameter.
-* <b>`whole`</b>: The sequence in which to look for prefix.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual}
A set-specific equality assertion.
@@ -1033,51 +612,6 @@ Assert that actual.startswith(expected_start) is True.
- - -
-#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered}
-
-Asserts that total ordering has been implemented correctly.
-
-For example, say you have a class A that compares only on its attribute x.
-Comparators other than __lt__ are omitted for brevity.
-
-class A(object):
- def __init__(self, x, y):
- self.x = x
- self.y = y
-
- def __hash__(self):
- return hash(self.x)
-
- def __lt__(self, other):
- try:
- return self.x < other.x
- except AttributeError:
- return NotImplemented
-
-assertTotallyOrdered will check that instances can be ordered correctly.
-For example,
-
-self.assertTotallyOrdered(
- [None], # None should come before everything else.
- [1], # Integers sort earlier.
- [A(1, 'a')],
- [A(2, 'b')], # 2 is after 1.
- [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
- [A(4, 'z')],
- ['foo']) # Strings sort last.
-
-##### Args:
-
-
-* <b>`*groups`</b>: A list of groups of elements. Each group of elements is a list
- of objects that are equal. The elements in each group must be less than
- the elements in the group after it. For example, these groups are
- totally ordered: [None], [1], [2, 2], [3].
-* <b>`**kwargs`</b>: optional msg keyword argument can be passed.
-
-
-- - -
-
#### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue}
Check that the expression is true.
@@ -1100,13 +634,6 @@ A tuple-specific equality assertion.
- - -
-#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual}
-
-Asserts that urls are equal, ignoring ordering of query params.
-
-
-- - -
-
#### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_}
Check that the expression is true.
@@ -1166,9 +693,9 @@ tearDown.
- - -
-#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail}
+#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail}
-Fail immediately with the given message, optionally prefixed.
+Fail immediately, with the given message.
- - -
@@ -1222,13 +749,6 @@ Fail immediately with the given message, optionally prefixed.
- - -
-#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties}
-
-Return any properties that the user has recorded.
-
-
-- - -
-
#### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir}
@@ -1243,20 +763,6 @@ Return any properties that the user has recorded.
- - -
-#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty}
-
-Record an arbitrary property for later use.
-
-##### Args:
-
-
-* <b>`property_name`</b>: str, name of property to record; must be a valid XML
- attribute name
-* <b>`property_value`</b>: value of property; must be valid XML attribute value
-
-
-- - -
-
#### `tf.test.TestCase.run(result=None)` {#TestCase.run}
@@ -1280,18 +786,11 @@ Hook method for setting up class fixture before running tests in the class.
#### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription}
-Format both the test method name and the first line of its docstring.
-
-If no docstring is given, only returns the method name.
-
-This method overrides unittest.TestCase.shortDescription(), which
-only returns the first line of the docstring, obscuring the name
-of the test upon failure.
-
-##### Returns:
-
+Returns a one-line description of the test, or None if no
+description has been provided.
-* <b>`desc`</b>: A short description of a test method.
+The default implementation of this method returns the first line of
+the specified test method's docstring.
- - -
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md
new file mode 100644
index 0000000000..3ffd9260c7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md
@@ -0,0 +1,22 @@
+### `tf.scalar_summary(*args, **kwargs)` {#scalar_summary}
+
+Outputs a `Summary` protocol buffer with scalar values. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
+
+ The input `tags` and `values` must have the same shape. The generated
+ summary has a summary value for each tag-value pair in `tags` and `values`.
+
+ Args:
+ tags: A `string` `Tensor`. Tags for the summaries.
+ values: A real numeric Tensor. Values for the summaries.
+ collections: Optional list of graph collections keys. The new summary op is
+ added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
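+A minimal usage sketch of this deprecated op; the tag and value are illustrative:
+
+```python
+import tensorflow as tf
+
+loss = tf.constant(0.25)
+# `tags` and `values` must have the same shape; here both are scalars.
+summary_op = tf.scalar_summary('loss', loss)
+
+with tf.Session() as sess:
+    serialized = sess.run(summary_op)  # serialized `Summary` protocol buffer
+```
+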
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md
new file mode 100644
index 0000000000..3cfd7103d7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md
@@ -0,0 +1,4 @@
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md
new file mode 100644
index 0000000000..570d7b712c
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md
@@ -0,0 +1,26 @@
+### `tf.histogram_summary(*args, **kwargs)` {#histogram_summary}
+
+Outputs a `Summary` protocol buffer with a histogram. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
+
+ The generated
+ [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+ has one summary value containing a histogram for `values`.
+
+ This op reports an `InvalidArgument` error if any value is not finite.
+
+ Args:
+ tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
+ values: A real numeric `Tensor`. Any shape. Values to use to
+ build the histogram.
+ collections: Optional list of graph collections keys. The new summary op is
+ added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
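+A minimal usage sketch of this deprecated op; the tag and values are illustrative:
+
+```python
+import tensorflow as tf
+
+weights = tf.random_normal([1000])
+summary_op = tf.histogram_summary('weights', weights)
+
+with tf.Session() as sess:
+    serialized = sess.run(summary_op)  # serialized `Summary` protocol buffer
+```
+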
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md
new file mode 100644
index 0000000000..ccb984f5ab
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md
@@ -0,0 +1,27 @@
+### `tf.merge_summary(*args, **kwargs)` {#merge_summary}
+
+Merges summaries. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.merge.
+
+ This op creates a
+ [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+ protocol buffer that contains the union of all the values in the input
+ summaries.
+
+ When the Op is run, it reports an `InvalidArgument` error if multiple values
+ in the summaries to merge use the same tag.
+
+ Args:
+ inputs: A list of `string` `Tensor` objects containing serialized `Summary`
+ protocol buffers.
+ collections: Optional list of graph collections keys. The new summary op is
+ added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer resulting from the merging.
+
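+A minimal usage sketch of this deprecated op, combining two of the summary ops
+above; the tags are illustrative:
+
+```python
+import tensorflow as tf
+
+x = tf.constant(3.0)
+s1 = tf.scalar_summary('x', x)
+s2 = tf.histogram_summary('x_hist', tf.random_normal([10]))
+merged = tf.merge_summary([s1, s2])  # errors at run time if tags collide
+
+with tf.Session() as sess:
+    serialized = sess.run(merged)  # union of both summaries, serialized
+```
+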
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md
new file mode 100644
index 0000000000..24a3b3f10c
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md
@@ -0,0 +1,4 @@
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md
new file mode 100644
index 0000000000..f2d0c042d7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md
@@ -0,0 +1,4 @@
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md
new file mode 100644
index 0000000000..e9bdda200f
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md
@@ -0,0 +1,207 @@
+
+- - -
+
+#### `tf.train.SummaryWriter.__init__(*args, **kwargs)` {#SummaryWriter.__init__}
+
+Creates a `SummaryWriter` and an event file. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.FileWriter. The interface and behavior are the same; this is just a rename.
+
+ This class is deprecated, and should be replaced with tf.summary.FileWriter.
+
+ On construction the summary writer creates a new event file in `logdir`.
+ This event file will contain `Event` protocol buffers constructed when you
+ call one of the following functions: `add_summary()`, `add_session_log()`,
+ `add_event()`, or `add_graph()`.
+
+ If you pass a `Graph` to the constructor it is added to
+ the event file. (This is equivalent to calling `add_graph()` later).
+
+ TensorBoard will pick the graph from the file and display it graphically so
+ you can interactively explore the graph you built. You will usually pass
+ the graph from the session in which you launched it:
+
+ ```python
+ ...create a graph...
+ # Launch the graph in a session.
+ sess = tf.Session()
+ # Create a summary writer, add the 'graph' to the event file.
+ writer = tf.train.SummaryWriter(<some-directory>, sess.graph)
+ ```
+
+ The other arguments to the constructor control the asynchronous writes to
+ the event file:
+
+ * `flush_secs`: How often, in seconds, to flush the added summaries
+ and events to disk.
+ * `max_queue`: Maximum number of summaries or events pending to be
+    written to disk before one of the 'add' calls blocks.
+
+ Args:
+    logdir: A string. Directory where the event file will be written.
+ graph: A `Graph` object, such as `sess.graph`.
+ max_queue: Integer. Size of the queue for pending events and summaries.
+ flush_secs: Number. How often, in seconds, to flush the
+ pending events and summaries to disk.
+ graph_def: DEPRECATED: Use the `graph` argument instead.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_event(event)` {#SummaryWriter.add_event}
+
+Adds an event to the event file.
+
+##### Args:
+
+
+* <b>`event`</b>: An `Event` protocol buffer.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_graph(graph, global_step=None, graph_def=None)` {#SummaryWriter.add_graph}
+
+Adds a `Graph` to the event file.
+
+The graph described by the protocol buffer will be displayed by
+TensorBoard. Most users pass a graph in the constructor instead.
+
+##### Args:
+
+
+* <b>`graph`</b>: A `Graph` object, such as `sess.graph`.
+* <b>`global_step`</b>: Number. Optional global step counter to record with the
+ graph.
+* <b>`graph_def`</b>: DEPRECATED. Use the `graph` parameter instead.
+
+##### Raises:
+
+
+* <b>`ValueError`</b>: If both graph and graph_def are passed to the method.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_meta_graph(meta_graph_def, global_step=None)` {#SummaryWriter.add_meta_graph}
+
+Adds a `MetaGraphDef` to the event file.
+
+The `MetaGraphDef` allows running the given graph via
+`saver.import_meta_graph()`.
+
+##### Args:
+
+
+* <b>`meta_graph_def`</b>: A `MetaGraphDef` object, often as returned by
+ `saver.export_meta_graph()`.
+* <b>`global_step`</b>: Number. Optional global step counter to record with the
+ graph.
+
+##### Raises:
+
+
+* <b>`TypeError`</b>: If `meta_graph_def` is not an instance of `MetaGraphDef`.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_run_metadata(run_metadata, tag, global_step=None)` {#SummaryWriter.add_run_metadata}
+
+Adds metadata information for a single session.run() call.
+
+##### Args:
+
+
+* <b>`run_metadata`</b>: A `RunMetadata` protobuf object.
+* <b>`tag`</b>: The tag name for this metadata.
+* <b>`global_step`</b>: Number. Optional global step counter to record with the
+ StepStats.
+
+##### Raises:
+
+
+* <b>`ValueError`</b>: If the provided tag was already used for this type of event.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_session_log(session_log, global_step=None)` {#SummaryWriter.add_session_log}
+
+Adds a `SessionLog` protocol buffer to the event file.
+
+This method wraps the provided session log in an `Event` protocol buffer
+and adds it to the event file.
+
+##### Args:
+
+
+* <b>`session_log`</b>: A `SessionLog` protocol buffer.
+* <b>`global_step`</b>: Number. Optional global step value to record with the
+ summary.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_summary(summary, global_step=None)` {#SummaryWriter.add_summary}
+
+Adds a `Summary` protocol buffer to the event file.
+
+This method wraps the provided summary in an `Event` protocol buffer
+and adds it to the event file.
+
+You can pass the result of evaluating any summary op, using
+[`Session.run()`](client.md#Session.run) or
+[`Tensor.eval()`](framework.md#Tensor.eval), to this
+function. Alternatively, you can pass a `tf.Summary` protocol
+buffer that you populate with your own data. The latter is
+commonly done to report evaluation results in event files.
+
+##### Args:
+
+
+* <b>`summary`</b>: A `Summary` protocol buffer, optionally serialized as a string.
+* <b>`global_step`</b>: Number. Optional global step value to record with the
+ summary.
+
+
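+For the second case above (reporting results with a hand-built proto), a small
+sketch might look like this; the writer, tag, value, and step are illustrative:
+
+```python
+import tensorflow as tf
+
+writer = tf.train.SummaryWriter('/tmp/eval_logs')
+summary = tf.Summary(value=[
+    tf.Summary.Value(tag='eval/accuracy', simple_value=0.91)])
+writer.add_summary(summary, global_step=1000)
+writer.flush()
+```
+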
+- - -
+
+#### `tf.train.SummaryWriter.close()` {#SummaryWriter.close}
+
+Flushes the event file to disk and closes the file.
+
+Call this method when you do not need the summary writer anymore.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.flush()` {#SummaryWriter.flush}
+
+Flushes the event file to disk.
+
+Call this method to make sure that all pending events have been written to
+disk.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.get_logdir()` {#SummaryWriter.get_logdir}
+
+Returns the directory where the event file will be written.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.reopen()` {#SummaryWriter.reopen}
+
+Reopens the EventFileWriter.
+
+Can be called after `close()` to add more events in the same directory.
+The events will go into a new events file.
+
+Does nothing if the EventFileWriter was not closed.
+
+
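+An end-to-end sketch of the lifecycle described above (create the writer with a
+graph, add summaries per step, flush and close); the log directory, tag, and
+loop are illustrative:
+
+```python
+import tensorflow as tf
+
+x = tf.constant(1.0)
+loss_summary = tf.scalar_summary('loss', x)
+
+sess = tf.Session()
+writer = tf.train.SummaryWriter('/tmp/train_logs', sess.graph)
+
+for step in range(10):
+    serialized = sess.run(loss_summary)
+    writer.add_summary(serialized, global_step=step)
+
+writer.flush()
+writer.close()
+```
+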
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md
new file mode 100644
index 0000000000..c5830ab550
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md
@@ -0,0 +1,37 @@
+### `tf.audio_summary(*args, **kwargs)` {#audio_summary}
+
+Outputs a `Summary` protocol buffer with audio. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.audio. Note that tf.summary.audio uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in.
+
+ The summary has up to `max_outputs` summary values containing audio. The
+ audio is built from `tensor` which must be 3-D with shape `[batch_size,
+ frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+ assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
+ `sample_rate`.
+
+ The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+ build the `tag` of the summary values:
+
+ * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+ * If `max_outputs` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+
+ Args:
+ tag: A scalar `Tensor` of type `string`. Used to build the `tag`
+ of the summary values.
+ tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
+ or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
+ sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
+ signal in hertz.
+ max_outputs: Max number of batch elements to generate audio for.
+ collections: Optional list of ops.GraphKeys. The collections to add the
+ summary to. Defaults to [ops.GraphKeys.SUMMARIES]
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
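+A minimal usage sketch of this deprecated op; the tag, batch size, and sample
+rate are illustrative:
+
+```python
+import tensorflow as tf
+
+# Two batch elements of one second of random "audio" at 16 kHz, in [-1.0, 1.0].
+audio = tf.random_uniform([2, 16000], minval=-1.0, maxval=1.0)
+summary_op = tf.audio_summary('speech', audio, sample_rate=16000, max_outputs=2)
+
+with tf.Session() as sess:
+    serialized = sess.run(summary_op)  # serialized `Summary` protocol buffer
+```
+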
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md
new file mode 100644
index 0000000000..613f4ebd73
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md
@@ -0,0 +1,4 @@
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/summary.md b/tensorflow/g3doc/api_docs/python/summary.md
index 8d344036db..be029f4290 100644
--- a/tensorflow/g3doc/api_docs/python/summary.md
+++ b/tensorflow/g3doc/api_docs/python/summary.md
@@ -487,11 +487,248 @@ metadata is stored in its NodeDef. This method retrieves the description.
- - -
+#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint}
+
+Magic attribute generated for "type_hint" proto field.
+
+
- - -
@@ -500,9 +737,253 @@ Support the pickle protocol.
- - -
+#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata}
+
+Magic attribute generated for "run_metadata" proto field.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag}
+
+Magic attribute generated for "tag" proto field.
+
+
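+A short sketch of how the `tag` and `run_metadata` fields fit together; the tag
+and the empty `RunMetadata` are illustrative:
+
+```python
+import tensorflow as tf
+
+run_metadata = tf.RunMetadata()  # e.g. as populated by Session.run(...)
+tagged = tf.summary.TaggedRunMetadata(
+    tag='step_100', run_metadata=run_metadata.SerializeToString())
+
+restored = tf.RunMetadata()
+restored.ParseFromString(tagged.run_metadata)  # round-trips the serialized bytes
+```
+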
diff --git a/tensorflow/g3doc/api_docs/python/test.md b/tensorflow/g3doc/api_docs/python/test.md
index 4eaf136b80..cd60df01fd 100644
--- a/tensorflow/g3doc/api_docs/python/test.md
+++ b/tensorflow/g3doc/api_docs/python/test.md
@@ -215,125 +215,6 @@ Checks that for all elements of farray1 and farray2
- - -
-#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween}
-
-Asserts that value is between minv and maxv (inclusive).
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails}
-
-Asserts a shell command fails and the error matches a regex in a list.
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: the list of regular expression strings.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds}
-
-Asserts that a shell command succeeds (i.e. exits with code 0).
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: List of regular expression byte strings that match success.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence}
-
-Assert that "container" contains "subsequence" as an exact subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, and without other elements interspersed. For example, [1, 2, 3] is an
-exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be an exact subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder}
-
-Asserts that the strings provided are found in the target in order.
-
-This may be useful for checking HTML output.
-
-##### Args:
-
-
-* <b>`strings`</b>: A list of strings, such as [ 'fox', 'dog' ]
-* <b>`target`</b>: A target string in which to look for the strings, such as
- 'The quick brown fox jumped over the lazy dog'.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence}
-
-Assert that "container" contains "subsequence" as a subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, but possibly with other elements interspersed. For example, [1, 2, 3]
-is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be a subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset}
-
-Checks whether actual iterable is a superset of expected iterable.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual}
-
-An unordered sequence specific comparison.
-
-Equivalent to assertItemsEqual(). This method is a compatibility layer
-for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
-assertCountEqual() calls.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual}
Asserts that the two given devices are the same.
@@ -354,49 +235,10 @@ Checks whether actual is a superset of expected.
- - -
-#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual}
+#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual}
-Raises AssertionError if a and b are not equal dictionaries.
-
-##### Args:
-
-
-* <b>`a`</b>: A dict, the expected value.
-* <b>`b`</b>: A dict, the actual value.
-* <b>`msg`</b>: An optional str, the associated message.
-
-##### Raises:
-
-
-* <b>`AssertionError`</b>: if the dictionaries are not equal.
-
-
-- - -
-
-#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty}
-
-Assert that an object has zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-- - -
-
-#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith}
-
-Assert that actual.endswith(expected_end) is True.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`expected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
- - -
@@ -480,11 +322,10 @@ Included for symmetry with assertIsNone.
- - -
-#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual}
-
-An unordered sequence specific comparison.
+#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual}
-It asserts that actual_seq and expected_seq have the same element counts.
+An unordered sequence specific comparison. It asserts that
+actual_seq and expected_seq have the same element counts.
Equivalent to::
self.assertEqual(Counter(iter(actual_seq)),
@@ -497,30 +338,6 @@ Asserts that each element has the same count in both sequences.
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual}
-
-Asserts that the JSON objects defined in two strings are equal.
-
-A summary of the differences will be included in the failure message
-using assertSameStructure.
-
-##### Args:
-
-
-* <b>`first`</b>: A string containing JSON to decode and compare to second.
-* <b>`second`</b>: A string containing JSON to decode and compare to first.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
- - -
@@ -592,13 +409,6 @@ if not.
- - -
-#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements}
-
-Checks whether actual iterable and expected iterable are disjoint.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)` {#TestCase.assertNotAlmostEqual}
Fail if the two objects are equal as determined by their
@@ -629,33 +439,6 @@ Objects that are equal automatically fail.
- - -
-#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty}
-
-Assert that an object has non-zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith}
-
-Assert that actual.endswith(unexpected_end) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual}
Fail if the two objects are equal as determined by the '!='
@@ -693,20 +476,6 @@ Fail the test if the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith}
-
-Assert that actual.startswith(unexpected_start) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_start`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals}
Asserts that message is same as parsed expected_message_ascii.
@@ -781,38 +550,6 @@ Asserts that the message in a raised exception matches a regexp.
- - -
-#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch}
-
-Asserts that the message in a raised exception equals the given string.
-
-Unlike assertRaisesRegexp, this method takes a literal string, not
-a regular expression.
-
-with self.assertRaisesWithLiteralMatch(ExType, 'message'):
- DoSomething()
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_exception_message`</b>: String message expected in the raised
-    exception. For a raised exception e, expected_exception_message must
- equal str(e).
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra kwargs.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
-  self.failureException if callable_obj does not raise a matching exception.
-
-
-- - -
-
#### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch}
Returns a context manager to enclose code expected to raise an exception.
@@ -837,71 +574,6 @@ predicate search.
- - -
-#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch}
-
-Asserts that the message in a raised exception matches the given regexp.
-
-This is just a wrapper around assertRaisesRegexp. Please use
-assertRaisesRegexp instead of assertRaisesWithRegexpMatch.
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_regexp`</b>: Regexp (re pattern object or string) expected to be
- found in error message.
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra keyword args.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
-  self.failureException if callable_obj does not raise a matching exception.
-
-
-- - -
-
-#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch}
-
-Asserts that at least one regex in regexes matches str.
-
- If possible you should use assertRegexpMatches, which is a simpler
- version of this method. assertRegexpMatches takes a single regular
- expression (a string or re compiled object) instead of a list.
-
- Notes:
- 1. This function uses substring matching, i.e. the matching
- succeeds if *any* substring of the error message matches *any*
- regex in the list. This is more convenient for the user than
- full-string matching.
-
- 2. If regexes is the empty list, the matching will always fail.
-
- 3. Use regexes=[''] for a regex that will always pass.
-
- 4. '.' matches any single character *except* the newline. To
-       match any character, use '(.|\n)'.
-
- 5. '^' matches the beginning of each line, not just the beginning
- of the string. Similarly, '$' matches the end of each line.
-
- 6. An exception will be thrown if regexes contains an invalid
- regex.
-
- Args:
- actual_str: The string we try to match with the items in regexes.
- regexes: The regular expressions we want to match against str.
- See "Notes" above for detailed notes on how this is interpreted.
- message: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches}
Fail the test unless the text matches the regular expression.
@@ -909,79 +581,6 @@ Fail the test unless the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements}
-
-Assert that two sequences have the same elements (in any order).
-
-This method, unlike assertItemsEqual, doesn't care about any
-duplicates in the expected and actual sequences.
-
- >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
- # Doesn't raise an AssertionError
-
-If possible, you should use assertItemsEqual instead of
-assertSameElements.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure}
-
-Asserts that two values contain the same structural content.
-
-The two arguments should be data trees consisting of trees of dicts and
-lists. They will be deeply compared by walking into the contents of dicts
-and lists; other items will be compared using the == operator.
-If the two structures differ in content, the failure message will indicate
-the location within the structures where the first difference is found.
-This may be helpful when comparing large structures.
-
-##### Args:
-
-
-* <b>`a`</b>: The first structure to compare.
-* <b>`b`</b>: The second structure to compare.
-* <b>`aname`</b>: Variable name to use for the first structure in assertion messages.
-* <b>`bname`</b>: Variable name to use for the second structure.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual}
-
-An approximate equality assertion for ordered sequences.
-
-Fail if the two sequences are unequal as determined by their value
-differences rounded to the given number of decimal places (default 7) and
-comparing to zero, or by comparing that the difference between each value
-in the two sequences is more than the given delta.
-
-Note that decimal places (from zero) are usually not the same as significant
-digits (measured from the most significant digit).
-
-If the two sequences compare equal then they will automatically compare
-almost equal.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`places`</b>: The number of decimal places to compare.
-* <b>`msg`</b>: The message to be printed if the test fails.
-* <b>`delta`</b>: The OK difference between compared values.
-
-
-- - -
-
#### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual}
An equality assertion for ordered sequences (like lists and tuples).
@@ -1002,26 +601,6 @@ which can be indexed, has a length, and has an equality operator.
- - -
-#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith}
-
-An equality assertion for the beginning of ordered sequences.
-
-If prefix is an empty sequence, it will raise an error unless whole is also
-an empty sequence.
-
-If prefix is not a sequence, it will raise an error if the first element of
-whole does not match.
-
-##### Args:
-
-
-* <b>`prefix`</b>: A sequence expected at the beginning of the whole parameter.
-* <b>`whole`</b>: The sequence in which to look for prefix.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual}
A set-specific equality assertion.
@@ -1073,51 +652,6 @@ Assert that actual.startswith(expected_start) is True.
- - -
-#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered}
-
-Asserts that total ordering has been implemented correctly.
-
-For example, say you have a class A that compares only on its attribute x.
-Comparators other than __lt__ are omitted for brevity.
-
-class A(object):
- def __init__(self, x, y):
- self.x = x
- self.y = y
-
- def __hash__(self):
- return hash(self.x)
-
- def __lt__(self, other):
- try:
- return self.x < other.x
- except AttributeError:
- return NotImplemented
-
-assertTotallyOrdered will check that instances can be ordered correctly.
-For example,
-
-self.assertTotallyOrdered(
- [None], # None should come before everything else.
- [1], # Integers sort earlier.
- [A(1, 'a')],
- [A(2, 'b')], # 2 is after 1.
- [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
- [A(4, 'z')],
- ['foo']) # Strings sort last.
-
-##### Args:
-
-
-* <b>`*groups`</b>: A list of groups of elements. Each group of elements is a list
- of objects that are equal. The elements in each group must be less than
- the elements in the group after it. For example, these groups are
- totally ordered: [None], [1], [2, 2], [3].
-* <b>`**kwargs`</b>: optional msg keyword argument can be passed.
-
-
-- - -
-
#### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue}
Check that the expression is true.
@@ -1140,13 +674,6 @@ A tuple-specific equality assertion.
- - -
-#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual}
-
-Asserts that urls are equal, ignoring ordering of query params.
-
-
-- - -
-
#### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_}
Check that the expression is true.
@@ -1206,9 +733,9 @@ tearDown.
- - -
-#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail}
+#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail}
-Fail immediately with the given message, optionally prefixed.
+Fail immediately, with the given message.
- - -
@@ -1262,13 +789,6 @@ Fail immediately with the given message, optionally prefixed.
- - -
-#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties}
-
-Return any properties that the user has recorded.
-
-
-- - -
-
#### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir}
@@ -1283,20 +803,6 @@ Return any properties that the user has recorded.
- - -
-#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty}
-
-Record an arbitrary property for later use.
-
-##### Args:
-
-
-* <b>`property_name`</b>: str, name of property to record; must be a valid XML
- attribute name
-* <b>`property_value`</b>: value of property; must be valid XML attribute value
-
-
-- - -
-
#### `tf.test.TestCase.run(result=None)` {#TestCase.run}
@@ -1320,18 +826,11 @@ Hook method for setting up class fixture before running tests in the class.
#### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription}
-Format both the test method name and the first line of its docstring.
-
-If no docstring is given, only returns the method name.
-
-This method overrides unittest.TestCase.shortDescription(), which
-only returns the first line of the docstring, obscuring the name
-of the test upon failure.
-
-##### Returns:
-
+Returns a one-line description of the test, or None if no
+description has been provided.
-* <b>`desc`</b>: A short description of a test method.
+The default implementation of this method returns the first line of
+the specified test method's docstring.
- - -
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index 0d98427edd..430bcc38da 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -61,58 +61,54 @@ $ sudo easy_install pip
$ sudo easy_install --upgrade six
```
-We have also uploaded the CPU version of the binaries to Pypi, so you can
-simply install on Linux, Mac or Windows with:
+We have also uploaded the binaries to PyPI, so you can simply install
+TensorFlow on Linux, Mac or Windows with pip. Note that you will need pip
+version 8.1 or later for the following commands to work on Linux:
```bash
$ pip install tensorflow
```
-Note that you will need pip version 8.1 or later for the above command to work on Linux.
+To install the version with GPU support, use:
-For Windows users, you can also install the GPU version of the binary with:
```bash
$ pip install tensorflow-gpu
```
-Unfortunately, this command is not yet available for Linux or Mac GPU binaries
-due to their sizes exceeding the Pypi limit.
-If the above commands do not work on your system or you want to install the GPU version
-of the binary on Linux or Mac, you can follow these instructions:
+If the above commands do not work on your system, you can follow these instructions:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py2-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py2-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py3-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py3-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py3-none-any.whl
```
Install TensorFlow:
@@ -154,14 +150,14 @@ Both distributions include pip. To install the CPU-only version of
TensorFlow, enter the following command at a command prompt:
```bat
-C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-0.12.0rc1-cp35-cp35m-win_amd64.whl
+C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-0.12.0-cp35-cp35m-win_amd64.whl
```
To install the GPU version of TensorFlow, enter the following command
at a command prompt:
```bat
-C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-0.12.0rc1-cp35-cp35m-win_amd64.whl
+C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-0.12.0-cp35-cp35m-win_amd64.whl
```
You can now [test your installation](#test-the-tensorflow-installation).
@@ -216,37 +212,37 @@ Now, install TensorFlow just as you would for a regular Pip installation. First
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py3-none-any.whl
```
Finally install TensorFlow:
@@ -368,37 +364,37 @@ select the correct binary to install:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0rc1-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0rc1-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0rc1-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py3-none-any.whl
```
Finally install TensorFlow:
@@ -466,7 +462,7 @@ code.
code.
We also have tags with `latest` replaced by a released version (e.g.,
-`0.12.0-rc1-gpu`).
+`0.12.0-gpu`).
With Docker the installation is as follows:
@@ -651,6 +647,12 @@ sudo cp -P cuda/lib64/libcudnn* /usr/local/cuda/lib64/
sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
```
+##### Install other dependencies
+
+```bash
+$ sudo apt-get install libcupti-dev
+```
+
#### Optional: Install OpenCL (Experimental, Linux only)
In order to build or run TensorFlow with OpenCL support, both OpenCL (>= 1.2)
@@ -755,7 +757,7 @@ export PATH="$CUDA_HOME/bin:$PATH"
```
Finally, you will also want to install the [CUDA Deep Neural
-Network](https://developer.nvidia.com/cudnn) (cuDNN v5) library which currently
+Network](https://developer.nvidia.com/cudnn) (cuDNN v5.1) library which currently
requires an [Accelerated Computing Developer
Program](https://developer.nvidia.com/accelerated-computing-developer) account.
Once you have it downloaded locally, you can unzip and move the header and
@@ -868,7 +870,7 @@ $ bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_pack
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
# The name of the .whl file will depend on your platform.
-$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.12.0rc1-py2-none-any.whl
+$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.12.0-py2-none-any.whl
```
## Optimizing CPU performance
@@ -1112,6 +1114,17 @@ Step #1 might already solve the problem, however if it still persists, execute s
This issue occurs with new Anaconda installations when `pip` tries to remove `easy-install.pth`.
This file is not included in Anaconda packages, which causes the `pip` installation to fail.
+#### Cupti_wrapper.cc: Could not find cuptiActivityRegisterCallbacksin libcupti DSO
+
+If, when running a TensorFlow Python script, you encounter the following error:
+```
+c:\tf_jenkins\home\workspace\nightly-win\device\gpu\os\windows\tensorflow\core\platform\default\gpu\cupti_wrapper.cc:59] Check failed: ::tensorflow::Status::OK() == (::tensorflow::Env::Default()->GetSymbolFromLibrary( GetDsoHandle(), kName, &f )) (OK vs. Not found: cuptiActivityRegisterCallbacks not found)could not find cuptiActivityRegisterCallbacksin libcupti DSO
+```
+
+Add `<path-to-cuda-folder>\NVIDIA GPU Computing Toolkit\CUDA\v8.0\extras\CUPTI\libx64` to your `PATH`.
+
+This issue occurs because, with CUDA 8.0, the directory containing `cupti64_80.dll` is not on `PATH` by default.
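+
+As a minimal sketch (this only affects the current command prompt session;
+substitute your actual CUDA install folder for the placeholder), you can
+append the directory to `PATH` like this:
+
+```bat
+C:\> set PATH=%PATH%;<path-to-cuda-folder>\NVIDIA GPU Computing Toolkit\CUDA\v8.0\extras\CUPTI\libx64
+```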
+
### Linux issues
diff --git a/tensorflow/g3doc/how_tos/embedding_viz/index.md b/tensorflow/g3doc/how_tos/embedding_viz/index.md
index 4d84280c31..529efb247f 100644
--- a/tensorflow/g3doc/how_tos/embedding_viz/index.md
+++ b/tensorflow/g3doc/how_tos/embedding_viz/index.md
@@ -166,7 +166,7 @@ to find it:
```python
embedding.sprite.image_path = PATH_TO_SPRITE_IMAGE
# Specify the width and height of a single thumbnail.
-embedding.single_image_dim.extend([w, h])
+embedding.sprite.single_image_dim.extend([w, h])
```
## Interaction
diff --git a/tensorflow/g3doc/how_tos/hadoop/index.md b/tensorflow/g3doc/how_tos/hadoop/index.md
index a2dd67babd..2f01843604 100644
--- a/tensorflow/g3doc/how_tos/hadoop/index.md
+++ b/tensorflow/g3doc/how_tos/hadoop/index.md
@@ -28,26 +28,38 @@ be set:
* **HADOOP_HDFS_HOME**: The location of your HDFS installation. You can also
set this environment variable by running:
-```shell
-source ${HADOOP_HOME}/libexec/hadoop-config.sh
-```
+ ```shell
+ source ${HADOOP_HOME}/libexec/hadoop-config.sh
+ ```
* **LD_LIBRARY_PATH**: To include the path to libjvm.so, and optionally the path
to libhdfs.so if your Hadoop distribution does not install libhdfs.so in
`$HADOOP_HDFS_HOME/lib/native`. On Linux:
-```shell
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${JAVA_HOME}/jre/lib/amd64/server
-```
+ ```shell
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${JAVA_HOME}/jre/lib/amd64/server
+ ```
* **CLASSPATH**: The Hadoop jars must be added prior to running your
TensorFlow program. The CLASSPATH set by
`${HADOOP_HOME}/libexec/hadoop-config.sh` is insufficient. Globs must be
expanded as described in the libhdfs documentation:
-```shell
-CLASSPATH=$($HADOOP_HDFS_HOME}/bin/hadoop classpath --glob) python your_script.py
-```
+ ```shell
+  CLASSPATH=$(${HADOOP_HDFS_HOME}/bin/hadoop classpath --glob) python your_script.py
+ ```
+  For older versions of Hadoop/libhdfs (older than 2.6.0), you have to expand the
+ classpath wildcard manually. For more details, see
+ [HADOOP-10903](https://issues.apache.org/jira/browse/HADOOP-10903).
+
+If the Hadoop cluster is in secure mode, the following environment variable must
+be set:
+
+* **KERB_TICKET_CACHE_PATH**: The path of the Kerberos ticket cache file. For example:
+
+ ```shell
+ export KERB_TICKET_CACHE_PATH=/tmp/krb5cc_10002
+ ```
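+
+Putting the pieces above together, a minimal sketch of a launch command might
+look as follows (the paths and `your_script.py` are placeholders; adjust them
+for your installation):
+
+```shell
+source ${HADOOP_HOME}/libexec/hadoop-config.sh
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${JAVA_HOME}/jre/lib/amd64/server
+CLASSPATH=$(${HADOOP_HDFS_HOME}/bin/hadoop classpath --glob) python your_script.py
+```
+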
If you are running [Distributed TensorFlow](../distributed/index.md), then all
workers must have the environment variables set and Hadoop installed.
diff --git a/tensorflow/g3doc/resources/index.md b/tensorflow/g3doc/resources/index.md
index 8c5ecd1cea..f4f78cc857 100644
--- a/tensorflow/g3doc/resources/index.md
+++ b/tensorflow/g3doc/resources/index.md
@@ -43,6 +43,7 @@ something amazing with TensorFlow, we'd like to hear about it!
The TensorFlow community has created many great projects around TensorFlow, including:
+* [Machine Learning with TensorFlow (Book & Code)](http://tensorflowbook.com)
* [@jtoy's awesome "Awesome TensorFlow" list of awesome things](https://github.com/jtoy/awesome-tensorflow)
* [TensorFlow tutorials](https://github.com/pkmital/tensorflow_tutorials)
* [Scikit Flow - Simplified Interface for TensorFlow](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/learn/python/learn)
diff --git a/tensorflow/g3doc/tutorials/image_recognition/index.md b/tensorflow/g3doc/tutorials/image_recognition/index.md
index 1d20d7ddb3..d4fa5ba780 100644
--- a/tensorflow/g3doc/tutorials/image_recognition/index.md
+++ b/tensorflow/g3doc/tutorials/image_recognition/index.md
@@ -42,8 +42,8 @@ For example, here are the results from [AlexNet] classifying some images:
To compare models, we examine how often the model fails to predict the
correct answer as one of their top 5 guesses -- termed "top-5 error rate".
[AlexNet] achieved a top-5 error rate of 15.3% on the 2012
-validation data set; [BN-Inception-v2] achieved 6.66%;
-[Inception-v3] reaches 3.46%.
+validation data set; [Inception (GoogLeNet)] achieved 6.67%;
+[BN-Inception-v2] achieved 4.9%; [Inception-v3] reaches 3.46%.
> How well do humans do on ImageNet Challenge? There's a [blog post] by
Andrej Karpathy who attempted to measure his own performance. He reached
diff --git a/tensorflow/g3doc/tutorials/recurrent/index.md b/tensorflow/g3doc/tutorials/recurrent/index.md
index 687638068c..993b1c345b 100644
--- a/tensorflow/g3doc/tutorials/recurrent/index.md
+++ b/tensorflow/g3doc/tutorials/recurrent/index.md
@@ -11,7 +11,7 @@ In this tutorial we will show how to train a recurrent neural network on
a challenging task of language modeling. The goal of the problem is to fit a
probabilistic model which assigns probabilities to sentences. It does so by
predicting next words in a text given a history of previous words. For this
-purpose we will use the [Penn Tree Bank](http://www.cis.upenn.edu/~treebank/)
+purpose we will use the [Penn Tree Bank](https://catalog.ldc.upenn.edu/ldc99t42)
(PTB) dataset, which is a popular benchmark for measuring quality of these
models, whilst being small and relatively fast to train.
diff --git a/tensorflow/java/src/main/java/org/tensorflow/Session.java b/tensorflow/java/src/main/java/org/tensorflow/Session.java
index ca9f96b1a8..88d7be385b 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/Session.java
+++ b/tensorflow/java/src/main/java/org/tensorflow/Session.java
@@ -225,7 +225,7 @@ public final class Session implements AutoCloseable {
public Reference() {
synchronized (nativeHandleLock) {
if (nativeHandle == 0) {
- throw new IllegalStateException("run() called after the Session was close()d");
+ throw new IllegalStateException("run() cannot be called on the Session after close()");
}
++numActiveRuns;
}
diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
index 54b26057e2..ec681d613f 100644
--- a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
+++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
@@ -255,7 +255,7 @@ class ControlFlowTest(test.TestCase):
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
- with ops.device("/gpu:0"):
+ with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
@@ -289,7 +289,7 @@ class ControlFlowTest(test.TestCase):
add_i = math_ops.add(switch_i[1], enter_one)
- with ops.device("/gpu:0"):
+ with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
@@ -567,7 +567,7 @@ class ControlFlowTest(test.TestCase):
def testCondRecvIdentity(self):
# Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
- with ops.device("/gpu:0"):
+ with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
@@ -1341,12 +1341,15 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(45, rx.eval())
def _testWhileGrad_ColocateGradients(self, colocate):
+ gpu_dev_name = test.gpu_device_name() if test.is_gpu_available() else "/gpu:0"
+ gpu_short_name = gpu_dev_name.split('/')[-1]
+
with self.test_session(graph=ops.Graph()) as sess:
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
- with ops.device("/gpu:0"):
+ with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
@@ -1360,12 +1363,12 @@ class ControlFlowTest(test.TestCase):
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
# Only forward graph contain gpu in Square device
- self.assertTrue("gpu:0" in dev)
+ self.assertTrue(gpu_short_name in dev)
elif colocate and "Square" in name:
# Forward and backward graphs contain gpu in Square/Square_grad devices
- self.assertTrue("gpu:0" in dev)
+ self.assertTrue(gpu_short_name in dev)
else:
- self.assertFalse("gpu:0" in dev)
+ self.assertFalse(gpu_short_name in dev)
self.assertAllClose(1024.0, sess.run(r))
def testWhileGrad_ColocateGradients(self):
@@ -2566,7 +2569,7 @@ class AssertTest(test.TestCase):
def testGuardedAssertDoesNotCopyWhenTrue(self):
with self.test_session(use_gpu=True) as sess:
- with ops.device("/gpu:0"):
+ with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py
index 35e0a39840..65fa69bb65 100644
--- a/tensorflow/python/kernel_tests/conv_ops_test.py
+++ b/tensorflow/python/kernel_tests/conv_ops_test.py
@@ -181,8 +181,8 @@ def GetTestConfigs():
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
- if test.is_gpu_available():
- # "NCHW" format is not currently supported on CPU.
+ if test.is_gpu_available(cuda_only=True):
+ # "NCHW" format is only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs
diff --git a/tensorflow/python/kernel_tests/decode_image_op_test.py b/tensorflow/python/kernel_tests/decode_image_op_test.py
index 52f48c3368..6d19e1963a 100644
--- a/tensorflow/python/kernel_tests/decode_image_op_test.py
+++ b/tensorflow/python/kernel_tests/decode_image_op_test.py
@@ -28,7 +28,7 @@ from tensorflow.python.ops import io_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
-prefix_path = "tensorflow/core/lib"
+prefix_path = os.path.join("tensorflow", "core", "lib")
class DecodeImageOpTest(test.TestCase):
diff --git a/tensorflow/python/kernel_tests/pooling_ops_test.py b/tensorflow/python/kernel_tests/pooling_ops_test.py
index eb51d8023e..7f5d43e33d 100644
--- a/tensorflow/python/kernel_tests/pooling_ops_test.py
+++ b/tensorflow/python/kernel_tests/pooling_ops_test.py
@@ -598,8 +598,8 @@ class PoolingTest(test.TestCase):
self.assertAllClose(cpu_val, gpu_val)
def testMaxPoolingWithArgmax(self):
- # MaxPoolWithArgMax is implemented only on GPU.
- if not test.is_gpu_available():
+ # MaxPoolWithArgMax is implemented only on CUDA.
+ if not test.is_gpu_available(cuda_only=True):
return
tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
with self.test_session(use_gpu=True) as sess:
@@ -617,8 +617,8 @@ class PoolingTest(test.TestCase):
self.assertAllEqual(argmax.ravel(), [0, 1, 3, 5])
def testMaxPoolingGradWithArgmax(self):
- # MaxPoolWithArgMax is implemented only on GPU.
- if not test.is_gpu_available():
+ # MaxPoolWithArgMax is implemented only on CUDA.
+ if not test.is_gpu_available(cuda_only=True):
return
orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
tensor_input = [11.0, 12.0, 13.0, 14.0]
@@ -1250,8 +1250,8 @@ class PoolingTest(test.TestCase):
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
def Test(self):
- # MaxPoolWithArgMax is implemented only on GPU.
- if not test.is_gpu_available():
+ # MaxPoolWithArgMax is implemented only on CUDA.
+ if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
@@ -1261,8 +1261,8 @@ def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
def Test(self):
- # MaxPoolWithArgMax is implemented only on GPU.
- if not test.is_gpu_available():
+ # MaxPoolWithArgMax is implemented only on CUDA.
+ if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingBk(input_size, output_size, filter_size, strides,
padding)
diff --git a/tensorflow/python/kernel_tests/session_ops_test.py b/tensorflow/python/kernel_tests/session_ops_test.py
index 5ce9c76818..25d60c5259 100644
--- a/tensorflow/python/kernel_tests/session_ops_test.py
+++ b/tensorflow/python/kernel_tests/session_ops_test.py
@@ -138,7 +138,7 @@ class SessionOpsTest(test.TestCase):
self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
# Feed another tensor handle.
- with ops.device("/gpu:0"):
+ with ops.device(test.gpu_device_name()):
a = constant_op.constant(10)
h = session_ops.get_session_handle(a)
h = sess.run(h)
@@ -169,7 +169,7 @@ class SessionOpsTest(test.TestCase):
def testMultiDevices(self):
with self.test_session() as sess:
- with ops.device("/gpu:0"):
+ with ops.device(test.gpu_device_name()):
a = constant_op.constant(1.0)
a_handle = sess.run(session_ops.get_session_handle(a))
with ops.device("/cpu:0"):
@@ -194,7 +194,7 @@ class SessionOpsTest(test.TestCase):
x_handle = sess.run(session_ops.get_session_handle(one))
# addition lives on GPU
- with ops.device("/gpu:0"):
+ with ops.device(test.gpu_device_name()):
add_h1, add_t1 = session_ops.get_session_tensor(one_handle.handle,
dtypes.float32)
add_h2, add_t2 = session_ops.get_session_tensor(x_handle.handle,
diff --git a/tensorflow/python/kernel_tests/split_op_test.py b/tensorflow/python/kernel_tests/split_op_test.py
index 5f8a3f3ab2..8ea2d7ecda 100644
--- a/tensorflow/python/kernel_tests/split_op_test.py
+++ b/tensorflow/python/kernel_tests/split_op_test.py
@@ -128,6 +128,14 @@ class SplitOpTest(test.TestCase):
self.assertAllEqual(result[:, 0:1], inp_grads[0])
self.assertAllEqual(result[:, 1:4], inp_grads[1])
+ def testOutputShape(self):
+ with self.test_session(use_gpu=False):
+ tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
+ size_splits = [3, 7, 2]
+ outputs = array_ops.split(tensor, size_splits, 1)
+ for i, output in enumerate(outputs):
+ self.assertEqual(output.get_shape().as_list(), [None, size_splits[i]])
+
def _compare(self, x, dim, num, use_gpu):
np_ans = np.split(x, num, dim)
with self.test_session(use_gpu=use_gpu) as sess:
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py
index dcfe8f342a..73ae4c1096 100644
--- a/tensorflow/python/ops/array_ops.py
+++ b/tensorflow/python/ops/array_ops.py
@@ -1176,8 +1176,8 @@ def boolean_mask(tensor, mask, name="boolean_mask"):
name: A name for this operation (optional).
Returns:
- Tensor populated by entries in `tensor` corresponding to `True` values in
- `mask`.
+ (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
+ to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
diff --git a/tensorflow/python/ops/losses/losses.py b/tensorflow/python/ops/losses/losses.py
index a2f5d1d375..9da1def981 100644
--- a/tensorflow/python/ops/losses/losses.py
+++ b/tensorflow/python/ops/losses/losses.py
@@ -603,7 +603,7 @@ def softmax_cross_entropy(
loss_collection: collection to which the loss will be added.
Returns:
- A scalar `Tensor` representing the loss value.
+ A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
@@ -652,7 +652,7 @@ def sparse_softmax_cross_entropy(labels, logits, weights=1.0, scope=None,
loss_collection: collection to which the loss will be added.
Returns:
- A scalar `Tensor` representing the loss value.
+ A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shapes of logits, labels, and weight are incompatible, or
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index 20b9e438a1..0dafb5d78b 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -1673,13 +1673,15 @@ def matmul(a,
# 3-D tensor `a`
- a = tf.constant(np.arange(1,13), shape=[2, 2, 3]) => [[[ 1. 2. 3.]
+ a = tf.constant(np.arange(1, 13, dtype=np.int32),
+ shape=[2, 2, 3]) => [[[ 1. 2. 3.]
[ 4. 5. 6.]],
[[ 7. 8. 9.]
[10. 11. 12.]]]
# 3-D tensor `b`
- b = tf.constant(np.arange(13,25), shape=[2, 3, 2]) => [[[13. 14.]
+ b = tf.constant(np.arange(13, 25, dtype=np.int32),
+ shape=[2, 3, 2]) => [[[13. 14.]
[15. 16.]
[17. 18.]],
[[19. 20.]
diff --git a/tensorflow/python/ops/nn.py b/tensorflow/python/ops/nn.py
index 692b03f083..00626eba03 100644
--- a/tensorflow/python/ops/nn.py
+++ b/tensorflow/python/ops/nn.py
@@ -68,12 +68,15 @@ For the `'SAME'` padding, the output height and width are computed as:
and the padding on the top and left are computed as:
- pad_along_height = ((out_height - 1) * strides[1] +
- filter_height - in_height)
- pad_along_width = ((out_width - 1) * strides[2] +
- filter_width - in_width)
- pad_top = pad_along_height / 2
- pad_left = pad_along_width / 2
+ pad_along_height = max((out_height - 1) * strides[1] +
+ filter_height - in_height, 0)
+ pad_along_width = max((out_width - 1) * strides[2] +
+ filter_width - in_width, 0)
+ pad_top = pad_along_height // 2
+ pad_bottom = pad_along_height - pad_top
+ pad_left = pad_along_width // 2
+ pad_right = pad_along_width - pad_left
+
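+As a worked example (hypothetical numbers, not taken from the text above):
+with `in_height = 5`, `filter_height = 3` and `strides[1] = 2`, the output
+height is `ceil(5 / 2) = 3`, so
+
+    pad_along_height = max((3 - 1) * 2 + 3 - 5, 0) = 2
+    pad_top = 2 // 2 = 1
+    pad_bottom = 2 - 1 = 1
+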
Note that the division by 2 means that there might be cases when the padding on
both sides (top vs bottom, right vs left) are off by one. In this case, the
diff --git a/tensorflow/python/ops/rnn_cell_impl.py b/tensorflow/python/ops/rnn_cell_impl.py
index 1075de9aee..e61705751e 100644
--- a/tensorflow/python/ops/rnn_cell_impl.py
+++ b/tensorflow/python/ops/rnn_cell_impl.py
@@ -138,4 +138,3 @@ class _RNNCell(object):
zeros.set_shape(_state_size_with_prefix(state_size, prefix=[None]))
return zeros
-
diff --git a/tensorflow/python/platform/test.py b/tensorflow/python/platform/test.py
index 1bd91dba09..b6b06f9eb9 100644
--- a/tensorflow/python/platform/test.py
+++ b/tensorflow/python/platform/test.py
@@ -48,6 +48,7 @@ methods. We will document these methods soon.
@@get_temp_dir
@@is_built_with_cuda
@@is_gpu_available
+@@gpu_device_name
## Gradient checking
@@ -141,7 +142,7 @@ def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in _device_lib.list_local_devices():
if x.device_type == 'GPU' or x.device_type == 'SYCL':
- return x.name()
+ return x.name
return ''
diff --git a/tensorflow/python/training/saver_test.py b/tensorflow/python/training/saver_test.py
index 0afc1ba70f..0a14af04de 100644
--- a/tensorflow/python/training/saver_test.py
+++ b/tensorflow/python/training/saver_test.py
@@ -413,20 +413,17 @@ class SaverTest(test.TestCase):
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
- with sess.graph.device("/gpu:0"):
+ with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.Variable(123.45)
save = saver_module.Saver({"v0": v0_1})
variables.global_variables_initializer().run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
- with sess.graph.device("/gpu:0"):
+ with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.Variable(543.21)
save = saver_module.Saver({"v0": v0_2})
variables.global_variables_initializer().run()
- self.assertAllClose(543.21, v0_2.eval())
- save.restore(sess, save_path)
- self.assertAllClose(123.45, v0_2.eval())
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
diff --git a/tensorflow/tools/ci_build/builds/test_tutorials.sh b/tensorflow/tools/ci_build/builds/test_tutorials.sh
index dc89919d73..67e5af5564 100755
--- a/tensorflow/tools/ci_build/builds/test_tutorials.sh
+++ b/tensorflow/tools/ci_build/builds/test_tutorials.sh
@@ -230,7 +230,9 @@ test_ptb_word_lm() {
PTB_DATA_URL="http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz"
DATA_DIR="${TUT_TEST_DATA_DIR}/ptb"
- if [[ ! -d "${DATA_DIR}/simple-examples/data" ]]; then
+ if [[ ! -f "${DATA_DIR}/simple-examples/data/ptb.train.txt" ]] || \
+ [[ ! -f "${DATA_DIR}/simple-examples/data/ptb.valid.txt" ]] || \
+ [[ ! -f "${DATA_DIR}/simple-examples/data/ptb.test.txt" ]]; then
# Download and extract data
echo "Downloading and extracting PTB data from \"${PTB_DATA_URL}\" to "\
"${DATA_DIR}"
diff --git a/tensorflow/tools/ci_build/protobuf/protobuf_optimized_pip.sh b/tensorflow/tools/ci_build/protobuf/protobuf_optimized_pip.sh
index 48f3dcdf7a..078aed7154 100755
--- a/tensorflow/tools/ci_build/protobuf/protobuf_optimized_pip.sh
+++ b/tensorflow/tools/ci_build/protobuf/protobuf_optimized_pip.sh
@@ -15,6 +15,7 @@
# ==============================================================================
PROTOBUF_VERSION="3.1.0"
+PYTHON_BIN=${PYTHON_BIN:-python}
DIR=${PWD}/protobuf
set -ex
@@ -29,4 +30,4 @@ CXXFLAGS="-fPIC -g -O2" ./configure
make -j8
export PROTOC=$DIR/src/protoc
cd python
-python setup.py bdist_wheel --cpp_implementation --compile_static_extension
+$PYTHON_BIN setup.py bdist_wheel --cpp_implementation --compile_static_extension
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index e6cfd4a9d7..709b7c9c08 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -29,7 +29,7 @@ from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
-_VERSION = '0.12.0-rc1'
+_VERSION = '0.12.0'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index c1ac7e1ac3..0d80f1824a 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -17,11 +17,11 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
native.new_http_archive(
name = "eigen_archive",
urls = [
- "https://bitbucket.org/eigen/eigen/get/c362228c7087.tar.gz",
- "http://bazel-mirror.storage.googleapis.com/bitbucket.org/eigen/eigen/get/c362228c7087.tar.gz",
+ "http://bazel-mirror.storage.googleapis.com/bitbucket.org/eigen/eigen/get/60578b474802.tar.gz",
+ "https://bitbucket.org/eigen/eigen/get/60578b474802.tar.gz",
],
- sha256 = "ea091e0a1d78b0c6714f7109a6a80af30c4745f3ff8c398675cf42271acc0c77",
- strip_prefix = "eigen-eigen-c362228c7087",
+ sha256 = "7527cda827aff351981ebd910012e16be4d899c28a9ae7f143ae60e7f3f7b83d",
+ strip_prefix = "eigen-eigen-60578b474802",
build_file = str(Label("//:eigen.BUILD")),
)
diff --git a/third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint b/third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint
index 9d6b9c3f01..8e55a1f3e8 100644
--- a/third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint
+++ b/third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint
@@ -31,8 +31,15 @@
#include "src/FixedPoint/FixedPointTypes.h"
// Use optimized implementations whenever available
-#ifdef EIGEN_VECTORIZE_AVX2
+#ifdef EIGEN_VECTORIZE_AVX512
+#include "src/Tensor/TensorContractionThreadPool.h"
+#include "src/FixedPoint/PacketMathAVX512.h"
+#include "src/FixedPoint/TypeCastingAVX512.h"
+
+#elif defined EIGEN_VECTORIZE_AVX2
#define EIGEN_USE_OPTIMIZED_INT8_UINT8_MAT_MAT_PRODUCT
+#define EIGEN_USE_OPTIMIZED_INT16_INT16_MAT_MAT_PRODUCT
+#include "src/Tensor/TensorContractionThreadPool.h"
#include "src/FixedPoint/PacketMathAVX2.h"
#include "src/FixedPoint/MatMatProductAVX2.h"
#include "src/FixedPoint/TypeCastingAVX2.h"
diff --git a/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/PacketMathAVX2.h b/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/PacketMathAVX2.h
index e71c2d8aea..98deb1742e 100644
--- a/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/PacketMathAVX2.h
+++ b/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/PacketMathAVX2.h
@@ -46,6 +46,7 @@ typedef struct Packet4q32i {
Packet4q32i(__m128i val) : val(val) {}
} Packet4q32i;
+#ifndef EIGEN_VECTORIZE_AVX512
template <>
struct packet_traits<QInt8> : default_packet_traits {
typedef Packet32q8i type;
@@ -112,6 +113,7 @@ struct packet_traits<QInt32> : default_packet_traits {
HasSetLinear = 0
};
};
+#endif
template <>
struct unpacket_traits<Packet32q8i> {
diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl
index 1db24435e9..02b8796f33 100644
--- a/third_party/gpus/cuda_configure.bzl
+++ b/third_party/gpus/cuda_configure.bzl
@@ -192,8 +192,8 @@ def _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value):
environ_version = repository_ctx.os.environ[_TF_CUDA_VERSION].strip()
if environ_version and version != environ_version:
auto_configure_fail(
- "CUDA version detected from nvcc (%s) does not match " +
- "TF_CUDA_VERSION (%s)" % (version, environ_version))
+ ("CUDA version detected from nvcc (%s) does not match " +
+ "TF_CUDA_VERSION (%s)") % (version, environ_version))
if cpu_value == "Windows":
version = "64_" + version.replace(".", "")
diff --git a/third_party/sycl/crosstool/BUILD.tpl b/third_party/sycl/crosstool/BUILD.tpl
index f539a376c8..cd8df93cd6 100755
--- a/third_party/sycl/crosstool/BUILD.tpl
+++ b/third_party/sycl/crosstool/BUILD.tpl
@@ -20,7 +20,7 @@ cc_toolchain(
objcopy_files = ":empty",
static_runtime_libs = [":empty"],
strip_files = ":empty",
- supports_param_files = 0,
+ supports_param_files = 1,
)
filegroup(
diff --git a/third_party/sycl/crosstool/CROSSTOOL.tpl b/third_party/sycl/crosstool/CROSSTOOL.tpl
index d767b8ca4a..19b6f3ae32 100755
--- a/third_party/sycl/crosstool/CROSSTOOL.tpl
+++ b/third_party/sycl/crosstool/CROSSTOOL.tpl
@@ -34,6 +34,7 @@ toolchain {
# Use "-std=c++11" for nvcc. For consistency, force both the host compiler
# and the device compiler to use "-std=c++11".
cxx_flag: "-std=c++11"
+ linker_flag: "-Wl,-no-as-needed"
linker_flag: "-lstdc++"
linker_flag: "-B/usr/bin/"
@@ -100,4 +101,3 @@ toolchain {
compiler_flag: "-DNDEBUG"
}
}
-
diff --git a/third_party/sycl/crosstool/computecpp.tpl b/third_party/sycl/crosstool/computecpp.tpl
index e90d51bf87..532d7779f9 100755
--- a/third_party/sycl/crosstool/computecpp.tpl
+++ b/third_party/sycl/crosstool/computecpp.tpl
@@ -45,7 +45,7 @@ def main():
# strip asan for the device
computecpp_device_compiler_flags = [flag for flag in compiler_flags if not flag.startswith(('-fsanitize'))]
computecpp_device_compiler_flags = ['-sycl-compress-name', '-DTENSORFLOW_USE_SYCL', '-Wno-unused-variable', '-I', COMPUTECPP_INCLUDE, '-isystem',
- COMPUTECPP_INCLUDE, '-std=c++11', '-sycl', '-emit-llvm', '-no-serial-memop'] + computecpp_device_compiler_flags
+ COMPUTECPP_INCLUDE, '-std=c++11', '-sycl', '-emit-llvm', '-no-serial-memop', '-Xclang', '-cl-denorms-are-zero', '-Xclang', '-cl-fp32-correctly-rounded-divide-sqrt'] + computecpp_device_compiler_flags
x = subprocess.call([COMPUTECPP_DRIVER] + computecpp_device_compiler_flags )
if(x == 0):