-rw-r--r--  README.md | 6
-rw-r--r--  RELEASE.md | 30
-rwxr-xr-x  configure | 13
-rw-r--r--  tensorflow/contrib/framework/python/framework/tensor_util.py | 16
-rw-r--r--  tensorflow/contrib/ios_examples/simple/RunModelViewController.mm | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/io/graph_io_test.py | 8
-rw-r--r--  tensorflow/contrib/makefile/Makefile | 2
-rwxr-xr-x  tensorflow/contrib/makefile/build_all_ios.sh | 12
-rwxr-xr-x  tensorflow/contrib/makefile/compile_ios_protobuf.sh | 21
-rwxr-xr-x  tensorflow/contrib/makefile/compile_ios_tensorflow.sh | 20
-rw-r--r--  tensorflow/contrib/makefile/tf_cc_files.txt | 2
-rw-r--r--  tensorflow/contrib/session_bundle/example/BUILD | 30
-rw-r--r--  tensorflow/core/framework/op_kernel.cc | 7
-rw-r--r--  tensorflow/core/framework/op_kernel.h | 4
-rw-r--r--  tensorflow/core/public/version.h | 2
-rwxr-xr-x  tensorflow/examples/skflow/examples_test.sh | 2
-rw-r--r--  tensorflow/examples/skflow/iris_run_config.py | 2
-rw-r--r--  tensorflow/examples/skflow/text_classification_character_cnn.py | 2
-rw-r--r--  tensorflow/examples/skflow/text_classification_cnn.py | 2
-rw-r--r--  tensorflow/examples/udacity/2_fullyconnected.ipynb | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/contrib.framework.md | 15
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.ctc_loss.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.pad_to_bounding_box.md | 3
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.resize_image_with_crop_or_pad.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.crop_to_bounding_box.md | 3
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.framework.is_tensor.md | 15
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.tanh.md | 8
-rw-r--r--  tensorflow/g3doc/api_docs/python/image.md | 8
-rw-r--r--  tensorflow/g3doc/api_docs/python/nn.md | 10
-rw-r--r--  tensorflow/g3doc/get_started/os_setup.md | 180
-rw-r--r--  tensorflow/g3doc/resources/roadmap.md | 14
-rw-r--r--  tensorflow/models/embedding/word2vec_test.py | 4
-rw-r--r--  tensorflow/python/kernel_tests/cwise_ops_test.py | 6
-rw-r--r--  tensorflow/python/kernel_tests/rnn_test.py | 109
-rw-r--r--  tensorflow/python/ops/ctc_ops.py | 2
-rw-r--r--  tensorflow/python/ops/data_flow_ops.py | 2
-rw-r--r--  tensorflow/python/ops/image_ops.py | 236
-rw-r--r--  tensorflow/python/ops/image_ops_test.py | 629
-rw-r--r--  tensorflow/python/ops/math_ops.py | 15
-rw-r--r--  tensorflow/python/ops/nn.py | 2
-rw-r--r--  tensorflow/python/ops/nn_conv_test.py | 189
-rw-r--r--  tensorflow/python/ops/nn_grad.py | 28
-rw-r--r--  tensorflow/python/ops/nn_ops.py | 67
-rw-r--r--  tensorflow/python/ops/rnn.py | 119
-rw-r--r--  tensorflow/python/training/saver_test.py | 2
-rw-r--r--  tensorflow/tensorboard/README.md | 10
-rwxr-xr-x  tensorflow/tools/ci_build/builds/test_installation.sh | 1
-rw-r--r--  tensorflow/tools/dist_test/Dockerfile | 2
-rw-r--r--  tensorflow/tools/dist_test/server/Dockerfile | 2
-rw-r--r--  tensorflow/tools/dist_test/server/Dockerfile.test | 2
-rw-r--r--  tensorflow/tools/docker/Dockerfile | 2
-rw-r--r--  tensorflow/tools/docker/Dockerfile.gpu | 2
-rw-r--r--  tensorflow/tools/docker/README.md | 8
-rwxr-xr-x  tensorflow/tools/docker/docker_run_gpu.sh | 29
-rwxr-xr-x [-rw-r--r--]  tensorflow/tools/docker/parameterized_docker_build.sh | 3
-rw-r--r--  tensorflow/tools/pip_package/BUILD | 6
-rw-r--r--  tensorflow/tools/pip_package/setup.py | 2
-rw-r--r--  tensorflow/tools/proto_text/BUILD | 7
-rw-r--r--  tensorflow/workspace.bzl | 4
-rwxr-xr-x  third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc | 6
60 files changed, 1491 insertions, 450 deletions
diff --git a/README.md b/README.md
index 2327f8e92c..ae8fe81370 100644
--- a/README.md
+++ b/README.md
@@ -33,9 +33,9 @@ and discussion.**
People who are a little bit adventurous can also try our nightly binaries:
-* Linux CPU only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/)) / [Python 3.4](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](http://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-cp35-cp35m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
-* Linux GPU: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-cp35-cp35m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
-* Mac CPU only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-py2-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-py3-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
+* Linux CPU only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/)) / [Python 3.4](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](http://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
+* Linux GPU: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
+* Mac CPU only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-py2-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-py3-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
* [Android](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/lastSuccessfulBuild/artifact/bazel-out/local_linux/bin/tensorflow/examples/android/tensorflow_demo.apk) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/))
#### *Try your first TensorFlow program*
diff --git a/RELEASE.md b/RELEASE.md
index ea368e037a..3843d543e9 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,8 +1,12 @@
# Changes Since Last Release
+## Features and Improvements
+* Connectionist Temporal Classification ops are now "official" (see, e.g.,
+ `tf.nn.ctc_loss`)
+
## Breaking Changes to the API
-* env.h replaces use of New*File() functions to use std::unique_ptr return
- arguments, removing the old raw pointer returns.
+* `env.h` replaces use of `New*File()` functions to use `std::unique_ptr`
+ return arguments, removing the old raw pointer returns.
# Release 0.9.0
@@ -52,28 +56,6 @@ Aaron Schumacher, Aidan Dang, Akihiko ITOH, Aki Sukegawa, Arbit Chen, Aziz Alto,
We are also grateful to all who filed issues or helped resolve them, asked and
answered questions, and were part of inspiring discussions.
-
-## Features & Improvements
-* Connectionist Temporal Classification ops are now "official" (see, e.g.,
- `tf.nn.ctc_loss`)
-* The RNN api is finally "official" (see, e.g., `tf.nn.dynamic_rnn`,
- `tf.nn.rnn`, and the classes in `tf.nn.rnn_cell`).
-* TensorBoard now has an Audio Dashboard, with associated audio summaries.
-* TensorBoard now has a reload button, and supports auto-reloading
-* TensorBoard scalar charts now show tooltips with more information
-* TensorBoard now supports run filtering
-* TensorBoard has color changes: the same run always gets the same hue
-* Tensorboard graph visualizer now supports run metadata. Clicking on nodes
- while viewing a stats for a particular run will show runtime statistics, such
- as memory or compute usage. Unused nodes will be faded out.
-
-## Bug Fixes and Other Changes
-* TensorBoard now displays graphs with only one data point
-* TensorBoard now visually displays NaN values
-* `tf.nn.moments()` now accepts a `shift` argument. Shifting by a good estimate
- of the mean improves numerical stability. Also changes the behavior of the
- `shift` argument to `tf.nn.sufficient_statistics()`.
-
# Release 0.8.0
## Major Features and Improvements
diff --git a/configure b/configure
index 98048ba91d..20bbf123ab 100755
--- a/configure
+++ b/configure
@@ -174,6 +174,19 @@ while true; do
if [[ -z "$TF_CUDNN_VERSION" ]]; then
TF_CUDNN_EXT=""
+ # Resolve to the SONAME of the symlink. Use readlink without -f
+ # to resolve exactly once to the SONAME. E.g, libcudnn.so ->
+ # libcudnn.so.4
+ REALVAL=`readlink ${CUDNN_INSTALL_PATH}/lib64/libcudnn.so`
+
+ # Extract the version of the SONAME, if it was indeed symlinked to
+ # the SONAME version of the file.
+ if [[ "$REALVAL" =~ .so[.]+([0-9]*) ]];
+ then
+ TF_CUDNN_EXT="."${BASH_REMATCH[1]}
+ TF_CUDNN_VERSION=${BASH_REMATCH[1]}
+ echo "libcudnn.so resolves to libcudnn${TF_CUDNN_EXT}"
+ fi
else
TF_CUDNN_EXT=".$TF_CUDNN_VERSION"
fi
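The configure hunk above resolves the cuDNN version by reading the `libcudnn.so` symlink exactly once and pulling the trailing number out of the SONAME. The same idea, sketched in Python for illustration only (the paths are hypothetical; the commit itself uses `readlink` and a bash regex):

```python
import os
import re

cudnn_install_path = "/usr/local/cuda"  # hypothetical install prefix
link = os.path.join(cudnn_install_path, "lib64", "libcudnn.so")

# Resolve the symlink exactly once, e.g. libcudnn.so -> libcudnn.so.4
target = os.readlink(link)

# Extract the version number from the SONAME, if it is present.
match = re.search(r"\.so\.+([0-9]+)$", target)
if match:
    tf_cudnn_ext = "." + match.group(1)  # e.g. ".4"
    tf_cudnn_version = match.group(1)    # e.g. "4"
    print("libcudnn.so resolves to libcudnn" + tf_cudnn_ext)
```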
diff --git a/tensorflow/contrib/framework/python/framework/tensor_util.py b/tensorflow/contrib/framework/python/framework/tensor_util.py
index 466ff38aa0..d46e00cf96 100644
--- a/tensorflow/contrib/framework/python/framework/tensor_util.py
+++ b/tensorflow/contrib/framework/python/framework/tensor_util.py
@@ -216,9 +216,19 @@ def with_same_shape(expected_tensor, tensor):
return with_shape(expected_shape, tensor)
-def is_tensor(t):
- """Check if `t` is a tensor: `Tensor`, `SparseTensor`, or `Variable`."""
- return isinstance(t, (ops.Tensor, ops.SparseTensor, variables.Variable))
+def is_tensor(x):
+ """Check for tensor types.
+ Check whether an object is a tensor. Equivalent to
+ `isinstance(x, [tf.Tensor, tf.SparseTensor, tf.Variable])`.
+
+ Args:
+ x: An python object to check.
+
+ Returns:
+ `True` if `x` is a tensor, `False` if not.
+ """
+ tensor_types = (ops.Tensor, ops.SparseTensor, variables.Variable)
+ return isinstance(x, tensor_types)
def with_shape(expected_shape, tensor):
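The reworked docstring above describes `is_tensor` purely in terms of the three accepted types; a quick usage sketch (assuming the 0.9-era `tf.contrib.framework` module path documented below):

```python
import tensorflow as tf

v = tf.Variable([1.0, 2.0])
print(tf.contrib.framework.is_tensor(v))           # True: a Variable counts as a tensor
print(tf.contrib.framework.is_tensor([1.0, 2.0]))  # False: a plain Python list does not
```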
diff --git a/tensorflow/contrib/ios_examples/simple/RunModelViewController.mm b/tensorflow/contrib/ios_examples/simple/RunModelViewController.mm
index 2e389b39d4..2ebb197399 100644
--- a/tensorflow/contrib/ios_examples/simple/RunModelViewController.mm
+++ b/tensorflow/contrib/ios_examples/simple/RunModelViewController.mm
@@ -25,6 +25,7 @@
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/io/zero_copy_stream_impl_lite.h"
#include "google/protobuf/message_lite.h"
+#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/env.h"
@@ -218,6 +219,7 @@ NSString* RunInferenceOnImage() {
{output_layer}, {}, &outputs);
if (!run_status.ok()) {
LOG(ERROR) << "Running model failed: " << run_status;
+ tensorflow::LogAllRegisteredKernels();
result = @"Error running model";
return result;
}
diff --git a/tensorflow/contrib/learn/python/learn/io/graph_io_test.py b/tensorflow/contrib/learn/python/learn/io/graph_io_test.py
index a991344b91..334ef425ef 100644
--- a/tensorflow/contrib/learn/python/learn/io/graph_io_test.py
+++ b/tensorflow/contrib/learn/python/learn/io/graph_io_test.py
@@ -124,8 +124,8 @@ class GraphIOTest(tf.test.TestCase):
_VALID_FILE_PATTERN, batch_size, features, randomize_input=False,
queue_capacity=queue_capacity, reader_num_threads=2,
parser_num_threads=2, name=name)
- self.assertEquals("%s/parse_example_batch_join:1" % name,
- features["feature"].name)
+ self.assertEqual("%s/parse_example_batch_join:1" % name,
+ features["feature"].name)
file_name_queue_name = "%s/file_name_queue" % name
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/fifo_queue" % name
@@ -153,7 +153,7 @@ class GraphIOTest(tf.test.TestCase):
reader=tf.TFRecordReader, randomize_input=True,
num_epochs=1,
queue_capacity=queue_capacity, name=name)
- self.assertEquals("%s:1" % name, inputs.name)
+ self.assertEqual("%s:1" % name, inputs.name)
file_name_queue_name = "%s/file_name_queue" % name
file_name_queue_limit_name = (
"%s/limit_epochs/epochs" % file_name_queue_name)
@@ -182,7 +182,7 @@ class GraphIOTest(tf.test.TestCase):
_VALID_FILE_PATTERN, batch_size,
reader=tf.TFRecordReader, randomize_input=True,
queue_capacity=queue_capacity, name=name)
- self.assertEquals("%s:1" % name, inputs.name)
+ self.assertEqual("%s:1" % name, inputs.name)
file_name_queue_name = "%s/file_name_queue" % name
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/random_shuffle_queue" % name
diff --git a/tensorflow/contrib/makefile/Makefile b/tensorflow/contrib/makefile/Makefile
index c9b4641afa..6bb1655186 100644
--- a/tensorflow/contrib/makefile/Makefile
+++ b/tensorflow/contrib/makefile/Makefile
@@ -400,7 +400,7 @@ $(BENCHMARK_NAME): $(BENCHMARK_OBJS) $(LIB_PATH)
$(LIBFLAGS) $(LIB_PATH) $(LDFLAGS) $(LIBS)
# Matches on the normal hand-written TensorFlow C++ source files.
-$(OBJDIR)%.o: %.cc
+$(OBJDIR)%.o: %.cc | $(PBT_GEN_FILES)
@mkdir -p $(dir $@)
$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
diff --git a/tensorflow/contrib/makefile/build_all_ios.sh b/tensorflow/contrib/makefile/build_all_ios.sh
index 39d859aad6..6b6ed389fc 100755
--- a/tensorflow/contrib/makefile/build_all_ios.sh
+++ b/tensorflow/contrib/makefile/build_all_ios.sh
@@ -27,6 +27,14 @@ fi
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd ${SCRIPT_DIR}/../../../
+# You can set the parallelism of the make process with the first argument, with
+# a default of four if nothing is supplied.
+if [ "$#" -gt 1 ]; then
+ JOBS_COUNT=$1
+else
+ JOBS_COUNT=4
+fi
+
# Remove any old files first.
make -f tensorflow/contrib/makefile/Makefile clean
rm -rf tensorflow/contrib/makefile/downloads
@@ -35,10 +43,10 @@ rm -rf tensorflow/contrib/makefile/downloads
tensorflow/contrib/makefile/download_dependencies.sh
# Compile protobuf for the target iOS device architectures.
-tensorflow/contrib/makefile/compile_ios_protobuf.sh
+tensorflow/contrib/makefile/compile_ios_protobuf.sh ${JOBS_COUNT}
# Build the iOS TensorFlow libraries.
-tensorflow/contrib/makefile/compile_ios_tensorflow.sh
+tensorflow/contrib/makefile/compile_ios_tensorflow.sh "-O3" -j ${JOBS_COUNT}
# Creates a static universal library in
# tensorflow/contrib/makefile/gen/lib/libtensorflow-core.a
diff --git a/tensorflow/contrib/makefile/compile_ios_protobuf.sh b/tensorflow/contrib/makefile/compile_ios_protobuf.sh
index 9a452023b7..d2b1d4e846 100755
--- a/tensorflow/contrib/makefile/compile_ios_protobuf.sh
+++ b/tensorflow/contrib/makefile/compile_ios_protobuf.sh
@@ -1,4 +1,4 @@
-#!/bin/bash -x
+#!/bin/bash -x -e
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,6 +15,9 @@
# ==============================================================================
# Builds protobuf 3 for iOS.
+SCRIPT_DIR=$(dirname $0)
+source "${SCRIPT_DIR}/build_helper.subr"
+
cd tensorflow/contrib/makefile
HOST_GENDIR="$(pwd)/gen/protobuf-host"
@@ -25,6 +28,12 @@ if [[ ! -f "./downloads/protobuf/autogen.sh" ]]; then
exit 1
fi
+if [ "$#" -gt 1 ]; then
+ JOBS_COUNT=$1
+else
+ JOBS_COUNT=4
+fi
+
GENDIR=`pwd`/gen/protobuf_ios/
LIBDIR=${GENDIR}lib
mkdir -p ${LIBDIR}
@@ -85,7 +94,7 @@ ${LDFLAGS} \
-L${IPHONESIMULATOR_SYSROOT}/usr/lib/ \
-L${IPHONESIMULATOR_SYSROOT}/usr/lib/system" \
"LIBS=${LIBS}"
-make
+make -j ${JOBS_COUNT}
make install
make distclean
@@ -113,7 +122,7 @@ ${LDFLAGS} \
-L${IPHONESIMULATOR_SYSROOT}/usr/lib/ \
-L${IPHONESIMULATOR_SYSROOT}/usr/lib/system" \
"LIBS=${LIBS}"
-make
+make -j ${JOBS_COUNT}
make install
make distclean
@@ -137,7 +146,7 @@ LDFLAGS="-arch armv7 \
-miphoneos-version-min=${MIN_SDK_VERSION} \
${LDFLAGS}" \
"LIBS=${LIBS}"
-make
+make -j ${JOBS_COUNT}
make install
make distclean
@@ -161,7 +170,7 @@ LDFLAGS="-arch armv7s \
-miphoneos-version-min=${MIN_SDK_VERSION} \
${LDFLAGS}" \
"LIBS=${LIBS}"
-make
+make -j ${JOBS_COUNT}
make install
make distclean
@@ -184,7 +193,7 @@ LDFLAGS="-arch arm64 \
-miphoneos-version-min=${MIN_SDK_VERSION} \
${LDFLAGS}" \
"LIBS=${LIBS}"
-make
+make -j ${JOBS_COUNT}
make install
lipo \
diff --git a/tensorflow/contrib/makefile/compile_ios_tensorflow.sh b/tensorflow/contrib/makefile/compile_ios_tensorflow.sh
index 2efc4bbe7f..be1a1d3ec5 100755
--- a/tensorflow/contrib/makefile/compile_ios_tensorflow.sh
+++ b/tensorflow/contrib/makefile/compile_ios_tensorflow.sh
@@ -16,10 +16,30 @@
# Builds the TensorFlow core library with ARM and x86 architectures for iOS, and
# packs them into a fat file.
+ACTUAL_XCODE_VERSION=`xcodebuild -version | head -n 1 | sed 's/Xcode //'`
+REQUIRED_XCODE_VERSION=7.3.0
+if [ ${ACTUAL_XCODE_VERSION//.} -lt ${REQUIRED_XCODE_VERSION//.} ]
+then
+ echo "error: Xcode ${REQUIRED_XCODE_VERSION} or later is required."
+ exit 1
+fi
+
GENDIR=tensorflow/contrib/makefile/gen/
LIBDIR=${GENDIR}lib
LIB_PREFIX=libtensorflow-core
+# TODO(petewarden) - Some new code in Eigen triggers a clang bug, so work
+# around it by patching the source.
+sed -e 's#static uint32x4_t p4ui_CONJ_XOR = vld1q_u32( conj_XOR_DATA );#static uint32x4_t p4ui_CONJ_XOR; // = vld1q_u32( conj_XOR_DATA ); - Removed by script#' \
+-i '' \
+tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
+sed -e 's#static uint32x2_t p2ui_CONJ_XOR = vld1_u32( conj_XOR_DATA );#static uint32x2_t p2ui_CONJ_XOR;// = vld1_u32( conj_XOR_DATA ); - Removed by scripts#' \
+-i '' \
+tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
+sed -e 's#static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );#static uint64x2_t p2ul_CONJ_XOR;// = vld1q_u64( p2ul_conj_XOR_DATA ); - Removed by script#' \
+-i '' \
+tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
+
make -f tensorflow/contrib/makefile/Makefile cleantarget
make -f tensorflow/contrib/makefile/Makefile \
TARGET=IOS IOS_ARCH=ARMV7 LIB_NAME=${LIB_PREFIX}-armv7.a OPTFLAGS="$1" $2 $3
diff --git a/tensorflow/contrib/makefile/tf_cc_files.txt b/tensorflow/contrib/makefile/tf_cc_files.txt
index 5402642f5b..1642074484 100644
--- a/tensorflow/contrib/makefile/tf_cc_files.txt
+++ b/tensorflow/contrib/makefile/tf_cc_files.txt
@@ -47,6 +47,7 @@ tensorflow/core/kernels/in_topk_op.cc
tensorflow/core/kernels/immutable_constant_op.cc
tensorflow/core/kernels/identity_op.cc
tensorflow/core/kernels/gather_op.cc
+tensorflow/core/kernels/fill_functor.cc
tensorflow/core/kernels/example_parsing_ops.cc
tensorflow/core/kernels/dynamic_stitch_op.cc
tensorflow/core/kernels/dynamic_partition_op.cc
@@ -227,6 +228,7 @@ tensorflow/core/framework/graph_def_util.cc
tensorflow/core/framework/function.cc
tensorflow/core/framework/fake_input.cc
tensorflow/core/framework/device_base.cc
+tensorflow/core/framework/common_shape_fns.cc
tensorflow/core/framework/cancellation.cc
tensorflow/core/framework/bfloat16.cc
tensorflow/core/framework/attr_value_util.cc
diff --git a/tensorflow/contrib/session_bundle/example/BUILD b/tensorflow/contrib/session_bundle/example/BUILD
index 8fa0c0b020..80e22e5d44 100644
--- a/tensorflow/contrib/session_bundle/example/BUILD
+++ b/tensorflow/contrib/session_bundle/example/BUILD
@@ -13,12 +13,12 @@ exports_files(["LICENSE"])
filegroup(
name = "all_files",
srcs = glob(
- ["**/*"],
- exclude = [
- "**/METADATA",
- "**/OWNERS",
- "g3doc/sitemap.md",
- ],
+ ["**/*"],
+ exclude = [
+ "**/METADATA",
+ "**/OWNERS",
+ "g3doc/sitemap.md",
+ ],
),
visibility = ["//visibility:public"],
)
@@ -26,27 +26,27 @@ filegroup(
py_binary(
name = "export_half_plus_two",
srcs = [
- "export_half_plus_two.py",
+ "export_half_plus_two.py",
],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow:tensorflow_py",
- "//tensorflow/contrib/session_bundle:exporter",
+ "//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/session_bundle:exporter",
],
)
genrule(
name = "half_plus_two",
outs = [
- "half_plus_two/00000123/export.meta",
- "half_plus_two/00000123/export-00000-of-00001",
+ "half_plus_two/00000123/export.meta",
+ "half_plus_two/00000123/export-00000-of-00001",
],
cmd =
- "rm -rf /tmp/half_plus_two; " +
- "$(PYTHON_BIN_PATH) $(locations :export_half_plus_two); " +
- "cp -r /tmp/half_plus_two/* $(@D)/half_plus_two",
+ "rm -rf /tmp/half_plus_two; " +
+ "$(PYTHON_BIN_PATH) $(locations :export_half_plus_two); " +
+ "cp -r /tmp/half_plus_two/* $(@D)/half_plus_two",
tools = [
- ":export_half_plus_two",
+ ":export_half_plus_two",
],
visibility = ["//visibility:public"],
)
diff --git a/tensorflow/core/framework/op_kernel.cc b/tensorflow/core/framework/op_kernel.cc
index 66a281e29f..6bfc55df41 100644
--- a/tensorflow/core/framework/op_kernel.cc
+++ b/tensorflow/core/framework/op_kernel.cc
@@ -787,6 +787,13 @@ Status SupportedDeviceTypesForNode(
return Status::OK();
}
+void LogAllRegisteredKernels() {
+ for (const auto& key_registration : *GlobalKernelRegistryTyped()) {
+ const KernelDef& kernel_def(key_registration.second.def);
+ LOG(INFO) << "OpKernel ('" << ProtoShortDebugString(kernel_def) << "')";
+ }
+}
+
std::unique_ptr<OpKernel> CreateOpKernel(
DeviceType device_type, DeviceBase* device, Allocator* allocator,
const NodeDef& node_def, int graph_def_version, Status* status) {
diff --git a/tensorflow/core/framework/op_kernel.h b/tensorflow/core/framework/op_kernel.h
index 0b5309d4f6..0092c6286f 100644
--- a/tensorflow/core/framework/op_kernel.h
+++ b/tensorflow/core/framework/op_kernel.h
@@ -1078,6 +1078,10 @@ Status FindKernelDef(DeviceType device_type, const NodeDef& node_def,
// calling GlobalKernelRegistry()), inserts 'k' into registry_ptr.
extern "C" void RegisterKernels(void* registry_ptr);
+// Writes a list of all registered kernels to LOG(INFO), to help users debug
+// missing kernel errors.
+void LogAllRegisteredKernels();
+
namespace kernel_factory {
class OpKernelRegistrar {
diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h
index 78117bb48f..443eabaee0 100644
--- a/tensorflow/core/public/version.h
+++ b/tensorflow/core/public/version.h
@@ -19,7 +19,7 @@ limitations under the License.
// TensorFlow uses semantic versioning, see http://semver.org/.
#define TF_MAJOR_VERSION 0
-#define TF_MINOR_VERSION 8
+#define TF_MINOR_VERSION 9
#define TF_PATCH_VERSION 0
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
diff --git a/tensorflow/examples/skflow/examples_test.sh b/tensorflow/examples/skflow/examples_test.sh
index 9ce8b690e9..da6b35c9bb 100755
--- a/tensorflow/examples/skflow/examples_test.sh
+++ b/tensorflow/examples/skflow/examples_test.sh
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# This script excercises the examples of using SkFlow.
+# This script exercises the examples of using SkFlow.
DIR="$TEST_SRCDIR"
diff --git a/tensorflow/examples/skflow/iris_run_config.py b/tensorflow/examples/skflow/iris_run_config.py
index 4f057f817d..dff0daf9e8 100644
--- a/tensorflow/examples/skflow/iris_run_config.py
+++ b/tensorflow/examples/skflow/iris_run_config.py
@@ -29,7 +29,7 @@ X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data,
# estimator to control session configurations, e.g. num_cores and gpu_memory_fraction
run_config = learn.estimators.RunConfig(num_cores=3, gpu_memory_fraction=0.6)
-# Build 3 layer DNN with 10, 20, 10 units respecitvely.
+# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=200, config=run_config)
diff --git a/tensorflow/examples/skflow/text_classification_character_cnn.py b/tensorflow/examples/skflow/text_classification_character_cnn.py
index 7eb185d77a..998ed30807 100644
--- a/tensorflow/examples/skflow/text_classification_character_cnn.py
+++ b/tensorflow/examples/skflow/text_classification_character_cnn.py
@@ -58,7 +58,7 @@ def char_cnn_model(x, y):
FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
- # Max pooling across output of Convlution+Relu.
+ # Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
diff --git a/tensorflow/examples/skflow/text_classification_cnn.py b/tensorflow/examples/skflow/text_classification_cnn.py
index 0d3c8684b5..0cbed33ef1 100644
--- a/tensorflow/examples/skflow/text_classification_cnn.py
+++ b/tensorflow/examples/skflow/text_classification_cnn.py
@@ -56,7 +56,7 @@ def cnn_model(x, y):
FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
- # Max pooling across output of Convlution+Relu.
+ # Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
diff --git a/tensorflow/examples/udacity/2_fullyconnected.ipynb b/tensorflow/examples/udacity/2_fullyconnected.ipynb
index 588b581a69..2d1dfa2fba 100644
--- a/tensorflow/examples/udacity/2_fullyconnected.ipynb
+++ b/tensorflow/examples/udacity/2_fullyconnected.ipynb
@@ -258,7 +258,7 @@
" \n",
" # Variables.\n",
" # These are the parameters that we are going to be training. The weight\n",
- " # matrix will be initialized using random valued following a (truncated)\n",
+ " # matrix will be initialized using random values following a (truncated)\n",
" # normal distribution. The biases get initialized to zero.\n",
" weights = tf.Variable(\n",
" tf.truncated_normal([image_size * image_size, num_labels]))\n",
diff --git a/tensorflow/g3doc/api_docs/python/contrib.framework.md b/tensorflow/g3doc/api_docs/python/contrib.framework.md
index 9500f189aa..4c23455597 100644
--- a/tensorflow/g3doc/api_docs/python/contrib.framework.md
+++ b/tensorflow/g3doc/api_docs/python/contrib.framework.md
@@ -184,9 +184,20 @@ See also: `is_non_decreasing`
- - -
-### `tf.contrib.framework.is_tensor(t)` {#is_tensor}
+### `tf.contrib.framework.is_tensor(x)` {#is_tensor}
-Check if `t` is a tensor: `Tensor`, `SparseTensor`, or `Variable`.
+Check for tensor types.
+Check whether an object is a tensor. Equivalent to
+`isinstance(x, [tf.Tensor, tf.SparseTensor, tf.Variable])`.
+
+##### Args:
+
+
+* <b>`x`</b>: An python object to check.
+
+##### Returns:
+
+ `True` if `x` is a tensor, `False` if not.
- - -
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.ctc_loss.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.ctc_loss.md
index a99c0b478b..229df8bdbd 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.ctc_loss.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.ctc_loss.md
@@ -69,7 +69,7 @@ Here is a table of the (roughly) expected first order behavior:
##### Returns:
- A 1-D `float` `Tensor`, size `[batch]`, containing logits.
+ A 1-D `float` `Tensor`, size `[batch]`, containing the negative log probabilities.
##### Raises:
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.pad_to_bounding_box.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.pad_to_bounding_box.md
index 04c155c03c..c731fb2d2a 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.pad_to_bounding_box.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.pad_to_bounding_box.md
@@ -26,5 +26,6 @@ This op does nothing if `offset_*` is zero and the image already has size
* <b>`ValueError`</b>: If the shape of `image` is incompatible with the `offset_*` or
- `target_*` arguments
+ `target_*` arguments, or either `offset_height` or `offset_width` is
+ negative.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.resize_image_with_crop_or_pad.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.resize_image_with_crop_or_pad.md
index c93111bd99..24104b647c 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.resize_image_with_crop_or_pad.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.image.resize_image_with_crop_or_pad.md
@@ -14,7 +14,7 @@ dimension.
##### Args:
-* <b>`image`</b>: 3-D tensor of shape [height, width, channels]
+* <b>`image`</b>: 3-D tensor of shape `[height, width, channels]`
* <b>`target_height`</b>: Target height.
* <b>`target_width`</b>: Target width.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.crop_to_bounding_box.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.crop_to_bounding_box.md
index 4724ff5eb9..1ca4247a9b 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.crop_to_bounding_box.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard8/tf.image.crop_to_bounding_box.md
@@ -26,5 +26,6 @@ lower-right corner is at
* <b>`ValueError`</b>: If the shape of `image` is incompatible with the `offset_*` or
- `target_*` arguments
+ `target_*` arguments, or either `offset_height` or `offset_width` is
+ negative, or either `target_height` or `target_width` is not positive.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.framework.is_tensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.framework.is_tensor.md
index eaab61a99d..3d8b9b5604 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.framework.is_tensor.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.framework.is_tensor.md
@@ -1,4 +1,15 @@
-### `tf.contrib.framework.is_tensor(t)` {#is_tensor}
+### `tf.contrib.framework.is_tensor(x)` {#is_tensor}
-Check if `t` is a tensor: `Tensor`, `SparseTensor`, or `Variable`.
+Check for tensor types.
+Check whether an object is a tensor. Equivalent to
+`isinstance(x, [tf.Tensor, tf.SparseTensor, tf.Variable])`.
+
+##### Args:
+
+
+* <b>`x`</b>: An python object to check.
+
+##### Returns:
+
+ `True` if `x` is a tensor, `False` if not.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.tanh.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.tanh.md
index 6f42cfdc2f..154a13059c 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.tanh.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.tanh.md
@@ -5,12 +5,12 @@ Computes hyperbolic tangent of `x` element-wise.
##### Args:
-* <b>`x`</b>: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
- or `qint32`.
+* <b>`x`</b>: A Tensor or SparseTensor with type `float`, `double`, `int32`,
+ `complex64`, `int64`, or `qint32`.
* <b>`name`</b>: A name for the operation (optional).
##### Returns:
- A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
- the return type is `quint8`.
+ A Tensor or SparseTensor respectively with the same type as `x` if
+ `x.dtype != qint32` otherwise the return type is `quint8`.
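The updated `tf.tanh` docs above say the op now accepts a `SparseTensor` and returns a `SparseTensor` in that case. A minimal sketch of that usage, assuming the 0.9-era `SparseTensor` constructor:

```python
import tensorflow as tf

# Sparse 2x3 input with two nonzero values.
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[0.5, -1.0], shape=[2, 3])
result = tf.tanh(sp)  # per the docs, a SparseTensor with tanh applied to the values

with tf.Session() as sess:
    print(sess.run(result.values))  # expected: [tanh(0.5), tanh(-1.0)]
```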
diff --git a/tensorflow/g3doc/api_docs/python/image.md b/tensorflow/g3doc/api_docs/python/image.md
index 5c017ec042..505cc9259b 100644
--- a/tensorflow/g3doc/api_docs/python/image.md
+++ b/tensorflow/g3doc/api_docs/python/image.md
@@ -382,7 +382,7 @@ dimension.
##### Args:
-* <b>`image`</b>: 3-D tensor of shape [height, width, channels]
+* <b>`image`</b>: 3-D tensor of shape `[height, width, channels]`
* <b>`target_height`</b>: Target height.
* <b>`target_width`</b>: Target width.
@@ -461,7 +461,8 @@ This op does nothing if `offset_*` is zero and the image already has size
* <b>`ValueError`</b>: If the shape of `image` is incompatible with the `offset_*` or
- `target_*` arguments
+ `target_*` arguments, or either `offset_height` or `offset_width` is
+ negative.
- - -
@@ -494,7 +495,8 @@ lower-right corner is at
* <b>`ValueError`</b>: If the shape of `image` is incompatible with the `offset_*` or
- `target_*` arguments
+ `target_*` arguments, or either `offset_height` or `offset_width` is
+ negative, or either `target_height` or `target_width` is not positive.
- - -
diff --git a/tensorflow/g3doc/api_docs/python/nn.md b/tensorflow/g3doc/api_docs/python/nn.md
index a7069e2732..372b1f8aea 100644
--- a/tensorflow/g3doc/api_docs/python/nn.md
+++ b/tensorflow/g3doc/api_docs/python/nn.md
@@ -205,14 +205,14 @@ Computes hyperbolic tangent of `x` element-wise.
##### Args:
-* <b>`x`</b>: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
- or `qint32`.
+* <b>`x`</b>: A Tensor or SparseTensor with type `float`, `double`, `int32`,
+ `complex64`, `int64`, or `qint32`.
* <b>`name`</b>: A name for the operation (optional).
##### Returns:
- A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
- the return type is `quint8`.
+ A Tensor or SparseTensor respectively with the same type as `x` if
+ `x.dtype != qint32` otherwise the return type is `quint8`.
@@ -1770,7 +1770,7 @@ Here is a table of the (roughly) expected first order behavior:
##### Returns:
- A 1-D `float` `Tensor`, size `[batch]`, containing logits.
+ A 1-D `float` `Tensor`, size `[batch]`, containing the negative log probabilities.
##### Raises:
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index 97b7a5ed7b..158c84b4ef 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -54,36 +54,48 @@ $ sudo apt-get install python-pip python-dev
# Mac OS X
$ sudo easy_install pip
+$ sudo easy_install --upgrade six
```
-Install TensorFlow:
+Then, select the correct binary to install:
```bash
-# Ubuntu/Linux 64-bit, CPU only, Python 2.7:
-$ sudo pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+# Ubuntu/Linux 64-bit, CPU only, Python 2.7
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
-# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7. Requires CUDA toolkit 7.5 and cuDNN v4.
-# For other versions, see "Install from sources" below.
-$ sudo pip install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
+# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
-# Mac OS X, CPU only:
-$ sudo easy_install --upgrade six
-$ sudo pip install --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.8.0-py2-none-any.whl
+# Mac OS X, CPU only, Python 2.7:
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py2-none-any.whl
+
+# Ubuntu/Linux 64-bit, CPU only, Python 3.4
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+
+# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
+# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+
+# Ubuntu/Linux 64-bit, CPU only, Python 3.5
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
+
+# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
+# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
+
+# Mac OS X, CPU only, Python 3.4 or 3.5:
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py3-none-any.whl
```
-For python3:
+Install TensorFlow:
```bash
-# Ubuntu/Linux 64-bit, CPU only, Python 3.4:
-$ sudo pip3 install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.8.0-cp34-cp34m-linux_x86_64.whl
-
-# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4. Requires CUDA toolkit 7.5 and cuDNN v4.
-# For other versions, see "Install from sources" below.
-$ sudo pip3 install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.8.0-cp34-cp34m-linux_x86_64.whl
+# Python 2
+$ sudo pip install --upgrade $TF_BINARY_URL
-# Mac OS X, CPU only:
-$ sudo easy_install --upgrade six
-$ sudo pip3 install --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.8.0-py3-none-any.whl
+# Python 3
+$ sudo pip3 install --upgrade $TF_BINARY_URL
```
NOTE: If you are upgrading from a previous installation of TensorFlow < 0.7.1,
@@ -127,40 +139,53 @@ Create a Virtualenv environment in the directory `~/tensorflow`:
$ virtualenv --system-site-packages ~/tensorflow
```
-Activate the environment and use pip to install TensorFlow inside it:
+Activate the environment:
```bash
$ source ~/tensorflow/bin/activate # If using bash
$ source ~/tensorflow/bin/activate.csh # If using csh
(tensorflow)$ # Your prompt should change
+```
-# Ubuntu/Linux 64-bit, CPU only, Python 2.7:
-(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+Now, install TensorFlow just as you would for a regular Pip installation. First select the correct binary to install:
-# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7. Requires CUDA toolkit 7.5 and cuDNN v4.
-# For other versions, see "Install from sources" below.
-(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+```bash
+# Ubuntu/Linux 64-bit, CPU only, Python 2.7
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
-# Mac OS X, CPU only:
-(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.8.0-py2-none-any.whl
-```
+# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
+# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
-and again for python3:
+# Mac OS X, CPU only, Python 2.7:
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py2-none-any.whl
-```bash
-$ source ~/tensorflow/bin/activate # If using bash
-$ source ~/tensorflow/bin/activate.csh # If using csh
-(tensorflow)$ # Your prompt should change
+# Ubuntu/Linux 64-bit, CPU only, Python 3.4
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+
+# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
+# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
-# Ubuntu/Linux 64-bit, CPU only, Python 3.4:
-(tensorflow)$ pip3 install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.8.0-cp34-cp34m-linux_x86_64.whl
+# Ubuntu/Linux 64-bit, CPU only, Python 3.5
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
-# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4. Requires CUDA toolkit 7.5 and cuDNN v4.
-# For other versions, see "Install from sources" below.
-(tensorflow)$ pip3 install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.8.0-cp34-cp34m-linux_x86_64.whl
+# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
+# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
-# Mac OS X, CPU only:
-(tensorflow)$ pip3 install --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.8.0-py3-none-any.whl
+# Mac OS X, CPU only, Python 3.4 or 3.5:
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py3-none-any.whl
+```
+
+Finally install TensorFlow:
+
+```bash
+# Python 2
+(tensorflow)$ pip install --upgrade $TF_BINARY_URL
+
+# Python 3
+(tensorflow)$ pip3 install --upgrade $TF_BINARY_URL
```
With the Virtualenv environment activated, you can now
@@ -216,6 +241,9 @@ $ conda create -n tensorflow python=2.7
# Python 3.4
$ conda create -n tensorflow python=3.4
+
+# Python 3.5
+$ conda create -n tensorflow python=3.5
```
Activate the environment and use conda or pip to install TensorFlow inside it.
@@ -241,33 +269,47 @@ If using pip make sure to use the `--ignore-installed` flag to prevent errors ab
```bash
$ source activate tensorflow
(tensorflow)$ # Your prompt should change
+```
-# Ubuntu/Linux 64-bit, CPU only, Python 2.7:
-(tensorflow)$ pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+Now, install TensorFlow just as you would for a regular Pip installation. First select the correct binary to install:
-# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7. Requires CUDA toolkit 7.5 and cuDNN v4.
-# For other versions, see "Install from sources" below.
-(tensorflow)$ pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+```bash
+# Ubuntu/Linux 64-bit, CPU only, Python 2.7
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
-# Mac OS X, CPU only:
-(tensorflow)$ pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.8.0-py2-none-any.whl
-```
+# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
+# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
-and again for Python 3:
+# Mac OS X, CPU only, Python 2.7:
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py2-none-any.whl
-```bash
-$ source activate tensorflow
-(tensorflow)$ # Your prompt should change
+# Ubuntu/Linux 64-bit, CPU only, Python 3.4
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+
+# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
+# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
-# Ubuntu/Linux 64-bit, CPU only, Python 3.4:
-(tensorflow)$ pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.8.0-cp34-cp34m-linux_x86_64.whl
+# Ubuntu/Linux 64-bit, CPU only, Python 3.5
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
-# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4. Requires CUDA toolkit 7.5 and cuDNN v4.
-# For other versions, see "Install from sources" below.
-(tensorflow)$ pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.8.0-cp34-cp34m-linux_x86_64.whl
+# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
+# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
-# Mac OS X, CPU only:
-(tensorflow)$ pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.8.0-py3-none-any.whl
+# Mac OS X, CPU only, Python 3.4 or 3.5:
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py3-none-any.whl
+```
+
+Finally install TensorFlow:
+
+```bash
+# Python 2
+(tensorflow)$ pip install --ignore-installed --upgrade $TF_BINARY_URL
+
+# Python 3
+(tensorflow)$ pip3 install --ignore-installed --upgrade $TF_BINARY_URL
```
### Usage
@@ -310,7 +352,7 @@ code.
* `gcr.io/tensorflow/tensorflow:latest-devel-gpu`: GPU Binary image plus source
code.
-We also have tags with `latest` replaced by a released version (e.g., `0.8.0-gpu`).
+We also have tags with `latest` replaced by a released version (e.g., `0.9.0-gpu`).
With Docker the installation is as follows:
@@ -335,16 +377,16 @@ The option `-p 8888:8888` is used to publish the Docker container᾿s internal p
The format of the port mapping is `hostPort:containerPort`. You can specify any valid port number for the host port but have to use `8888` for the container port portion.
-If you're using a container with GPU support, some additional flags must be
-passed to expose the GPU device to the container. For the default config, we
-include a
-[script](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/docker/docker_run_gpu.sh)
-in the repo with these flags, so the command-line would look like
+For NVidia GPU support install latest NVidia drivers and
+[nvidia-docker](https://github.com/NVIDIA/nvidia-docker).
+Run with
```bash
-$ path/to/repo/tensorflow/tools/docker/docker_run_gpu.sh gcr.io/tensorflow/tensorflow:gpu
+$ nvidia-docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow:latest-gpu
```
+For more details see (TensorFlow docker readme)[https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/docker].
+
You can now [test your installation](#test-the-tensorflow-installation) within the Docker container.
## Test the TensorFlow installation
@@ -517,8 +559,8 @@ to reflect the cuDNN version you downloaded):
``` bash
tar xvzf cudnn-7.5-linux-x64-v4.tgz
-sudo cp cudnn-7.5-linux-x64-v4/cudnn.h /usr/local/cuda/include
-sudo cp cudnn-7.5-linux-x64-v4/libcudnn* /usr/local/cuda/lib64
+sudo cp cuda/include/cudnn.h /usr/local/cuda/include
+sudo cp cuda/lib64/libcudnn* /usr/local/cuda/lib64
sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
```
@@ -719,7 +761,7 @@ $ bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_pack
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
# The name of the .whl file will depend on your platform.
-$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.8.0-py2-none-any.whl
+$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.9.0-py2-none-any.whl
```
## Setting up TensorFlow for Development
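The rewritten os_setup.md instructions above all point the reader at "Test the TensorFlow installation"; a minimal post-install check for these 0.9.0 wheels (a sketch, not part of this commit) is:

```python
import tensorflow as tf

hello = tf.constant("Hello, TensorFlow!")
with tf.Session() as sess:
    print(sess.run(hello))
print(tf.__version__)  # should report 0.9.0 for the wheels listed above
```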
diff --git a/tensorflow/g3doc/resources/roadmap.md b/tensorflow/g3doc/resources/roadmap.md
index be7ac5e778..9972481515 100644
--- a/tensorflow/g3doc/resources/roadmap.md
+++ b/tensorflow/g3doc/resources/roadmap.md
@@ -1,5 +1,5 @@
# Roadmap
-**Last updated: April 12, 2016**
+**Last updated: June 3, 2016**
TensorFlow is a fast moving project. In order for the community to better
understand what the near future will bring, this document shares what we are
@@ -16,7 +16,7 @@ we do not have timelines for these features.
* Shape Inference
### Making TensorFlow easier to use
-* Higher level APIs (for instance, layers)
+* Easier setup for distributed training jobs
### Performance
* Speed and memory benchmarks
@@ -24,13 +24,15 @@ we do not have timelines for these features.
### Core Features
* Repeated partial graph evaluation ([#672](https://github.com/tensorflow/tensorflow/issues/672))
+* Automatic op placement ([#2126](https://github.com/tensorflow/tensorflow/issues/2126))
### Platforms
-* iOS support ([#16](https://github.com/tensorflow/tensorflow/issues/16))
* OpenCL support ([#22](https://github.com/tensorflow/tensorflow/issues/22))
* Windows support ([#17](https://github.com/tensorflow/tensorflow/issues/17))
-* MacOS GPU support
### Community
-* Integration with other machine learning frameworks
-* Better installation support; support for package managers
+* More educational resources
+* Better integration of TensorFlow into the opensource big data ecosystem ([#1996](https://github.com/tensorflow/tensorflow/issues/1996),
+[#2218](https://github.com/tensorflow/tensorflow/issues/2218),
+[#2655](https://github.com/tensorflow/tensorflow/issues/2655))
+* Models benchmarking and comparison tooling
diff --git a/tensorflow/models/embedding/word2vec_test.py b/tensorflow/models/embedding/word2vec_test.py
index 34cb21d821..3f6fbc78aa 100644
--- a/tensorflow/models/embedding/word2vec_test.py
+++ b/tensorflow/models/embedding/word2vec_test.py
@@ -33,8 +33,8 @@ FLAGS = flags.FLAGS
class Word2VecTest(tf.test.TestCase):
def setUp(self):
- FLAGS.train_data = os.path.join(self.get_temp_dir() + "test-text.txt")
- FLAGS.eval_data = os.path.join(self.get_temp_dir() + "eval-text.txt")
+ FLAGS.train_data = os.path.join(self.get_temp_dir(), "test-text.txt")
+ FLAGS.eval_data = os.path.join(self.get_temp_dir(), "eval-text.txt")
FLAGS.save_path = self.get_temp_dir()
with open(FLAGS.train_data, "w") as f:
f.write(
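The word2vec_test fix above swaps string concatenation for a proper `os.path.join` argument; with `+` the path separator is dropped, so the file was being written next to the temp directory rather than inside it. A small illustration (hypothetical directory name):

```python
import os

tmp = "/tmp/tftest"  # hypothetical temp directory
print(os.path.join(tmp + "test-text.txt"))  # '/tmp/tftesttest-text.txt' (old, wrong)
print(os.path.join(tmp, "test-text.txt"))   # '/tmp/tftest/test-text.txt' (fixed)
```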
diff --git a/tensorflow/python/kernel_tests/cwise_ops_test.py b/tensorflow/python/kernel_tests/cwise_ops_test.py
index 4a21d1acc6..7f1be574bb 100644
--- a/tensorflow/python/kernel_tests/cwise_ops_test.py
+++ b/tensorflow/python/kernel_tests/cwise_ops_test.py
@@ -213,6 +213,7 @@ class UnaryOpTest(tf.test.TestCase):
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
+ self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(y, np.sign, tf.sign)
def testFloatTanhEdge(self):
@@ -251,6 +252,7 @@ class UnaryOpTest(tf.test.TestCase):
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, tol=1e-3)
+ self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(x, np.sign, tf.sign)
def testDoubleBasic(self):
@@ -288,6 +290,7 @@ class UnaryOpTest(tf.test.TestCase):
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
+ self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(y, np.sign, tf.sign)
def testHalfBasic(self):
@@ -320,6 +323,7 @@ class UnaryOpTest(tf.test.TestCase):
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
+ self._compareBothSparse(x, np.tanh, tf.tanh)
self._compareBothSparse(y, np.sign, tf.sign)
def testInt32Basic(self):
@@ -374,6 +378,7 @@ class UnaryOpTest(tf.test.TestCase):
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
+ self._compareBothSparse(x, np.tanh, tf.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
@@ -404,6 +409,7 @@ class UnaryOpTest(tf.test.TestCase):
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
+ self._compareBothSparse(x, np.tanh, tf.tanh)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
diff --git a/tensorflow/python/kernel_tests/rnn_test.py b/tensorflow/python/kernel_tests/rnn_test.py
index 8db74d56fd..8ec29ea796 100644
--- a/tensorflow/python/kernel_tests/rnn_test.py
+++ b/tensorflow/python/kernel_tests/rnn_test.py
@@ -19,6 +19,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import itertools
import time
import timeit
@@ -1177,6 +1178,114 @@ class BidirectionalRNNTest(tf.test.TestCase):
self._testBidirectionalRNNWithoutSequenceLength(use_gpu=True,
use_shape=True)
+ def _createBidirectionalDynamicRNN(self, use_gpu, use_shape,
+ use_state_tuple, use_time_major):
+ num_units = 3
+ input_size = 5
+ batch_size = 2
+ max_length = 8
+
+ initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
+ sequence_length = tf.placeholder(tf.int64)
+ cell_fw = tf.nn.rnn_cell.LSTMCell(num_units,
+ initializer=initializer,
+ state_is_tuple=use_state_tuple)
+ cell_bw = tf.nn.rnn_cell.LSTMCell(num_units,
+ initializer=initializer,
+ state_is_tuple=use_state_tuple)
+ inputs = max_length * [
+ tf.placeholder(tf.float32,
+ shape=(batch_size if use_shape else None, input_size))]
+ inputs_c = tf.pack(inputs)
+ if not use_time_major:
+ inputs_c = tf.transpose(inputs_c, [1, 0, 2])
+ outputs, states = tf.nn.bidirectional_dynamic_rnn(
+ cell_fw,
+ cell_bw,
+ inputs_c,
+ sequence_length,
+ dtype=tf.float32,
+ time_major=use_time_major)
+ outputs = tf.concat(2, outputs)
+ state_fw, state_bw = states
+ outputs_shape = [None, max_length, 2 * num_units]
+ if use_shape:
+ outputs_shape[0] = batch_size
+ if use_time_major:
+ outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
+ self.assertEqual(
+ outputs.get_shape().as_list(),
+ outputs_shape)
+
+ input_value = np.random.randn(batch_size, input_size)
+
+ return input_value, inputs, outputs, state_fw, state_bw, sequence_length
+
+ def _testBidirectionalDynamicRNN(self, use_gpu, use_shape,
+ use_state_tuple, use_time_major):
+ with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
+ input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
+ self._createBidirectionalDynamicRNN(
+ use_gpu, use_shape, use_state_tuple, use_time_major))
+ tf.initialize_all_variables().run()
+ # Run with pre-specified sequence length of 2, 3
+ if use_state_tuple:
+ out, c_fw, m_fw, c_bw, m_bw = sess.run(
+ [outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
+ feed_dict={inputs[0]: input_value,
+ sequence_length: [2, 3]})
+ s_fw = (c_fw, m_fw)
+ s_bw = (c_bw, m_bw)
+ else:
+ out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
+ feed_dict={inputs[0]: input_value,
+ sequence_length: [2, 3]})
+
+ # Since the forward and backward LSTM cells were initialized with the
+      # same parameters, the forward and backward outputs have to be the same,
+ # but reversed in time. The format is output[time][batch][depth], and
+ # due to depth concatenation (as num_units=3 for both RNNs):
+ # - forward output: out[][][depth] for 0 <= depth < 3
+      # - backward output: out[][][depth] for 3 <= depth < 6
+ #
+ # First sequence in batch is length=2
+ # Check that the time=0 forward output is equal to time=1 backward output
+ if not use_time_major:
+ out = np.swapaxes(out, 0, 1)
+ self.assertEqual(out[0][0][0], out[1][0][3])
+ self.assertEqual(out[0][0][1], out[1][0][4])
+ self.assertEqual(out[0][0][2], out[1][0][5])
+ # Check that the time=1 forward output is equal to time=0 backward output
+ self.assertEqual(out[1][0][0], out[0][0][3])
+ self.assertEqual(out[1][0][1], out[0][0][4])
+ self.assertEqual(out[1][0][2], out[0][0][5])
+
+ # Second sequence in batch is length=3
+ # Check that the time=0 forward output is equal to time=2 backward output
+ self.assertEqual(out[0][1][0], out[2][1][3])
+ self.assertEqual(out[0][1][1], out[2][1][4])
+ self.assertEqual(out[0][1][2], out[2][1][5])
+ # Check that the time=1 forward output is equal to time=1 backward output
+ self.assertEqual(out[1][1][0], out[1][1][3])
+ self.assertEqual(out[1][1][1], out[1][1][4])
+ self.assertEqual(out[1][1][2], out[1][1][5])
+ # Check that the time=2 forward output is equal to time=0 backward output
+ self.assertEqual(out[2][1][0], out[0][1][3])
+ self.assertEqual(out[2][1][1], out[0][1][4])
+ self.assertEqual(out[2][1][2], out[0][1][5])
+ # Via the reasoning above, the forward and backward final state should be
+ # exactly the same
+ self.assertAllClose(s_fw, s_bw)
+
+ def testBidirectionalDynamicRNN(self):
+ # Generate 2^4 option values
+ # from [True, True, True, True] to [False, False, False, False]
+ options = itertools.product([True, False], repeat=4)
+ for option in options:
+ self._testBidirectionalDynamicRNN(use_gpu=option[0], use_shape=option[1],
+ use_state_tuple=option[2],
+ use_time_major=option[3])
+
class MultiDimensionalLSTMTest(tf.test.TestCase):
diff --git a/tensorflow/python/ops/ctc_ops.py b/tensorflow/python/ops/ctc_ops.py
index 58c1c21010..bab9dc0ef5 100644
--- a/tensorflow/python/ops/ctc_ops.py
+++ b/tensorflow/python/ops/ctc_ops.py
@@ -95,7 +95,7 @@ def ctc_loss(inputs, labels, sequence_length,
ctc_merge_repeated: Boolean. Default: True.
Returns:
- A 1-D `float` `Tensor`, size `[batch]`, containing logits.
+ A 1-D `float` `Tensor`, size `[batch]`, containing the negative log probabilities.
Raises:
TypeError: if labels is not a `SparseTensor`.
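
With the corrected wording above, the value returned by tf.nn.ctc_loss is the negative log probability of each label sequence, so a per-example probability can be recovered by exponentiating its negation. A minimal sketch under the 0.9-era API (the toy tensors are illustrative only):

    import numpy as np
    import tensorflow as tf

    # Toy problem: 2 time steps, batch of 1, 3 classes (the last class is the CTC blank).
    inputs = tf.constant(np.random.randn(2, 1, 3).astype(np.float32))
    labels = tf.SparseTensor(indices=np.array([[0, 0]], dtype=np.int64),
                             values=np.array([1], dtype=np.int32),
                             shape=np.array([1, 1], dtype=np.int64))
    seq_len = tf.constant([2])

    loss = tf.nn.ctc_loss(inputs, labels, seq_len)  # 1-D, size [batch]
    prob = tf.exp(-loss)                            # probability of the label sequence

    with tf.Session() as sess:
        print(sess.run([loss, prob]))
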
diff --git a/tensorflow/python/ops/data_flow_ops.py b/tensorflow/python/ops/data_flow_ops.py
index 56dfde3bfb..9b1381eebf 100644
--- a/tensorflow/python/ops/data_flow_ops.py
+++ b/tensorflow/python/ops/data_flow_ops.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-# ==============================================================================
+#==============================================================================
"""Data Flow Operations."""
# pylint: disable=g-bad-name
diff --git a/tensorflow/python/ops/image_ops.py b/tensorflow/python/ops/image_ops.py
index 7502c99e41..a72690c0f2 100644
--- a/tensorflow/python/ops/image_ops.py
+++ b/tensorflow/python/ops/image_ops.py
@@ -170,6 +170,9 @@ from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import logging_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import check_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
@@ -177,6 +180,7 @@ from tensorflow.python.ops.gen_image_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import make_all
+from tensorflow.contrib.framework.python.framework import is_tensor
@@ -189,19 +193,50 @@ ops.NoGradient('SampleDistortedBoundingBox')
ops.NoGradient("ExtractGlimpse")
-def _ImageDimensions(images):
+def _assert(cond, ex_type, msg):
+ """A polymorphic assert, works with tensors and boolean expressions.
+
+ If `cond` is not a tensor, behave like an ordinary assert statement, except
+  that an empty list is returned. If `cond` is a tensor, return a list
+ containing a single TensorFlow assert op.
+
+ Args:
+    cond: Something that evaluates to a boolean value. May be a tensor.
+ ex_type: The exception class to use.
+ msg: The error message.
+
+ Returns:
+ A list, containing at most one assert op.
+ """
+ if is_tensor(cond):
+ return [logging_ops.Assert(cond, [msg])]
+ else:
+ if not cond:
+ raise ex_type(msg)
+ else:
+ return []
+
+
+def _ImageDimensions(images, static_only=True):
"""Returns the dimensions of an image tensor.
Args:
- images: 4-D Tensor of shape [batch, height, width, channels]
+ images: 4-D Tensor of shape `[batch, height, width, channels]`
+ static_only: Boolean, whether to return only static shape.
Returns:
- list of integers [batch, height, width, channels]
+ list of integers `[batch, height, width, channels]`, when static shape is
+ fully defined or `static_only` is `True`.
+ list of integer scalar tensors `[batch, height, width, channels]`, when
+ static shape is not fully defined.
"""
# A simple abstraction to provide names for each dimension. This abstraction
# should make it simpler to switch dimensions in the future (e.g. if we ever
# want to switch height and width.)
- return images.get_shape().as_list()
+ if static_only or images.get_shape().is_fully_defined():
+ return images.get_shape().as_list()
+ else:
+ return array_ops.unpack(array_ops.shape(images))
def _Check3DImage(image, require_static=True):
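
The `_assert` helper above captures the pattern used throughout the rewritten image ops: when an argument is a plain Python value the check runs eagerly at graph-construction time, and when it is a tensor the check is deferred to run time as an assert op that must gate the result. A standalone sketch of the same idea (the names here are illustrative, not library code):

    import tensorflow as tf

    def polymorphic_assert(cond, ex_type, msg):
        """Raise eagerly for Python values; return assert ops for tensors."""
        if isinstance(cond, tf.Tensor):
            return [tf.Assert(cond, [msg])]
        if not cond:
            raise ex_type(msg)
        return []

    offset = tf.placeholder(tf.int32, shape=[])   # value only known at run time
    checks = polymorphic_assert(offset >= 0, ValueError, "offset must be >= 0")
    with tf.control_dependencies(checks):
        safe_offset = tf.identity(offset)         # evaluating this runs the checks first
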
@@ -213,17 +248,27 @@ def _Check3DImage(image, require_static=True):
known and non-zero.
Raises:
- ValueError: if image.shape is not a [3] vector.
+ ValueError: if `image.shape` is not a 3-vector.
+
+ Returns:
+ An empty list, if `image` has fully defined dimensions. Otherwise, a list
+ containing an assert op is returned.
"""
try:
image_shape = image.get_shape().with_rank(3)
except ValueError:
- raise ValueError('\'image\' must be three-dimensional.')
+ raise ValueError("'image' must be three-dimensional.")
if require_static and not image_shape.is_fully_defined():
- raise ValueError('\'image\' must be fully defined.')
+ raise ValueError("'image' must be fully defined.")
if any(x == 0 for x in image_shape):
- raise ValueError('all dims of \'image.shape\' must be > 0: %s' %
+ raise ValueError("all dims of 'image.shape' must be > 0: %s" %
image_shape)
+ if not image_shape.is_fully_defined():
+ return [check_ops.assert_positive(array_ops.shape(image),
+ ["all dims of 'image.shape' "
+ "must be > 0."])]
+ else:
+ return []
def _CheckAtLeast3DImage(image):
@@ -429,36 +474,39 @@ def pad_to_bounding_box(image, offset_height, offset_width, target_height,
Raises:
ValueError: If the shape of `image` is incompatible with the `offset_*` or
- `target_*` arguments
+ `target_*` arguments, or either `offset_height` or `offset_width` is
+ negative.
"""
image = ops.convert_to_tensor(image, name='image')
- _Check3DImage(image, require_static=True)
- height, width, depth = _ImageDimensions(image)
- if target_width < width:
- raise ValueError('target_width must be >= width')
- if target_height < height:
- raise ValueError('target_height must be >= height')
+ assert_ops = []
+ assert_ops += _Check3DImage(image, require_static=False)
+ height, width, depth = _ImageDimensions(image, static_only=False)
after_padding_width = target_width - offset_width - width
after_padding_height = target_height - offset_height - height
- if after_padding_width < 0:
- raise ValueError('target_width not possible given '
- 'offset_width and image width')
- if after_padding_height < 0:
- raise ValueError('target_height not possible given '
- 'offset_height and image height')
+ assert_ops += _assert(offset_height >= 0, ValueError,
+ 'offset_height must be >= 0')
+ assert_ops += _assert(offset_width >= 0, ValueError,
+ 'offset_width must be >= 0')
+ assert_ops += _assert(after_padding_width >= 0, ValueError,
+ 'width must be <= target - offset')
+ assert_ops += _assert(after_padding_height >= 0, ValueError,
+ 'height must be <= target - offset')
+ image = control_flow_ops.with_dependencies(assert_ops, image)
# Do not pad on the depth dimensions.
- if (offset_width or offset_height or after_padding_width or
- after_padding_height):
- paddings = [[offset_height, after_padding_height],
- [offset_width, after_padding_width], [0, 0]]
- padded = array_ops.pad(image, paddings)
- padded.set_shape([target_height, target_width, depth])
- else:
- padded = image
+ paddings = array_ops.reshape(
+ array_ops.pack([offset_height, after_padding_height,
+ offset_width, after_padding_width,
+ 0, 0]),
+ [3, 2])
+ padded = array_ops.pad(image, paddings)
+
+ padded_shape = [None if is_tensor(i) else i
+ for i in [target_height, target_width, depth]]
+ padded.set_shape(padded_shape)
return padded
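
With the rewrite above, pad_to_bounding_box no longer demands a fully defined static shape: checks that cannot be resolved while building the graph are emitted as assert ops, and the padding amounts are computed from the runtime shape. A hedged usage sketch with an input whose height and width are unknown until run time:

    import numpy as np
    import tensorflow as tf

    image = tf.placeholder(tf.float32, shape=[None, None, 3])   # height/width unknown
    padded = tf.image.pad_to_bounding_box(image, 1, 1, 6, 8)    # offset (1, 1), target 6x8

    with tf.Session() as sess:
        out = sess.run(padded, feed_dict={image: np.ones([4, 5, 3], np.float32)})
        print(out.shape)   # (6, 8, 3)
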
@@ -486,24 +534,38 @@ def crop_to_bounding_box(image, offset_height, offset_width, target_height,
Raises:
ValueError: If the shape of `image` is incompatible with the `offset_*` or
- `target_*` arguments
+ `target_*` arguments, or either `offset_height` or `offset_width` is
+ negative, or either `target_height` or `target_width` is not positive.
"""
image = ops.convert_to_tensor(image, name='image')
- _Check3DImage(image, require_static=True)
- height, width, _ = _ImageDimensions(image)
-
- if offset_width < 0:
- raise ValueError('offset_width must be >= 0.')
- if offset_height < 0:
- raise ValueError('offset_height must be >= 0.')
-
- if width < (target_width + offset_width):
- raise ValueError('width must be >= target + offset.')
- if height < (target_height + offset_height):
- raise ValueError('height must be >= target + offset.')
- cropped = array_ops.slice(image, [offset_height, offset_width, 0],
- [target_height, target_width, -1])
+ assert_ops = []
+ assert_ops += _Check3DImage(image, require_static=False)
+
+ height, width, depth = _ImageDimensions(image, static_only=False)
+
+ assert_ops += _assert(offset_width >= 0, ValueError,
+ 'offset_width must be >= 0.')
+ assert_ops += _assert(offset_height >= 0, ValueError,
+ 'offset_height must be >= 0.')
+ assert_ops += _assert(target_width > 0, ValueError,
+ 'target_width must be > 0.')
+ assert_ops += _assert(target_height > 0, ValueError,
+ 'target_height must be > 0.')
+ assert_ops += _assert(width >= (target_width + offset_width), ValueError,
+ 'width must be >= target + offset.')
+ assert_ops += _assert(height >= (target_height + offset_height), ValueError,
+ 'height must be >= target + offset.')
+ image = control_flow_ops.with_dependencies(assert_ops, image)
+
+ cropped = array_ops.slice(
+ image,
+ array_ops.pack([offset_height, offset_width, 0]),
+ array_ops.pack([target_height, target_width, -1]))
+
+ cropped_shape = [None if is_tensor(i) else i
+ for i in [target_height, target_width, depth]]
+ cropped.set_shape(cropped_shape)
return cropped
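
A practical consequence of routing the crop_to_bounding_box checks through `_assert`: invalid Python arguments still fail at graph-construction time with a ValueError, while the same condition on tensor arguments only surfaces when the graph runs. A sketch of both behaviours (assuming the runtime failure arrives as tf.errors.InvalidArgumentError raised by the assert op):

    import numpy as np
    import tensorflow as tf

    image = np.zeros([4, 4, 1], np.float32)

    # Static inputs: the check fails while the graph is being built.
    try:
        tf.image.crop_to_bounding_box(image, -1, 0, 3, 3)
    except ValueError as e:
        print(e)   # offset_height must be >= 0.

    # Tensor inputs: the same check becomes an assert op that fails at run time.
    offset = tf.placeholder(tf.int32, shape=[])
    cropped = tf.image.crop_to_bounding_box(image, offset, 0, 3, 3)
    with tf.Session() as sess:
        try:
            sess.run(cropped, feed_dict={offset: -1})
        except tf.errors.InvalidArgumentError:
            print("runtime assertion failed")
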
@@ -521,7 +583,7 @@ def resize_image_with_crop_or_pad(image, target_height, target_width):
dimension.
Args:
- image: 3-D tensor of shape [height, width, channels]
+ image: 3-D tensor of shape `[height, width, channels]`
target_height: Target height.
target_width: Target width.
@@ -533,43 +595,73 @@ def resize_image_with_crop_or_pad(image, target_height, target_width):
`[target_height, target_width, channels]`
"""
image = ops.convert_to_tensor(image, name='image')
- _Check3DImage(image, require_static=True)
- original_height, original_width, _ = _ImageDimensions(image)
-
- if target_width <= 0:
- raise ValueError('target_width must be > 0.')
- if target_height <= 0:
- raise ValueError('target_height must be > 0.')
-
- offset_crop_width = 0
- offset_pad_width = 0
- if target_width < original_width:
- offset_crop_width = (original_width - target_width) // 2
- elif target_width > original_width:
- offset_pad_width = (target_width - original_width) // 2
-
- offset_crop_height = 0
- offset_pad_height = 0
- if target_height < original_height:
- offset_crop_height = (original_height - target_height) // 2
- elif target_height > original_height:
- offset_pad_height = (target_height - original_height) // 2
+
+ assert_ops = []
+ assert_ops += _Check3DImage(image, require_static=False)
+ assert_ops += _assert(target_width > 0, ValueError,
+ 'target_width must be > 0.')
+ assert_ops += _assert(target_height > 0, ValueError,
+ 'target_height must be > 0.')
+
+ image = control_flow_ops.with_dependencies(assert_ops, image)
+ # `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.
+ # Make sure our checks come first, so that error messages are clearer.
+ if is_tensor(target_height):
+ target_height = control_flow_ops.with_dependencies(
+ assert_ops, target_height)
+ if is_tensor(target_width):
+ target_width = control_flow_ops.with_dependencies(assert_ops, target_width)
+
+ def max_(x, y):
+ if is_tensor(x) or is_tensor(y):
+ return math_ops.maximum(x, y)
+ else:
+ return max(x, y)
+
+ def min_(x, y):
+ if is_tensor(x) or is_tensor(y):
+ return math_ops.minimum(x, y)
+ else:
+ return min(x, y)
+
+ def equal_(x, y):
+ if is_tensor(x) or is_tensor(y):
+ return math_ops.equal(x, y)
+ else:
+ return x == y
+
+ height, width, _ = _ImageDimensions(image, static_only=False)
+ width_diff = target_width - width
+ offset_crop_width = max_(-width_diff // 2, 0)
+ offset_pad_width = max_(width_diff // 2, 0)
+
+ height_diff = target_height - height
+ offset_crop_height = max_(-height_diff // 2, 0)
+ offset_pad_height = max_(height_diff // 2, 0)
# Maybe crop if needed.
cropped = crop_to_bounding_box(image, offset_crop_height, offset_crop_width,
- min(target_height, original_height),
- min(target_width, original_width))
+ min_(target_height, height),
+ min_(target_width, width))
# Maybe pad if needed.
resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,
target_height, target_width)
+ # In theory all the checks below are redundant.
if resized.get_shape().ndims is None:
raise ValueError('resized contains no shape.')
- if not resized.get_shape()[0].is_compatible_with(target_height):
- raise ValueError('resized height is not correct.')
- if not resized.get_shape()[1].is_compatible_with(target_width):
- raise ValueError('resized width is not correct.')
+
+ resized_height, resized_width, _ = \
+ _ImageDimensions(resized, static_only=False)
+
+ assert_ops = []
+ assert_ops += _assert(equal_(resized_height, target_height), ValueError,
+ 'resized height is not correct.')
+ assert_ops += _assert(equal_(resized_width, target_width), ValueError,
+ 'resized width is not correct.')
+
+ resized = control_flow_ops.with_dependencies(assert_ops, resized)
return resized
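
The reworked resize_image_with_crop_or_pad now delegates to the dynamic crop_to_bounding_box and pad_to_bounding_box above, so it accepts inputs whose size is only known at run time and can crop along one axis while padding along the other. A hedged sketch matching the 2x4 to 4x2 case exercised in the updated tests below:

    import numpy as np
    import tensorflow as tf

    image = tf.placeholder(tf.float32, shape=[None, None, 1])   # size unknown until run time
    resized = tf.image.resize_image_with_crop_or_pad(image, 4, 2)

    with tf.Session() as sess:
        x = np.arange(8, dtype=np.float32).reshape(2, 4, 1)
        out = sess.run(resized, feed_dict={image: x})
        print(out.shape)   # (4, 2, 1): padded along rows, cropped along columns
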
diff --git a/tensorflow/python/ops/image_ops_test.py b/tensorflow/python/ops/image_ops_test.py
index db29f74045..7f5109d7df 100644
--- a/tensorflow/python/ops/image_ops_test.py
+++ b/tensorflow/python/ops/image_ops_test.py
@@ -26,6 +26,7 @@ from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
@@ -478,39 +479,144 @@ class PerImageWhiteningTest(test_util.TensorFlowTestCase):
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
- def testNoOp(self):
- x_shape = [13, 9, 3]
- x_np = np.ones(x_shape, dtype=np.float32)
-
- with self.test_session():
- x = constant_op.constant(x_np, shape=x_shape)
- target_height = x_shape[0]
- target_width = x_shape[1]
- y = image_ops.crop_to_bounding_box(x, 0, 0, target_height, target_width)
- y_tf = y.eval()
- self.assertAllEqual(y_tf, x_np)
-
- def testCropping(self):
- x_np = np.arange(0, 30, dtype=np.int32).reshape([6, 5, 1])
-
- offset_height = 1
- after_height = 2
+ def _CropToBoundingBox(self, x, offset_height, offset_width,
+ target_height, target_width, use_tensor_inputs):
+ if use_tensor_inputs:
+ offset_height = ops.convert_to_tensor(offset_height)
+ offset_width = ops.convert_to_tensor(offset_width)
+ target_height = ops.convert_to_tensor(target_height)
+ target_width = ops.convert_to_tensor(target_width)
+ x_tensor = array_ops.placeholder(x.dtype, shape=[None]*x.ndim)
+ feed_dict = {x_tensor: x}
+ else:
+ x_tensor = x
+ feed_dict = {}
- offset_width = 0
- after_width = 3
+ y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
+ target_height, target_width)
+ if not use_tensor_inputs:
+ self.assertTrue(y.get_shape().is_fully_defined())
- target_height = x_np.shape[0] - offset_height - after_height
- target_width = x_np.shape[1] - offset_width - after_width
+ with self.test_session():
+ return y.eval(feed_dict=feed_dict)
+
+ def _assertReturns(self, x, x_shape, offset_height, offset_width,
+ y, y_shape, use_tensor_inputs_options=None):
+ use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
+ target_height, target_width, _ = y_shape
+ x = np.array(x).reshape(x_shape)
+ y = np.array(y).reshape(y_shape)
+
+ for use_tensor_inputs in use_tensor_inputs_options:
+ y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
+ target_height, target_width,
+ use_tensor_inputs)
+ self.assertAllClose(y, y_tf)
+
+ def _assertRaises(self, x, x_shape, offset_height, offset_width,
+ target_height, target_width, err_msg,
+ use_tensor_inputs_options=None):
+ use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
+ x = np.array(x).reshape(x_shape)
+
+ for use_tensor_inputs in use_tensor_inputs_options:
+ try:
+ self._CropToBoundingBox(x, offset_height, offset_width,
+ target_height, target_width,
+ use_tensor_inputs)
+ except Exception as e:
+ if err_msg not in str(e):
+ raise
+ else:
+ raise AssertionError('Exception not raised: %s' % err_msg)
- y_np = x_np[offset_height:offset_height + target_height,
- offset_width:offset_width + target_width, :]
+ def testNoOp(self):
+ x_shape = [10, 10, 10]
+ x = np.random.uniform(size=x_shape)
+ self._assertReturns(x, x_shape, 0, 0, x, x_shape)
- with self.test_session():
- x = constant_op.constant(x_np, shape=x_np.shape)
- y = image_ops.crop_to_bounding_box(x, offset_height, offset_width,
- target_height, target_width)
- y_tf = y.eval()
- self.assertAllEqual(y_tf.flatten(), y_np.flatten())
+ def testCrop(self):
+ x = [1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9]
+ x_shape = [3, 3, 1]
+
+ offset_height, offset_width = [1, 0]
+ y_shape = [2, 3, 1]
+ y = [4, 5, 6,
+ 7, 8, 9]
+ self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
+
+ offset_height, offset_width = [0, 1]
+ y_shape = [3, 2, 1]
+ y = [2, 3,
+ 5, 6,
+ 8, 9]
+ self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
+
+ offset_height, offset_width = [0, 0]
+ y_shape = [2, 3, 1]
+ y = [1, 2, 3,
+ 4, 5, 6]
+ self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
+
+ offset_height, offset_width = [0, 0]
+ y_shape = [3, 2, 1]
+ y = [1, 2,
+ 4, 5,
+ 7, 8]
+ self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
+
+ def testNon3DInput(self):
+ # Input image is not 3D
+ x = [0] * 15
+ offset_height, offset_width = [0, 0]
+ target_height, target_width = [2, 2]
+
+ for x_shape in ([1, 3, 5, 1], [3, 5]):
+ self._assertRaises(x, x_shape, offset_height, offset_width,
+ target_height, target_width,
+ "'image' must be three-dimensional")
+
+ def testZeroLengthInput(self):
+ # Input image has 0-length dimension(s).
+ # Each line is a test configuration:
+ # x_shape, target_height, target_width
+ test_config = (([0, 2, 2], 1, 1),
+ ([2, 0, 2], 1, 1),
+ ([2, 2, 0], 1, 1),
+ ([0, 2, 2], 0, 1),
+ ([2, 0, 2], 1, 0))
+ offset_height, offset_width = [0, 0]
+ x = []
+
+ for x_shape, target_height, target_width in test_config:
+ self._assertRaises(x, x_shape, offset_height, offset_width,
+ target_height, target_width,
+ "all dims of 'image.shape' must be > 0",
+ use_tensor_inputs_options=[False])
+      # Multiple assertions could fail, but the evaluation order is arbitrary.
+      # Match against a generic pattern.
+ self._assertRaises(x, x_shape, offset_height, offset_width,
+ target_height, target_width,
+ "assertion failed:",
+ use_tensor_inputs_options=[True])
+
+ def testBadParams(self):
+ x_shape = [4, 4, 1]
+ x = np.zeros(x_shape)
+
+ # Each line is a test configuration:
+ # (offset_height, offset_width, target_height, target_width), err_msg
+ test_config = (([-1, 0, 3, 3], 'offset_height must be >= 0'),
+ ([0, -1, 3, 3], 'offset_width must be >= 0'),
+ ([0, 0, 0, 3], 'target_height must be > 0'),
+ ([0, 0, 3, 0], 'target_width must be > 0'),
+ ([2, 0, 3, 3], 'height must be >= target + offset'),
+ ([0, 2, 3, 3], 'width must be >= target + offset'))
+
+ for params, err_msg in test_config:
+ self._assertRaises(x, x_shape, *params, err_msg=err_msg)
class CentralCropTest(test_util.TensorFlowTestCase):
@@ -550,45 +656,148 @@ class CentralCropTest(test_util.TensorFlowTestCase):
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
- def testNoOp(self):
- x_shape = [13, 9, 3]
- x_np = np.ones(x_shape, dtype=np.float32)
+ def _PadToBoundingBox(self, x, offset_height, offset_width,
+ target_height, target_width, use_tensor_inputs):
+ if use_tensor_inputs:
+ offset_height = ops.convert_to_tensor(offset_height)
+ offset_width = ops.convert_to_tensor(offset_width)
+ target_height = ops.convert_to_tensor(target_height)
+ target_width = ops.convert_to_tensor(target_width)
+ x_tensor = array_ops.placeholder(x.dtype, shape=[None]*x.ndim)
+ feed_dict = {x_tensor: x}
+ else:
+ x_tensor = x
+ feed_dict = {}
- target_height = x_shape[0]
- target_width = x_shape[1]
+ y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
+ target_height, target_width)
+ if not use_tensor_inputs:
+ self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session():
- x = constant_op.constant(x_np, shape=x_shape)
- y = image_ops.pad_to_bounding_box(x, 0, 0, target_height, target_width)
- y_tf = y.eval()
- self.assertAllEqual(y_tf, x_np)
-
- def testPadding(self):
- x_shape = [3, 4, 1]
- x_np = np.ones(x_shape, dtype=np.float32)
-
- offset_height = 2
- after_height = 3
-
- offset_width = 1
- after_width = 4
+ return y.eval(feed_dict=feed_dict)
+
+ def _assertReturns(self, x, x_shape, offset_height, offset_width,
+ y, y_shape, use_tensor_inputs_options=None):
+ use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
+ target_height, target_width, _ = y_shape
+ x = np.array(x).reshape(x_shape)
+ y = np.array(y).reshape(y_shape)
+
+ for use_tensor_inputs in use_tensor_inputs_options:
+ y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
+ target_height, target_width,
+ use_tensor_inputs)
+ self.assertAllClose(y, y_tf)
+
+ def _assertRaises(self, x, x_shape, offset_height, offset_width,
+ target_height, target_width, err_msg,
+ use_tensor_inputs_options=None):
+ use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
+ x = np.array(x).reshape(x_shape)
+
+ for use_tensor_inputs in use_tensor_inputs_options:
+ try:
+ self._PadToBoundingBox(x, offset_height, offset_width,
+ target_height, target_width,
+ use_tensor_inputs)
+ except Exception as e:
+ if err_msg not in str(e):
+ raise
+ else:
+ raise AssertionError('Exception not raised: %s' % err_msg)
- target_height = x_shape[0] + offset_height + after_height
- target_width = x_shape[1] + offset_width + after_width
-
- # Note the padding are along batch, height, width and depth.
- paddings = ((offset_height, after_height),
- (offset_width, after_width),
- (0, 0))
+ def testNoOp(self):
+ x_shape = [10, 10, 10]
+ x = np.random.uniform(size=x_shape)
+ offset_height, offset_width = [0, 0]
- y_np = np.pad(x_np, paddings, 'constant')
+ self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
- with self.test_session():
- x = constant_op.constant(x_np, shape=x_shape)
- y = image_ops.pad_to_bounding_box(x, offset_height, offset_width,
- target_height, target_width)
- y_tf = y.eval()
- self.assertAllEqual(y_tf, y_np)
+ def testPadding(self):
+ x = [1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9]
+ x_shape = [3, 3, 1]
+
+ offset_height, offset_width = [1, 0]
+ y = [0, 0, 0,
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9]
+ y_shape = [4, 3, 1]
+ self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
+
+ offset_height, offset_width = [0, 1]
+ y = [0, 1, 2, 3,
+ 0, 4, 5, 6,
+ 0, 7, 8, 9]
+ y_shape = [3, 4, 1]
+ self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
+
+ offset_height, offset_width = [0, 0]
+ y = [1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9,
+ 0, 0, 0]
+ y_shape = [4, 3, 1]
+ self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
+
+ offset_height, offset_width = [0, 0]
+ y = [1, 2, 3, 0,
+ 4, 5, 6, 0,
+ 7, 8, 9, 0]
+ y_shape = [3, 4, 1]
+ self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
+
+ def testNon3DInput(self):
+ # Input image is not 3D
+ x = [0] * 15
+ offset_height, offset_width = [0, 0]
+ target_height, target_width = [2, 2]
+
+ for x_shape in ([1, 3, 5, 1], [3, 5]):
+ self._assertRaises(x, x_shape, offset_height, offset_width,
+ target_height, target_width,
+ "'image' must be three-dimensional")
+
+ def testZeroLengthInput(self):
+ # Input image has 0-length dimension(s).
+ # Each line is a test configuration:
+ # x_shape, target_height, target_width
+ test_config = (([0, 2, 2], 2, 2),
+ ([2, 0, 2], 2, 2),
+ ([2, 2, 0], 2, 2))
+ offset_height, offset_width = [0, 0]
+ x = []
+
+ for x_shape, target_height, target_width in test_config:
+ self._assertRaises(x, x_shape, offset_height, offset_width,
+ target_height, target_width,
+ "all dims of 'image.shape' must be > 0",
+ use_tensor_inputs_options=[False])
+
+    # The original error message does not contain backslashes. However, they
+    # are added by either the assert op or the runtime. If this behaviour
+    # changes in the future, the match string will also need to be changed.
+ self._assertRaises(x, x_shape, offset_height, offset_width,
+ target_height, target_width,
+ "all dims of \\'image.shape\\' must be > 0",
+ use_tensor_inputs_options=[True])
+
+ def testBadParams(self):
+ x_shape = [3, 3, 1]
+ x = np.zeros(x_shape)
+
+ # Each line is a test configuration:
+ # offset_height, offset_width, target_height, target_width, err_msg
+ test_config = ((-1, 0, 4, 4, 'offset_height must be >= 0'),
+ ( 0,-1, 4, 4, 'offset_width must be >= 0'),
+ ( 2, 0, 4, 4, 'height must be <= target - offset'),
+ ( 0, 2, 4, 4, 'width must be <= target - offset'))
+
+ for config_item in test_config:
+ self._assertRaises(x, x_shape, *config_item)
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
@@ -998,128 +1207,230 @@ class ResizeImagesTest(test_util.TensorFlowTestCase):
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
- def _ResizeImageWithCropOrPad(self, original, original_shape,
- expected, expected_shape):
- x_np = np.array(original, dtype=np.uint8).reshape(original_shape)
- y_np = np.array(expected).reshape(expected_shape)
+ def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
+ use_tensor_inputs):
+ if use_tensor_inputs:
+ target_height = ops.convert_to_tensor(target_height)
+ target_width = ops.convert_to_tensor(target_width)
+ x_tensor = array_ops.placeholder(x.dtype, shape=[None]*x.ndim)
+ feed_dict = {x_tensor: x}
+ else:
+ x_tensor = x
+ feed_dict = {}
- target_height = expected_shape[0]
- target_width = expected_shape[1]
+ y = image_ops.resize_image_with_crop_or_pad(
+ x_tensor, target_height, target_width)
+ if not use_tensor_inputs:
+ self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session():
- image = constant_op.constant(x_np, shape=original_shape)
- y = image_ops.resize_image_with_crop_or_pad(image,
- target_height,
- target_width)
- resized = y.eval()
- self.assertAllClose(resized, y_np, atol=1e-5)
+ return y.eval(feed_dict=feed_dict)
+
+ def _assertReturns(self, x, x_shape, y, y_shape,
+ use_tensor_inputs_options=None):
+ use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
+ target_height, target_width, _ = y_shape
+ x = np.array(x).reshape(x_shape)
+ y = np.array(y).reshape(y_shape)
+
+ for use_tensor_inputs in use_tensor_inputs_options:
+ y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
+ use_tensor_inputs)
+ self.assertAllClose(y, y_tf)
+
+ def _assertRaises(self, x, x_shape, target_height, target_width, err_msg,
+ use_tensor_inputs_options=None):
+ use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
+ x = np.array(x).reshape(x_shape)
+
+ for use_tensor_inputs in use_tensor_inputs_options:
+ try:
+ self._ResizeImageWithCropOrPad(x, target_height, target_width,
+ use_tensor_inputs)
+ except Exception as e:
+ if err_msg not in str(e):
+ raise
+ else:
+ raise AssertionError('Exception not raised: %s' % err_msg)
- def testBasic(self):
- # Basic no-op.
- original = [1, 2, 3, 4,
- 5, 6, 7, 8]
- self._ResizeImageWithCropOrPad(original, [2, 4, 1],
- original, [2, 4, 1])
+ def testNoOp(self):
+ x_shape = [10, 10, 10]
+ x = np.random.uniform(size=x_shape)
+
+ self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Pad even along col.
- original = [1, 2, 3, 4, 5, 6, 7, 8]
- expected = [0, 1, 2, 3, 4, 0,
- 0, 5, 6, 7, 8, 0]
- self._ResizeImageWithCropOrPad(original, [2, 4, 1],
- expected, [2, 6, 1])
+ x = [1, 2, 3, 4,
+ 5, 6, 7, 8]
+ x_shape = [2, 4, 1]
+
+ y = [0, 1, 2, 3, 4, 0,
+ 0, 5, 6, 7, 8, 0]
+ y_shape = [2, 6, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
+
# Pad odd along col.
- original = [1, 2, 3, 4,
- 5, 6, 7, 8]
- expected = [0, 1, 2, 3, 4, 0, 0,
- 0, 5, 6, 7, 8, 0, 0]
- self._ResizeImageWithCropOrPad(original, [2, 4, 1],
- expected, [2, 7, 1])
+ x = [1, 2, 3, 4,
+ 5, 6, 7, 8]
+ x_shape = [2, 4, 1]
+
+ y = [0, 1, 2, 3, 4, 0, 0,
+ 0, 5, 6, 7, 8, 0, 0]
+ y_shape = [2, 7, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
- original = [1, 2, 3, 4,
- 5, 6, 7, 8]
- expected = [0, 0, 0, 0,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 0, 0, 0, 0]
- self._ResizeImageWithCropOrPad(original, [2, 4, 1],
- expected, [4, 4, 1])
+ x = [1, 2, 3, 4,
+ 5, 6, 7, 8]
+ x_shape = [2, 4, 1]
+
+ y = [0, 0, 0, 0,
+ 1, 2, 3, 4,
+ 5, 6, 7, 8,
+ 0, 0, 0, 0]
+ y_shape = [4, 4, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
+
# Pad odd along row.
- original = [1, 2, 3, 4,
- 5, 6, 7, 8]
- expected = [0, 0, 0, 0,
- 1, 2, 3, 4,
- 5, 6, 7, 8,
- 0, 0, 0, 0,
- 0, 0, 0, 0]
- self._ResizeImageWithCropOrPad(original, [2, 4, 1],
- expected, [5, 4, 1])
+ x = [1, 2, 3, 4,
+ 5, 6, 7, 8]
+ x_shape = [2, 4, 1]
+
+ y = [0, 0, 0, 0,
+ 1, 2, 3, 4,
+ 5, 6, 7, 8,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0]
+ y_shape = [5, 4, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
- original = [1, 2, 3, 4,
- 5, 6, 7, 8]
- expected = [2, 3,
- 6, 7]
- self._ResizeImageWithCropOrPad(original, [2, 4, 1],
- expected, [2, 2, 1])
+ x = [1, 2, 3, 4,
+ 5, 6, 7, 8]
+ x_shape = [2, 4, 1]
+
+ y = [2, 3,
+ 6, 7]
+ y_shape = [2, 2, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
+
# Crop odd along col.
+ x = [1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12]
+ x_shape = [2, 6, 1]
- original = [1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12]
- expected = [2, 3, 4,
- 8, 9, 10]
- self._ResizeImageWithCropOrPad(original, [2, 6, 1],
- expected, [2, 3, 1])
+ y = [2, 3, 4,
+ 8, 9, 10]
+ y_shape = [2, 3, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
- original = [1, 2,
- 3, 4,
- 5, 6,
- 7, 8]
- expected = [3, 4,
- 5, 6]
- self._ResizeImageWithCropOrPad(original, [4, 2, 1],
- expected, [2, 2, 1])
+ x = [1, 2,
+ 3, 4,
+ 5, 6,
+ 7, 8]
+ x_shape = [4, 2, 1]
+
+ y = [3, 4,
+ 5, 6]
+ y_shape = [2, 2, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
- original = [1, 2,
- 3, 4,
- 5, 6,
- 7, 8,
- 9, 10,
- 11, 12,
- 13, 14,
- 15, 16]
- expected = [3, 4,
- 5, 6,
- 7, 8,
- 9, 10,
- 11, 12]
- self._ResizeImageWithCropOrPad(original, [8, 2, 1],
- expected, [5, 2, 1])
+ x = [1, 2,
+ 3, 4,
+ 5, 6,
+ 7, 8,
+ 9, 10,
+ 11, 12,
+ 13, 14,
+ 15, 16]
+ x_shape = [8, 2, 1]
+
+ y = [3, 4,
+ 5, 6,
+ 7, 8,
+ 9, 10,
+ 11, 12]
+ y_shape = [5, 2, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
def testCropAndPad(self):
# Pad along row but crop along col.
- original = [1, 2, 3, 4,
- 5, 6, 7, 8]
- expected = [0, 0,
- 2, 3,
- 6, 7,
- 0, 0]
- self._ResizeImageWithCropOrPad(original, [2, 4, 1],
- expected, [4, 2, 1])
+ x = [1, 2, 3, 4,
+ 5, 6, 7, 8]
+ x_shape = [2, 4, 1]
+
+ y = [0, 0,
+ 2, 3,
+ 6, 7,
+ 0, 0]
+ y_shape = [4, 2, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
- original = [1, 2,
- 3, 4,
- 5, 6,
- 7, 8]
- expected = [0, 3, 4, 0,
- 0, 5, 6, 0]
- self._ResizeImageWithCropOrPad(original, [4, 2, 1],
- expected, [2, 4, 1])
+ x = [1, 2,
+ 3, 4,
+ 5, 6,
+ 7, 8]
+ x_shape = [4, 2, 1]
+
+ y = [0, 3, 4, 0,
+ 0, 5, 6, 0]
+ y_shape = [2, 4, 1]
+
+ self._assertReturns(x, x_shape, y, y_shape)
+
+ def testNon3DInput(self):
+ # Input image is not 3D
+ x = [0] * 15
+ target_height, target_width = [4, 4]
+
+ for x_shape in ([1, 3, 5, 1], [3, 5]):
+ self._assertRaises(x, x_shape, target_height, target_width,
+ "'image' must be three-dimensional")
+
+ def testZeroLengthInput(self):
+ # Input image has 0-length dimension(s).
+ target_height, target_width = [1, 1]
+ x = []
+
+ for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
+ self._assertRaises(x, x_shape, target_height, target_width,
+ "all dims of 'image.shape' must be > 0",
+ use_tensor_inputs_options=[False])
+
+    # The original error message does not contain backslashes. However, they
+    # are added by either the assert op or the runtime. If this behaviour
+    # changes in the future, the match string will also need to be changed.
+ self._assertRaises(x, x_shape, target_height, target_width,
+ "all dims of \\'image.shape\\' must be > 0",
+ use_tensor_inputs_options=[True])
+
+ def testBadParams(self):
+ x_shape = [4, 4, 1]
+ x = np.zeros(x_shape)
+
+ # target_height <= 0
+ target_height, target_width = [0, 5]
+ self._assertRaises(x, x_shape, target_height, target_width,
+ 'target_height must be > 0')
+
+ # target_width <= 0
+ target_height, target_width = [5, 0]
+ self._assertRaises(x, x_shape, target_height, target_width,
+ 'target_width must be > 0')
def _SimpleColorRamp():
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index d27cefc61d..0a76450c5b 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -1548,17 +1548,20 @@ def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
- x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
- or `qint32`.
+ x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
+ `complex64`, `int64`, or `qint32`.
name: A name for the operation (optional).
Returns:
- A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
- the return type is `quint8`.
+ A Tensor or SparseTensor respectively with the same type as `x` if
+ `x.dtype != qint32` otherwise the return type is `quint8`.
"""
with ops.op_scope([x], name, "Tanh") as name:
- x = ops.convert_to_tensor(x, name="x")
- return gen_math_ops._tanh(x, name=name)
+ if isinstance(x, ops.SparseTensor):
+ x_tanh = gen_math_ops._tanh(x.values, name=name)
+ return ops.SparseTensor(indices=x.indices, values=x_tanh, shape=x.shape)
+ else:
+ return gen_math_ops._tanh(x, name=name)
ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
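
With the change above, tf.tanh also accepts a SparseTensor, applying the op element-wise to its values and returning a SparseTensor with the same indices and dense shape (this is what the new sparse cases in cwise_ops_test.py exercise). A minimal sketch:

    import numpy as np
    import tensorflow as tf

    sp = tf.SparseTensor(indices=np.array([[0, 0], [1, 2]], dtype=np.int64),
                         values=np.array([1.0, -2.0], dtype=np.float32),
                         shape=np.array([3, 4], dtype=np.int64))
    sp_tanh = tf.tanh(sp)   # still a SparseTensor; only the stored values change

    with tf.Session() as sess:
        print(sess.run(sp_tanh.values))   # [tanh(1.0), tanh(-2.0)]
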
diff --git a/tensorflow/python/ops/nn.py b/tensorflow/python/ops/nn.py
index f798160111..79030645f2 100644
--- a/tensorflow/python/ops/nn.py
+++ b/tensorflow/python/ops/nn.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-# ==============================================================================
+# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""## Activation Functions
diff --git a/tensorflow/python/ops/nn_conv_test.py b/tensorflow/python/ops/nn_conv_test.py
index 8c771fc2a3..3d3425292c 100644
--- a/tensorflow/python/ops/nn_conv_test.py
+++ b/tensorflow/python/ops/nn_conv_test.py
@@ -319,6 +319,195 @@ class Conv2DBackpropFilterGradTest(tf.test.TestCase):
self.assertLess(err, err_tolerance)
+class Conv3DTransposeTest(tf.test.TestCase):
+
+ def testConv3DTransposeSingleStride(self):
+ with self.test_session():
+ strides = [1, 1, 1, 1, 1]
+
+ # Input, output: [batch, depth, height, width, channel]
+ x_shape = [2, 5, 6, 4, 3]
+ y_shape = [2, 5, 6, 4, 2]
+
+ # Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
+ f_shape = [3, 3, 3, 2, 3]
+
+ x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
+ f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
+ output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
+ padding="SAME")
+ value = output.eval()
+
+ # We count the number of cells being added at the locations in the output.
+ # At the center, #cells = kernel_depth * kernel_height * kernel_width
+ # At the corners, #cells = ceil(kernel_depth/2) * ceil(kernel_height/2)
+ # * ceil(kernel_width/2)
+ # At the edges, #cells =
+ # kernel_depth * ceil(kernel_height/2) * ceil(kernel_width/2) or
+ # ceil(kernel_depth/2) * kernel_height * ceil(kernel_width/2) or
+ # ceil(kernel_depth/2) * ceil(kernel_height/2) * kernel_width
+ # At the borders, #cells =
+ # ceil(kernel_depth/2) * kernel_height * kernel_width or
+ # kernel_depth * ceil(kernel_height/2) * kernel_width or
+ # kernel_depth * kernel_height * ceil(kernel_width/2)
+
+ for n in xrange(x_shape[0]):
+ for k in xrange(f_shape[3]):
+ for w in xrange(y_shape[3]):
+ for h in xrange(y_shape[2]):
+ for d in xrange(y_shape[1]):
+ d_in = d > 0 and d < y_shape[1] - 1
+ h_in = h > 0 and h < y_shape[2] - 1
+ w_in = w > 0 and w < y_shape[3] - 1
+ if d_in + h_in + w_in == 3:
+ target = 27 * 3.0
+ elif d_in + h_in + w_in == 2:
+ target = 18 * 3.0
+ elif d_in or h_in or w_in:
+ target = 12 * 3.0
+ else:
+ target = 8 * 3.0
+ self.assertAllClose(target, value[n, d, h, w, k])
+
+ def testConv3DTransposeSame(self):
+ with self.test_session():
+ strides = [1, 2, 2, 2, 1]
+
+      # Input, output: [batch, depth, height, width, channel]
+ x_shape = [2, 5, 6, 4, 3]
+ y_shape = [2, 10, 12, 8, 2]
+
+ # Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
+ f_shape = [3, 3, 3, 2, 3]
+
+ x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
+ f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
+ output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
+ padding="SAME")
+ value = output.eval()
+
+ for n in xrange(x_shape[0]):
+ for k in xrange(f_shape[3]):
+ for w in xrange(y_shape[3]):
+ for h in xrange(y_shape[2]):
+ for d in xrange(y_shape[1]):
+ # We add a case for locations divisible by the stride.
+ d_in = d % strides[1] == 0 and 0 < d < y_shape[1] - 1
+ h_in = h % strides[2] == 0 and 0 < h < y_shape[2] - 1
+ w_in = w % strides[3] == 0 and 0 < w < y_shape[3] - 1
+ if d_in + h_in + w_in == 3:
+ target = 8 * 3.0
+ elif d_in + h_in + w_in == 2:
+ target = 4 * 3.0
+ elif d_in or h_in or w_in:
+ target = 2 * 3.0
+ else:
+ target = 3.0
+ self.assertAllClose(target, value[n, d, h, w, k])
+
+ def testConv3DTransposeValid(self):
+ with self.test_session():
+ strides = [1, 2, 2, 2, 1]
+
+      # Input, output: [batch, depth, height, width, channel]
+ x_shape = [2, 5, 6, 4, 3]
+ y_shape = [2, 11, 13, 9, 2]
+
+ # Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
+ f_shape = [3, 3, 3, 2, 3]
+
+ x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
+ f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
+ output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
+ padding="VALID")
+ value = output.eval()
+
+ cache_values = np.zeros(y_shape, dtype=np.float32)
+
+ # The amount of padding added
+ pad = 1
+
+ for n in xrange(x_shape[0]):
+ for k in xrange(f_shape[3]):
+ for w in xrange(y_shape[3]):
+ for h in xrange(y_shape[2]):
+ for d in xrange(y_shape[1]):
+ # We add a case for locations divisible by the stride.
+ d_in = d % strides[1] == 0 and pad < d < y_shape[1] - 1 - pad
+ h_in = h % strides[2] == 0 and pad < h < y_shape[2] - 1 - pad
+ w_in = w % strides[3] == 0 and pad < w < y_shape[3] - 1 - pad
+ if d_in + h_in + w_in == 3:
+ target = 8 * 3.0
+ elif d_in + h_in + w_in == 2:
+ target = 4 * 3.0
+ elif d_in or h_in or w_in:
+ target = 2 * 3.0
+ else:
+ target = 3.0
+ cache_values[n, d, h, w, k] = target
+
+ # copy values in the border
+ cache_values[n, :, :, 0, k] = cache_values[n, :, :, 1, k]
+ cache_values[n, :, :, -1, k] = cache_values[n, :, :, -2, k]
+ cache_values[n, :, 0, :, k] = cache_values[n, :, 1, :, k]
+ cache_values[n, :, -1, :, k] = cache_values[n, :, -2, :, k]
+ cache_values[n, 0, :, :, k] = cache_values[n, 1, :, :, k]
+ cache_values[n, -1, :, :, k] = cache_values[n, -2, :, :, k]
+
+ self.assertAllClose(cache_values, value)
+
+ def testGradient(self):
+ x_shape = [2, 3, 4, 3, 2]
+ f_shape = [3, 3, 3, 2, 2]
+ y_shape = [2, 6, 8, 6, 2]
+ strides = [1, 2, 2, 2, 1]
+ np.random.seed(1) # Make it reproducible.
+ x_val = np.random.random_sample(x_shape).astype(np.float64)
+ f_val = np.random.random_sample(f_shape).astype(np.float64)
+ with self.test_session():
+ x = tf.constant(x_val, name="x", dtype=tf.float32)
+ f = tf.constant(f_val, name="f", dtype=tf.float32)
+ output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
+ padding="SAME")
+ err = tf.test.compute_gradient_error(
+ [x, f], [x_shape, f_shape], output, y_shape)
+ print("conv3d_transpose gradient err = %g " % err)
+ err_tolerance = 0.0005
+ self.assertLess(err, err_tolerance)
+
+
+class Conv3DBackpropFilterV2GradTest(tf.test.TestCase):
+
+ def testGradient(self):
+ with self.test_session():
+ for padding in ["SAME", "VALID"]:
+ for stride in [1, 2]:
+ np.random.seed(1)
+ in_shape = [2, 4, 3, 3, 2]
+ in_val = tf.constant(
+ 2 * np.random.random_sample(in_shape) - 1,
+ dtype=tf.float32)
+ filter_shape = [3, 3, 3, 2, 3]
+ strides = [1, stride, stride, stride, 1]
+ # Make a convolution op with the current settings, just to easily get
+ # the shape of the output.
+ conv_out = tf.nn.conv3d(in_val, tf.zeros(filter_shape), strides,
+ padding)
+ out_backprop_shape = conv_out.get_shape().as_list()
+ out_backprop_val = tf.constant(
+ 2 * np.random.random_sample(out_backprop_shape) - 1,
+ dtype=tf.float32)
+ output = tf.nn.conv3d_backprop_filter_v2(in_val, filter_shape,
+ out_backprop_val,
+ strides, padding)
+ err = tf.test.compute_gradient_error([in_val, out_backprop_val],
+ [in_shape, out_backprop_shape],
+ output, filter_shape)
+ print("conv3d_backprop_filter gradient err = %g " % err)
+ err_tolerance = 1e-3
+ self.assertLess(err, err_tolerance)
+
+
class Conv1DTest(tf.test.TestCase):
def testBasic(self):
diff --git a/tensorflow/python/ops/nn_grad.py b/tensorflow/python/ops/nn_grad.py
index 3f4bb0e068..a396f06eba 100644
--- a/tensorflow/python/ops/nn_grad.py
+++ b/tensorflow/python/ops/nn_grad.py
@@ -82,6 +82,34 @@ def _Conv3DGrad(op, grad):
padding=op.get_attr("padding"))]
+@ops.RegisterGradient("Conv3DBackpropInputV2")
+def _Conv3DBackpropInputGrad(op, grad):
+ return [None,
+ nn_ops.conv3d_backprop_filter_v2(grad,
+ array_ops.shape(op.inputs[1]),
+ op.inputs[2],
+ strides=op.get_attr("strides"),
+ padding=op.get_attr("padding")),
+ nn_ops.conv3d(grad,
+ op.inputs[1],
+ strides=op.get_attr("strides"),
+ padding=op.get_attr("padding"))]
+
+
+@ops.RegisterGradient("Conv3DBackpropFilterV2")
+def _Conv3DBackpropFilterGrad(op, grad):
+ return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
+ grad,
+ op.inputs[2],
+ strides=op.get_attr("strides"),
+ padding=op.get_attr("padding")),
+ None,
+ nn_ops.conv3d(op.inputs[0],
+ grad,
+ strides=op.get_attr("strides"),
+ padding=op.get_attr("padding"))]
+
+
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
return nn_ops.avg_pool3d_grad(
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index d17536daeb..486ef6efb9 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -297,6 +297,73 @@ def conv2d_transpose(value,
name=name)
+def conv3d_transpose(value,
+ filter,
+ output_shape,
+ strides,
+ padding="SAME",
+ name=None):
+ """The transpose of `conv3d`.
+
+ This operation is sometimes called "deconvolution" after [Deconvolutional
+ Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
+ actually the transpose (gradient) of `conv3d` rather than an actual
+ deconvolution.
+
+ Args:
+ value: A 5-D `Tensor` of type `float` and shape
+ `[batch, depth, height, width, in_channels]`.
+ filter: A 5-D `Tensor` with the same type as `value` and shape
+ `[depth, height, width, output_channels, in_channels]`. `filter`'s
+ `in_channels` dimension must match that of `value`.
+ output_shape: A 1-D `Tensor` representing the output shape of the
+ deconvolution op.
+ strides: A list of ints. The stride of the sliding window for each
+ dimension of the input tensor.
+ padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
+ See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
+ name: Optional name for the returned tensor.
+
+ Returns:
+ A `Tensor` with the same type as `value`.
+
+ Raises:
+ ValueError: If input/output depth does not match `filter`'s shape, or if
+ padding is other than `'VALID'` or `'SAME'`.
+ """
+ with ops.op_scope([value, filter, output_shape], name,
+ "conv3d_transpose") as name:
+ value = ops.convert_to_tensor(value, name="value")
+ filter = ops.convert_to_tensor(filter, name="filter")
+ if not value.get_shape()[4].is_compatible_with(filter.get_shape()[4]):
+ raise ValueError("input channels does not match filter's input channels, "
+ "{} != {}".format(value.get_shape()[4], filter.get_shape(
+ )[4]))
+
+ output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
+ if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):
+ raise ValueError("output_shape must have shape (5,), got {}"
+ .format(output_shape_.get_shape()))
+
+ if isinstance(output_shape, (list, np.ndarray)):
+ # output_shape's shape should be == [5] if reached this point.
+ if not filter.get_shape()[3].is_compatible_with(output_shape[4]):
+ raise ValueError(
+ "output_shape does not match filter's output channels, "
+ "{} != {}".format(output_shape[4], filter.get_shape()[3]))
+
+ if padding != "VALID" and padding != "SAME":
+ raise ValueError("padding must be either VALID or SAME:"
+ " {}".format(padding))
+
+ return gen_nn_ops.conv3d_backprop_input_v2(input_sizes=output_shape_,
+ filter=filter,
+ out_backprop=value,
+ strides=strides,
+ padding=padding,
+ name=name)
+
+
# pylint: disable=protected-access
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
diff --git a/tensorflow/python/ops/rnn.py b/tensorflow/python/ops/rnn.py
index 0ad22b2e4d..8917a38a56 100644
--- a/tensorflow/python/ops/rnn.py
+++ b/tensorflow/python/ops/rnn.py
@@ -535,6 +535,125 @@ def bidirectional_rnn(cell_fw, cell_bw, inputs,
return (outputs, output_state_fw, output_state_bw)
+def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
+ initial_state_fw=None, initial_state_bw=None,
+ dtype=None, parallel_iterations=None,
+ swap_memory=False, time_major=False, scope=None):
+ """Creates a dynamic version of bidirectional recurrent neural network.
+
+ Similar to the unidirectional case above (rnn) but takes input and builds
+ independent forward and backward RNNs. The input_size of forward and
+  backward cells must match. The initial state for both directions is zero by
+ default (but can be set optionally) and no intermediate states are ever
+ returned -- the network is fully unrolled for the given (passed in)
+ length(s) of the sequence(s) or completely unrolled if length(s) is not
+ given.
+
+ Args:
+ cell_fw: An instance of RNNCell, to be used for forward direction.
+ cell_bw: An instance of RNNCell, to be used for backward direction.
+ inputs: The RNN inputs.
+ If time_major == False (default), this must be a tensor of shape:
+ `[batch_size, max_time, input_size]`.
+ If time_major == True, this must be a tensor of shape:
+        `[max_time, batch_size, input_size]`.
+ sequence_length: An int32/int64 vector, size `[batch_size]`,
+ containing the actual lengths for each of the sequences.
+ initial_state_fw: (optional) An initial state for the forward RNN.
+ This must be a tensor of appropriate type and shape
+ `[batch_size x cell_fw.state_size]`.
+ If `cell_fw.state_size` is a tuple, this should be a tuple of
+ tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
+ initial_state_bw: (optional) Same as for `initial_state_fw`, but using
+ the corresponding properties of `cell_bw`.
+ parallel_iterations: (Default: 32). The number of iterations to run in
+ parallel. Those operations which do not have any temporal dependency
+ and can be run in parallel, will be. This parameter trades off
+ time for space. Values >> 1 use more memory but take less time,
+ while smaller values use less memory but computations take longer.
+ swap_memory: Transparently swap the tensors produced in forward inference
+ but needed for back prop from GPU to CPU. This allows training RNNs
+ which would typically not fit on a single GPU, with very minimal (or no)
+ performance penalty.
+ time_major: The shape format of the `inputs` and `outputs` Tensors.
+ If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
+ If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
+ Using `time_major = True` is a bit more efficient because it avoids
+ transposes at the beginning and end of the RNN calculation. However,
+ most TensorFlow data is batch-major, so by default this function
+ accepts input and emits output in batch-major form.
+    dtype: (optional) The data type for the initial state. Required if
+      either of the initial states is not provided.
+ scope: VariableScope for the created subgraph; defaults to "BiRNN"
+
+ Returns:
+ A tuple (outputs, output_states) where:
+ outputs: A tuple (output_fw, output_bw) containing the forward and
+ the backward rnn output `Tensor`.
+ If time_major == False (default),
+ output_fw will be a `Tensor` shaped:
+ `[batch_size, max_time, cell_fw.output_size]`
+ and output_bw will be a `Tensor` shaped:
+ `[batch_size, max_time, cell_bw.output_size]`.
+ If time_major == True,
+ output_fw will be a `Tensor` shaped:
+ `[max_time, batch_size, cell_fw.output_size]`
+ and output_bw will be a `Tensor` shaped:
+ `[max_time, batch_size, cell_bw.output_size]`.
+ It returns a tuple instead of a single concatenated `Tensor`, unlike
+ in the `bidirectional_rnn`. If the concatenated one is preferred,
+ the forward and backward outputs can be concatenated as
+ `tf.concat(2, outputs)`.
+ output_states: A tuple (output_state_fw, output_state_bw) containing
+ the forward and the backward final states of bidirectional rnn.
+
+ Raises:
+ TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
+ """
+
+ if not isinstance(cell_fw, rnn_cell.RNNCell):
+ raise TypeError("cell_fw must be an instance of RNNCell")
+ if not isinstance(cell_bw, rnn_cell.RNNCell):
+ raise TypeError("cell_bw must be an instance of RNNCell")
+
+ name = scope or "BiRNN"
+ # Forward direction
+ with vs.variable_scope(name + "_FW") as fw_scope:
+ output_fw, output_state_fw = dynamic_rnn(
+ cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
+ initial_state=initial_state_fw, dtype=dtype,
+ parallel_iterations=parallel_iterations, swap_memory=swap_memory,
+ time_major=time_major, scope=fw_scope)
+ # Backward direction
+ if not time_major:
+ time_dim = 1
+ batch_dim = 0
+ else:
+ time_dim = 0
+ batch_dim = 1
+ with vs.variable_scope(name + "_BW") as bw_scope:
+ inputs_reverse = array_ops.reverse_sequence(
+ input=inputs, seq_lengths=sequence_length,
+ seq_dim=time_dim, batch_dim=batch_dim)
+ tmp, output_state_bw = dynamic_rnn(
+ cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
+ initial_state=initial_state_bw, dtype=dtype,
+ parallel_iterations=parallel_iterations, swap_memory=swap_memory,
+ time_major=time_major, scope=bw_scope)
+ output_bw = array_ops.reverse_sequence(
+ input=tmp, seq_lengths=sequence_length,
+        seq_dim=time_dim, batch_dim=batch_dim)
+
+ outputs = (output_fw, output_bw)
+ output_states = (output_state_fw, output_state_bw)
+
+ return (outputs, output_states)
+
+
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
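
A hedged usage sketch of the new bidirectional_dynamic_rnn, following the batch-major form used in the rnn_test.py additions (hyperparameters are illustrative):

    import numpy as np
    import tensorflow as tf

    batch_size, max_time, input_size, num_units = 2, 8, 5, 3

    inputs = tf.placeholder(tf.float32, [batch_size, max_time, input_size])
    seq_len = tf.placeholder(tf.int64, [batch_size])
    cell_fw = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)
    cell_bw = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=True)

    outputs, states = tf.nn.bidirectional_dynamic_rnn(
        cell_fw, cell_bw, inputs, sequence_length=seq_len, dtype=tf.float32)
    merged = tf.concat(2, outputs)   # [batch, time, 2 * num_units]

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        out = sess.run(merged, feed_dict={
            inputs: np.random.randn(batch_size, max_time, input_size).astype(np.float32),
            seq_len: [2, 3]})
        print(out.shape)   # (2, 8, 6)
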
diff --git a/tensorflow/python/training/saver_test.py b/tensorflow/python/training/saver_test.py
index 3aeceaac12..5a42472a29 100644
--- a/tensorflow/python/training/saver_test.py
+++ b/tensorflow/python/training/saver_test.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-# ==============================================================================
+# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
diff --git a/tensorflow/tensorboard/README.md b/tensorflow/tensorboard/README.md
index b95a4177bc..c3c8ade6c6 100644
--- a/tensorflow/tensorboard/README.md
+++ b/tensorflow/tensorboard/README.md
@@ -54,18 +54,18 @@ work, but there may be bugs or performance issues.
The first step in using TensorBoard is acquiring data from your TensorFlow run.
For this, you need [summary
-ops](https://www.tensorflow.org/versions/r0.8/api_docs/python/train.html#summary-operations).
+ops](https://www.tensorflow.org/versions/r0.9/api_docs/python/train.html#summary-operations).
Summary ops are ops, like
-[`tf.matmul`](https://www.tensorflow.org/versions/r0.8/api_docs/python/math_ops.html#matmul)
+[`tf.matmul`](https://www.tensorflow.org/versions/r0.9/api_docs/python/math_ops.html#matmul)
or
-[`tf.nn.relu`](https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#relu),
+[`tf.nn.relu`](https://www.tensorflow.org/versions/r0.9/api_docs/python/nn.html#relu),
which means they take in tensors, produce tensors, and are evaluated from within
a TensorFlow graph. However, summary ops have a twist: the Tensors they produce
contain serialized protobufs, which are written to disk and sent to TensorBoard.
To visualize the summary data in TensorBoard, you should evaluate the summary
op, retrieve the result, and then write that result to disk using a
SummaryWriter. A full explanation, with examples, is in [the
-tutorial](https://www.tensorflow.org/versions/r0.8/how_tos/summaries_and_tensorboard/index.html).
+tutorial](https://www.tensorflow.org/versions/r0.9/how_tos/summaries_and_tensorboard/index.html).
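
A minimal sketch of that workflow, assuming the r0.9 summary API
(`tf.scalar_summary`, `tf.merge_all_summaries`, `tf.train.SummaryWriter`) and
an arbitrary log directory:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, name="x")
    loss = tf.square(x, name="loss")

    # Summary ops produce serialized protobufs rather than ordinary values.
    tf.scalar_summary("loss", loss)
    merged = tf.merge_all_summaries()

    writer = tf.train.SummaryWriter("/tmp/tensorboard_demo")
    with tf.Session() as sess:
        for step in range(10):
            summary_pb = sess.run(merged, feed_dict={x: float(step)})
            # Writing the result to disk is what makes it visible to TensorBoard.
            writer.add_summary(summary_pb, step)
    writer.close()
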
### Tags: Giving names to data
@@ -178,7 +178,7 @@ TensorFlow model. To get best use of the graph visualizer, you should use name
scopes to hierarchically group the ops in your graph - otherwise, the graph may
be difficult to decipher. For more information, including examples, see [the
graph visualizer
-tutorial](https://www.tensorflow.org/versions/r0.8/how_tos/graph_viz/index.html#tensorboard-graph-visualization).
+tutorial](https://www.tensorflow.org/versions/r0.9/how_tos/graph_viz/index.html#tensorboard-graph-visualization).
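
A minimal sketch of that grouping, with invented scope and variable names:

    import tensorflow as tf

    with tf.name_scope("input"):
        x = tf.placeholder(tf.float32, [None, 784], name="x")

    with tf.name_scope("layer1"):
        weights = tf.Variable(tf.zeros([784, 10]), name="weights")
        bias = tf.Variable(tf.zeros([10]), name="bias")
        # Appears as a single collapsible "layer1" node in the graph visualizer.
        logits = tf.matmul(x, weights) + bias
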
# Frequently Asked Questions
diff --git a/tensorflow/tools/ci_build/builds/test_installation.sh b/tensorflow/tools/ci_build/builds/test_installation.sh
index 173e3b254e..6fb40c7b65 100755
--- a/tensorflow/tools/ci_build/builds/test_installation.sh
+++ b/tensorflow/tools/ci_build/builds/test_installation.sh
@@ -89,6 +89,7 @@ PY_TEST_BLACKLIST="${PY_TEST_BLACKLIST}:"\
"tensorflow/contrib/quantization/python/dequantize_op_test.py:"\
"tensorflow/contrib/quantization/python/quantized_conv_ops_test.py:"\
"tensorflow/contrib/quantization/tools/quantize_graph_test.py:"\
+"tensorflow/contrib/session_bundle/exporter_test.py:"\
"tensorflow/python/platform/default/_resource_loader_test.py:"\
"tensorflow/python/platform/default/flags_test.py:"\
"tensorflow/python/platform/default/logging_test.py:"\
diff --git a/tensorflow/tools/dist_test/Dockerfile b/tensorflow/tools/dist_test/Dockerfile
index 5705767c7d..66787ca7f8 100644
--- a/tensorflow/tools/dist_test/Dockerfile
+++ b/tensorflow/tools/dist_test/Dockerfile
@@ -20,7 +20,7 @@ RUN /var/gcloud/google-cloud-sdk/bin/gcloud components install kubectl
# Install nightly TensorFlow pip
# TODO(cais): Should we build it locally instead?
RUN pip install \
- http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+ http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
# Copy test files
COPY scripts /var/tf-dist-test/scripts
diff --git a/tensorflow/tools/dist_test/server/Dockerfile b/tensorflow/tools/dist_test/server/Dockerfile
index 4cbc2f0645..c3bf751735 100644
--- a/tensorflow/tools/dist_test/server/Dockerfile
+++ b/tensorflow/tools/dist_test/server/Dockerfile
@@ -36,7 +36,7 @@ RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
# Install TensorFlow CPU version from nightly build
RUN pip --no-cache-dir install \
- http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+ http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
# Copy files, including the GRPC server binary at
# server/grpc_tensorflow_server.py
diff --git a/tensorflow/tools/dist_test/server/Dockerfile.test b/tensorflow/tools/dist_test/server/Dockerfile.test
index af795e8066..de4411a05c 100644
--- a/tensorflow/tools/dist_test/server/Dockerfile.test
+++ b/tensorflow/tools/dist_test/server/Dockerfile.test
@@ -42,7 +42,7 @@ RUN pip install --upgrade pandas==0.18.1
# Install TensorFlow CPU version.
RUN pip --no-cache-dir install \
- http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-cp27-none-linux_x86_64.whl
+ http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
# Copy files, including the GRPC server binary at
# server/grpc_tensorflow_server.py
diff --git a/tensorflow/tools/docker/Dockerfile b/tensorflow/tools/docker/Dockerfile
index a5927e79a5..31c3cd4d30 100644
--- a/tensorflow/tools/docker/Dockerfile
+++ b/tensorflow/tools/docker/Dockerfile
@@ -32,7 +32,7 @@ RUN pip --no-cache-dir install \
&& \
python -m ipykernel.kernelspec
-ENV TENSORFLOW_VERSION 0.8.0
+ENV TENSORFLOW_VERSION 0.9.0
# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index 4dd97a6f20..db91720cd9 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -32,7 +32,7 @@ RUN pip --no-cache-dir install \
&& \
python -m ipykernel.kernelspec
-ENV TENSORFLOW_VERSION 0.8.0
+ENV TENSORFLOW_VERSION 0.9.0
# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
diff --git a/tensorflow/tools/docker/README.md b/tensorflow/tools/docker/README.md
index 35bd03c62d..921d4e5353 100644
--- a/tensorflow/tools/docker/README.md
+++ b/tensorflow/tools/docker/README.md
@@ -31,7 +31,7 @@ Run non-GPU container using
$ docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow
-For GPU support install Nvidia drivers (ideally latest) and
+For GPU support install NVidia drivers (ideally latest) and
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker). Run using
$ nvidia-docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow:latest-gpu
@@ -46,6 +46,12 @@ it there please and try using the nvidia-docker as described above.
$ docker run -it -p 8888:8888 $CUDA_SO $DEVICES gcr.io/tensorflow/tensorflow:latest-gpu
+## More containers
+
+See all available [tags](https://hub.docker.com/r/tensorflow/tensorflow/tags/)
+for additional containers like release candidates or nightly builds.
+
+
## Rebuilding the containers
Just pick the dockerfile corresponding to the container you want to build, and run
diff --git a/tensorflow/tools/docker/docker_run_gpu.sh b/tensorflow/tools/docker/docker_run_gpu.sh
deleted file mode 100755
index 08f391ddf9..0000000000
--- a/tensorflow/tools/docker/docker_run_gpu.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-set -e
-
-export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | \
- xargs -I{} echo '-v {}:{}')
-export DEVICES=$(\ls /dev/nvidia* | \
- xargs -I{} echo '--device {}:{}')
-
-if [[ "${DEVICES}" = "" ]]; then
- echo "Failed to locate NVidia device(s). Did you want the non-GPU container?"
- exit 1
-fi
-
-docker run -it $CUDA_SO $DEVICES "$@"
diff --git a/tensorflow/tools/docker/parameterized_docker_build.sh b/tensorflow/tools/docker/parameterized_docker_build.sh
index dd4680bc0d..56770bed64 100644..100755
--- a/tensorflow/tools/docker/parameterized_docker_build.sh
+++ b/tensorflow/tools/docker/parameterized_docker_build.sh
@@ -179,6 +179,9 @@ if [[ "${DO_PIP_BUILD}" == "1" ]]; then
export TF_BUILD_IS_OPT="OPT"
export TF_BUILD_IS_PIP="PIP"
+ export TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS=\
+"-e TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2"
+
pushd "${SCRIPT_DIR}/../../../"
rm -rf pip_test/whl &&
tensorflow/tools/ci_build/ci_parameterized_build.sh
diff --git a/tensorflow/tools/pip_package/BUILD b/tensorflow/tools/pip_package/BUILD
index cf83a767e7..ae63181258 100644
--- a/tensorflow/tools/pip_package/BUILD
+++ b/tensorflow/tools/pip_package/BUILD
@@ -30,9 +30,9 @@ sh_binary(
":other_headers",
":simple_console",
"//tensorflow:tensorflow_py",
- "//tensorflow/contrib/session_bundle:all_files",
- "//tensorflow/contrib/session_bundle:manifest_proto_py",
- "//tensorflow/contrib/session_bundle/example:half_plus_two",
+# "//tensorflow/contrib/session_bundle:all_files",
+# "//tensorflow/contrib/session_bundle:manifest_proto_py",
+# "//tensorflow/contrib/session_bundle/example:half_plus_two",
"//tensorflow/contrib/slim:all_files",
"//tensorflow/contrib/slim/python/slim/data:all_files",
"//tensorflow/core:framework_headers",
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 07b10de08c..59cb647e45 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -27,7 +27,7 @@ from setuptools import find_packages, setup, Command, Extension
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
-_VERSION = '0.8.0'
+_VERSION = '0.9.0'
numpy_version = "1.8.2"
if platform.system() == "Darwin":
diff --git a/tensorflow/tools/proto_text/BUILD b/tensorflow/tools/proto_text/BUILD
index 19f14748c5..980a6d651e 100644
--- a/tensorflow/tools/proto_text/BUILD
+++ b/tensorflow/tools/proto_text/BUILD
@@ -42,6 +42,13 @@ cc_library(
name = "gen_proto_text_functions_lib",
srcs = ["gen_proto_text_functions_lib.cc"],
hdrs = ["gen_proto_text_functions_lib.h"],
+ linkopts = [
+ "-lm",
+ "-lpthread",
+ ] + select({
+ "//tensorflow:darwin": [],
+ "//conditions:default": ["-lrt"],
+ }),
deps = [
"//tensorflow/core:lib",
],
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 49f37ed1ab..74026bbcd7 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -143,13 +143,13 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
native.new_git_repository(
name = "boringssl_git",
commit = "e72df93461c6d9d2b5698f10e16d3ab82f5adde3",
- remote = "https://boringssl.googlesource.com/boringssl",
+ remote = "https://github.com/google/boringssl.git",
build_file = path_prefix + "boringssl.BUILD",
)
native.bind(
name = "boringssl_err_data_c",
- actual = "@//" + path_prefix + "third_party/boringssl:err_data_c",
+ actual = tf_repo_name + "//third_party/boringssl:err_data_c",
)
native.new_git_repository(
diff --git a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc
index 7e7633192c..071997ca44 100755
--- a/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc
+++ b/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,6 +34,8 @@ NOTES:
//third_party/gpus/crosstool/v*/*/clang/bin/crosstool_wrapper_is_not_gcc
"""
+from __future__ import print_function
+
__author__ = 'keveman@google.com (Manjunath Kudlur)'
from argparse import ArgumentParser
@@ -54,7 +56,7 @@ LLVM_HOST_COMPILER_PATH = ('/usr/bin/gcc')
PREFIX_DIR = os.path.dirname(GCC_HOST_COMPILER_PATH)
def Log(s):
- print 'gpus/crosstool: {0}'.format(s)
+ print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
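
The `print_function` import above is what lets the shebang drop the hard
`python2.7` pin: with it, the same `print(...)` call form runs under both
Python 2 and Python 3. A standalone sketch of the pattern:

    from __future__ import print_function

    def Log(s):
        # print(...) is a function call under both Python 2 and Python 3 here;
        # without the __future__ import, Python 2 would parse it as a statement.
        print('gpus/crosstool: {0}'.format(s))

    Log('example message')
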