author     A. Unique TensorFlower <gardener@tensorflow.org>  2017-01-02 22:19:48 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>   2017-01-02 22:27:38 -0800
commit     46d2c286045f4d2616d348cbaaea7fd52fadfe8b (patch)
tree       f8c1b4cd1ea1f17e7a80b6f969555ee3b0b65837
parent     143556267343e8f3f352b0ddba5272fc30b80078 (diff)
Merge changes from github.
Change: 143412147
-rw-r--r--  README.md  8
-rw-r--r--  RELEASE.md  3
-rwxr-xr-x  configure  2
-rw-r--r--  tensorflow/contrib/cmake/setup.py  2
-rw-r--r--  tensorflow/contrib/ios_examples/README.md  2
-rw-r--r--  tensorflow/contrib/learn/python/learn/datasets/mnist.py  4
-rw-r--r--  tensorflow/contrib/learn/python/learn/datasets/text_datasets.py  2
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/estimator.py  1
-rw-r--r--  tensorflow/contrib/learn/python/learn/graph_actions.py  13
-rw-r--r--  tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc  2
-rw-r--r--  tensorflow/core/kernels/BUILD  8
-rw-r--r--  tensorflow/examples/udacity/1_notmnist.ipynb  2
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassEnv.md  6
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassPartialTensorShape.md  6
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassPartialTensorShapeUtils.md  6
-rw-r--r--  tensorflow/g3doc/api_docs/python/array_ops.md  2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.depth_to_space.md  2
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md  244
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md  17
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md  49
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md  237
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md  521
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md  22
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md  4
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md  26
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md  27
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md  4
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md  4
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md  207
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md  37
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md  4
-rw-r--r--  tensorflow/g3doc/api_docs/python/summary.md  481
-rw-r--r--  tensorflow/g3doc/api_docs/python/test.md  521
-rw-r--r--  tensorflow/g3doc/get_started/os_setup.md  84
-rw-r--r--  tensorflow/g3doc/tutorials/mnist/beginners/index.md  14
-rw-r--r--  tensorflow/java/README.md  23
-rw-r--r--  tensorflow/java/pom.xml  44
-rw-r--r--  tensorflow/tools/ci_build/builds/android_nightly.sh  20
-rwxr-xr-x  tensorflow/tools/ci_build/ci_parameterized_build.sh  8
-rw-r--r--  tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh  41
-rw-r--r--  tensorflow/tools/pip_package/setup.py  2
-rw-r--r--  tensorflow/workspace.bzl  2
-rw-r--r--  third_party/common.bzl  43
-rw-r--r--  third_party/jpeg/BUILD  1
-rw-r--r--  third_party/jpeg/jpeg.BUILD  142
-rw-r--r--  third_party/llvm/llvm.BUILD  21
-rw-r--r--  third_party/llvm/llvm.bzl  42
47 files changed, 1751 insertions, 1212 deletions
diff --git a/README.md b/README.md
index fd61605b0e..a030e94003 100644
--- a/README.md
+++ b/README.md
@@ -33,10 +33,10 @@ and discussion.**
People who are a little more adventurous can also try our nightly binaries:
-* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
-* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
-* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
-* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
+* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
+* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
+* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.12.1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
+* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-0.12.1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
* [Android](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/lastSuccessfulBuild/artifact/bazel-out/local_linux/bin/tensorflow/examples/android/tensorflow_demo.apk) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/))
#### *Try your first TensorFlow program*
diff --git a/RELEASE.md b/RELEASE.md
index a8bbc7064b..29b6cba6ba 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -139,6 +139,9 @@
* `tf.all_variables`, `tf.VARIABLES` and `tf.initialize_all_variables` renamed
to `tf.global_variables`, `tf.GLOBAL_VARIABLES` and
`tf.global_variables_initializer` respectively.
+* `tf.zeros_initializer()` and `tf.ones_initializer()` now return a callable
+  that must be called with initializer arguments. In your code, replace
+  `tf.zeros_initializer` with `tf.zeros_initializer()`.
## Bug Fixes and Other Changes
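For context, a minimal, hedged sketch of the initializer migration described in the note above (the variable names are illustrative, not from the commit):

```python
import tensorflow as tf

# Before this change, the bare function object was passed directly:
# w = tf.get_variable("w", shape=[10], initializer=tf.zeros_initializer)

# After this change, the initializer must be called to obtain the object
# that tf.get_variable expects:
w = tf.get_variable("w", shape=[10], initializer=tf.zeros_initializer())
b = tf.get_variable("b", shape=[10], initializer=tf.ones_initializer())
```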
diff --git a/configure b/configure
index c2401252dd..ee1967f937 100755
--- a/configure
+++ b/configure
@@ -4,7 +4,7 @@ set -e
set -o pipefail
# Find out the absolute path to where ./configure resides
-pushd `dirname $0` #> /dev/null
+pushd `dirname $0` > /dev/null
SOURCE_BASE_DIR=`pwd -P`
popd > /dev/null
diff --git a/tensorflow/contrib/cmake/setup.py b/tensorflow/contrib/cmake/setup.py
index 12dea2ea4f..25e54c964a 100644
--- a/tensorflow/contrib/cmake/setup.py
+++ b/tensorflow/contrib/cmake/setup.py
@@ -26,7 +26,7 @@ from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
-_VERSION = '0.12.0-cmake-experimental'
+_VERSION = '0.12.1-cmake-experimental'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
diff --git a/tensorflow/contrib/ios_examples/README.md b/tensorflow/contrib/ios_examples/README.md
index a802007c5d..00c13d9c7e 100644
--- a/tensorflow/contrib/ios_examples/README.md
+++ b/tensorflow/contrib/ios_examples/README.md
@@ -10,7 +10,7 @@ This folder contains examples of how to build applications for iOS devices using
[tensorflow/contrib/makefile](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/makefile)
under "iOS" to compile a static library containing the core TensorFlow code.
- - Download
+ - From the root of the Tensorflow folder, download
[Inception v1](https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip),
and extract the label and graph files into the data folders inside both the
simple and camera examples:
diff --git a/tensorflow/contrib/learn/python/learn/datasets/mnist.py b/tensorflow/contrib/learn/python/learn/datasets/mnist.py
index 08068da182..f11e40e045 100644
--- a/tensorflow/contrib/learn/python/learn/datasets/mnist.py
+++ b/tensorflow/contrib/learn/python/learn/datasets/mnist.py
@@ -42,7 +42,7 @@ def extract_images(f):
f: A file object that can be passed into a gzip reader.
Returns:
- data: A 4D unit8 numpy array [index, y, x, depth].
+ data: A 4D uint8 numpy array [index, y, x, depth].
Raises:
ValueError: If the bytestream does not start with 2051.
@@ -81,7 +81,7 @@ def extract_labels(f, one_hot=False, num_classes=10):
num_classes: Number of classes for the one hot encoding.
Returns:
- labels: a 1D unit8 numpy array.
+ labels: a 1D uint8 numpy array.
Raises:
ValueError: If the bystream doesn't start with 2049.
diff --git a/tensorflow/contrib/learn/python/learn/datasets/text_datasets.py b/tensorflow/contrib/learn/python/learn/datasets/text_datasets.py
index eb0c8546ff..79201759e6 100644
--- a/tensorflow/contrib/learn/python/learn/datasets/text_datasets.py
+++ b/tensorflow/contrib/learn/python/learn/datasets/text_datasets.py
@@ -27,7 +27,7 @@ import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import gfile
-DBPEDIA_URL = 'https://googledrive.com/host/0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M/dbpedia_csv.tar.gz'
+DBPEDIA_URL = 'https://github.com/le-scientifique/torchDatasets/blob/master/dbpedia_csv.tar.gz'
def maybe_download_dbpedia(data_dir):
diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator.py b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
index 726f83dacb..e4b3a6c193 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/estimator.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
@@ -749,6 +749,7 @@ class BaseEstimator(
supervisor_save_model_steps=self._config.save_checkpoints_steps,
supervisor_save_summaries_steps=self._config.save_summary_steps,
keep_checkpoint_max=self._config.keep_checkpoint_max,
+ keep_checkpoint_every_n_hours=self._config.keep_checkpoint_every_n_hours,
feed_fn=feed_fn,
steps=steps,
fail_on_nan_loss=fail_on_nan_loss,
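The new keyword is read from the estimator's run configuration; a hedged sketch of setting it at that level (assuming the contrib.learn `RunConfig` of this release, which exposes a `keep_checkpoint_every_n_hours` argument; the estimator and feature column are illustrative):

```python
import tensorflow as tf

# keep_checkpoint_every_n_hours flows from the RunConfig through the
# estimator's training loop into the Saver that writes checkpoints.
config = tf.contrib.learn.RunConfig(keep_checkpoint_every_n_hours=2.0)
regressor = tf.contrib.learn.LinearRegressor(
    feature_columns=[tf.contrib.layers.real_valued_column("x")],
    config=config)
```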
diff --git a/tensorflow/contrib/learn/python/learn/graph_actions.py b/tensorflow/contrib/learn/python/learn/graph_actions.py
index 43e99a2f17..5cae1e9a4a 100644
--- a/tensorflow/contrib/learn/python/learn/graph_actions.py
+++ b/tensorflow/contrib/learn/python/learn/graph_actions.py
@@ -128,6 +128,7 @@ def _monitored_train(graph,
supervisor_save_model_secs=600,
supervisor_save_model_steps=None,
keep_checkpoint_max=5,
+ keep_checkpoint_every_n_hours=10000.0,
supervisor_save_summaries_secs=None,
supervisor_save_summaries_steps=100,
feed_fn=None,
@@ -176,6 +177,13 @@ def _monitored_train(graph,
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
arg to `tf.Saver` constructor.
+ keep_checkpoint_every_n_hours: In addition to keeping the most recent
+ `keep_checkpoint_max` checkpoint files, you might want to keep one checkpoint file
+ for every N hours of training. This can be useful if you want to later
+ analyze how a model progressed during a long training session. For
+ example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep
+ one checkpoint file for every 2 hours of training. The default value of
+ 10,000 hours effectively disables the feature.
supervisor_save_summaries_secs: Save summaries every
`supervisor_save_summaries_secs` seconds when training.
supervisor_save_summaries_steps: Save summaries every
@@ -248,7 +256,10 @@ def _monitored_train(graph,
def make_saver():
return tf_saver.Saver(
- sharded=True, max_to_keep=keep_checkpoint_max, defer_build=True)
+ sharded=True,
+ max_to_keep=keep_checkpoint_max,
+ keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
+ defer_build=True)
scaffold = monitored_session.Scaffold(
init_op=init_op,
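The argument is forwarded unchanged to the `tf.train.Saver` constructor; a hedged standalone equivalent of the `make_saver()` call above (the values are illustrative):

```python
import tensorflow as tf

# Keep the 5 most recent checkpoints, plus one checkpoint for every
# 2 hours of training so a long run can be inspected after the fact.
saver = tf.train.Saver(sharded=True,
                       max_to_keep=5,
                       keep_checkpoint_every_n_hours=2.0)
```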
diff --git a/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc b/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc
index ecd272a5be..729aa36803 100644
--- a/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc
+++ b/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc
@@ -92,7 +92,7 @@ class RpcRecvTensorCall : public BaseRecvTensorCall {
// RpcRecvTensorCall, and it always sets this->wi_ to null when
// a call object is released to it, we can assert that this->wi_ is
// always null at the point of deletion.
- CHECK_EQ(nullptr, wi_)
+ CHECK_EQ(static_cast<WorkerInterface*>(nullptr), wi_)
<< "Leaking WorkerInterface in RpcRecvTensorCall destructor.";
}
diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD
index 15d64ea4cc..fd7265e4ed 100644
--- a/tensorflow/core/kernels/BUILD
+++ b/tensorflow/core/kernels/BUILD
@@ -25,7 +25,6 @@ package_group(
load(
"//tensorflow:tensorflow.bzl",
- "if_not_windows",
"tf_cc_test",
"tf_cc_tests",
"tf_copts",
@@ -2390,6 +2389,8 @@ cc_library(
":batch_norm_op",
":bias_op",
":conv_ops",
+ ":depthwise_conv_grad_op",
+ ":depthwise_conv_op",
":dilation_ops",
":fused_batch_norm_op",
":in_topk_op",
@@ -2401,10 +2402,7 @@ cc_library(
":softsign_op",
":topk_op",
":xent_op",
- ] + if_not_windows([
- ":depthwise_conv_grad_op",
- ":depthwise_conv_op",
- ]),
+ ],
)
NN_DEPS = [
diff --git a/tensorflow/examples/udacity/1_notmnist.ipynb b/tensorflow/examples/udacity/1_notmnist.ipynb
index b60d4a9034..c9ec86f71a 100644
--- a/tensorflow/examples/udacity/1_notmnist.ipynb
+++ b/tensorflow/examples/udacity/1_notmnist.ipynb
@@ -114,7 +114,7 @@
"\n",
"def download_progress_hook(count, blockSize, totalSize):\n",
" \"\"\"A hook to report the progress of a download. This is mostly intended for users with\n",
- " slow internet connections. Reports every 1% change in download progress.\n",
+ " slow internet connections. Reports every 5% change in download progress.\n",
" \"\"\"\n",
" global last_percent_reported\n",
" percent = int(count * blockSize * 100 / totalSize)\n",
diff --git a/tensorflow/g3doc/api_docs/cc/ClassEnv.md b/tensorflow/g3doc/api_docs/cc/ClassEnv.md
index 9f554aa3aa..43f75fefb9 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassEnv.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassEnv.md
@@ -184,6 +184,12 @@ Renames file src to target. If target already exists, it will be replaced.
+#### `string tensorflow::Env::GetExecutablePath()` {#string_tensorflow_Env_GetExecutablePath}
+
+Returns the absolute path of the current executable. It resolves symlinks if there is any.
+
+
+
#### `virtual uint64 tensorflow::Env::NowMicros()=0` {#virtual_uint64_tensorflow_Env_NowMicros}
Returns the number of micro-seconds since the Unix epoch.
diff --git a/tensorflow/g3doc/api_docs/cc/ClassPartialTensorShape.md b/tensorflow/g3doc/api_docs/cc/ClassPartialTensorShape.md
index 5db2760bbc..ac2c26093d 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassPartialTensorShape.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassPartialTensorShape.md
@@ -60,6 +60,12 @@ Return true iff the rank and all of the dimensions are well defined.
+#### `bool tensorflow::PartialTensorShape::IsIdenticalTo(const PartialTensorShape &shape) const` {#bool_tensorflow_PartialTensorShape_IsIdenticalTo}
+
+
+
+Exact equality test. Returns true iff the ranks match (i.e., both are unknown, or both are known and equal), and all dimensions are equal (i.e., both dimensions are unknown, or both are known and equal). This is a stronger condition than IsCompatibleWith.
+
#### `bool tensorflow::PartialTensorShape::IsCompatibleWith(const PartialTensorShape &shape) const` {#bool_tensorflow_PartialTensorShape_IsCompatibleWith}
diff --git a/tensorflow/g3doc/api_docs/cc/ClassPartialTensorShapeUtils.md b/tensorflow/g3doc/api_docs/cc/ClassPartialTensorShapeUtils.md
index 616adc0c59..ca3666ba8f 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassPartialTensorShapeUtils.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassPartialTensorShapeUtils.md
@@ -12,6 +12,12 @@ Static helper routines for ` PartialTensorShape `. Includes a few common predica
+#### `bool tensorflow::PartialTensorShapeUtils::AreIdentical(const gtl::ArraySlice< PartialTensorShape > &shapes0, const gtl::ArraySlice< PartialTensorShape > &shapes1)` {#bool_tensorflow_PartialTensorShapeUtils_AreIdentical}
+
+
+
+
+
#### `bool tensorflow::PartialTensorShapeUtils::AreCompatible(const gtl::ArraySlice< PartialTensorShape > &shapes0, const gtl::ArraySlice< PartialTensorShape > &shapes1)` {#bool_tensorflow_PartialTensorShapeUtils_AreCompatible}
diff --git a/tensorflow/g3doc/api_docs/python/array_ops.md b/tensorflow/g3doc/api_docs/python/array_ops.md
index ff2d5f4b49..cab74b0ed3 100644
--- a/tensorflow/g3doc/api_docs/python/array_ops.md
+++ b/tensorflow/g3doc/api_docs/python/array_ops.md
@@ -2165,7 +2165,7 @@ The attr `block_size` indicates the input block size and how the data is moved.
* Chunks of data of size `block_size * block_size` from depth are rearranged
into non-overlapping blocks of size `block_size x block_size`
- * The width the output tensor is `input_depth * block_size`, whereas the
+ * The width the output tensor is `input_width * block_size`, whereas the
height is `input_height * block_size`.
* The depth of the input tensor must be divisible by
`block_size * block_size`.
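A quick shape check of the corrected sentence, as a hedged sketch (the numbers follow directly from the op's definition):

```python
import tensorflow as tf

# Input [batch, height, width, depth] = [1, 2, 2, 4] with block_size=2:
#   output width  = input_width  * block_size        = 4
#   output height = input_height * block_size        = 4
#   output depth  = input_depth / (block_size ** 2)  = 1
x = tf.reshape(tf.range(16), [1, 2, 2, 4])
y = tf.depth_to_space(x, block_size=2)  # shape [1, 4, 4, 1]
```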
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.depth_to_space.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.depth_to_space.md
index 03dc6bb3b0..ef74b4d54a 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.depth_to_space.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.depth_to_space.md
@@ -10,7 +10,7 @@ The attr `block_size` indicates the input block size and how the data is moved.
* Chunks of data of size `block_size * block_size` from depth are rearranged
into non-overlapping blocks of size `block_size x block_size`
- * The width the output tensor is `input_depth * block_size`, whereas the
+ * The width the output tensor is `input_width * block_size`, whereas the
height is `input_height * block_size`.
* The depth of the input tensor must be divisible by
`block_size * block_size`.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md
index 788d2066ad..8dc62c4c18 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.summary.TaggedRunMetadata.md
@@ -1,8 +1,252 @@
- - -
+#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata}
+
+Magic attribute generated for "run_metadata" proto field.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag}
+
+Magic attribute generated for "tag" proto field.
+
+
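The entries above are the standard protocol buffer message API exposed on the generated class; a hedged round-trip sketch (the tag value is illustrative):

```python
import tensorflow as tf

# Serialize a TaggedRunMetadata message and parse it back.
trm = tf.summary.TaggedRunMetadata(tag="step_100")
data = trm.SerializeToString()
restored = tf.summary.TaggedRunMetadata.FromString(data)
assert restored.tag == "step_100"
```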
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md
new file mode 100644
index 0000000000..bf17320a5a
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard1/tf.merge_all_summaries.md
@@ -0,0 +1,17 @@
+### `tf.merge_all_summaries(*args, **kwargs)` {#merge_all_summaries}
+
+Merges all summaries collected in the default graph. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.merge_all.
+
+ Args:
+ key: `GraphKey` used to collect the summaries. Defaults to
+ `GraphKeys.SUMMARIES`.
+
+ Returns:
+ If no summaries were collected, returns None. Otherwise returns a scalar
+ `Tensor` of type `string` containing the serialized `Summary` protocol
+ buffer resulting from the merging.
+
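A hedged migration sketch for the deprecation notice above (the scalar summary is only there to give `merge_all` something to collect):

```python
import tensorflow as tf

loss = tf.constant(0.25)
tf.summary.scalar("loss", loss)

# Deprecated (removed after 2016-11-30):
# merged = tf.merge_all_summaries()

# Replacement:
merged = tf.summary.merge_all()
```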
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md
new file mode 100644
index 0000000000..6220d3641b
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.image_summary.md
@@ -0,0 +1,49 @@
+### `tf.image_summary(*args, **kwargs)` {#image_summary}
+
+Outputs a `Summary` protocol buffer with images. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.image. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, the max_images argument was renamed to max_outputs.
+
+ The summary has up to `max_images` summary values containing images. The
+ images are built from `tensor` which must be 4-D with shape `[batch_size,
+ height, width, channels]` and where `channels` can be:
+
+ * 1: `tensor` is interpreted as Grayscale.
+ * 3: `tensor` is interpreted as RGB.
+ * 4: `tensor` is interpreted as RGBA.
+
+ The images have the same number of channels as the input tensor. For float
+ input, the values are normalized one image at a time to fit in the range
+ `[0, 255]`. `uint8` values are unchanged. The op uses two different
+ normalization algorithms:
+
+ * If the input values are all positive, they are rescaled so the largest one
+ is 255.
+
+ * If any input value is negative, the values are shifted so input value 0.0
+ is at 127. They are then rescaled so that either the smallest value is 0,
+ or the largest one is 255.
+
+ The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+ build the `tag` of the summary values:
+
+ * If `max_images` is 1, the summary value tag is '*tag*/image'.
+ * If `max_images` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+
+ Args:
+ tag: A scalar `Tensor` of type `string`. Used to build the `tag`
+ of the summary values.
+ tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
+ width, channels]` where `channels` is 1, 3, or 4.
+ max_images: Max number of batch elements to generate images for.
+ collections: Optional list of ops.GraphKeys. The collections to add the
+ summary to. Defaults to [ops.GraphKeys.SUMMARIES]
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
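Likewise, a hedged sketch of the `tf.summary.image` replacement described in the notice (note the `max_images` to `max_outputs` rename; the placeholder shape is illustrative):

```python
import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])

# Deprecated:
# tf.image_summary("digits", images, max_images=3)

# Replacement; the name also becomes the summary node name:
tf.summary.image("digits", images, max_outputs=3)
```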
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md
index 19532f7cc3..bce704ef4f 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.summary.SummaryDescription.md
@@ -1,8 +1,245 @@
- - -
+#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint}
+
+Magic attribute generated for "type_hint" proto field.
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md
index 6dba0e4f1e..ec995fd99c 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.test.TestCase.md
@@ -175,125 +175,6 @@ Checks that for all elements of farray1 and farray2
- - -
-#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween}
-
-Asserts that value is between minv and maxv (inclusive).
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails}
-
-Asserts a shell command fails and the error matches a regex in a list.
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: the list of regular expression strings.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds}
-
-Asserts that a shell command succeeds (i.e. exits with code 0).
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: List of regular expression byte strings that match success.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence}
-
-Assert that "container" contains "subsequence" as an exact subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, and without other elements interspersed. For example, [1, 2, 3] is an
-exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be an exact subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder}
-
-Asserts that the strings provided are found in the target in order.
-
-This may be useful for checking HTML output.
-
-##### Args:
-
-
-* <b>`strings`</b>: A list of strings, such as [ 'fox', 'dog' ]
-* <b>`target`</b>: A target string in which to look for the strings, such as
- 'The quick brown fox jumped over the lazy dog'.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence}
-
-Assert that "container" contains "subsequence" as a subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, but possibly with other elements interspersed. For example, [1, 2, 3]
-is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be a subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset}
-
-Checks whether actual iterable is a superset of expected iterable.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual}
-
-An unordered sequence specific comparison.
-
-Equivalent to assertItemsEqual(). This method is a compatibility layer
-for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
-assertCountEqual() calls.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual}
Asserts that the two given devices are the same.
@@ -314,49 +195,10 @@ Checks whether actual is a superset of expected.
- - -
-#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual}
+#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual}
-Raises AssertionError if a and b are not equal dictionaries.
-
-##### Args:
-
-
-* <b>`a`</b>: A dict, the expected value.
-* <b>`b`</b>: A dict, the actual value.
-* <b>`msg`</b>: An optional str, the associated message.
-
-##### Raises:
-
-
-* <b>`AssertionError`</b>: if the dictionaries are not equal.
-
-
-- - -
-
-#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty}
-
-Assert that an object has zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-- - -
-
-#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith}
-
-Assert that actual.endswith(expected_end) is True.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`expected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
- - -
@@ -440,11 +282,10 @@ Included for symmetry with assertIsNone.
- - -
-#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual}
-
-An unordered sequence specific comparison.
+#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual}
-It asserts that actual_seq and expected_seq have the same element counts.
+An unordered sequence specific comparison. It asserts that
+actual_seq and expected_seq have the same element counts.
Equivalent to::
self.assertEqual(Counter(iter(actual_seq)),
@@ -457,30 +298,6 @@ Asserts that each element has the same count in both sequences.
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual}
-
-Asserts that the JSON objects defined in two strings are equal.
-
-A summary of the differences will be included in the failure message
-using assertSameStructure.
-
-##### Args:
-
-
-* <b>`first`</b>: A string contining JSON to decode and compare to second.
-* <b>`second`</b>: A string contining JSON to decode and compare to first.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
- - -
@@ -552,13 +369,6 @@ if not.
- - -
-#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements}
-
-Checks whether actual iterable and expected iterable are disjoint.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)` {#TestCase.assertNotAlmostEqual}
Fail if the two objects are equal as determined by their
@@ -589,33 +399,6 @@ Objects that are equal automatically fail.
- - -
-#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty}
-
-Assert that an object has non-zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith}
-
-Assert that actual.endswith(unexpected_end) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual}
Fail if the two objects are equal as determined by the '!='
@@ -653,20 +436,6 @@ Fail the test if the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith}
-
-Assert that actual.startswith(unexpected_start) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_start`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals}
Asserts that message is same as parsed expected_message_ascii.
@@ -741,38 +510,6 @@ Asserts that the message in a raised exception matches a regexp.
- - -
-#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch}
-
-Asserts that the message in a raised exception equals the given string.
-
-Unlike assertRaisesRegexp, this method takes a literal string, not
-a regular expression.
-
-with self.assertRaisesWithLiteralMatch(ExType, 'message'):
- DoSomething()
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_exception_message`</b>: String message expected in the raised
- exception. For a raise exception e, expected_exception_message must
- equal str(e).
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra kwargs.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
- self.failureException if callable_obj does not raise a macthing exception.
-
-
-- - -
-
#### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch}
Returns a context manager to enclose code expected to raise an exception.
@@ -797,71 +534,6 @@ predicate search.
- - -
-#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch}
-
-Asserts that the message in a raised exception matches the given regexp.
-
-This is just a wrapper around assertRaisesRegexp. Please use
-assertRaisesRegexp instead of assertRaisesWithRegexpMatch.
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_regexp`</b>: Regexp (re pattern object or string) expected to be
- found in error message.
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra keyword args.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
- self.failureException if callable_obj does not raise a macthing exception.
-
-
-- - -
-
-#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch}
-
-Asserts that at least one regex in regexes matches str.
-
- If possible you should use assertRegexpMatches, which is a simpler
- version of this method. assertRegexpMatches takes a single regular
- expression (a string or re compiled object) instead of a list.
-
- Notes:
- 1. This function uses substring matching, i.e. the matching
- succeeds if *any* substring of the error message matches *any*
- regex in the list. This is more convenient for the user than
- full-string matching.
-
- 2. If regexes is the empty list, the matching will always fail.
-
- 3. Use regexes=[''] for a regex that will always pass.
-
- 4. '.' matches any single character *except* the newline. To
- match any character, use '(.|
-)'.
-
- 5. '^' matches the beginning of each line, not just the beginning
- of the string. Similarly, '$' matches the end of each line.
-
- 6. An exception will be thrown if regexes contains an invalid
- regex.
-
- Args:
- actual_str: The string we try to match with the items in regexes.
- regexes: The regular expressions we want to match against str.
- See "Notes" above for detailed notes on how this is interpreted.
- message: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches}
Fail the test unless the text matches the regular expression.
@@ -869,79 +541,6 @@ Fail the test unless the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements}
-
-Assert that two sequences have the same elements (in any order).
-
-This method, unlike assertItemsEqual, doesn't care about any
-duplicates in the expected and actual sequences.
-
- >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
- # Doesn't raise an AssertionError
-
-If possible, you should use assertItemsEqual instead of
-assertSameElements.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure}
-
-Asserts that two values contain the same structural content.
-
-The two arguments should be data trees consisting of trees of dicts and
-lists. They will be deeply compared by walking into the contents of dicts
-and lists; other items will be compared using the == operator.
-If the two structures differ in content, the failure message will indicate
-the location within the structures where the first difference is found.
-This may be helpful when comparing large structures.
-
-##### Args:
-
-
-* <b>`a`</b>: The first structure to compare.
-* <b>`b`</b>: The second structure to compare.
-* <b>`aname`</b>: Variable name to use for the first structure in assertion messages.
-* <b>`bname`</b>: Variable name to use for the second structure.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual}
-
-An approximate equality assertion for ordered sequences.
-
-Fail if the two sequences are unequal as determined by their value
-differences rounded to the given number of decimal places (default 7) and
-comparing to zero, or by comparing that the difference between each value
-in the two sequences is more than the given delta.
-
-Note that decimal places (from zero) are usually not the same as significant
-digits (measured from the most signficant digit).
-
-If the two sequences compare equal then they will automatically compare
-almost equal.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`places`</b>: The number of decimal places to compare.
-* <b>`msg`</b>: The message to be printed if the test fails.
-* <b>`delta`</b>: The OK difference between compared values.
-
-
-- - -
-
#### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual}
An equality assertion for ordered sequences (like lists and tuples).
@@ -962,26 +561,6 @@ which can be indexed, has a length, and has an equality operator.
- - -
-#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith}
-
-An equality assertion for the beginning of ordered sequences.
-
-If prefix is an empty sequence, it will raise an error unless whole is also
-an empty sequence.
-
-If prefix is not a sequence, it will raise an error if the first element of
-whole does not match.
-
-##### Args:
-
-
-* <b>`prefix`</b>: A sequence expected at the beginning of the whole parameter.
-* <b>`whole`</b>: The sequence in which to look for prefix.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual}
A set-specific equality assertion.
@@ -1033,51 +612,6 @@ Assert that actual.startswith(expected_start) is True.
- - -
-#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered}
-
-Asserts that total ordering has been implemented correctly.
-
-For example, say you have a class A that compares only on its attribute x.
-Comparators other than __lt__ are omitted for brevity.
-
-class A(object):
- def __init__(self, x, y):
- self.x = x
- self.y = y
-
- def __hash__(self):
- return hash(self.x)
-
- def __lt__(self, other):
- try:
- return self.x < other.x
- except AttributeError:
- return NotImplemented
-
-assertTotallyOrdered will check that instances can be ordered correctly.
-For example,
-
-self.assertTotallyOrdered(
- [None], # None should come before everything else.
- [1], # Integers sort earlier.
- [A(1, 'a')],
- [A(2, 'b')], # 2 is after 1.
- [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
- [A(4, 'z')],
- ['foo']) # Strings sort last.
-
-##### Args:
-
-
-* <b>`*groups`</b>: A list of groups of elements. Each group of elements is a list
- of objects that are equal. The elements in each group must be less than
- the elements in the group after it. For example, these groups are
- totally ordered: [None], [1], [2, 2], [3].
-* <b>`**kwargs`</b>: optional msg keyword argument can be passed.
-
-
-- - -
-
#### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue}
Check that the expression is true.
@@ -1100,13 +634,6 @@ A tuple-specific equality assertion.
- - -
-#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual}
-
-Asserts that urls are equal, ignoring ordering of query params.
-
-
-- - -
-
#### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_}
Check that the expression is true.
@@ -1166,9 +693,9 @@ tearDown.
- - -
-#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail}
+#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail}
-Fail immediately with the given message, optionally prefixed.
+Fail immediately, with the given message.
- - -
@@ -1222,13 +749,6 @@ Fail immediately with the given message, optionally prefixed.
- - -
-#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties}
-
-Return any properties that the user has recorded.
-
-
-- - -
-
#### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir}
Returns a unique temporary directory for the test to use.
@@ -1251,20 +771,6 @@ pollute each others environment.
- - -
-#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty}
-
-Record an arbitrary property for later use.
-
-##### Args:
-
-
-* <b>`property_name`</b>: str, name of property to record; must be a valid XML
- attribute name
-* <b>`property_value`</b>: value of property; must be valid XML attribute value
-
-
-- - -
-
#### `tf.test.TestCase.run(result=None)` {#TestCase.run}
@@ -1288,18 +794,11 @@ Hook method for setting up class fixture before running tests in the class.
#### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription}
-Format both the test method name and the first line of its docstring.
-
-If no docstring is given, only returns the method name.
-
-This method overrides unittest.TestCase.shortDescription(), which
-only returns the first line of the docstring, obscuring the name
-of the test upon failure.
-
-##### Returns:
-
+Returns a one-line description of the test, or None if no
+description has been provided.
-* <b>`desc`</b>: A short description of a test method.
+The default implementation of this method returns the first line of
+the specified test method's docstring.
- - -
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md
new file mode 100644
index 0000000000..3ffd9260c7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.scalar_summary.md
@@ -0,0 +1,22 @@
+### `tf.scalar_summary(*args, **kwargs)` {#scalar_summary}
+
+Outputs a `Summary` protocol buffer with scalar values. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
+
+ The input `tags` and `values` must have the same shape. The generated
+ summary has a summary value for each tag-value pair in `tags` and `values`.
+
+ Args:
+ tags: A `string` `Tensor`. Tags for the summaries.
+ values: A real numeric Tensor. Values for the summaries.
+ collections: Optional list of graph collections keys. The new summary op is
+ added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
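
For migration, a minimal sketch of the replacement call, assuming TensorFlow 0.12's `tf.summary.scalar(name, tensor)`; the `loss` tensor below is a made-up placeholder.

```python
import tensorflow as tf

# Made-up scalar to summarize.
loss = tf.constant(0.25, name="loss")

# Deprecated form (tag string, or a tensor/list of tags):
#   summary_op = tf.scalar_summary("loss", loss)
# Replacement: the first argument is a node name, de-duplicated per name scope,
# and only a single real scalar tensor is accepted.
summary_op = tf.summary.scalar("loss", loss)

with tf.Session() as sess:
    serialized = sess.run(summary_op)  # a serialized `Summary` protocol buffer
    print(len(serialized))
```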
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md
new file mode 100644
index 0000000000..3cfd7103d7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard4/tf.summary.SummaryDescription.RegisterExtension.md
@@ -0,0 +1,4 @@
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md
new file mode 100644
index 0000000000..570d7b712c
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.histogram_summary.md
@@ -0,0 +1,26 @@
+### `tf.histogram_summary(*args, **kwargs)` {#histogram_summary}
+
+Outputs a `Summary` protocol buffer with a histogram. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.
+
+ The generated
+ [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+ has one summary value containing a histogram for `values`.
+
+ This op reports an `InvalidArgument` error if any value is not finite.
+
+ Args:
+ tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
+ values: A real numeric `Tensor`. Any shape. Values to use to
+ build the histogram.
+ collections: Optional list of graph collections keys. The new summary op is
+ added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
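
A corresponding sketch for the histogram case, assuming `tf.summary.histogram(name, values)` as the replacement; the `weights` variable is illustrative only.

```python
import tensorflow as tf

# Illustrative variable whose value distribution we want to track.
weights = tf.Variable(tf.truncated_normal([100, 10], stddev=0.1), name="weights")

# Deprecated form: tf.histogram_summary("weights", weights)
# Replacement: the summary is named after the node, scoped by the name scope.
hist_op = tf.summary.histogram("weights", weights)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    serialized = sess.run(hist_op)  # serialized `Summary` proto containing a histogram
```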
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md
new file mode 100644
index 0000000000..ccb984f5ab
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.merge_summary.md
@@ -0,0 +1,27 @@
+### `tf.merge_summary(*args, **kwargs)` {#merge_summary}
+
+Merges summaries. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.merge.
+
+ This op creates a
+ [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+ protocol buffer that contains the union of all the values in the input
+ summaries.
+
+ When the Op is run, it reports an `InvalidArgument` error if multiple values
+ in the summaries to merge use the same tag.
+
+ Args:
+ inputs: A list of `string` `Tensor` objects containing serialized `Summary`
+ protocol buffers.
+ collections: Optional list of graph collections keys. The new summary op is
+ added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer resulting from the merging.
+
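
A short sketch of merging under the new names, assuming `tf.summary.merge` and `tf.summary.merge_all`; the tensors are placeholders.

```python
import tensorflow as tf

x = tf.constant(3.0, name="x")
s1 = tf.summary.scalar("x_value", x)
s2 = tf.summary.histogram("x_hist", tf.fill([10], x))

# Deprecated form: merged = tf.merge_summary([s1, s2])
merged = tf.summary.merge([s1, s2])        # merge an explicit list of summaries
merged_all = tf.summary.merge_all()        # or everything in the SUMMARIES collection

with tf.Session() as sess:
    serialized = sess.run(merged)          # union of the input summary values
```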
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md
new file mode 100644
index 0000000000..24a3b3f10c
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.summary.SummaryDescription.FromString.md
@@ -0,0 +1,4 @@
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md
new file mode 100644
index 0000000000..f2d0c042d7
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.summary.TaggedRunMetadata.RegisterExtension.md
@@ -0,0 +1,4 @@
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md
new file mode 100644
index 0000000000..e9bdda200f
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.train.SummaryWriter.md
@@ -0,0 +1,207 @@
+
+- - -
+
+#### `tf.train.SummaryWriter.__init__(*args, **kwargs)` {#SummaryWriter.__init__}
+
+Creates a `SummaryWriter` and an event file. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.FileWriter. The interface and behavior are the same; this is just a rename.
+
+ This class is deprecated, and should be replaced with tf.summary.FileWriter.
+
+ On construction the summary writer creates a new event file in `logdir`.
+ This event file will contain `Event` protocol buffers constructed when you
+ call one of the following functions: `add_summary()`, `add_session_log()`,
+ `add_event()`, or `add_graph()`.
+
+ If you pass a `Graph` to the constructor it is added to
+ the event file. (This is equivalent to calling `add_graph()` later).
+
+ TensorBoard will pick the graph from the file and display it graphically so
+ you can interactively explore the graph you built. You will usually pass
+ the graph from the session in which you launched it:
+
+ ```python
+ ...create a graph...
+ # Launch the graph in a session.
+ sess = tf.Session()
+ # Create a summary writer, add the 'graph' to the event file.
+ writer = tf.train.SummaryWriter(<some-directory>, sess.graph)
+ ```
+
+ The other arguments to the constructor control the asynchronous writes to
+ the event file:
+
+ * `flush_secs`: How often, in seconds, to flush the added summaries
+ and events to disk.
+ * `max_queue`: Maximum number of summaries or events pending to be
+ written to disk before one of the 'add' calls block.
+
+ Args:
+ logdir: A string. Directory where event file will be written.
+ graph: A `Graph` object, such as `sess.graph`.
+ max_queue: Integer. Size of the queue for pending events and summaries.
+ flush_secs: Number. How often, in seconds, to flush the
+ pending events and summaries to disk.
+ graph_def: DEPRECATED: Use the `graph` argument instead.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_event(event)` {#SummaryWriter.add_event}
+
+Adds an event to the event file.
+
+##### Args:
+
+
+* <b>`event`</b>: An `Event` protocol buffer.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_graph(graph, global_step=None, graph_def=None)` {#SummaryWriter.add_graph}
+
+Adds a `Graph` to the event file.
+
+The graph described by the protocol buffer will be displayed by
+TensorBoard. Most users pass a graph in the constructor instead.
+
+##### Args:
+
+
+* <b>`graph`</b>: A `Graph` object, such as `sess.graph`.
+* <b>`global_step`</b>: Number. Optional global step counter to record with the
+ graph.
+* <b>`graph_def`</b>: DEPRECATED. Use the `graph` parameter instead.
+
+##### Raises:
+
+
+* <b>`ValueError`</b>: If both graph and graph_def are passed to the method.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_meta_graph(meta_graph_def, global_step=None)` {#SummaryWriter.add_meta_graph}
+
+Adds a `MetaGraphDef` to the event file.
+
+The `MetaGraphDef` allows running the given graph via
+`saver.import_meta_graph()`.
+
+##### Args:
+
+
+* <b>`meta_graph_def`</b>: A `MetaGraphDef` object, often returned by
+ `saver.export_meta_graph()`.
+* <b>`global_step`</b>: Number. Optional global step counter to record with the
+ graph.
+
+##### Raises:
+
+
+* <b>`TypeError`</b>: If `meta_graph_def` is not an instance of `MetaGraphDef`.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_run_metadata(run_metadata, tag, global_step=None)` {#SummaryWriter.add_run_metadata}
+
+Adds metadata information for a single session.run() call.
+
+##### Args:
+
+
+* <b>`run_metadata`</b>: A `RunMetadata` protobuf object.
+* <b>`tag`</b>: The tag name for this metadata.
+* <b>`global_step`</b>: Number. Optional global step counter to record with the
+ StepStats.
+
+##### Raises:
+
+
+* <b>`ValueError`</b>: If the provided tag was already used for this type of event.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_session_log(session_log, global_step=None)` {#SummaryWriter.add_session_log}
+
+Adds a `SessionLog` protocol buffer to the event file.
+
+This method wraps the provided session log in an `Event` protocol buffer
+and adds it to the event file.
+
+##### Args:
+
+
+* <b>`session_log`</b>: A `SessionLog` protocol buffer.
+* <b>`global_step`</b>: Number. Optional global step value to record with the
+ summary.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.add_summary(summary, global_step=None)` {#SummaryWriter.add_summary}
+
+Adds a `Summary` protocol buffer to the event file.
+
+This method wraps the provided summary in an `Event` protocol buffer
+and adds it to the event file.
+
+You can pass the result of evaluating any summary op, using
+[`Session.run()`](client.md#Session.run) or
+[`Tensor.eval()`](framework.md#Tensor.eval), to this
+function. Alternatively, you can pass a `tf.Summary` protocol
+buffer that you populate with your own data. The latter is
+commonly done to report evaluation results in event files.
+
+##### Args:
+
+
+* <b>`summary`</b>: A `Summary` protocol buffer, optionally serialized as a string.
+* <b>`global_step`</b>: Number. Optional global step value to record with the
+ summary.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.close()` {#SummaryWriter.close}
+
+Flushes the event file to disk and closes the file.
+
+Call this method when you do not need the summary writer anymore.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.flush()` {#SummaryWriter.flush}
+
+Flushes the event file to disk.
+
+Call this method to make sure that all pending events have been written to
+disk.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.get_logdir()` {#SummaryWriter.get_logdir}
+
+Returns the directory where event file will be written.
+
+
+- - -
+
+#### `tf.train.SummaryWriter.reopen()` {#SummaryWriter.reopen}
+
+Reopens the EventFileWriter.
+
+Can be called after `close()` to add more events in the same directory.
+The events will go into a new events file.
+
+Does nothing if the EventFileWriter was not closed.
+
+
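
A minimal end-to-end sketch using the renamed class, `tf.summary.FileWriter`, with a placeholder log directory.

```python
import tensorflow as tf

logdir = "/tmp/filewriter_demo"            # placeholder directory

x = tf.constant(1.0, name="x")
summary_op = tf.summary.scalar("x_value", x)

with tf.Session() as sess:
    # Drop-in replacement for tf.train.SummaryWriter; also writes the graph.
    writer = tf.summary.FileWriter(logdir, sess.graph)
    for step in range(3):
        writer.add_summary(sess.run(summary_op), global_step=step)
    writer.flush()
    writer.close()
```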
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md
new file mode 100644
index 0000000000..c5830ab550
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.audio_summary.md
@@ -0,0 +1,37 @@
+### `tf.audio_summary(*args, **kwargs)` {#audio_summary}
+
+Outputs a `Summary` protocol buffer with audio. (deprecated)
+
+THIS FUNCTION IS DEPRECATED. It will be removed after 2016-11-30.
+Instructions for updating:
+Please switch to tf.summary.audio. Note that tf.summary.audio uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in.
+
+ The summary has up to `max_outputs` summary values containing audio. The
+ audio is built from `tensor` which must be 3-D with shape `[batch_size,
+ frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+ assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
+ `sample_rate`.
+
+ The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+ build the `tag` of the summary values:
+
+ * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+ * If `max_outputs` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+
+ Args:
+ tag: A scalar `Tensor` of type `string`. Used to build the `tag`
+ of the summary values.
+ tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
+ or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
+ sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
+ signal in hertz.
+ max_outputs: Max number of batch elements to generate audio for.
+ collections: Optional list of ops.GraphKeys. The collections to add the
+ summary to. Defaults to [ops.GraphKeys.SUMMARIES]
+ name: A name for the operation (optional).
+
+ Returns:
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
+ buffer.
+
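
A sketch for the audio case, assuming `tf.summary.audio(name, tensor, sample_rate, max_outputs)` as the replacement; the sine-wave batch is fabricated for illustration.

```python
import numpy as np
import tensorflow as tf

sample_rate = 16000
t = np.linspace(0.0, 1.0, sample_rate).astype(np.float32)
# 2-D [batch_size, frames]; values kept in [-1.0, 1.0].
batch = np.stack([np.sin(2 * np.pi * 440 * t), np.sin(2 * np.pi * 880 * t)])

audio_in = tf.placeholder(tf.float32, shape=[None, sample_rate])
audio_op = tf.summary.audio("tones", audio_in, sample_rate=sample_rate, max_outputs=2)

with tf.Session() as sess:
    serialized = sess.run(audio_op, feed_dict={audio_in: batch})
```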
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md
new file mode 100644
index 0000000000..613f4ebd73
--- /dev/null
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.summary.TaggedRunMetadata.FromString.md
@@ -0,0 +1,4 @@
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+
+
diff --git a/tensorflow/g3doc/api_docs/python/summary.md b/tensorflow/g3doc/api_docs/python/summary.md
index 8d344036db..be029f4290 100644
--- a/tensorflow/g3doc/api_docs/python/summary.md
+++ b/tensorflow/g3doc/api_docs/python/summary.md
@@ -487,11 +487,248 @@ metadata is stored in its NodeDef. This method retrieves the description.
- - -
+#### `tf.summary.SummaryDescription.ByteSize()` {#SummaryDescription.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.Clear()` {#SummaryDescription.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearExtension(extension_handle)` {#SummaryDescription.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ClearField(field_name)` {#SummaryDescription.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.CopyFrom(other_msg)` {#SummaryDescription.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.DiscardUnknownFields()` {#SummaryDescription.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FindInitializationErrors()` {#SummaryDescription.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.FromString(s)` {#SummaryDescription.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasExtension(extension_handle)` {#SummaryDescription.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.HasField(field_name)` {#SummaryDescription.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.IsInitialized(errors=None)` {#SummaryDescription.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ListFields()` {#SummaryDescription.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFrom(msg)` {#SummaryDescription.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.MergeFromString(serialized)` {#SummaryDescription.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.ParseFromString(serialized)` {#SummaryDescription.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.RegisterExtension(extension_handle)` {#SummaryDescription.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializePartialToString()` {#SummaryDescription.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SerializeToString()` {#SummaryDescription.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.SetInParent()` {#SummaryDescription.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.WhichOneof(oneof_name)` {#SummaryDescription.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__deepcopy__(memo=None)` {#SummaryDescription.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__eq__(other)` {#SummaryDescription.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.SummaryDescription.__getstate__()` {#SummaryDescription.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.SummaryDescription.__hash__()` {#SummaryDescription.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__init__(**kwargs)` {#SummaryDescription.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__ne__(other_msg)` {#SummaryDescription.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__repr__()` {#SummaryDescription.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__setstate__(state)` {#SummaryDescription.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__str__()` {#SummaryDescription.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.__unicode__()` {#SummaryDescription.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.SummaryDescription.type_hint` {#SummaryDescription.type_hint}
+
+Magic attribute generated for "type_hint" proto field.
+
+
- - -
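
Since these are standard generated protocol-buffer methods, a quick sketch of a serialize/parse round trip, assuming the `type_hint` field shown above.

```python
import tensorflow as tf

desc = tf.summary.SummaryDescription()
desc.type_hint = "scalar"                          # magic attribute for the proto field

data = desc.SerializeToString()                    # bytes
restored = tf.summary.SummaryDescription.FromString(data)

assert restored.type_hint == "scalar"
assert restored == desc                            # field-by-field message equality
```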
@@ -500,9 +737,253 @@ Support the pickle protocol.
- - -
+#### `tf.summary.TaggedRunMetadata.ByteSize()` {#TaggedRunMetadata.ByteSize}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.Clear()` {#TaggedRunMetadata.Clear}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearExtension(extension_handle)` {#TaggedRunMetadata.ClearExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ClearField(field_name)` {#TaggedRunMetadata.ClearField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.CopyFrom(other_msg)` {#TaggedRunMetadata.CopyFrom}
+
+Copies the content of the specified message into the current message.
+
+The method clears the current message and then merges the specified
+message using MergeFrom.
+
+##### Args:
+
+
+* <b>`other_msg`</b>: Message to copy into the current one.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.DiscardUnknownFields()` {#TaggedRunMetadata.DiscardUnknownFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FindInitializationErrors()` {#TaggedRunMetadata.FindInitializationErrors}
+
+Finds required fields which are not initialized.
+
+##### Returns:
+
+ A list of strings. Each string is a path to an uninitialized field from
+ the top-level message, e.g. "foo.bar[5].baz".
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.FromString(s)` {#TaggedRunMetadata.FromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasExtension(extension_handle)` {#TaggedRunMetadata.HasExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.HasField(field_name)` {#TaggedRunMetadata.HasField}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.IsInitialized(errors=None)` {#TaggedRunMetadata.IsInitialized}
+
+Checks if all required fields of a message are set.
+
+##### Args:
+
+
+* <b>`errors`</b>: A list which, if provided, will be populated with the field
+ paths of all missing required fields.
+
+##### Returns:
+
+ True iff the specified message has all required fields set.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ListFields()` {#TaggedRunMetadata.ListFields}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFrom(msg)` {#TaggedRunMetadata.MergeFrom}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.MergeFromString(serialized)` {#TaggedRunMetadata.MergeFromString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.ParseFromString(serialized)` {#TaggedRunMetadata.ParseFromString}
+
+Parse serialized protocol buffer data into this message.
+
+Like MergeFromString(), except we clear the object first and
+do not return the value that MergeFromString returns.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.RegisterExtension(extension_handle)` {#TaggedRunMetadata.RegisterExtension}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializePartialToString()` {#TaggedRunMetadata.SerializePartialToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SerializeToString()` {#TaggedRunMetadata.SerializeToString}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.SetInParent()` {#TaggedRunMetadata.SetInParent}
+
+Sets the _cached_byte_size_dirty bit to true,
+and propagates this to our listener iff this was a state change.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.WhichOneof(oneof_name)` {#TaggedRunMetadata.WhichOneof}
+
+Returns the name of the currently set field inside a oneof, or None.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__deepcopy__(memo=None)` {#TaggedRunMetadata.__deepcopy__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__eq__(other)` {#TaggedRunMetadata.__eq__}
+
+
+
+
+- - -
+
#### `tf.summary.TaggedRunMetadata.__getstate__()` {#TaggedRunMetadata.__getstate__}
Support the pickle protocol.
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__hash__()` {#TaggedRunMetadata.__hash__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__init__(**kwargs)` {#TaggedRunMetadata.__init__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__ne__(other_msg)` {#TaggedRunMetadata.__ne__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__repr__()` {#TaggedRunMetadata.__repr__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__setstate__(state)` {#TaggedRunMetadata.__setstate__}
+
+Support the pickle protocol.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__str__()` {#TaggedRunMetadata.__str__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.__unicode__()` {#TaggedRunMetadata.__unicode__}
+
+
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.run_metadata` {#TaggedRunMetadata.run_metadata}
+
+Magic attribute generated for "run_metadata" proto field.
+
+
+- - -
+
+#### `tf.summary.TaggedRunMetadata.tag` {#TaggedRunMetadata.tag}
+
+Magic attribute generated for "tag" proto field.
+
+
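
Likewise for `TaggedRunMetadata`, a hedged sketch of filling the `tag` and `run_metadata` fields with a serialized `RunMetadata`, roughly what `FileWriter.add_run_metadata` stores; the import path for `RunMetadata` is an assumption about this TensorFlow version.

```python
import tensorflow as tf
from tensorflow.core.protobuf import config_pb2   # assumed location of RunMetadata

run_metadata = config_pb2.RunMetadata()           # normally produced by Session.run

tagged = tf.summary.TaggedRunMetadata()
tagged.tag = "step_100"
tagged.run_metadata = run_metadata.SerializeToString()  # stored as serialized bytes

print(tagged.tag, len(tagged.SerializeToString()))
```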
diff --git a/tensorflow/g3doc/api_docs/python/test.md b/tensorflow/g3doc/api_docs/python/test.md
index ade1478781..fb7252625b 100644
--- a/tensorflow/g3doc/api_docs/python/test.md
+++ b/tensorflow/g3doc/api_docs/python/test.md
@@ -215,125 +215,6 @@ Checks that for all elements of farray1 and farray2
- - -
-#### `tf.test.TestCase.assertBetween(value, minv, maxv, msg=None)` {#TestCase.assertBetween}
-
-Asserts that value is between minv and maxv (inclusive).
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandFails(command, regexes, env=None, close_fds=True, msg=None)` {#TestCase.assertCommandFails}
-
-Asserts a shell command fails and the error matches a regex in a list.
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: the list of regular expression strings.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCommandSucceeds(command, regexes=('',), env=None, close_fds=True, msg=None)` {#TestCase.assertCommandSucceeds}
-
-Asserts that a shell command succeeds (i.e. exits with code 0).
-
-##### Args:
-
-
-* <b>`command`</b>: List or string representing the command to run.
-* <b>`regexes`</b>: List of regular expression byte strings that match success.
-* <b>`env`</b>: Dictionary of environment variable settings.
-* <b>`close_fds`</b>: Whether or not to close all open fd's in the child after
- forking.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsExactSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsExactSubsequence}
-
-Assert that "container" contains "subsequence" as an exact subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, and without other elements interspersed. For example, [1, 2, 3] is an
-exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be an exact subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsInOrder(strings, target, msg=None)` {#TestCase.assertContainsInOrder}
-
-Asserts that the strings provided are found in the target in order.
-
-This may be useful for checking HTML output.
-
-##### Args:
-
-
-* <b>`strings`</b>: A list of strings, such as [ 'fox', 'dog' ]
-* <b>`target`</b>: A target string in which to look for the strings, such as
- 'The quick brown fox jumped over the lazy dog'.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubsequence(container, subsequence, msg=None)` {#TestCase.assertContainsSubsequence}
-
-Assert that "container" contains "subsequence" as a subsequence.
-
-Asserts that "container" contains all the elements of "subsequence", in
-order, but possibly with other elements interspersed. For example, [1, 2, 3]
-is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].
-
-##### Args:
-
-
-* <b>`container`</b>: the list we're testing for subsequence inclusion.
-* <b>`subsequence`</b>: the list we hope will be a subsequence of container.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertContainsSubset(expected_subset, actual_set, msg=None)` {#TestCase.assertContainsSubset}
-
-Checks whether actual iterable is a superset of expected iterable.
-
-
-- - -
-
-#### `tf.test.TestCase.assertCountEqual(*args, **kwargs)` {#TestCase.assertCountEqual}
-
-An unordered sequence specific comparison.
-
-Equivalent to assertItemsEqual(). This method is a compatibility layer
-for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
-assertCountEqual() calls.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertDeviceEqual(device1, device2)` {#TestCase.assertDeviceEqual}
Asserts that the two given devices are the same.
@@ -354,49 +235,10 @@ Checks whether actual is a superset of expected.
- - -
-#### `tf.test.TestCase.assertDictEqual(a, b, msg=None)` {#TestCase.assertDictEqual}
+#### `tf.test.TestCase.assertDictEqual(d1, d2, msg=None)` {#TestCase.assertDictEqual}
-Raises AssertionError if a and b are not equal dictionaries.
-
-##### Args:
-
-
-* <b>`a`</b>: A dict, the expected value.
-* <b>`b`</b>: A dict, the actual value.
-* <b>`msg`</b>: An optional str, the associated message.
-
-##### Raises:
-
-
-* <b>`AssertionError`</b>: if the dictionaries are not equal.
-
-
-- - -
-
-#### `tf.test.TestCase.assertEmpty(container, msg=None)` {#TestCase.assertEmpty}
-
-Assert that an object has zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-- - -
-
-#### `tf.test.TestCase.assertEndsWith(actual, expected_end, msg=None)` {#TestCase.assertEndsWith}
-
-Assert that actual.endswith(expected_end) is True.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`expected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
- - -
@@ -480,11 +322,10 @@ Included for symmetry with assertIsNone.
- - -
-#### `tf.test.TestCase.assertItemsEqual(*args, **kwargs)` {#TestCase.assertItemsEqual}
-
-An unordered sequence specific comparison.
+#### `tf.test.TestCase.assertItemsEqual(expected_seq, actual_seq, msg=None)` {#TestCase.assertItemsEqual}
-It asserts that actual_seq and expected_seq have the same element counts.
+An unordered sequence specific comparison. It asserts that
+actual_seq and expected_seq have the same element counts.
Equivalent to::
self.assertEqual(Counter(iter(actual_seq)),
@@ -497,30 +338,6 @@ Asserts that each element has the same count in both sequences.
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertJsonEqual(first, second, msg=None)` {#TestCase.assertJsonEqual}
-
-Asserts that the JSON objects defined in two strings are equal.
-
-A summary of the differences will be included in the failure message
-using assertSameStructure.
-
-##### Args:
-
-
-* <b>`first`</b>: A string containing JSON to decode and compare to second.
-* <b>`second`</b>: A string containing JSON to decode and compare to first.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
- - -
@@ -592,13 +409,6 @@ if not.
- - -
-#### `tf.test.TestCase.assertNoCommonElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertNoCommonElements}
-
-Checks whether actual iterable and expected iterable are disjoint.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)` {#TestCase.assertNotAlmostEqual}
Fail if the two objects are equal as determined by their
@@ -629,33 +439,6 @@ Objects that are equal automatically fail.
- - -
-#### `tf.test.TestCase.assertNotEmpty(container, msg=None)` {#TestCase.assertNotEmpty}
-
-Assert that an object has non-zero length.
-
-##### Args:
-
-
-* <b>`container`</b>: Anything that implements the collections.Sized interface.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
-#### `tf.test.TestCase.assertNotEndsWith(actual, unexpected_end, msg=None)` {#TestCase.assertNotEndsWith}
-
-Assert that actual.endswith(unexpected_end) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_end`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertNotEqual(first, second, msg=None)` {#TestCase.assertNotEqual}
Fail if the two objects are equal as determined by the '!='
@@ -693,20 +476,6 @@ Fail the test if the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertNotStartsWith(actual, unexpected_start, msg=None)` {#TestCase.assertNotStartsWith}
-
-Assert that actual.startswith(unexpected_start) is False.
-
-##### Args:
-
-
-* <b>`actual`</b>: str
-* <b>`unexpected_start`</b>: str
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertProtoEquals(expected_message_maybe_ascii, message)` {#TestCase.assertProtoEquals}
Asserts that message is same as parsed expected_message_ascii.
@@ -781,38 +550,6 @@ Asserts that the message in a raised exception matches a regexp.
- - -
-#### `tf.test.TestCase.assertRaisesWithLiteralMatch(expected_exception, expected_exception_message, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithLiteralMatch}
-
-Asserts that the message in a raised exception equals the given string.
-
-Unlike assertRaisesRegexp, this method takes a literal string, not
-a regular expression.
-
-with self.assertRaisesWithLiteralMatch(ExType, 'message'):
- DoSomething()
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_exception_message`</b>: String message expected in the raised
- exception. For a raise exception e, expected_exception_message must
- equal str(e).
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra kwargs.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
- self.failureException if callable_obj does not raise a macthing exception.
-
-
-- - -
-
#### `tf.test.TestCase.assertRaisesWithPredicateMatch(exception_type, expected_err_re_or_predicate)` {#TestCase.assertRaisesWithPredicateMatch}
Returns a context manager to enclose code expected to raise an exception.
@@ -837,71 +574,6 @@ predicate search.
- - -
-#### `tf.test.TestCase.assertRaisesWithRegexpMatch(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs)` {#TestCase.assertRaisesWithRegexpMatch}
-
-Asserts that the message in a raised exception matches the given regexp.
-
-This is just a wrapper around assertRaisesRegexp. Please use
-assertRaisesRegexp instead of assertRaisesWithRegexpMatch.
-
-##### Args:
-
-
-* <b>`expected_exception`</b>: Exception class expected to be raised.
-* <b>`expected_regexp`</b>: Regexp (re pattern object or string) expected to be
- found in error message.
-* <b>`callable_obj`</b>: Function to be called, or None to return a context.
-* <b>`args`</b>: Extra args.
-* <b>`kwargs`</b>: Extra keyword args.
-
-##### Returns:
-
- A context manager if callable_obj is None. Otherwise, None.
-
-##### Raises:
-
- self.failureException if callable_obj does not raise a macthing exception.
-
-
-- - -
-
-#### `tf.test.TestCase.assertRegexMatch(actual_str, regexes, message=None)` {#TestCase.assertRegexMatch}
-
-Asserts that at least one regex in regexes matches str.
-
- If possible you should use assertRegexpMatches, which is a simpler
- version of this method. assertRegexpMatches takes a single regular
- expression (a string or re compiled object) instead of a list.
-
- Notes:
- 1. This function uses substring matching, i.e. the matching
- succeeds if *any* substring of the error message matches *any*
- regex in the list. This is more convenient for the user than
- full-string matching.
-
- 2. If regexes is the empty list, the matching will always fail.
-
- 3. Use regexes=[''] for a regex that will always pass.
-
- 4. '.' matches any single character *except* the newline. To
- match any character, use '(.|
-)'.
-
- 5. '^' matches the beginning of each line, not just the beginning
- of the string. Similarly, '$' matches the end of each line.
-
- 6. An exception will be thrown if regexes contains an invalid
- regex.
-
- Args:
- actual_str: The string we try to match with the items in regexes.
- regexes: The regular expressions we want to match against str.
- See "Notes" above for detailed notes on how this is interpreted.
- message: The message to be printed if the test fails.
-
-
-- - -
-
#### `tf.test.TestCase.assertRegexpMatches(text, expected_regexp, msg=None)` {#TestCase.assertRegexpMatches}
Fail the test unless the text matches the regular expression.
@@ -909,79 +581,6 @@ Fail the test unless the text matches the regular expression.
- - -
-#### `tf.test.TestCase.assertSameElements(expected_seq, actual_seq, msg=None)` {#TestCase.assertSameElements}
-
-Assert that two sequences have the same elements (in any order).
-
-This method, unlike assertItemsEqual, doesn't care about any
-duplicates in the expected and actual sequences.
-
- >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
- # Doesn't raise an AssertionError
-
-If possible, you should use assertItemsEqual instead of
-assertSameElements.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`msg`</b>: The message to be printed if the test fails.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSameStructure(a, b, aname='a', bname='b', msg=None)` {#TestCase.assertSameStructure}
-
-Asserts that two values contain the same structural content.
-
-The two arguments should be data trees consisting of trees of dicts and
-lists. They will be deeply compared by walking into the contents of dicts
-and lists; other items will be compared using the == operator.
-If the two structures differ in content, the failure message will indicate
-the location within the structures where the first difference is found.
-This may be helpful when comparing large structures.
-
-##### Args:
-
-
-* <b>`a`</b>: The first structure to compare.
-* <b>`b`</b>: The second structure to compare.
-* <b>`aname`</b>: Variable name to use for the first structure in assertion messages.
-* <b>`bname`</b>: Variable name to use for the second structure.
-* <b>`msg`</b>: Additional text to include in the failure message.
-
-
-- - -
-
-#### `tf.test.TestCase.assertSequenceAlmostEqual(expected_seq, actual_seq, places=None, msg=None, delta=None)` {#TestCase.assertSequenceAlmostEqual}
-
-An approximate equality assertion for ordered sequences.
-
-Fail if the two sequences are unequal as determined by their value
-differences rounded to the given number of decimal places (default 7) and
-comparing to zero, or by comparing that the difference between each value
-in the two sequences is more than the given delta.
-
-Note that decimal places (from zero) are usually not the same as significant
-digits (measured from the most significant digit).
-
-If the two sequences compare equal then they will automatically compare
-almost equal.
-
-##### Args:
-
-
-* <b>`expected_seq`</b>: A sequence containing elements we are expecting.
-* <b>`actual_seq`</b>: The sequence that we are testing.
-* <b>`places`</b>: The number of decimal places to compare.
-* <b>`msg`</b>: The message to be printed if the test fails.
-* <b>`delta`</b>: The OK difference between compared values.
-
-
-- - -
-
#### `tf.test.TestCase.assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)` {#TestCase.assertSequenceEqual}
An equality assertion for ordered sequences (like lists and tuples).
@@ -1002,26 +601,6 @@ which can be indexed, has a length, and has an equality operator.
- - -
-#### `tf.test.TestCase.assertSequenceStartsWith(prefix, whole, msg=None)` {#TestCase.assertSequenceStartsWith}
-
-An equality assertion for the beginning of ordered sequences.
-
-If prefix is an empty sequence, it will raise an error unless whole is also
-an empty sequence.
-
-If prefix is not a sequence, it will raise an error if the first element of
-whole does not match.
-
-##### Args:
-
-
-* <b>`prefix`</b>: A sequence expected at the beginning of the whole parameter.
-* <b>`whole`</b>: The sequence in which to look for prefix.
-* <b>`msg`</b>: Optional message to report on failure.
-
-
-- - -
-
#### `tf.test.TestCase.assertSetEqual(set1, set2, msg=None)` {#TestCase.assertSetEqual}
A set-specific equality assertion.
@@ -1073,51 +652,6 @@ Assert that actual.startswith(expected_start) is True.
- - -
-#### `tf.test.TestCase.assertTotallyOrdered(*groups, **kwargs)` {#TestCase.assertTotallyOrdered}
-
-Asserts that total ordering has been implemented correctly.
-
-For example, say you have a class A that compares only on its attribute x.
-Comparators other than __lt__ are omitted for brevity.
-
-class A(object):
- def __init__(self, x, y):
- self.x = x
- self.y = y
-
- def __hash__(self):
- return hash(self.x)
-
- def __lt__(self, other):
- try:
- return self.x < other.x
- except AttributeError:
- return NotImplemented
-
-assertTotallyOrdered will check that instances can be ordered correctly.
-For example,
-
-self.assertTotallyOrdered(
- [None], # None should come before everything else.
- [1], # Integers sort earlier.
- [A(1, 'a')],
- [A(2, 'b')], # 2 is after 1.
- [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
- [A(4, 'z')],
- ['foo']) # Strings sort last.
-
-##### Args:
-
-
-* <b>`*groups`</b>: A list of groups of elements. Each group of elements is a list
- of objects that are equal. The elements in each group must be less than
- the elements in the group after it. For example, these groups are
- totally ordered: [None], [1], [2, 2], [3].
-* <b>`**kwargs`</b>: optional msg keyword argument can be passed.
-
-
-- - -
-
#### `tf.test.TestCase.assertTrue(expr, msg=None)` {#TestCase.assertTrue}
Check that the expression is true.
@@ -1140,13 +674,6 @@ A tuple-specific equality assertion.
- - -
-#### `tf.test.TestCase.assertUrlEqual(a, b, msg=None)` {#TestCase.assertUrlEqual}
-
-Asserts that urls are equal, ignoring ordering of query params.
-
-
-- - -
-
#### `tf.test.TestCase.assert_(expr, msg=None)` {#TestCase.assert_}
Check that the expression is true.
@@ -1206,9 +733,9 @@ tearDown.
- - -
-#### `tf.test.TestCase.fail(msg=None, prefix=None)` {#TestCase.fail}
+#### `tf.test.TestCase.fail(msg=None)` {#TestCase.fail}
-Fail immediately with the given message, optionally prefixed.
+Fail immediately, with the given message.
- - -
@@ -1262,13 +789,6 @@ Fail immediately with the given message, optionally prefixed.
- - -
-#### `tf.test.TestCase.getRecordedProperties()` {#TestCase.getRecordedProperties}
-
-Return any properties that the user has recorded.
-
-
-- - -
-
#### `tf.test.TestCase.get_temp_dir()` {#TestCase.get_temp_dir}
Returns a unique temporary directory for the test to use.
@@ -1291,20 +811,6 @@ pollute each others environment.
- - -
-#### `tf.test.TestCase.recordProperty(property_name, property_value)` {#TestCase.recordProperty}
-
-Record an arbitrary property for later use.
-
-##### Args:
-
-
-* <b>`property_name`</b>: str, name of property to record; must be a valid XML
- attribute name
-* <b>`property_value`</b>: value of property; must be valid XML attribute value
-
-
-- - -
-
#### `tf.test.TestCase.run(result=None)` {#TestCase.run}
@@ -1328,18 +834,11 @@ Hook method for setting up class fixture before running tests in the class.
#### `tf.test.TestCase.shortDescription()` {#TestCase.shortDescription}
-Format both the test method name and the first line of its docstring.
-
-If no docstring is given, only returns the method name.
-
-This method overrides unittest.TestCase.shortDescription(), which
-only returns the first line of the docstring, obscuring the name
-of the test upon failure.
-
-##### Returns:
-
+Returns a one-line description of the test, or None if no
+description has been provided.
-* <b>`desc`</b>: A short description of a test method.
+The default implementation of this method returns the first line of
+the specified test method's docstring.
- - -
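
A minimal sketch of a `tf.test.TestCase` subclass exercising the documented helpers (`test_session`, `get_temp_dir`, `assertAllClose`); the test names and values are illustrative.

```python
import os
import tensorflow as tf


class SimpleMathTest(tf.test.TestCase):

  def testAdd(self):
    with self.test_session() as sess:
      total = tf.add(tf.constant(2.0), tf.constant(3.0))
      self.assertAllClose(5.0, sess.run(total))

  def testTempDir(self):
    path = os.path.join(self.get_temp_dir(), "scratch.txt")
    with open(path, "w") as f:
      f.write("ok")
    self.assertTrue(os.path.exists(path))


if __name__ == "__main__":
  tf.test.main()
```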
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index 430bcc38da..b087a8dbc3 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -8,7 +8,7 @@ github source.
The TensorFlow Python API supports Python 2.7 and Python 3.3+.
The GPU version works best with Cuda Toolkit 8.0 and
-cuDNN v5. Other versions are supported (Cuda toolkit >= 7.0 and
+cuDNN v5.1. Other versions are supported (Cuda toolkit >= 7.0 and
cuDNN >= v3) only when installing from sources.
Please see [Cuda installation](#optional-install-cuda-gpus-on-linux) for
details. For Mac OS X, please see
@@ -78,37 +78,37 @@ If the above commands do not work on your system, you can follow these instructi
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py2-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.1-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py2-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.1-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py3-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.1-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py3-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.1-py3-none-any.whl
```
Install TensorFlow:
@@ -150,14 +150,14 @@ Both distributions include pip. To install the CPU-only version of
TensorFlow, enter the following command at a command prompt:
```bat
-C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-0.12.0-cp35-cp35m-win_amd64.whl
+C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-0.12.1-cp35-cp35m-win_amd64.whl
```
To install the GPU version of TensorFlow, enter the following command
at a command prompt:
```bat
-C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-0.12.0-cp35-cp35m-win_amd64.whl
+C:\> pip install --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-0.12.1-cp35-cp35m-win_amd64.whl
```
You can now [test your installation](#test-the-tensorflow-installation).
@@ -212,37 +212,37 @@ Now, install TensorFlow just as you would for a regular Pip installation. First
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.1-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.1-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.1-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.1-py3-none-any.whl
```
Finally install TensorFlow:
@@ -364,37 +364,37 @@ select the correct binary to install:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.1-py2-none-any.whl
# Mac OS X, GPU enabled, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.1-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see "Installing from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.12.1-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.1-py3-none-any.whl
# Mac OS X, GPU enabled, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-0.12.1-py3-none-any.whl
```
Finally install TensorFlow:
@@ -462,7 +462,7 @@ code.
code.
We also have tags with `latest` replaced by a released version (e.g.,
-`0.12.0-gpu`).
+`0.12.1-gpu`).
With Docker the installation is as follows:
@@ -500,14 +500,8 @@ For NVidia GPU support install latest NVidia drivers and
$ nvidia-docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow:latest-gpu
```
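For a reproducible setup you can substitute the released tag mentioned above for `latest`; a hedged sketch (the GPU image shown still requires `nvidia-docker`):

```bash
# Pin the released tag instead of `latest-gpu`; tag name taken from the note above.
$ nvidia-docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow:0.12.1-gpu
```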
-If you have a problem running `nvidia-docker`, then using the default config, we
-include a
-[script](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/docker/docker_run_gpu.sh)
-in the repo with these flags, so the command-line would look like
-
-```bash
-$ path/to/repo/tensorflow/tools/docker/docker_run_gpu.sh -p 8888:8888 gcr.io/tensorflow/tensorflow:latest-gpu
-```
+If you run into a problem running `nvidia-docker`, please report an issue
+[here](https://github.com/NVIDIA/nvidia-docker/issues).
For more details see [TensorFlow docker
readme](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/docker).
@@ -520,7 +514,7 @@ the Docker container.
### (Optional, Linux) Enable GPU Support
If you installed the GPU version of TensorFlow, you must also install the Cuda
-Toolkit 8.0 and cuDNN v5. Please see [Cuda
+Toolkit 8.0 and cuDNN v5.1. Please see [Cuda
installation](#optional-install-cuda-gpus-on-linux).
You also need to set the `LD_LIBRARY_PATH` and `CUDA_HOME` environment
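The rest of that paragraph falls outside this hunk; as a minimal sketch, the exports it refers to typically look like this, assuming the toolkit is installed under `/usr/local/cuda`:

```bash
# Assumption: CUDA toolkit installed under /usr/local/cuda; adjust the paths otherwise.
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64"
export CUDA_HOME=/usr/local/cuda
```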
@@ -634,7 +628,7 @@ Install the toolkit into e.g. `/usr/local/cuda`.
[https://developer.nvidia.com/cudnn](https://developer.nvidia.com/cudnn)
-Download cuDNN v5.
+Download cuDNN v5.1.
Uncompress and copy the cuDNN files into the toolkit directory. Assuming the
toolkit is installed in `/usr/local/cuda`, run the following commands (edited
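The commands themselves sit outside this hunk; a hedged sketch of what they amount to (the archive name below is illustrative — use the cuDNN v5.1 package you actually downloaded):

```bash
# Illustrative archive name for cuDNN v5.1; substitute the file you downloaded.
tar xvzf cudnn-8.0-linux-x64-v5.1-ga.tgz
sudo cp -P cuda/include/cudnn.h /usr/local/cuda/include
sudo cp -P cuda/lib64/libcudnn* /usr/local/cuda/lib64
sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
```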
@@ -870,7 +864,7 @@ $ bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_pack
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
# The name of the .whl file will depend on your platform.
-$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.12.0-py2-none-any.whl
+$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.12.1-py2-none-any.whl
```
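A quick way to confirm that the wheel just installed is the rebuilt 0.12.1 package (a sketch; assumes `python` resolves to the interpreter you installed into):

```bash
# Should print 0.12.1 if the freshly built wheel is the one on the path.
python -c "import tensorflow as tf; print(tf.__version__)"
```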
## Optimizing CPU performance
diff --git a/tensorflow/g3doc/tutorials/mnist/beginners/index.md b/tensorflow/g3doc/tutorials/mnist/beginners/index.md
index e5d3f28de6..9b6caf8358 100644
--- a/tensorflow/g3doc/tutorials/mnist/beginners/index.md
+++ b/tensorflow/g3doc/tutorials/mnist/beginners/index.md
@@ -376,22 +376,20 @@ to your graph which implement backpropagation and gradient descent. Then it
gives you back a single operation which, when run, does a step of gradient
descent training, slightly tweaking your variables to reduce the loss.
-Now we have our model set up to train. One last thing before we launch it, we
-have to create an operation to initialize the variables we created. Note that
-this defines the operation but does not run it yet:
+
+We can now launch the model in an `InteractiveSession`:
```python
-init = tf.global_variables_initializer()
+sess = tf.InteractiveSession()
```
-We can now launch the model in a `Session`, and now we run the operation that
-initializes the variables:
+We first have to create an operation to initialize the variables we created:
```python
-sess = tf.Session()
-sess.run(init)
+tf.global_variables_initializer().run()
```
+
Let's train -- we'll run the training step 1000 times!
```python
diff --git a/tensorflow/java/README.md b/tensorflow/java/README.md
index 1eea76c48a..1811fed73a 100644
--- a/tensorflow/java/README.md
+++ b/tensorflow/java/README.md
@@ -32,14 +32,33 @@ Java bindings for TensorFlow.
## Installation
-Build the Java Archive and native library:
+Build the Java Archive (JAR) and native library:
```sh
bazel build -c opt \
- //tensorflow/java:libtensorflow.jar \
+ //tensorflow/java:tensorflow \
//tensorflow/java:libtensorflow-jni
```
+To use the library in an external Java project, publish it to a Maven repository. For example,
+publish to the local Maven repository using the `mvn` tool (installed separately):
+
+```sh
+mvn install:install-file \
+ -Dfile=bazel-bin/tensorflow/java/libtensorflow.jar \
+ -DpomFile=tensorflow/java/pom.xml
+```
+
+Refer to the library using its Maven coordinates. For example, if you're using Maven, add this dependency to your `pom.xml` file:
+
+```xml
+<dependency>
+ <groupId>org.tensorflow</groupId>
+ <artifactId>libtensorflow</artifactId>
+ <version>0.12.0-SNAPSHOT</version>
+</dependency>
+```
+
## Example
### With bazel
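The example itself falls outside this hunk; a minimal command-line sketch of using the artifacts built above (here `MyClass.java` is a hypothetical program that uses the `org.tensorflow` package, and the paths are the Bazel output locations from the build step):

```sh
# MyClass.java is hypothetical; classpath and JNI paths come from the bazel build above.
javac -cp bazel-bin/tensorflow/java/libtensorflow.jar MyClass.java
java -cp bazel-bin/tensorflow/java/libtensorflow.jar:. \
     -Djava.library.path=bazel-bin/tensorflow/java \
     MyClass
```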
diff --git a/tensorflow/java/pom.xml b/tensorflow/java/pom.xml
new file mode 100644
index 0000000000..d3167e0df2
--- /dev/null
+++ b/tensorflow/java/pom.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.tensorflow</groupId>
+ <artifactId>libtensorflow</artifactId>
+ <version>0.12.0-SNAPSHOT</version>
+ <packaging>jar</packaging>
+
+ <name>tensorflow</name>
+ <url>https://www.tensorflow.org</url>
+ <inceptionYear>2015</inceptionYear>
+
+ <licenses>
+ <license>
+ <name>The Apache Software License, Version 2.0</name>
+ <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+ <distribution>repo</distribution>
+ </license>
+ </licenses>
+
+ <scm>
+ <url>https://github.com/tensorflow/tensorflow.git</url>
+ <connection>git@github.com:tensorflow/tensorflow.git</connection>
+ <developerConnection>scm:git:https://github.com/tensorflow/tensorflow.git</developerConnection>
+ </scm>
+
+</project>
diff --git a/tensorflow/tools/ci_build/builds/android_nightly.sh b/tensorflow/tools/ci_build/builds/android_nightly.sh
index fae7efcf68..d3a94d1be0 100644
--- a/tensorflow/tools/ci_build/builds/android_nightly.sh
+++ b/tensorflow/tools/ci_build/builds/android_nightly.sh
@@ -16,12 +16,24 @@
set -e
+copy_lib() {
+ FILE=$1
+ TARGET_DIR=${OUT_DIR}/native/$(basename $FILE)/${CPU}
+ mkdir -p ${TARGET_DIR}
+ echo "Copying ${FILE} to ${TARGET_DIR}"
+ cp ${FILE} ${TARGET_DIR}
+}
+
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/builds_common.sh"
configure_android_workspace
CPUS=armeabi-v7a,arm64-v8a,x86,x86_64
+OUT_DIR="$(pwd)/out/"
+
+rm -rf ${OUT_DIR}
+
# Build all relevant native libraries for each architecture.
for CPU in ${CPUS//,/ }
do
@@ -32,6 +44,10 @@ do
//tensorflow/core:android_tensorflow_lib \
//tensorflow/contrib/android:libtensorflow_inference.so \
//tensorflow/examples/android:libtensorflow_demo.so
+
+ copy_lib bazel-bin/tensorflow/core/libandroid_tensorflow_lib.lo
+ copy_lib bazel-bin/tensorflow/contrib/android/libtensorflow_inference.so
+ copy_lib bazel-bin/tensorflow/examples/android/libtensorflow_demo.so
done
# Build Jar and also demo containing native libs for all architectures.
@@ -39,3 +55,7 @@ echo "========== Building TensorFlow Android Jar and Demo =========="
bazel build -c opt --fat_apk_cpu=${CPUS} \
//tensorflow/contrib/android:android_tensorflow_inference_java \
//tensorflow/examples/android:tensorflow_demo
+
+echo "Copying demo and Jar to ${OUT_DIR}"
+cp bazel-bin/tensorflow/examples/android/tensorflow_demo.apk \
+ bazel-bin/tensorflow/contrib/android/libandroid_tensorflow_inference_java.jar ${OUT_DIR}
diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh
index 351e9f0e96..50d61d8c31 100755
--- a/tensorflow/tools/ci_build/ci_parameterized_build.sh
+++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh
@@ -18,7 +18,7 @@
# ci_parameterized_build.sh
#
# The script obeys the following required environment variables:
-# TF_BUILD_CONTAINER_TYPE: (CPU | GPU | ANDROID)
+# TF_BUILD_CONTAINER_TYPE: (CPU | GPU | ANDROID | ANDROID_NIGHTLY)
# TF_BUILD_PYTHON_VERSION: (PYTHON2 | PYTHON3 | PYTHON3.5)
# TF_BUILD_IS_PIP: (NO_PIP | PIP | BOTH)
#
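A hedged example of driving the script through those variables for the new Android nightly path (the values are illustrative; any of the documented alternatives work):

```bash
# Illustrative invocation; the three variables are the required ones listed above.
export TF_BUILD_CONTAINER_TYPE=ANDROID_NIGHTLY
export TF_BUILD_PYTHON_VERSION=PYTHON2
export TF_BUILD_IS_PIP=NO_PIP
tensorflow/tools/ci_build/ci_parameterized_build.sh
```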
@@ -127,6 +127,7 @@ PIP_CMD="${CI_BUILD_DIR}/builds/pip.sh"
PIP_TEST_TUTORIALS_FLAG="--test_tutorials"
PIP_INTEGRATION_TESTS_FLAG="--integration_tests"
ANDROID_CMD="${CI_BUILD_DIR}/builds/android.sh"
+ANDROID_NIGHTLY_CMD="${CI_BUILD_DIR}/builds/android_nightly.sh"
TF_GPU_COUNT=${TF_GPU_COUNT:-8}
PARALLEL_GPU_TEST_CMD='//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute'
@@ -236,7 +237,7 @@ elif [[ ${CTYPE} == "gpu" ]]; then
echo ""
fi
fi
-elif [[ ${CTYPE} == "android" ]]; then
+elif [[ ${CTYPE} == "android" ]] || [[ ${CTYPE} == "android_nightly" ]]; then
:
else
die "Unrecognized value in TF_BUILD_CONTAINER_TYPE: "\
@@ -343,6 +344,9 @@ if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]] ||
elif [[ ${CTYPE} == "android" ]]; then
# Run android specific script for android build.
NO_PIP_MAIN_CMD="${ANDROID_CMD} ${OPT_FLAG} "
+ elif [[ ${CTYPE} == "android_nightly" ]]; then
+ # Run android specific script for android nightly build.
+ NO_PIP_MAIN_CMD="${ANDROID_NIGHTLY_CMD} ${OPT_FLAG} "
fi
fi
diff --git a/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
index c31d6f92b3..e809e89a41 100644
--- a/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
+++ b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
@@ -99,33 +99,29 @@ exclude_gpu_cc_tests="${extra_failing_gpu_cc_tests} + ${exclude_cpu_cc_tests}"
# The first argument is the name of the python test directory
function get_failing_cpu_py_tests() {
echo "
- //$1/tensorflow/python/kernel_tests:rnn_test + \
- //$1/tensorflow/python/kernel_tests:sets_test + \
- //$1/tensorflow/python/debug:cli_shared_test + \
- //$1/tensorflow/python/debug:command_parser_test + \
- //$1/tensorflow/python/debug:debug_data_test + \
- //$1/tensorflow/python/debug:debug_utils_test + \
- //$1/tensorflow/python/debug:debugger_cli_common_test + \
- //$1/tensorflow/python/debug:framework_test + \
- //$1/tensorflow/python/debug:local_cli_wrapper_test + \
- //$1/tensorflow/python/debug:tensor_format_test + \
- //$1/tensorflow/python:saver_large_variable_test + \
- //$1/tensorflow/python:session_test + \
//$1/tensorflow/python:basic_session_run_hooks_test + \
//$1/tensorflow/python:contrib_test + \
- //$1/tensorflow/python/debug:analyzer_cli_test + \
- //$1/tensorflow/python/debug:curses_ui_test + \
- //$1/tensorflow/python/debug:session_debug_file_test + \
- //$1/tensorflow/python/debug:stepper_test + \
//$1/tensorflow/python:dequantize_op_test + \
//$1/tensorflow/python:directory_watcher_test + \
//$1/tensorflow/python:event_multiplexer_test + \
//$1/tensorflow/python:file_io_test + \
+ //$1/tensorflow/python:file_system_test + \
//$1/tensorflow/python:framework_meta_graph_test + \
//$1/tensorflow/python:framework_ops_test + \
//$1/tensorflow/python:framework_tensor_util_test + \
//$1/tensorflow/python:framework_test_util_test + \
//$1/tensorflow/python:image_ops_test + \
+ //$1/tensorflow/python:localhost_cluster_performance_test + \
+ //$1/tensorflow/python:monitored_session_test + \
+ //$1/tensorflow/python:nn_batchnorm_test + \
+ //$1/tensorflow/python:protobuf_compare_test + \
+ //$1/tensorflow/python:quantized_conv_ops_test + \
+ //$1/tensorflow/python:saver_large_variable_test + \
+ //$1/tensorflow/python:saver_test + \
+ //$1/tensorflow/python:session_test + \
+ //$1/tensorflow/python:supervisor_test + \
+ //$1/tensorflow/python:sync_replicas_optimizer_test + \
+ //$1/tensorflow/python/debug/... + \
//$1/tensorflow/python/kernel_tests:as_string_op_test + \
//$1/tensorflow/python/kernel_tests:benchmark_test + \
//$1/tensorflow/python/kernel_tests:cast_op_test + \
@@ -135,25 +131,22 @@ function get_failing_cpu_py_tests() {
//$1/tensorflow/python/kernel_tests:depthwise_conv_op_test + \
//$1/tensorflow/python/kernel_tests:functional_ops_test + \
//$1/tensorflow/python/kernel_tests:py_func_test + \
+ //$1/tensorflow/python/kernel_tests:rnn_test + \
+ //$1/tensorflow/python/kernel_tests:sets_test + \
//$1/tensorflow/python/kernel_tests:sparse_matmul_op_test + \
//$1/tensorflow/python/kernel_tests:string_to_number_op_test + \
//$1/tensorflow/python/kernel_tests:summary_ops_test + \
//$1/tensorflow/python/kernel_tests:variable_scope_test + \
- //$1/tensorflow/python:monitored_session_test + \
- //$1/tensorflow/python:nn_batchnorm_test + \
- //$1/tensorflow/python:protobuf_compare_test + \
- //$1/tensorflow/python:quantized_conv_ops_test + \
- //$1/tensorflow/python:saver_test + \
- //$1/tensorflow/python:file_system_test \
+ //$1/tensorflow/python/saved_model:saved_model_test \
"
}
function get_failing_gpu_py_tests() {
echo "
- //$1/tensorflow/python/kernel_tests:rnn_test + \
- //$1/tensorflow/python/kernel_tests:sets_test + \
//$1/tensorflow/python/kernel_tests:diag_op_test + \
//$1/tensorflow/python/kernel_tests:one_hot_op_test + \
+ //$1/tensorflow/python/kernel_tests:rnn_test + \
+ //$1/tensorflow/python/kernel_tests:sets_test + \
//$1/tensorflow/python/kernel_tests:trace_op_test + \
$(get_failing_cpu_py_tests $1)
"
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 709b7c9c08..2531e6a96e 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -29,7 +29,7 @@ from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
-_VERSION = '0.12.0'
+_VERSION = '0.12.1'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 7194f2a0f6..73f8b2cc63 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -106,7 +106,7 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
],
sha256 = "c15a9607892113946379ccea3ca8b85018301b200754f209453ab21674268e77",
strip_prefix = "libjpeg-turbo-1.5.1",
- build_file = str(Label("//third_party:jpeg.BUILD")),
+ build_file = str(Label("//third_party/jpeg:jpeg.BUILD")),
)
native.new_http_archive(
diff --git a/third_party/common.bzl b/third_party/common.bzl
new file mode 100644
index 0000000000..db981a5e31
--- /dev/null
+++ b/third_party/common.bzl
@@ -0,0 +1,43 @@
+# Rule for simple expansion of template files. This performs a simple
+# search over the template file for the keys in substitutions,
+# and replaces them with the corresponding values.
+#
+# Typical usage:
+# load("//third_party:common.bzl", "template_rule")
+# template_rule(
+# name = "ExpandMyTemplate",
+# src = "my.template",
+# out = "my.txt",
+# substitutions = {
+# "$VAR1": "foo",
+# "$VAR2": "bar",
+# }
+# )
+#
+# Args:
+# name: The name of the rule.
+# src: The template file to expand
+# out: The destination of the expanded file
+# substitutions: A dictionary mapping strings to their substitutions
+
+def template_rule_impl(ctx):
+ ctx.template_action(
+ template = ctx.file.src,
+ output = ctx.outputs.out,
+ substitutions = ctx.attr.substitutions,
+ )
+
+template_rule = rule(
+ attrs = {
+ "src": attr.label(
+ mandatory = True,
+ allow_files = True,
+ single_file = True,
+ ),
+ "substitutions": attr.string_dict(mandatory = True),
+ "out": attr.output(mandatory = True),
+ },
+ # output_to_genfiles is required for header files.
+ output_to_genfiles = True,
+ implementation = template_rule_impl,
+)
diff --git a/third_party/jpeg/BUILD b/third_party/jpeg/BUILD
new file mode 100644
index 0000000000..5b01f6e3e4
--- /dev/null
+++ b/third_party/jpeg/BUILD
@@ -0,0 +1 @@
+licenses(["notice"])
diff --git a/third_party/jpeg/jpeg.BUILD b/third_party/jpeg/jpeg.BUILD
index cbc1e86e51..37401b41d0 100644
--- a/third_party/jpeg/jpeg.BUILD
+++ b/third_party/jpeg/jpeg.BUILD
@@ -5,6 +5,8 @@ licenses(["notice"]) # custom notice-style license, see LICENSE.md
exports_files(["LICENSE.md"])
+load("@//third_party:common.bzl", "template_rule")
+
libjpegturbo_nocopts = "-[W]error"
libjpegturbo_copts = select({
@@ -274,50 +276,118 @@ cc_library(
nocopts = libjpegturbo_nocopts,
)
+template_rule(
+ name = "jconfig_win",
+ src = "win/jconfig.h.in",
+ out = "jconfig_win.h",
+ substitutions = {
+ "@JPEG_LIB_VERSION@": "62",
+ "@VERSION@": "1.5.1",
+ "@LIBJPEG_TURBO_VERSION_NUMBER@": "1005001",
+ "cmakedefine": "define",
+ "@BITS_IN_JSAMPLE@": "8",
+ },
+)
+
+template_rule(
+ name = "jconfigint_win",
+ src = "win/jconfigint.h.in",
+ out = "jconfigint_win.h",
+ substitutions = {
+ "@VERSION@": "1.5.1",
+ "@BUILD@": "20161115",
+ "@CMAKE_PROJECT_NAME@": "libjpeg-turbo",
+ },
+)
+
+JCONFIG_NOWIN_COMMON_SUBSTITUTIONS = {
+ "LIBJPEG_TURBO_VERSION 0": "LIBJPEG_TURBO_VERSION 1.5.1",
+ "LIBJPEG_TURBO_VERSION_NUMBER 0": "LIBJPEG_TURBO_VERSION_NUMBER 1005001",
+ "#undef C_ARITH_CODING_SUPPORTED": "#define C_ARITH_CODING_SUPPORTED 1",
+ "#undef D_ARITH_CODING_SUPPORTED": "#define D_ARITH_CODING_SUPPORTED 1",
+ "#undef HAVE_LOCALE_H": "#define HAVE_LOCALE_H 1",
+ "#undef HAVE_STDDEF_H": "#define HAVE_STDDEF_H 1",
+ "#undef HAVE_STDLIB_H": "#define HAVE_STDLIB_H 1",
+ "#undef HAVE_UNSIGNED_CHAR": "#define HAVE_UNSIGNED_CHAR 1",
+ "#undef HAVE_UNSIGNED_SHORT": "#define HAVE_UNSIGNED_SHORT 1",
+ "#undef INCOMPLETE_TYPES_BROKEN": "",
+ "#undef MEM_SRCDST_SUPPORTED": "#define MEM_SRCDST_SUPPORTED 1",
+ "#undef NEED_BSD_STRINGS": "",
+ "#undef NEED_SYS_TYPES_H": "#define NEED_SYS_TYPES_H 1",
+ "#undef __CHAR_UNSIGNED__": "",
+ "#undef const": "",
+ "#undef size_t": "",
+ "#undef RIGHT_SHIFT_IS_UNSIGNED": "",
+}
+
+JCONFIG_NOWIN_SIMD_SUBSTITUTIONS = JCONFIG_NOWIN_COMMON_SUBSTITUTIONS + {
+ "#undef WITH_SIMD": "#define WITH_SIMD 1",
+}
+
+JCONFIG_NOWIN_NOSIMD_SUBSTITUTIONS = JCONFIG_NOWIN_COMMON_SUBSTITUTIONS + {
+ "#undef WITH_SIMD": "",
+}
+
+template_rule(
+ name = "jconfig_nowin_nosimd",
+ src = "jconfig.h.in",
+ out = "jconfig_nowin_nosimd.h",
+ substitutions = JCONFIG_NOWIN_NOSIMD_SUBSTITUTIONS,
+)
+
+template_rule(
+ name = "jconfig_nowin_simd",
+ src = "jconfig.h.in",
+ out = "jconfig_nowin_simd.h",
+ substitutions = JCONFIG_NOWIN_SIMD_SUBSTITUTIONS,
+)
+
+template_rule(
+ name = "jconfigint_nowin",
+ src = "jconfigint.h.in",
+ out = "jconfigint_nowin.h",
+ substitutions = {
+ "#undef BUILD": "#define BUILD \"20161115\"",
+ "#undef inline": "",
+ "#undef INLINE": "#define INLINE inline __attribute__((always_inline))",
+ "#undef PACKAGE_NAME": "#define PACKAGE_NAME \"libjpeg-turbo\"",
+ "#undef VERSION": "#define VERSION \"1.5.1\"",
+ "#undef SIZEOF_SIZE_T": "#if (__WORDSIZE==64 && !defined(__native_client__))\n" +
+ "#define SIZEOF_SIZE_T 8\n" +
+ "#else\n" +
+ "#define SIZEOF_SIZE_T 4\n" +
+ "#endif\n",
+ },
+)
+
genrule(
name = "configure",
+ srcs = [
+ "jconfig_win.h",
+ "jconfig_nowin_nosimd.h",
+ "jconfig_nowin_simd.h",
+ ],
outs = ["jconfig.h"],
- cmd = "cat <<'EOF' >$@\n" +
- "#define JPEG_LIB_VERSION 62\n" +
- "#define LIBJPEG_TURBO_VERSION 1.5.1\n" +
- "#define LIBJPEG_TURBO_VERSION_NUMBER 1005001\n" +
- "#define C_ARITH_CODING_SUPPORTED 1\n" +
- "#define D_ARITH_CODING_SUPPORTED 1\n" +
- "#define BITS_IN_JSAMPLE 8\n" +
- "#define HAVE_LOCALE_H 1\n" +
- "#define HAVE_STDDEF_H 1\n" +
- "#define HAVE_STDLIB_H 1\n" +
- "#define HAVE_UNSIGNED_CHAR 1\n" +
- "#define HAVE_UNSIGNED_SHORT 1\n" +
- "#define MEM_SRCDST_SUPPORTED 1\n" +
- "#define NEED_SYS_TYPES_H 1\n" +
- select({
- ":k8": "#define WITH_SIMD 1\n",
- ":armeabi-v7a": "#define WITH_SIMD 1\n",
- ":arm64-v8a": "#define WITH_SIMD 1\n",
- "//conditions:default": "",
- }) +
- "EOF",
+ cmd = select({
+ ":windows": "cp $(location jconfig_win.h) $@",
+ ":k8": "cp $(location jconfig_nowin_simd.h) $@",
+ ":armeabi-v7a": "cp $(location jconfig_nowin_simd.h) $@",
+ ":arm64-v8a": "cp $(location jconfig_nowin_simd.h) $@",
+ "//conditions:default": "cp $(location jconfig_nowin_nosimd.h) $@",
+ }),
)
genrule(
name = "configure_internal",
+ srcs = [
+ "jconfigint_win.h",
+ "jconfigint_nowin.h",
+ ],
outs = ["jconfigint.h"],
- cmd = "cat <<'EOF' >$@\n" +
- "#define BUILD \"20161115\"\n" +
- "#ifdef _MSC_VER /* Windows */\n" +
- "#define INLINE __inline\n" +
- "#else\n" +
- "#define INLINE inline __attribute__((always_inline))\n" +
- "#endif\n" +
- "#define PACKAGE_NAME \"libjpeg-turbo\"\n" +
- "#define VERSION \"1.5.1\"\n" +
- "#if (__WORDSIZE==64 && !defined(__native_client__)) || defined(_WIN64)\n" +
- "#define SIZEOF_SIZE_T 8\n" +
- "#else\n" +
- "#define SIZEOF_SIZE_T 4\n" +
- "#endif\n" +
- "EOF",
+ cmd = select({
+ ":windows": "cp $(location jconfigint_win.h) $@",
+ "//conditions:default": "cp $(location jconfigint_nowin.h) $@",
+ }),
)
# jiminy cricket the way this file is generated is completely outrageous
diff --git a/third_party/llvm/llvm.BUILD b/third_party/llvm/llvm.BUILD
index 711ae38c3a..c6ed697013 100644
--- a/third_party/llvm/llvm.BUILD
+++ b/third_party/llvm/llvm.BUILD
@@ -10,10 +10,13 @@ load(
"@//third_party/llvm:llvm.bzl",
"gentbl",
"expand_cmake_vars",
- "expand_header_template",
"llvm_target_cmake_vars",
"cmake_var_string",
)
+load(
+ "@//third_party:common.bzl",
+ "template_rule",
+)
package(default_visibility = ["@//tensorflow/compiler/xla:internal"])
@@ -176,48 +179,48 @@ expand_cmake_vars(
)
# Performs macro expansions on .def.in files
-expand_header_template(
+template_rule(
name = "targets_def_gen",
+ src = "include/llvm/Config/Targets.def.in",
out = "include/llvm/Config/Targets.def",
substitutions = {
"@LLVM_ENUM_TARGETS@": "\n".join(
["LLVM_TARGET({})".format(t) for t in llvm_targets],
),
},
- template = "include/llvm/Config/Targets.def.in",
)
-expand_header_template(
+template_rule(
name = "asm_parsers_def_gen",
+ src = "include/llvm/Config/AsmParsers.def.in",
out = "include/llvm/Config/AsmParsers.def",
substitutions = {
"@LLVM_ENUM_ASM_PARSERS@": "\n".join(
["LLVM_ASM_PARSER({})".format(t) for t in llvm_target_asm_parsers],
),
},
- template = "include/llvm/Config/AsmParsers.def.in",
)
-expand_header_template(
+template_rule(
name = "asm_printers_def_gen",
+ src = "include/llvm/Config/AsmPrinters.def.in",
out = "include/llvm/Config/AsmPrinters.def",
substitutions = {
"@LLVM_ENUM_ASM_PRINTERS@": "\n".join(
["LLVM_ASM_PRINTER({})".format(t) for t in llvm_target_asm_printers],
),
},
- template = "include/llvm/Config/AsmPrinters.def.in",
)
-expand_header_template(
+template_rule(
name = "disassemblers_def_gen",
+ src = "include/llvm/Config/Disassemblers.def.in",
out = "include/llvm/Config/Disassemblers.def",
substitutions = {
"@LLVM_ENUM_DISASSEMBLERS@": "\n".join(
["LLVM_DISASSEMBLER({})".format(t) for t in llvm_target_disassemblers],
),
},
- template = "include/llvm/Config/Disassemblers.def.in",
)
# A common library that all LLVM targets depend on.
diff --git a/third_party/llvm/llvm.bzl b/third_party/llvm/llvm.bzl
index 1d6bf1c706..4a0814fb7f 100644
--- a/third_party/llvm/llvm.bzl
+++ b/third_party/llvm/llvm.bzl
@@ -46,48 +46,6 @@ def gentbl(name, tblgen, td_file, td_srcs, tbl_outs, library = True, **kwargs):
native.cc_library(name=name, textual_hdrs=[f for (_, f) in tbl_outs],
includes=includes, **kwargs)
-
-# Rule for simple expansion of template files. This performs a simple
-# search over the template file for the keys in substitutions,
-# and replaces them with the corresponding values.
-#
-# Typical usage:
-# load("/tools/build_rules/expand_header_template", "expand_header_template")
-# expand_header_template(
-# name = "ExpandMyTemplate",
-# template = "my.template",
-# out = "my.txt",
-# substitutions = {
-# "$VAR1": "foo",
-# "$VAR2": "bar",
-# }
-# )
-#
-# Args:
-# name: The name of the rule.
-# template: The template file to expand
-# out: The destination of the expanded file
-# substitutions: A dictionary mapping strings to their substitutions
-
-def expand_header_template_impl(ctx):
- ctx.template_action(
- template = ctx.file.template,
- output = ctx.outputs.out,
- substitutions = ctx.attr.substitutions,
- )
-
-expand_header_template = rule(
- implementation = expand_header_template_impl,
- attrs = {
- "template": attr.label(mandatory=True, allow_files=True, single_file=True),
- "substitutions": attr.string_dict(mandatory=True),
- "out": attr.output(mandatory=True),
- },
- # output_to_genfiles is required for header files.
- output_to_genfiles = True,
-)
-
-
def llvm_target_cmake_vars(native_arch, target_triple):
return {
"LLVM_HOST_TRIPLE": target_triple,