aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
authorGravatar Benoit Steiner <bsteiner@google.com>2016-08-16 14:01:13 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2016-08-16 15:17:08 -0700
commit8b3d8c8b5e96036ec78e01292c56ce099c5a62ab (patch)
tree5942281d5ece05ae07c144031ea69c40f9691545
parentf5abc68bd74a8ecec037acac3e0851419201fc48 (diff)
Merge changes from github.
Change: 130451359
-rw-r--r--tensorflow/c/c_api.h2
-rw-r--r--tensorflow/contrib/learn/python/learn/__init__.py2
-rw-r--r--tensorflow/contrib/learn/python/learn/graph_actions.py8
-rw-r--r--tensorflow/contrib/makefile/README.md9
-rwxr-xr-xtensorflow/contrib/makefile/build_all_ios.sh12
-rwxr-xr-xtensorflow/contrib/makefile/download_dependencies.sh20
-rwxr-xr-xtensorflow/contrib/makefile/rename_protobuf.sh89
-rwxr-xr-xtensorflow/contrib/makefile/rename_protoc.sh34
-rwxr-xr-xtensorflow/contrib/makefile/rename_prototext.sh34
-rw-r--r--tensorflow/contrib/pi_examples/camera/Makefile3
-rw-r--r--tensorflow/contrib/pi_examples/label_image/Makefile3
-rw-r--r--tensorflow/core/distributed_runtime/master_session.cc1
-rw-r--r--tensorflow/core/lib/io/zlib_inputbuffer.cc2
-rw-r--r--tensorflow/core/ops/ops.pbtxt36
-rw-r--r--tensorflow/g3doc/how_tos/distributed/index.md106
-rw-r--r--tensorflow/models/embedding/word2vec.py2
-rw-r--r--tensorflow/models/image/cifar10/cifar10.py2
-rw-r--r--tensorflow/python/kernel_tests/reader_ops_test.py70
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.gpu2
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel-gpu2
-rw-r--r--tensorflow/tools/docker/Dockerfile.gpu2
-rw-r--r--tensorflow/tools/docker/jupyter_notebook_config.py2
-rw-r--r--tensorflow/workspace.bzl4
-rwxr-xr-xutil/python/python_config.sh7
24 files changed, 342 insertions, 112 deletions
diff --git a/tensorflow/c/c_api.h b/tensorflow/c/c_api.h
index 3fad39c9fa..61024f42e9 100644
--- a/tensorflow/c/c_api.h
+++ b/tensorflow/c/c_api.h
@@ -291,7 +291,7 @@ extern void TF_SetDevice(TF_OperationDescription* desc, const char* device);
// TF_Port concat_dim_input = {...};
// TF_AddInput(desc, concat_dim_input);
// TF_Port values_inputs[5] = {{...}, ..., {...}};
-// TF_AddInputList(desc, 5, values_inputs);
+// TF_AddInputList(desc, values_inputs, 5);
// For inputs that take a single tensor.
extern void TF_AddInput(TF_OperationDescription* desc, TF_Port input);
diff --git a/tensorflow/contrib/learn/python/learn/__init__.py b/tensorflow/contrib/learn/python/learn/__init__.py
index 50089e18a0..34b4bf3823 100644
--- a/tensorflow/contrib/learn/python/learn/__init__.py
+++ b/tensorflow/contrib/learn/python/learn/__init__.py
@@ -35,9 +35,9 @@ from tensorflow.contrib.learn.python.learn.dataframe import *
from tensorflow.contrib.learn.python.learn.estimators import *
from tensorflow.contrib.learn.python.learn.evaluable import Evaluable
from tensorflow.contrib.learn.python.learn.experiment import Experiment
+from tensorflow.contrib.learn.python.learn.monitors import NanLossDuringTrainingError
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.graph_actions import infer
-from tensorflow.contrib.learn.python.learn.graph_actions import NanLossDuringTrainingError
from tensorflow.contrib.learn.python.learn.graph_actions import run_feeds
from tensorflow.contrib.learn.python.learn.graph_actions import run_n
from tensorflow.contrib.learn.python.learn.graph_actions import train
diff --git a/tensorflow/contrib/learn/python/learn/graph_actions.py b/tensorflow/contrib/learn/python/learn/graph_actions.py
index d736d34647..6ecce00446 100644
--- a/tensorflow/contrib/learn/python/learn/graph_actions.py
+++ b/tensorflow/contrib/learn/python/learn/graph_actions.py
@@ -76,12 +76,6 @@ def get_summary_writer(logdir):
return summary_writer_cache.SummaryWriterCache.get(logdir)
-class NanLossDuringTrainingError(RuntimeError):
-
- def __str__(self):
- return 'NaN loss during training.'
-
-
def _make_saver(graph, keep_checkpoint_max=5):
vars_to_save = graph.get_collection(ops.GraphKeys.VARIABLES)
if vars_to_save:
@@ -496,7 +490,7 @@ def _train_internal(graph,
failure_message = 'Model diverged with loss = NaN.'
if fail_on_nan_loss:
logging.error(failure_message)
- raise NanLossDuringTrainingError()
+ raise monitors_lib.NanLossDuringTrainingError()
else:
logging.warning(failure_message)
diff --git a/tensorflow/contrib/makefile/README.md b/tensorflow/contrib/makefile/README.md
index f795b87caa..acfcf8f220 100644
--- a/tensorflow/contrib/makefile/README.md
+++ b/tensorflow/contrib/makefile/README.md
@@ -259,12 +259,13 @@ For other variations of valid optimization flags, see [clang optimization levels
## Raspberry Pi
-Building on the Raspberry Pi is similar to a normal Linux system, though we
-recommend starting by compiling and installing protobuf:
+Building on the Raspberry Pi is similar to a normal Linux system. First
+download the dependencies and build protobuf:
```bash
+tensorflow/contrib/makefile/download_dependencies.sh
cd tensorflow/contrib/makefile/downloads/protobuf/
-./autogen.sh
+./autogen.sh
./configure
make
sudo make install
@@ -295,6 +296,8 @@ OPTFLAGS="-Os -mfpu=neon-vfpv4 -funsafe-math-optimizations -ftree-vectorize" \
CXX=g++-4.8
```
+For more examples, look at the tensorflow/contrib/pi_examples folder in the
+source tree, which contains code samples aimed at the Raspberry Pi.
# Other notes
diff --git a/tensorflow/contrib/makefile/build_all_ios.sh b/tensorflow/contrib/makefile/build_all_ios.sh
index e16d33aac6..6b6ed389fc 100755
--- a/tensorflow/contrib/makefile/build_all_ios.sh
+++ b/tensorflow/contrib/makefile/build_all_ios.sh
@@ -42,18 +42,6 @@ rm -rf tensorflow/contrib/makefile/downloads
# Pull down the required versions of the frameworks we need.
tensorflow/contrib/makefile/download_dependencies.sh
-# TODO(petewarden) - Some new code in Eigen triggers a clang bug, so work
-# around it by patching the source.
-sed -e 's#static uint32x4_t p4ui_CONJ_XOR = vld1q_u32( conj_XOR_DATA );#static uint32x4_t p4ui_CONJ_XOR; // = vld1q_u32( conj_XOR_DATA ); - Removed by script#' \
--i '' \
-tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
-sed -e 's#static uint32x2_t p2ui_CONJ_XOR = vld1_u32( conj_XOR_DATA );#static uint32x2_t p2ui_CONJ_XOR;// = vld1_u32( conj_XOR_DATA ); - Removed by scripts#' \
--i '' \
-tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
-sed -e 's#static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );#static uint64x2_t p2ul_CONJ_XOR;// = vld1q_u64( p2ul_conj_XOR_DATA ); - Removed by script#' \
--i '' \
-tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
-
# Compile protobuf for the target iOS device architectures.
tensorflow/contrib/makefile/compile_ios_protobuf.sh ${JOBS_COUNT}
diff --git a/tensorflow/contrib/makefile/download_dependencies.sh b/tensorflow/contrib/makefile/download_dependencies.sh
index 8ab7aad270..02a76631aa 100755
--- a/tensorflow/contrib/makefile/download_dependencies.sh
+++ b/tensorflow/contrib/makefile/download_dependencies.sh
@@ -36,14 +36,26 @@ curl "https://bitbucket.org/eigen/eigen/get/${EIGEN_HASH}.tar.gz" \
-o /tmp/eigen-${EIGEN_HASH}.tar.gz
tar xzf /tmp/eigen-${EIGEN_HASH}.tar.gz -C ${DOWNLOADS_DIR}
-git clone https://github.com/google/re2.git ${DOWNLOADS_DIR}/re2
-git clone https://github.com/google/gemmlowp.git ${DOWNLOADS_DIR}/gemmlowp
-git clone https://github.com/google/protobuf.git ${DOWNLOADS_DIR}/protobuf
-
# Link to the downloaded Eigen library from a permanent directory name, since
# the downloaded name changes with every version.
cd ${DOWNLOADS_DIR}
rm -rf eigen-latest
ln -s eigen-eigen-${EIGEN_HASH} eigen-latest
+# TODO(petewarden) - Some new code in Eigen triggers a clang bug with iOS arm64,
+# so work around it by patching the source.
+sed -e 's#static uint32x4_t p4ui_CONJ_XOR = vld1q_u32( conj_XOR_DATA );#static uint32x4_t p4ui_CONJ_XOR; // = vld1q_u32( conj_XOR_DATA ); - Removed by script#' \
+-i '' \
+eigen-latest/eigen/src/Core/arch/NEON/Complex.h
+sed -e 's#static uint32x2_t p2ui_CONJ_XOR = vld1_u32( conj_XOR_DATA );#static uint32x2_t p2ui_CONJ_XOR;// = vld1_u32( conj_XOR_DATA ); - Removed by scripts#' \
+-i '' \
+eigen-latest/eigen/src/Core/arch/NEON/Complex.h
+sed -e 's#static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );#static uint64x2_t p2ul_CONJ_XOR;// = vld1q_u64( p2ul_conj_XOR_DATA ); - Removed by script#' \
+-i '' \
+eigen-latest/eigen/src/Core/arch/NEON/Complex.h
+
+git clone https://github.com/google/re2.git re2
+git clone https://github.com/google/gemmlowp.git gemmlowp
+git clone https://github.com/google/protobuf.git protobuf
+
echo "download_dependencies.sh completed successfully."
diff --git a/tensorflow/contrib/makefile/rename_protobuf.sh b/tensorflow/contrib/makefile/rename_protobuf.sh
new file mode 100755
index 0000000000..b3bff2d503
--- /dev/null
+++ b/tensorflow/contrib/makefile/rename_protobuf.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+# This script modifies the downloaded protobuf library and the TensorFlow source
+# to put all protobuf-related symbols into the google::protobuf3 namespace
+# instead of the default google::protobuf. This is necessary to work around
+# linking issues for applications that use protobuf v2 already, and want to
+# adopt TensorFlow, since otherwise the two libraries have duplicate function
+# symbols that clash. It also renames all the include paths to google/protobuf3
+# throughout the protobuf and TensorFlow code.
+# This is a massive hack, and if possible it's recommended that you switch your
+# whole application to protobuf v3 so there's no mismatch with TensorFlow. There
+# are also no guarantees that this will continue to work with future versions of
+# protobuf or TensorFlow, or that it's a bulletproof solution for every
+# application.
+#
+# To use this script, run the following sequence:
+# tensorflow/contrib/makefile/download_dependencies.sh
+# tensorflow/contrib/makefile/rename_protobuf.sh
+#
+# You can then build the source as normal. For example on iOS:
+# tensorflow/contrib/makefile/compile_ios_protobuf.sh
+# tensorflow/contrib/makefile/compile_ios_tensorflow.sh
+#
+# Note that this script modifies the source code in-place, so once it's been run
+# it's no longer suitable for further manual modifications, since the difference
+# with the top of tree will already be large.
+
+mv tensorflow/contrib/makefile/downloads/protobuf/src/google/protobuf \
+ tensorflow/contrib/makefile/downloads/protobuf//src/google/protobuf3
+
+# Rename protobuf #includes to use protobuf3.
+find . \
+ -type f \
+ \( -name "*.cc" -or -name "*.h" \) \
+ -exec sed -i '' \
+ 's%#include \([<"]\)google/protobuf/%#include \1google/protobuf3/%' {} \;
+find . \
+ -type f \
+ -name "*.proto" \
+ -exec sed -i '' \
+ 's%import \(.*\)\([<"]\)google/protobuf/%import \1\2google/protobuf3/%' {} \;
+
+# Rename the namespace mentions.
+find . \
+ -type f \
+ \( -name "*.cc" -or -name "*.h" \) \
+ -exec sed -i '' \
+ 's%namespace protobuf\([^3]\)%namespace protobuf3\1%' {} \;
+find . \
+ -type f \
+ \( -name "*.cc" -or -name "*.h" \) \
+ -exec sed -i '' \
+ 's%protobuf::%protobuf3::%g' {} \;
+sed -i '' 's%::google::protobuf;%google::protobuf3;%' \
+ tensorflow/core/platform/default/protobuf.h
+
+# Fix up a couple of special build scripts that look for particular files.
+sed -i '' 's%src/google/protobuf/message.cc%src/google/protobuf3/message.cc%' \
+ tensorflow/contrib/makefile/downloads/protobuf/configure.ac
+sed -i '' 's%src/google/protobuf/stubs/common.h%src/google/protobuf3/stubs/common.h%' \
+ tensorflow/contrib/makefile/downloads/protobuf/autogen.sh
+
+# Update the locations within the protobuf makefile.
+sed -i '' 's%google/protobuf/%google/protobuf3/%g' \
+ tensorflow/contrib/makefile/downloads/protobuf/src/Makefile.am
+
+# Make sure protoc can find the new google/protobuf3 paths by putting them at
+# the root directory.
+cp -r tensorflow/contrib/makefile/downloads/protobuf/src/google .
+
+# Update the protobuf commands used in the makefile.
+sed -i '' 's%$(PROTOC) $(PROTOCFLAGS) $< --cpp_out $(PROTOGENDIR)%tensorflow/contrib/makefile/rename_protoc.sh $(PROTOC) $(PROTOCFLAGS) $< --cpp_out $(PROTOGENDIR)%' tensorflow/contrib/makefile/Makefile
+sed -i '' 's%$(PROTOC) $(PROTOCFLAGS) $< --cpp_out $(HOST_GENDIR)%tensorflow/contrib/makefile/rename_protoc.sh $(PROTOC) $(PROTOCFLAGS) $< --cpp_out $(HOST_GENDIR)%' tensorflow/contrib/makefile/Makefile
+sed -i '' 's%$(PROTO_TEXT) \\%tensorflow/contrib/makefile/rename_prototext.sh $(PROTO_TEXT) \\%' tensorflow/contrib/makefile/Makefile
diff --git a/tensorflow/contrib/makefile/rename_protoc.sh b/tensorflow/contrib/makefile/rename_protoc.sh
new file mode 100755
index 0000000000..7eaa60271a
--- /dev/null
+++ b/tensorflow/contrib/makefile/rename_protoc.sh
@@ -0,0 +1,34 @@
+#!/bin/bash -e -x
+
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+PROTO_C_COMMAND=$1
+shift
+${PROTO_C_COMMAND} $*
+
+# Assumes that the order is always <some flags> *.protofile --cpp_out dir
+PROTO_LAST_THREE_ARGS=(${@: -3})
+PROTO_FILE=${PROTO_LAST_THREE_ARGS[0]}
+CC_FILE=${PROTO_FILE%.proto}.pb.cc
+H_FILE=${PROTO_FILE%.proto}.pb.h
+GEN_DIR=${PROTO_LAST_THREE_ARGS[2]}
+GEN_CC=${GEN_DIR}/${CC_FILE}
+GEN_H=${GEN_DIR}/${H_FILE}
+
+sed -i '' 's%protobuf::%protobuf3::%g' ${GEN_CC}
+sed -i '' 's%protobuf::%protobuf3::%g' ${GEN_H}
+sed -i '' 's%google_2fprotobuf3_2f%google_2fprotobuf_2f%g' ${GEN_CC}
+sed -i '' 's%google_2fprotobuf3_2f%google_2fprotobuf_2f%g' ${GEN_H}
diff --git a/tensorflow/contrib/makefile/rename_prototext.sh b/tensorflow/contrib/makefile/rename_prototext.sh
new file mode 100755
index 0000000000..0b285aae40
--- /dev/null
+++ b/tensorflow/contrib/makefile/rename_prototext.sh
@@ -0,0 +1,34 @@
+#!/bin/bash -e -x
+
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+PROTO_TEXT_COMMAND=$1
+shift
+${PROTO_TEXT_COMMAND} $*
+
+# Assumes a fixed order for the arguments.
+PROTO_LAST_FOUR_ARGS=(${@: -4})
+PROTO_FILE=${PROTO_LAST_FOUR_ARGS[3]}
+CC_FILE=${PROTO_FILE%.proto}.pb_text.cc
+H_FILE=${PROTO_FILE%.proto}.pb_text.h
+GEN_DIR=${PROTO_LAST_FOUR_ARGS[0]}
+GEN_CC=${GEN_DIR}/${CC_FILE#tensorflow/core}
+GEN_H=${GEN_DIR}/${H_FILE#tensorflow/core}
+
+sed -i '' 's%protobuf::%protobuf3::%g' ${GEN_CC}
+sed -i '' 's%protobuf::%protobuf3::%g' ${GEN_H}
+sed -i '' 's%google_2fprotobuf3_2f%google_2fprotobuf_2f%g' ${GEN_CC}
+sed -i '' 's%google_2fprotobuf3_2f%google_2fprotobuf_2f%g' ${GEN_H}
diff --git a/tensorflow/contrib/pi_examples/camera/Makefile b/tensorflow/contrib/pi_examples/camera/Makefile
index 2d14606400..d7dd3e131f 100644
--- a/tensorflow/contrib/pi_examples/camera/Makefile
+++ b/tensorflow/contrib/pi_examples/camera/Makefile
@@ -53,7 +53,8 @@ LIBS := \
-ldl \
-lpthread \
-lm \
--ljpeg
+-ljpeg \
+-lz
LIBFLAGS :=
EXECUTABLE_SRCS := tensorflow/contrib/pi_examples/camera/camera.cc
diff --git a/tensorflow/contrib/pi_examples/label_image/Makefile b/tensorflow/contrib/pi_examples/label_image/Makefile
index 1f310ec93b..511da285a9 100644
--- a/tensorflow/contrib/pi_examples/label_image/Makefile
+++ b/tensorflow/contrib/pi_examples/label_image/Makefile
@@ -52,7 +52,8 @@ LIBS := \
-ldl \
-lpthread \
-lm \
--ljpeg
+-ljpeg \
+-lz
LIBFLAGS :=
EXECUTABLE_SRCS := tensorflow/contrib/pi_examples/label_image/label_image.cc
diff --git a/tensorflow/core/distributed_runtime/master_session.cc b/tensorflow/core/distributed_runtime/master_session.cc
index be34e7acd4..f2925b6ea7 100644
--- a/tensorflow/core/distributed_runtime/master_session.cc
+++ b/tensorflow/core/distributed_runtime/master_session.cc
@@ -658,6 +658,7 @@ class CleanupBroadcastHelper {
} // namespace
+
void MasterSession::ReffedClientGraph::CleanupPartitionsAsync(
int64 step_id, StatusCallback done) {
const int num = partitions_.size();
diff --git a/tensorflow/core/lib/io/zlib_inputbuffer.cc b/tensorflow/core/lib/io/zlib_inputbuffer.cc
index b5224168f1..8c0d9c800b 100644
--- a/tensorflow/core/lib/io/zlib_inputbuffer.cc
+++ b/tensorflow/core/lib/io/zlib_inputbuffer.cc
@@ -163,7 +163,7 @@ Status ZlibInputBuffer::ReadNBytes(int64 bytes_to_read, string* result) {
Status ZlibInputBuffer::Inflate() {
int error = inflate(z_stream_.get(), zlib_options_.flush_mode);
- if (error != Z_OK && error != Z_FINISH) {
+ if (error != Z_OK && error != Z_STREAM_END) {
string error_string =
strings::StrCat("inflate() failed with error ", error);
if (z_stream_->msg != NULL) {
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 4ecfbfe010..d342734ed3 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -4583,6 +4583,42 @@ op {
description: "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the PNG-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n* 4: output an RGBA image.\n\nIf needed, the PNG-encoded image is transformed to match the requested number\nof color channels."
}
op {
+ name: "DecodeGif"
+ input_arg {
+ name: "contents"
+ description: "0-D. The GIF-encoded image."
+ type: DT_STRING
+ }
+ output_arg {
+ name: "image"
+ description: "3-D with shape `[height, width, channels]`."
+ type_attr: "dtype"
+ }
+ attr {
+ name: "channels"
+ type: "int"
+ default_value {
+ i: 0
+ }
+ description: "Number of color channels for the decoded image."
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ default_value {
+ type: DT_UINT8
+ }
+ allowed_values {
+ list {
+ type: DT_UINT8
+ type: DT_UINT16
+ }
+ }
+ }
+ summary: "Decode a GIF-encoded image to a uint8 or uint16 tensor."
+ description: "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the GIF-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n* 4: output an RGBA image.\n\nIf needed, the GIF-encoded image is transformed to match the requested number\nof color channels."
+}
+op {
name: "DecodeRaw"
input_arg {
name: "bytes"
diff --git a/tensorflow/g3doc/how_tos/distributed/index.md b/tensorflow/g3doc/how_tos/distributed/index.md
index 88f334cb53..b5ad1991b2 100644
--- a/tensorflow/g3doc/how_tos/distributed/index.md
+++ b/tensorflow/g3doc/how_tos/distributed/index.md
@@ -63,7 +63,7 @@ tf.train.ClusterSpec({"local": ["localhost:2222", "localhost:2223"]})
<td><pre>
tf.train.ClusterSpec({
"worker": [
- "worker0.example.com:2222",
+ "worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"
],
@@ -153,7 +153,7 @@ simplify the work of specifying a replicated model. Possible approaches include:
`tf.Graph` that contains one set of parameters (in `tf.Variable` nodes pinned
to `/job:ps`); and multiple copies of the compute-intensive part of the model,
each pinned to a different task in `/job:worker`.
-
+
* **Between-graph replication.** In this approach, there is a separate client
for each `/job:worker` task, typically in the same process as the worker
task. Each client builds a similar graph containing the parameters (pinned to
@@ -203,7 +203,7 @@ def main(_):
# Create a cluster from the parameter server and worker hosts.
cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
-
+
# Create and start a server for the local task.
server = tf.train.Server(cluster,
job_name=FLAGS.job_name,
@@ -284,56 +284,50 @@ $ python trainer.py \
## Glossary
-<dl>
- <dt>Client</dt>
- <dd>
- A client is typically a program that builds a TensorFlow graph and
- constructs a `tensorflow::Session` to interact with a cluster. Clients are
- typically written in Python or C++. A single client process can directly
- interact with multiple TensorFlow servers (see "Replicated training" above),
- and a single server can serve multiple clients.
- </dd>
- <dt>Cluster</dt>
- <dd>
- A TensorFlow cluster comprises a one or more "jobs", each divided into lists
- of one or more "tasks". A cluster is typically dedicated to a particular
- high-level objective, such as training a neural network, using many machines
- in parallel. A cluster is defined by a `tf.train.ClusterSpec` object.
- </dd>
- <dt>Job</dt>
- <dd>
- A job comprises a list of "tasks", which typically serve a common
- purpose. For example, a job named `ps` (for "parameter server") typically
- hosts nodes that store and update variables; while a job named `worker`
- typically hosts stateless nodes that perform compute-intensive tasks.
- The tasks in a job typically run on different machines. The set of job roles
- is flexible: for example, a `worker` may maintain some state.
- </dd>
- <dt>Master service</dt>
- <dd>
- An RPC service that provides remote access to a set of distributed devices,
- and acts as a session target. The master service implements the
- <code>tensorflow::Session</code> interface, and is responsible for
- coordinating work across one or more "worker services". All TensorFlow
- servers implement the master service.
- </dd>
- <dt>Task</dt>
- <dd>
- A task corresponds to a specific TensorFlow server, and typically
- corresponds to a single process. A task belongs to a particular "job" and is
- identified by its index within that job's list of tasks.
- </dd>
- <dt>TensorFlow server</dt>
- <dd>
- A process running a <code>tf.train.Server</code> instance, which is a
- member of a cluster, and exports a "master service" and "worker service".
- </dd>
- <dt>Worker service</dt>
- <dd>
- An RPC service that executes parts of a TensorFlow graph using its local
- devices. A worker service implements <a href=
- "https://www.tensorflow.org/code/tensorflow/core/protobuf/worker_service.proto"
- ><code>worker_service.proto</code></a>. All TensorFlow servers implement the
- worker service.
- </dd>
-</dl>
+**Client**
+
+A client is typically a program that builds a TensorFlow graph and constructs a
+`tensorflow::Session` to interact with a cluster. Clients are typically written
+in Python or C++. A single client process can directly interact with multiple
+TensorFlow servers (see "Replicated training" above), and a single server can
+serve multiple clients.
+
+**Cluster**
+
+A TensorFlow cluster comprises one or more "jobs", each divided into lists
+of one or more "tasks". A cluster is typically dedicated to a particular
+high-level objective, such as training a neural network, using many machines in
+parallel. A cluster is defined by a `tf.train.ClusterSpec` object.
+
+**Job**
+
+A job comprises a list of "tasks", which typically serve a common purpose.
+For example, a job named `ps` (for "parameter server") typically hosts nodes
+that store and update variables; while a job named `worker` typically hosts
+stateless nodes that perform compute-intensive tasks. The tasks in a job
+typically run on different machines. The set of job roles is flexible:
+for example, a `worker` may maintain some state.
+
+**Master service**
+
+An RPC service that provides remote access to a set of distributed devices,
+and acts as a session target. The master service implements the
+`tensorflow::Session` interface, and is responsible for coordinating work across
+one or more "worker services". All TensorFlow servers implement the master
+service.
+
+**Task**
+
+A task corresponds to a specific TensorFlow server, and typically corresponds
+to a single process. A task belongs to a particular "job" and is identified by
+its index within that job's list of tasks.
+
+**TensorFlow server**
+A process running a `tf.train.Server` instance, which is a member of a cluster,
+and exports a "master service" and "worker service".
+
+**Worker service**
+
+An RPC service that executes parts of a TensorFlow graph using its local devices.
+A worker service implements [worker_service.proto](https://www.tensorflow.org/code/tensorflow/core/protobuf/worker_service.proto).
+All TensorFlow servers implement the worker service.
diff --git a/tensorflow/models/embedding/word2vec.py b/tensorflow/models/embedding/word2vec.py
index d4b4309146..2eb5367a95 100644
--- a/tensorflow/models/embedding/word2vec.py
+++ b/tensorflow/models/embedding/word2vec.py
@@ -247,7 +247,7 @@ class Word2Vec(object):
true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
- # We replicate sampled noise lables for all examples in the batch
+ # We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
diff --git a/tensorflow/models/image/cifar10/cifar10.py b/tensorflow/models/image/cifar10/cifar10.py
index 4cd7e006c1..4908964ffc 100644
--- a/tensorflow/models/image/cifar10/cifar10.py
+++ b/tensorflow/models/image/cifar10/cifar10.py
@@ -81,7 +81,7 @@ def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
- Creates a summary that measure the sparsity of activations.
+ Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
diff --git a/tensorflow/python/kernel_tests/reader_ops_test.py b/tensorflow/python/kernel_tests/reader_ops_test.py
index 8bcec44a61..c54587bef1 100644
--- a/tensorflow/python/kernel_tests/reader_ops_test.py
+++ b/tensorflow/python/kernel_tests/reader_ops_test.py
@@ -21,8 +21,10 @@ from __future__ import print_function
import collections
import os
+import six
import threading
import tensorflow as tf
+import zlib
class IdentityReaderTest(tf.test.TestCase):
@@ -178,7 +180,8 @@ class WholeFileReaderTest(tf.test.TestCase):
for i in range(3)]
self._content = [b"One\na\nb\n", b"Two\nC\nD", b"Three x, y, z"]
for fn, c in zip(self._filenames, self._content):
- open(fn, "wb").write(c)
+ with open(fn, "wb") as h:
+ h.write(c)
def tearDown(self):
super(WholeFileReaderTest, self).tearDown()
@@ -240,13 +243,13 @@ class TextLineReaderTest(tf.test.TestCase):
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
- f = open(fn, "wb")
- for j in range(self._num_lines):
- f.write(self._LineText(i, j))
- # Always include a newline after the record unless it is
- # at the end of the file, in which case we include it sometimes.
- if j + 1 != self._num_lines or i == 0:
- f.write(b"\r\n" if crlf else b"\n")
+ with open(fn, "wb") as f:
+ for j in range(self._num_lines):
+ f.write(self._LineText(i, j))
+ # Always include a newline after the record unless it is
+ # at the end of the file, in which case we include it sometimes.
+ if j + 1 != self._num_lines or i == 0:
+ f.write(b"\r\n" if crlf else b"\n")
return filenames
def _testOneEpoch(self, files):
@@ -311,11 +314,11 @@ class FixedLengthRecordReaderTest(tf.test.TestCase):
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
- f = open(fn, "wb")
- f.write(b"H" * self._header_bytes)
- for j in range(self._num_records):
- f.write(self._Record(i, j))
- f.write(b"F" * self._footer_bytes)
+ with open(fn, "wb") as f:
+ f.write(b"H" * self._header_bytes)
+ for j in range(self._num_records):
+ f.write(self._Record(i, j))
+ f.write(b"F" * self._footer_bytes)
return filenames
def testOneEpoch(self):
@@ -456,6 +459,47 @@ class TFRecordWriterZlibTest(tf.test.TestCase):
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
+ def testZLibFlushRecord(self):
+ fn = os.path.join(self.get_temp_dir(), "tf_record.txt")
+
+ writer = tf.python_io.TFRecordWriter(fn, options=None)
+ writer.write(b"small record")
+ writer.close()
+ del writer
+
+ with open(fn, "rb") as h:
+ buff = h.read()
+
+ # creating more blocks and trailing blocks shouldn't break reads
+ compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS)
+
+ output = b""
+ for c in buff:
+ if type(c) == int:
+ c = six.int2byte(c)
+ output += compressor.compress(c)
+ output += compressor.flush(zlib.Z_FULL_FLUSH)
+
+ output += compressor.flush(zlib.Z_FULL_FLUSH)
+ output += compressor.flush(zlib.Z_FULL_FLUSH)
+ output += compressor.flush(zlib.Z_FINISH)
+
+ # overwrite the original file with the compressed data
+ with open(fn, "wb") as h:
+ h.write(output)
+
+ with self.test_session() as sess:
+ options = tf.python_io.TFRecordOptions(
+ compression_type=tf.python_io.TFRecordCompressionType.ZLIB)
+ reader = tf.TFRecordReader(name="test_reader", options=options)
+ queue = tf.FIFOQueue(1, [tf.string], shapes=())
+ key, value = reader.read(queue)
+ queue.enqueue(fn).run()
+ queue.close().run()
+ k, v = sess.run([key, value])
+ self.assertTrue(tf.compat.as_text(k).startswith("%s:" % fn))
+ self.assertAllEqual(b"small record", v)
+
class TFRecordIteratorTest(tf.test.TestCase):
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu b/tensorflow/tools/ci_build/Dockerfile.gpu
index 619f003d5b..7b09691178 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:7.5-cudnn4-devel
+FROM nvidia/cuda:7.5-cudnn5-devel
MAINTAINER Jan Prach <jendap@google.com>
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu b/tensorflow/tools/docker/Dockerfile.devel-gpu
index e9081d5502..4877f8c989 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:7.5-cudnn4-devel
+FROM nvidia/cuda:7.5-cudnn5-devel
MAINTAINER Craig Citro <craigcitro@google.com>
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index e08ef1aa75..841f6124c6 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:7.5-cudnn4-devel
+FROM nvidia/cuda:7.5-cudnn5-devel
MAINTAINER Craig Citro <craigcitro@google.com>
diff --git a/tensorflow/tools/docker/jupyter_notebook_config.py b/tensorflow/tools/docker/jupyter_notebook_config.py
index b789905d82..5cb57aa300 100644
--- a/tensorflow/tools/docker/jupyter_notebook_config.py
+++ b/tensorflow/tools/docker/jupyter_notebook_config.py
@@ -16,7 +16,7 @@ import os
from IPython.lib import passwd
c.NotebookApp.ip = '*'
-c.NotebookApp.port = 8888
+c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False
c.MultiKernelManager.default_kernel_name = 'python2'
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 05787aa3ab..16a9fb5774 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -7,8 +7,8 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
# These lines need to be changed when updating Eigen. They are parsed from
# this file by the cmake and make builds to determine the eigen version and hash.
- eigen_version = "6f952374ef2b"
- eigen_sha256 = "56d658324b09de3f418ae42ca0646dd1e6e0b897dd58b164ec0d21315764afd9"
+ eigen_version = "9e1b48c333aa"
+ eigen_sha256 = "ad2c990401a0b5529324e000737569f5f60d827f38586d5e02490252b3325c11"
native.new_http_archive(
name = "eigen_archive",
diff --git a/util/python/python_config.sh b/util/python/python_config.sh
index 6eb8a39ece..76ad559f02 100755
--- a/util/python/python_config.sh
+++ b/util/python/python_config.sh
@@ -69,10 +69,9 @@ for path in all_paths:
if len(paths) == 1:
print(paths[0])
-ret_paths = ""
-for path in paths:
- ret_paths += path + " "
-print(ret_paths)
+else:
+ ret_paths = " ".join(paths)
+ print(ret_paths)
END
}