author    A. Unique TensorFlower <gardener@tensorflow.org>  2016-07-31 22:07:30 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>  2016-07-31 23:17:46 -0700
commit abe9ab326625105adb3c9d46c027931aec947d1f (patch)
tree   d9fa7eb9a2fd9b37bc87f98cf353354391b9eb04
parent c0637048dbc099eac1f75878b765220cd02ccfc0 (diff)
Merge changes from github.
Change: 128958134
-rw-r--r--  README.md | 8
-rw-r--r--  RELEASE.md | 50
-rw-r--r--  tensorflow/contrib/makefile/Makefile | 16
-rwxr-xr-x  tensorflow/contrib/makefile/build_all_ios.sh | 12
-rwxr-xr-x  tensorflow/contrib/makefile/compile_ios_tensorflow.sh | 27
-rw-r--r--  tensorflow/contrib/makefile/tf_op_files.txt | 1
-rw-r--r--  tensorflow/core/common_runtime/optimization_registry.h | 2
-rw-r--r--  tensorflow/core/common_runtime/simple_graph_execution_state.h | 2
-rw-r--r--  tensorflow/core/graph/gradients.cc | 2
-rw-r--r--  tensorflow/core/graph/graph.cc | 3
-rw-r--r--  tensorflow/core/kernels/argmax_op.cc | 2
-rw-r--r--  tensorflow/core/kernels/cwise_op_conj.cc | 6
-rw-r--r--  tensorflow/core/kernels/cwise_op_gpu_conj.cu.cc | 3
-rw-r--r--  tensorflow/core/kernels/cwise_op_sub.cc | 6
-rw-r--r--  tensorflow/core/kernels/cwise_ops_test.cc | 39
-rw-r--r--  tensorflow/core/kernels/lookup_table_init_op.cc | 2
-rw-r--r--  tensorflow/core/ops/math_ops.cc | 52
-rw-r--r--  tensorflow/core/ops/ops.pbtxt | 36
-rw-r--r--  tensorflow/core/public/version.h | 4
-rw-r--r--  tensorflow/examples/how_tos/reading_data/convert_to_records.py | 1
-rw-r--r--  tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py | 3
-rw-r--r--  tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py | 3
-rw-r--r--  tensorflow/examples/how_tos/reading_data/fully_connected_reader.py | 2
-rw-r--r--  tensorflow/examples/image_retraining/retrain_test.py | 2
-rw-r--r--  tensorflow/examples/learn/wide_n_deep_tutorial.py | 4
-rw-r--r--  tensorflow/examples/skflow/multioutput_regression.py | 1
-rw-r--r--  tensorflow/examples/tutorials/mnist/fully_connected_feed.py | 2
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassEnv.md | 54
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md | 4
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassTensor.md | 54
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassTensorShape.md | 12
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md | 16
-rw-r--r--  tensorflow/g3doc/api_docs/cc/StructTF_Buffer.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/index.md | 6
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.rnn.md | 6
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.exponential_decay.md | 2
-rw-r--r--  tensorflow/g3doc/api_docs/python/nn.md | 6
-rw-r--r--  tensorflow/g3doc/get_started/os_setup.md | 107
-rw-r--r--  tensorflow/g3doc/how_tos/using_gpu/index.md | 2
-rw-r--r--  tensorflow/python/ops/rnn.py | 3
-rw-r--r--  tensorflow/python/ops/rnn_cell.py | 5
-rw-r--r--  tensorflow/python/platform/tf_logging.py | 2
-rw-r--r--  tensorflow/python/training/learning_rate_decay.py | 8
-rw-r--r--  tensorflow/tensorboard/README.md | 10
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu | 3
-rwxr-xr-x  tensorflow/tools/ci_build/builds/pip.sh | 3
-rwxr-xr-x  tensorflow/tools/ci_build/ci_parameterized_build.sh | 6
-rw-r--r--  tensorflow/tools/dist_test/Dockerfile | 2
-rw-r--r--  tensorflow/tools/dist_test/server/Dockerfile | 2
-rw-r--r--  tensorflow/tools/dist_test/server/Dockerfile.test | 2
-rw-r--r--  tensorflow/tools/docker/Dockerfile | 2
-rw-r--r--  tensorflow/tools/docker/Dockerfile.devel | 2
-rw-r--r--  tensorflow/tools/docker/Dockerfile.devel-gpu | 4
-rw-r--r--  tensorflow/tools/docker/Dockerfile.gpu | 2
-rwxr-xr-x  tensorflow/tools/docker/parameterized_docker_build.sh | 6
-rw-r--r--  tensorflow/tools/pip_package/setup.py | 2
-rw-r--r--  third_party/gpus/crosstool/CROSSTOOL | 4
57 files changed, 462 insertions, 168 deletions
diff --git a/README.md b/README.md
index 923b094e4a..e0ac8f6eff 100644
--- a/README.md
+++ b/README.md
@@ -33,10 +33,10 @@ and discussion.**
People who are a little more adventurous can also try our nightly binaries:
-* Linux CPU-only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/)) / [Python 3.4](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](http://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
-* Linux GPU: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/140/artifact/pip_test/whl/tensorflow-0.8.0-cp35-cp35m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
-* Mac CPU-only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-py2-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-py3-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/))
-* Mac GPU: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-py2-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-py3-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
+* Linux CPU-only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/)) / [Python 3.4](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](http://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
+* Linux GPU: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/140/artifact/pip_test/whl/tensorflow-0.8.0-cp35-cp35m-linux_x86_64.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-linux-gpu/TF_BUILD_CONTAINER_TYPE=GPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
+* Mac CPU-only: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-py2-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac1-slave/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-py3-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac1-slave/))
+* Mac GPU: [Python 2](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-py2-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-py3-none-any.whl) ([build history](http://ci.tensorflow.org/view/Nightly/job/nigntly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
* [Android](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/lastSuccessfulBuild/artifact/bazel-out/local_linux/bin/tensorflow/examples/android/tensorflow_demo.apk) ([build history](http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-android/TF_BUILD_CONTAINER_TYPE=ANDROID,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=NO_PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=android-slave/))
#### *Try your first TensorFlow program*
diff --git a/RELEASE.md b/RELEASE.md
index dd9558dd90..4c9c33bf0d 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,16 +1,40 @@
-# Changes Since Last Release
-## Features and Improvements
-* Connectionist Temporal Classification ops are now "official" (see, e.g.,
- `tf.nn.ctc_loss`)
-* Preliminary graph-construction C API, for use by language bindings.
-* Major revision to the graph-construction C++ API. Scoping mechanism to make op
- naming, specifying control dependencies etc. more consistent. C++ values can
- be used directly as operands, making op construction more concise.
+# Release 0.10.0
-## Breaking Changes to the API
-* `env.h` replaces use of `New*File()` functions to use `std::unique_ptr`
- return arguments, removing the old raw pointer returns.
+## Major Features and Improvements
+
+* Added support for C++ shape inference
+* Added graph-construction C API
+* Major revision to the graph-construction C++ API
+* Support makefile build for iOS
+* Added Mac GPU support
+* Full version of TF-Slim available as `tf.contrib.slim`
+* Added k-Means clustering and WALS matrix factorization
+
+## Bug Fixes and Other Changes
+
+* Allow gradient computation for scalar values.
+* Performance improvements for gRPC
+* Improved support for fp16
+* New high-level ops in tf.contrib.{layers,metrics}
+* New features for TensorBoard, such as shape display, exponential smoothing
+* Faster and more stable Google Cloud Storage (GCS) filesystem support
+* Support for zlib compression and decompression for TFRecordReader and TFRecordWriter
+* Support for reading (animated) GIFs
+* Improved support for SparseTensor
+* Added support for more probability distributions (Dirichlet, Beta, Bernoulli, etc.)
+* Added Python interfaces to reset resource containers.
+* Many bugfixes and performance improvements
+* Many documentation fixes
+
+## Thanks to our Contributors
+
+This release contains contributions from many people at Google, as well as:
+
+Alex Rothberg, Andrew Royer, Austin Marshall, @BlackCoal, Bob Adolf, Brian Diesel, Charles-Emmanuel Dias, @chemelnucfin, Chris Lesniewski, Daeyun Shin, Daniel Rodriguez, Danijar Hafner, Darcy Liu, Kristinn R. Thórisson, Daniel Castro, Dmitry Savintsev, Kashif Rasul, Dylan Paiton, Emmanuel T. Odeke, Ernest Grzybowski, Gavin Sherry, Gideon Dresdner, Gregory King, Harold Cooper, @heinzbeinz, Henry Saputra, Huarong Huo, Huazuo Gao, Igor Babuschkin, Igor Macedo Quintanilha, Ivan Ukhov, James Fysh, Jan Wilken Dörrie, Jihun Choi, Johnny Lim, Jonathan Raiman, Justin Francis, @lilac, Li Yi, Marc Khoury, Marco Marchesi, Max Melnick, Micael Carvalho, @mikowals, Mostafa Gazar, Nico Galoppo, Nishant Agrawal, Petr Janda, Yuncheng Li, @raix852, Robert Rose, @Robin-des-Bois, Rohit Girdhar, Sam Abrahams, satok16, Sergey Kishchenko, Sharkd Tu, @shotat, Siddharth Agrawal, Simon Denel, @sono-bfio, SunYeop Lee, Thijs Vogels, @tobegit3hub, @Undo1, Wang Yang, Wenjian Huang, Yaroslav Bulatov, Yuan Tang, Yunfeng Wang, Ziming Dong
+
+We are also grateful to all who filed issues or helped resolve them, asked and
+answered questions, and were part of inspiring discussions.
# Release 0.9.0
@@ -55,7 +79,7 @@
This release contains contributions from many people at Google, as well as:
-Aaron Schumacher, Aidan Dang, Akihiko ITOH, Aki Sukegawa, Arbit Chen, Aziz Alto, Danijar Hafner, Erik Erwitt, Fabrizio Milo, Felix Maximilian Möller, Henry Saputra, Sung Kim, Igor Babuschkin, Jan Zikes, Jeremy Barnes, Jesper Steen Møller, Johannes Mayer, Justin Harris, Kashif Rasul, Kevin Robinson, Loo Rong Jie, Lucas Moura, Łukasz Bieniasz-Krzywiec, Mario Cho, Maxim Grechkin, Michael Heilman, Mostafa Rahmani, Mourad Mourafiq, @ninotoshi, Orion Reblitz-Richardson, Yuncheng Li, @raoqiyu, Robert DiPietro, Sam Abrahams, Sebastian Raschka, Siddharth Agrawal, @snakecharmer1024, Stephen Roller, Sung Kim, SunYeop Lee, Thijs Vogels, Till Hoffmann, Victor Melo, Ville Kallioniemi, Waleed Abdulla, Wenjian Huang, Yaroslav Bulatov, Yeison Rodriguez, Yuan (Terry) Tang, Yuxin Wu, @zhongzyd, Ziming Dong, Zohar Jackson
+Aaron Schumacher, Aidan Dang, Akihiko ITOH, Aki Sukegawa, Arbit Chen, Aziz Alto, Danijar Hafner, Erik Erwitt, Fabrizio Milo, Felix Maximilian Möller, Henry Saputra, Sung Kim, Igor Babuschkin, Jan Zikes, Jeremy Barnes, Jesper Steen Møller, Johannes Mayer, Justin Harris, Kashif Rasul, Kevin Robinson, Loo Rong Jie, Lucas Moura, Łukasz Bieniasz-Krzywiec, Mario Cho, Maxim Grechkin, Michael Heilman, Mostafa Rahmani, Mourad Mourafiq, @ninotoshi, Orion Reblitz-Richardson, Yuncheng Li, @raoqiyu, Robert DiPietro, Sam Abrahams, Sebastian Raschka, Siddharth Agrawal, @snakecharmer1024, Stephen Roller, Sung Kim, SunYeop Lee, Thijs Vogels, Till Hoffmann, Victor Melo, Ville Kallioniemi, Waleed Abdulla, Wenjian Huang, Yaroslav Bulatov, Yeison Rodriguez, Yuan Tang, Yuxin Wu, @zhongzyd, Ziming Dong, Zohar Jackson
We are also grateful to all who filed issues or helped resolve them, asked and
answered questions, and were part of inspiring discussions.
@@ -97,7 +121,7 @@ answered questions, and were part of inspiring discussions.
This release contains contributions from many people at Google, as well as:
-Abhinav Upadhyay, Aggelos Avgerinos, Alan Wu, Alexander G. de G. Matthews, Aleksandr Yahnev, @amchercashin, Andy Kitchen, Aurelien Geron, Awni Hannun, @BanditCat, Bas Veeling, Cameron Chen, @cg31, Cheng-Lung Sung, Christopher Bonnett, Dan Becker, Dan Van Boxel, Daniel Golden, Danijar Hafner, Danny Goodman, Dave Decker, David Dao, David Kretch, Dongjoon Hyun, Dustin Dorroh, @e-lin, Eurico Doirado, Erik Erwitt, Fabrizio Milo, @gaohuazuo, Iblis Lin, Igor Babuschkin, Isaac Hodes, Isaac Turner, Iván Vallés, J Yegerlehner, Jack Zhang, James Wexler, Jan Zikes, Jay Young, Jeff Hodges, @jmtatsch, Johnny Lim, Jonas Meinertz Hansen, Kanit Wongsuphasawat, Kashif Rasul, Ken Shirriff, Kenneth Mitchner, Kenta Yonekura, Konrad Magnusson, Konstantin Lopuhin, @lahwran, @lekaha, @liyongsea, Lucas Adams, @makseq, Mandeep Singh, @manipopopo, Mark Amery, Memo Akten, Michael Heilman, Michael Peteuil, Nathan Daly, Nicolas Fauchereau, @ninotoshi, Olav Nymoen, @panmari, @papelita1234, Pedro Lopes, Pranav Sailesh Mani, RJ Ryan, Rob Culliton, Robert DiPietro, @ronrest, Sam Abrahams, Sarath Shekkizhar, Scott Graham, Sebastian Raschka, Sung Kim, Surya Bhupatiraju, Syed Ahmed, Till Hoffmann, @timsl, @urimend, @vesnica, Vlad Frolov, Vlad Zagorodniy, Wei-Ting Kuo, Wenjian Huang, William Dmitri Breaden Madden, Wladimir Schmidt, Yuwen Yan, Yuxin Wu, Yuya Kusakabe, @zhongzyd, @znah.
+Abhinav Upadhyay, Aggelos Avgerinos, Alan Wu, Alexander G. de G. Matthews, Aleksandr Yahnev, @amchercashin, Andy Kitchen, Aurelien Geron, Awni Hannun, @BanditCat, Bas Veeling, Cameron Chen, @cg31, Cheng-Lung Sung, Christopher Bonnett, Dan Becker, Dan Van Boxel, Daniel Golden, Danijar Hafner, Danny Goodman, Dave Decker, David Dao, David Kretch, Dongjoon Hyun, Dustin Dorroh, @e-lin, Eurico Doirado, Erik Erwitt, Fabrizio Milo, @gaohuazuo, Iblis Lin, Igor Babuschkin, Isaac Hodes, Isaac Turner, Iván Vallés, J Yegerlehner, Jack Zhang, James Wexler, Jan Zikes, Jay Young, Jeff Hodges, @jmtatsch, Johnny Lim, Jonas Meinertz Hansen, Kanit Wongsuphasawat, Kashif Rasul, Ken Shirriff, Kenneth Mitchner, Kenta Yonekura, Konrad Magnusson, Konstantin Lopuhin, @lahwran, @lekaha, @liyongsea, Lucas Adams, @makseq, Mandeep Singh, @manipopopo, Mark Amery, Memo Akten, Michael Heilman, Michael Peteuil, Nathan Daly, Nicolas Fauchereau, @ninotoshi, Olav Nymoen, @panmari, @papelita1234, Pedro Lopes, Pranav Sailesh Mani, RJ Ryan, Rob Culliton, Robert DiPietro, @ronrest, Sam Abrahams, Sarath Shekkizhar, Scott Graham, Sebastian Raschka, Sung Kim, Surya Bhupatiraju, Syed Ahmed, Till Hoffmann, @timsl, @urimend, @vesnica, Vlad Frolov, Vlad Zagorodniy, Wei-Ting Kuo, Wenjian Huang, William Dmitri Breaden Madden, Wladimir Schmidt, Yuan Tang, Yuwen Yan, Yuxin Wu, Yuya Kusakabe, @zhongzyd, @znah.
We are also grateful to all who filed issues or helped resolve them, asked and
answered questions, and were part of inspiring discussions.
diff --git a/tensorflow/contrib/makefile/Makefile b/tensorflow/contrib/makefile/Makefile
index 8b93f3d6c0..4987e9bcd4 100644
--- a/tensorflow/contrib/makefile/Makefile
+++ b/tensorflow/contrib/makefile/Makefile
@@ -112,6 +112,8 @@ LIBDIR := $(GENDIR)lib/
BINDIR := $(GENDIR)bin/
PBTGENDIR := $(GENDIR)proto_text/
PROTOGENDIR := $(GENDIR)proto/
+DEPDIR := $(GENDIR)dep/
+$(shell mkdir -p $(DEPDIR) >/dev/null)
# Settings for the target compiler.
CXX := $(CC_PREFIX) gcc
@@ -119,6 +121,7 @@ OPTFLAGS := -O0
CXXFLAGS := --std=c++11 -DIS_SLIM_BUILD $(OPTFLAGS)
LDFLAGS := \
-L/usr/local/lib
+DEPFLAGS = -MT $@ -MMD -MP -MF $(DEPDIR)/$*.Td
INCLUDES := \
-I. \
@@ -349,6 +352,10 @@ ifeq ($(TARGET),IOS)
-L$(GENDIR)protobuf_ios/lib \
-lz
endif
+ OBJDIR := $(OBJDIR)ios_$(IOS_ARCH)/
+ LIBDIR := $(LIBDIR)ios_$(IOS_ARCH)/
+ BINDIR := $(BINDIR)ios_$(IOS_ARCH)/
+ DEPDIR := $(DEPDIR)ios_$(IOS_ARCH)/
endif
# This library is the main target for this makefile. It will contain a minimal
@@ -442,7 +449,9 @@ $(BENCHMARK_NAME): $(BENCHMARK_OBJS) $(LIB_PATH)
# Matches on the normal hand-written TensorFlow C++ source files.
$(OBJDIR)%.o: %.cc | $(PBT_GEN_FILES)
@mkdir -p $(dir $@)
- $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
+ @mkdir -p $(dir $(DEPDIR)$*)
+ $(CXX) $(CXXFLAGS) $(DEPFLAGS) $(INCLUDES) -c $< -o $@
+ @mv -f $(DEPDIR)/$*.Td $(DEPDIR)/$*.d
# Compiles C++ source files that have been generated by protoc.
$(OBJDIR)%.pb.o: $(PROTOGENDIR)%.pb.cc
@@ -509,3 +518,8 @@ clean:
cleantarget:
rm -rf $(OBJDIR)
rm -rf $(BINDIR)
+
+$(DEPDIR)/%.d: ;
+.PRECIOUS: $(DEPDIR)/%.d
+
+-include $(patsubst %,$(DEPDIR)/%.d,$(basename $(TF_CC_SRCS)))
diff --git a/tensorflow/contrib/makefile/build_all_ios.sh b/tensorflow/contrib/makefile/build_all_ios.sh
index 6b6ed389fc..e16d33aac6 100755
--- a/tensorflow/contrib/makefile/build_all_ios.sh
+++ b/tensorflow/contrib/makefile/build_all_ios.sh
@@ -42,6 +42,18 @@ rm -rf tensorflow/contrib/makefile/downloads
# Pull down the required versions of the frameworks we need.
tensorflow/contrib/makefile/download_dependencies.sh
+# TODO(petewarden) - Some new code in Eigen triggers a clang bug, so work
+# around it by patching the source.
+sed -e 's#static uint32x4_t p4ui_CONJ_XOR = vld1q_u32( conj_XOR_DATA );#static uint32x4_t p4ui_CONJ_XOR; // = vld1q_u32( conj_XOR_DATA ); - Removed by script#' \
+-i '' \
+tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
+sed -e 's#static uint32x2_t p2ui_CONJ_XOR = vld1_u32( conj_XOR_DATA );#static uint32x2_t p2ui_CONJ_XOR;// = vld1_u32( conj_XOR_DATA ); - Removed by scripts#' \
+-i '' \
+tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
+sed -e 's#static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );#static uint64x2_t p2ul_CONJ_XOR;// = vld1q_u64( p2ul_conj_XOR_DATA ); - Removed by script#' \
+-i '' \
+tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
+
# Compile protobuf for the target iOS device architectures.
tensorflow/contrib/makefile/compile_ios_protobuf.sh ${JOBS_COUNT}
diff --git a/tensorflow/contrib/makefile/compile_ios_tensorflow.sh b/tensorflow/contrib/makefile/compile_ios_tensorflow.sh
index be1a1d3ec5..0c0edb7bd1 100755
--- a/tensorflow/contrib/makefile/compile_ios_tensorflow.sh
+++ b/tensorflow/contrib/makefile/compile_ios_tensorflow.sh
@@ -28,19 +28,6 @@ GENDIR=tensorflow/contrib/makefile/gen/
LIBDIR=${GENDIR}lib
LIB_PREFIX=libtensorflow-core
-# TODO(petewarden) - Some new code in Eigen triggers a clang bug, so work
-# around it by patching the source.
-sed -e 's#static uint32x4_t p4ui_CONJ_XOR = vld1q_u32( conj_XOR_DATA );#static uint32x4_t p4ui_CONJ_XOR; // = vld1q_u32( conj_XOR_DATA ); - Removed by script#' \
--i '' \
-tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
-sed -e 's#static uint32x2_t p2ui_CONJ_XOR = vld1_u32( conj_XOR_DATA );#static uint32x2_t p2ui_CONJ_XOR;// = vld1_u32( conj_XOR_DATA ); - Removed by scripts#' \
--i '' \
-tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
-sed -e 's#static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );#static uint64x2_t p2ul_CONJ_XOR;// = vld1q_u64( p2ul_conj_XOR_DATA ); - Removed by script#' \
--i '' \
-tensorflow/contrib/makefile/downloads/eigen-latest/eigen/src/Core/arch/NEON/Complex.h
-
-make -f tensorflow/contrib/makefile/Makefile cleantarget
make -f tensorflow/contrib/makefile/Makefile \
TARGET=IOS IOS_ARCH=ARMV7 LIB_NAME=${LIB_PREFIX}-armv7.a OPTFLAGS="$1" $2 $3
if [ $? -ne 0 ]
@@ -49,7 +36,6 @@ then
exit 1
fi
-make -f tensorflow/contrib/makefile/Makefile cleantarget
make -f tensorflow/contrib/makefile/Makefile \
TARGET=IOS IOS_ARCH=ARMV7S LIB_NAME=${LIB_PREFIX}-armv7s.a OPTFLAGS="$1" $2 $3
if [ $? -ne 0 ]
@@ -58,7 +44,6 @@ then
exit 1
fi
-make -f tensorflow/contrib/makefile/Makefile cleantarget
make -f tensorflow/contrib/makefile/Makefile \
TARGET=IOS IOS_ARCH=ARM64 LIB_NAME=${LIB_PREFIX}-arm64.a OPTFLAGS="$1" $2 $3
if [ $? -ne 0 ]
@@ -67,7 +52,6 @@ then
exit 1
fi
-make -f tensorflow/contrib/makefile/Makefile cleantarget
make -f tensorflow/contrib/makefile/Makefile \
TARGET=IOS IOS_ARCH=I386 LIB_NAME=${LIB_PREFIX}-i386.a OPTFLAGS="$1" $2 $3
if [ $? -ne 0 ]
@@ -76,7 +60,6 @@ then
exit 1
fi
-make -f tensorflow/contrib/makefile/Makefile cleantarget
make -f tensorflow/contrib/makefile/Makefile \
TARGET=IOS IOS_ARCH=X86_64 LIB_NAME=${LIB_PREFIX}-x86_64.a OPTFLAGS="$1" $2 $3
if [ $? -ne 0 ]
@@ -86,10 +69,10 @@ then
fi
lipo \
-${LIBDIR}/${LIB_PREFIX}-armv7.a \
-${LIBDIR}/${LIB_PREFIX}-armv7s.a \
-${LIBDIR}/${LIB_PREFIX}-arm64.a \
-${LIBDIR}/${LIB_PREFIX}-i386.a \
-${LIBDIR}/${LIB_PREFIX}-x86_64.a \
+${LIBDIR}/ios_ARMV7/${LIB_PREFIX}-armv7.a \
+${LIBDIR}/ios_ARMV7S/${LIB_PREFIX}-armv7s.a \
+${LIBDIR}/ios_ARM64/${LIB_PREFIX}-arm64.a \
+${LIBDIR}/ios_I386/${LIB_PREFIX}-i386.a \
+${LIBDIR}/ios_X86_64/${LIB_PREFIX}-x86_64.a \
-create \
-output ${LIBDIR}/${LIB_PREFIX}.a
diff --git a/tensorflow/contrib/makefile/tf_op_files.txt b/tensorflow/contrib/makefile/tf_op_files.txt
index 098007b907..cb29041dc4 100644
--- a/tensorflow/contrib/makefile/tf_op_files.txt
+++ b/tensorflow/contrib/makefile/tf_op_files.txt
@@ -46,6 +46,7 @@ tensorflow/core/kernels/pad_op.cc
tensorflow/core/kernels/pack_op.cc
tensorflow/core/kernels/ops_util.cc
tensorflow/core/kernels/no_op.cc
+tensorflow/core/kernels/mirror_pad_op.cc
tensorflow/core/kernels/maxpooling_op.cc
tensorflow/core/kernels/matmul_op.cc
tensorflow/core/kernels/lrn_op.cc
diff --git a/tensorflow/core/common_runtime/optimization_registry.h b/tensorflow/core/common_runtime/optimization_registry.h
index 54e2c0e499..46fb97fe4b 100644
--- a/tensorflow/core/common_runtime/optimization_registry.h
+++ b/tensorflow/core/common_runtime/optimization_registry.h
@@ -27,7 +27,7 @@ limitations under the License.
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
-class SessionOptions;
+struct SessionOptions;
// All the parameters used by an optimization pass are packaged in
// this struct. They should be enough for the optimization pass to use
diff --git a/tensorflow/core/common_runtime/simple_graph_execution_state.h b/tensorflow/core/common_runtime/simple_graph_execution_state.h
index 6f3b97f83d..595da551d4 100644
--- a/tensorflow/core/common_runtime/simple_graph_execution_state.h
+++ b/tensorflow/core/common_runtime/simple_graph_execution_state.h
@@ -35,7 +35,7 @@ limitations under the License.
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
-class SessionOptions;
+struct SessionOptions;
class StepStats;
class Timeline;
diff --git a/tensorflow/core/graph/gradients.cc b/tensorflow/core/graph/gradients.cc
index 01e9b68638..09c3d8d567 100644
--- a/tensorflow/core/graph/gradients.cc
+++ b/tensorflow/core/graph/gradients.cc
@@ -35,8 +35,6 @@ namespace tensorflow {
// TODO(andydavis) Remove some of the code duplicated between this module
// and that in 'common_runtime/function.cc'.
// A few string constant used throughout this module.
-static const char* const kArgOp = "_Arg";
-static const char* const kRetOp = "_Retval";
static const char* const kGradientOp = "SymbolicGradient";
static const char* const kNodeLabel = "Func";
diff --git a/tensorflow/core/graph/graph.cc b/tensorflow/core/graph/graph.cc
index 3607db7e81..a0cdd4fcfc 100644
--- a/tensorflow/core/graph/graph.cc
+++ b/tensorflow/core/graph/graph.cc
@@ -29,9 +29,6 @@ namespace tensorflow {
// Node
string Node::DebugString() const {
- if (this == nullptr) {
- return "{nullptr}";
- }
string ret = strings::StrCat("{name:'", name(), "' id:", id_);
if (IsSource()) {
strings::StrAppend(&ret, " source}");
diff --git a/tensorflow/core/kernels/argmax_op.cc b/tensorflow/core/kernels/argmax_op.cc
index 4dd551021e..595bd7bd5e 100644
--- a/tensorflow/core/kernels/argmax_op.cc
+++ b/tensorflow/core/kernels/argmax_op.cc
@@ -59,7 +59,7 @@ class ArgOp : public OpKernel {
OP_REQUIRES(context, dim >= 0, errors::InvalidArgument("dim must be >= 0"));
OP_REQUIRES(context, dim < input_dims,
- errors::InvalidArgument("Minimum tensor rank: ", dim,
+ errors::InvalidArgument("Minimum tensor rank: ", dim + 1,
" but got: ", input_dims));
OP_REQUIRES(
context, input.dim_size(dim) > 0,
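The corrected message reflects the actual requirement: reducing over index `dim` needs a tensor of rank at least `dim + 1`. Below is a small, hedged Python sketch of the user-visible behavior, assuming the 0.10-era `tf.argmax(input, dimension)` signature; the tensor values are illustrative only.

```python
import tensorflow as tf

t = tf.constant([[1, 5, 3],
                 [4, 2, 6]])      # rank-2 tensor

valid = tf.argmax(t, 1)           # dimension 1 needs rank >= 2, so this is accepted

# tf.argmax(t, 2) would be rejected: the op needs rank >= 3 but t has rank 2,
# which is what the corrected "Minimum tensor rank: dim + 1" message reports.

with tf.Session() as sess:
    print(sess.run(valid))        # [1 2]
```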
diff --git a/tensorflow/core/kernels/cwise_op_conj.cc b/tensorflow/core/kernels/cwise_op_conj.cc
index d6dc565c81..61b1d98f1e 100644
--- a/tensorflow/core/kernels/cwise_op_conj.cc
+++ b/tensorflow/core/kernels/cwise_op_conj.cc
@@ -19,7 +19,9 @@ namespace tensorflow {
REGISTER2(UnaryOp, CPU, "Conj", functor::conj, complex64, complex128);
#if GOOGLE_CUDA
-// REGISTER_KERNEL_BUILDER(Name("Conj").Device(DEVICE_GPU),
-// UnaryOp<GPUDevice, functor::conj<complex64>>);
+REGISTER_KERNEL_BUILDER(Name("Conj").Device(DEVICE_GPU).TypeConstraint<complex64>("T"),
+ UnaryOp<GPUDevice, functor::conj<complex64>>);
+REGISTER_KERNEL_BUILDER(Name("Conj").Device(DEVICE_GPU).TypeConstraint<complex128>("T"),
+ UnaryOp<GPUDevice, functor::conj<complex128>>);
#endif
} // namespace tensorflow
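With these registrations, `Conj` gains GPU kernels for both complex64 and complex128. A brief, hedged Python sketch of the user-facing effect follows; it assumes the 0.10-era session API and a CUDA-capable GPU (soft placement falls back to CPU otherwise).

```python
import tensorflow as tf

# tf.conj now has GPU kernels for both complex types.
z = tf.constant([1 + 2j, 3 - 4j], dtype=tf.complex64)
with tf.device("/gpu:0"):
    conj_z = tf.conj(z)

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    print(sess.run(conj_z))   # [1.-2.j  3.+4.j]
```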
diff --git a/tensorflow/core/kernels/cwise_op_gpu_conj.cu.cc b/tensorflow/core/kernels/cwise_op_gpu_conj.cu.cc
index 43ead4c5c8..e7dff5d0ac 100644
--- a/tensorflow/core/kernels/cwise_op_gpu_conj.cu.cc
+++ b/tensorflow/core/kernels/cwise_op_gpu_conj.cu.cc
@@ -19,7 +19,8 @@ limitations under the License.
namespace tensorflow {
namespace functor {
-// DEFINE_UNARY1(conj, complex64); // not working
+ DEFINE_UNARY1(conj, complex64);
+ DEFINE_UNARY1(conj, complex128);
} // namespace functor
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/cwise_op_sub.cc b/tensorflow/core/kernels/cwise_op_sub.cc
index e6cb8d0d24..8fade5f667 100644
--- a/tensorflow/core/kernels/cwise_op_sub.cc
+++ b/tensorflow/core/kernels/cwise_op_sub.cc
@@ -18,6 +18,12 @@ limitations under the License.
namespace tensorflow {
REGISTER7(BinaryOp, CPU, "Sub", functor::sub, float, Eigen::half, double, int32,
int64, complex64, complex128);
+#if defined(__ANDROID_TYPES_SLIM__)
+// We only register the first type when we have multi-argument calls in the
+// case where we're trying to reduce executable size, but it turns out that the
+// int32 version of this op is needed, so explicitly include it.
+REGISTER(BinaryOp, CPU, "Sub", functor::sub, int32);
+#endif // __ANDROID_TYPES_SLIM__
#if GOOGLE_CUDA
REGISTER4(BinaryOp, GPU, "Sub", functor::sub, float, Eigen::half, double,
int64);
diff --git a/tensorflow/core/kernels/cwise_ops_test.cc b/tensorflow/core/kernels/cwise_ops_test.cc
index 2cf51878ba..823e7e14ed 100644
--- a/tensorflow/core/kernels/cwise_ops_test.cc
+++ b/tensorflow/core/kernels/cwise_ops_test.cc
@@ -23,13 +23,14 @@ limitations under the License.
namespace tensorflow {
-// Creates a Graph which applies a unary "func" on a 3D float tensor
-// of "num" elements.
-static Graph* Unary(const string& func, int num) {
+// Creates a Graph which applies a unary "func" on a 3D tensor of
+// type T with "num" elements.
+template <typename T>
+static Graph* Unary(const string& func, int num, DataType dtype) {
Graph* g = new Graph(OpRegistry::Global());
- Tensor data(DT_FLOAT, TensorShape({64, 64, num / (64 * 64)}));
+ Tensor data(dtype, TensorShape({64, 64, num / (64 * 64)}));
CHECK_GT(data.NumElements(), 0);
- data.flat<float>().setRandom();
+ data.flat<T>().setRandom();
test::graph::Unary(g, func, test::graph::Constant(g, data), 0);
return g;
}
@@ -40,17 +41,23 @@ static int RowsAndColsArg(int r, int c) { return r * kRows + c; }
static int RowsFromArg(int arg) { return (arg / kRows); }
static int ColsFromArg(int arg) { return (arg % kRows); }
-#define BM_UNARY(DEVICE, FUNC) \
- static void BM_##DEVICE##_##FUNC(int iters, int num) { \
- const int64 tot = static_cast<int64>(iters) * num; \
- testing::ItemsProcessed(tot); \
- testing::BytesProcessed(tot * sizeof(float)); \
- test::Benchmark(#DEVICE, Unary(#FUNC, num)).Run(iters); \
- } \
- BENCHMARK(BM_##DEVICE##_##FUNC)->Range(4 << 10, 1 << 20);
-
-BM_UNARY(cpu, Floor);
-BM_UNARY(gpu, Floor);
+#define BM_UNARY(DEVICE, FUNC, T, TYPE) \
+ static void BM_##DEVICE##_##FUNC##_##TYPE(int iters, int num) { \
+ const int64 tot = static_cast<int64>(iters) * num; \
+ testing::ItemsProcessed(tot); \
+ testing::BytesProcessed(tot * sizeof(T)); \
+ test::Benchmark(#DEVICE, Unary<T>(#FUNC, num, TYPE)).Run(iters); \
+ } \
+ BENCHMARK(BM_##DEVICE##_##FUNC##_##TYPE)->Range(4 << 10, 1 << 20);
+
+BM_UNARY(cpu, Floor, float, DT_FLOAT);
+BM_UNARY(gpu, Floor, float, DT_FLOAT);
+BM_UNARY(cpu, Floor, double, DT_DOUBLE);
+BM_UNARY(gpu, Floor, double, DT_DOUBLE);
+BM_UNARY(cpu, Conj, std::complex<float>, DT_COMPLEX64);
+BM_UNARY(gpu, Conj, std::complex<float>, DT_COMPLEX64);
+BM_UNARY(cpu, Conj, std::complex<double>, DT_COMPLEX128);
+BM_UNARY(gpu, Conj, std::complex<double>, DT_COMPLEX128);
// data func scalar.
static Graph* BinaryScalar(int num, const string& func) {
diff --git a/tensorflow/core/kernels/lookup_table_init_op.cc b/tensorflow/core/kernels/lookup_table_init_op.cc
index 10fe91de9a..6303a0f5cf 100644
--- a/tensorflow/core/kernels/lookup_table_init_op.cc
+++ b/tensorflow/core/kernels/lookup_table_init_op.cc
@@ -80,7 +80,7 @@ class KeyValueTensorIterator
Status status() const override { return status_; }
- int64 total_size() const {
+ int64 total_size() const override {
return keys_ == nullptr ? -1 : keys_->NumElements();
}
diff --git a/tensorflow/core/ops/math_ops.cc b/tensorflow/core/ops/math_ops.cc
index c7a047b03d..dd41bf2a67 100644
--- a/tensorflow/core/ops/math_ops.cc
+++ b/tensorflow/core/ops/math_ops.cc
@@ -499,7 +499,8 @@ REGISTER_OP("Add")
.Doc(R"doc(
Returns x + y element-wise.
-*NOTE*: Add supports broadcasting. AddN does not.
+*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("Sub")
@@ -507,6 +508,9 @@ REGISTER_OP("Sub")
.SetShapeFn(BroadcastBinaryOpShapeFn)
.Doc(R"doc(
Returns x - y element-wise.
+
+*NOTE*: `Sub` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("Mul")
@@ -515,10 +519,16 @@ REGISTER_OP("Mul")
.SetShapeFn(BroadcastBinaryOpShapeFn)
.Doc(R"doc(
Returns x * y element-wise.
+
+*NOTE*: `Mul` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("Div").BINARY_MORE().SetShapeFn(BroadcastBinaryOpShapeFn).Doc(R"doc(
Returns x / y element-wise.
+
+*NOTE*: `Div` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("SquaredDifference")
@@ -527,6 +537,9 @@ REGISTER_OP("SquaredDifference")
.SetShapeFn(BroadcastBinaryOpShapeFn)
.Doc(R"doc(
Returns (x - y)(x - y) element-wise.
+
+*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
#undef BINARY_FEWER
@@ -540,7 +553,10 @@ REGISTER_OP("Maximum")
.SetIsCommutative()
.SetShapeFn(BroadcastBinaryOpShapeFn)
.Doc(R"doc(
-Returns the max of x and y (i.e. x > y ? x : y) element-wise, broadcasts.
+Returns the max of x and y (i.e. x > y ? x : y) element-wise.
+
+*NOTE*: `Maximum` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("Minimum")
@@ -551,7 +567,10 @@ REGISTER_OP("Minimum")
.SetIsCommutative()
.SetShapeFn(BroadcastBinaryOpShapeFn)
.Doc(R"doc(
-Returns the min of x and y (i.e. x < y ? x : y) element-wise, broadcasts.
+Returns the min of x and y (i.e. x < y ? x : y) element-wise.
+
+*NOTE*: `Minimum` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("Mod")
@@ -562,6 +581,9 @@ REGISTER_OP("Mod")
.SetShapeFn(BroadcastBinaryOpShapeFn)
.Doc(R"doc(
Returns element-wise remainder of division.
+
+*NOTE*: `Mod` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("Pow")
@@ -679,24 +701,36 @@ REGISTER_OP("Less")
.COMPARISON()
.Doc(R"doc(
Returns the truth value of (x < y) element-wise.
+
+*NOTE*: `Less` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("LessEqual")
.COMPARISON()
.Doc(R"doc(
Returns the truth value of (x <= y) element-wise.
+
+*NOTE*: `LessEqual` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("Greater")
.COMPARISON()
.Doc(R"doc(
Returns the truth value of (x > y) element-wise.
+
+*NOTE*: `Greater` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("GreaterEqual")
.COMPARISON()
.Doc(R"doc(
Returns the truth value of (x >= y) element-wise.
+
+*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
#undef COMPARISON
@@ -718,12 +752,18 @@ REGISTER_OP("Equal")
.EQUALITY_COMPARISON()
.Doc(R"doc(
Returns the truth value of (x == y) element-wise.
+
+*NOTE*: `Equal` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("NotEqual")
.EQUALITY_COMPARISON()
.Doc(R"doc(
Returns the truth value of (x != y) element-wise.
+
+*NOTE*: `NotEqual` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
#undef EQUALITY_COMPARISON
@@ -749,12 +789,18 @@ REGISTER_OP("LogicalAnd")
.BINARY_LOGICAL()
.Doc(R"doc(
Returns the truth value of x AND y element-wise.
+
+*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
REGISTER_OP("LogicalOr")
.BINARY_LOGICAL()
.Doc(R"doc(
Returns the truth value of x OR y element-wise.
+
+*NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
)doc");
#undef BINARY_LOGICAL
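The broadcasting notes added above apply uniformly to these binary ops. A short, hedged Python illustration of the distinction they draw between `Add` (broadcasts) and `AddN` (does not), assuming the 0.10-era names `tf.add`, `tf.sub`, and `tf.add_n`:

```python
import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])       # shape [2, 3]
y = tf.constant([10., 20., 30.])      # shape [3]

broadcast_sum = tf.add(x, y)          # Add broadcasts y across the rows -> shape [2, 3]
broadcast_diff = tf.sub(x, y)         # Sub broadcasts the same way

# AddN requires all inputs to have identical shapes; mixing x and y would fail.
same_shape_sum = tf.add_n([x, x])

with tf.Session() as sess:
    print(sess.run([broadcast_sum, broadcast_diff, same_shape_sum]))
```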
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 7e22297680..01bb4bc82f 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -4490,6 +4490,42 @@ op {
description: "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the PNG-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n* 4: output an RGBA image.\n\nIf needed, the PNG-encoded image is transformed to match the requested number\nof color channels."
}
op {
+ name: "DecodeGif"
+ input_arg {
+ name: "contents"
+ description: "0-D. The GIF-encoded image."
+ type: DT_STRING
+ }
+ output_arg {
+ name: "image"
+ description: "3-D with shape `[height, width, channels]`."
+ type_attr: "dtype"
+ }
+ attr {
+ name: "channels"
+ type: "int"
+ default_value {
+ i: 0
+ }
+ description: "Number of color channels for the decoded image."
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ default_value {
+ type: DT_UINT8
+ }
+ allowed_values {
+ list {
+ type: DT_UINT8
+ type: DT_UINT16
+ }
+ }
+ }
+ summary: "Decode a GIF-encoded image to a uint8 or uint16 tensor."
+ description: "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the GIF-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n* 4: output an RGBA image.\n\nIf needed, the GIF-encoded image is transformed to match the requested number\nof color channels."
+}
+op {
name: "DecodeRaw"
input_arg {
name: "bytes"
diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h
index 443eabaee0..2c260b1a9a 100644
--- a/tensorflow/core/public/version.h
+++ b/tensorflow/core/public/version.h
@@ -19,8 +19,8 @@ limitations under the License.
// TensorFlow uses semantic versioning, see http://semver.org/.
#define TF_MAJOR_VERSION 0
-#define TF_MINOR_VERSION 9
-#define TF_PATCH_VERSION 0
+#define TF_MINOR_VERSION 10
+#define TF_PATCH_VERSION 0rc0
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
// "-beta", "-rc", "-rc.1")
diff --git a/tensorflow/examples/how_tos/reading_data/convert_to_records.py b/tensorflow/examples/how_tos/reading_data/convert_to_records.py
index 2e3035731a..566d554e7f 100644
--- a/tensorflow/examples/how_tos/reading_data/convert_to_records.py
+++ b/tensorflow/examples/how_tos/reading_data/convert_to_records.py
@@ -19,7 +19,6 @@ from __future__ import division
from __future__ import print_function
import os
-import numpy
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
diff --git a/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py b/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py
index 8a43158062..9a33afd93a 100644
--- a/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py
+++ b/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py
@@ -30,10 +30,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import os.path
import time
-
-import numpy
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
diff --git a/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py b/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py
index 0711bed920..b4c80e53b6 100644
--- a/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py
+++ b/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py
@@ -29,10 +29,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import os.path
import time
-
-import numpy
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
diff --git a/tensorflow/examples/how_tos/reading_data/fully_connected_reader.py b/tensorflow/examples/how_tos/reading_data/fully_connected_reader.py
index bdd821373f..351d531e25 100644
--- a/tensorflow/examples/how_tos/reading_data/fully_connected_reader.py
+++ b/tensorflow/examples/how_tos/reading_data/fully_connected_reader.py
@@ -29,8 +29,6 @@ from __future__ import print_function
import os.path
import time
-
-import numpy
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import mnist
diff --git a/tensorflow/examples/image_retraining/retrain_test.py b/tensorflow/examples/image_retraining/retrain_test.py
index 91108abde0..072998ae60 100644
--- a/tensorflow/examples/image_retraining/retrain_test.py
+++ b/tensorflow/examples/image_retraining/retrain_test.py
@@ -18,12 +18,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import os
import tensorflow as tf
from tensorflow.examples.image_retraining import retrain
from tensorflow.python.framework import test_util
-from tensorflow.python.platform import googletest
class ImageRetrainingTest(test_util.TensorFlowTestCase):
diff --git a/tensorflow/examples/learn/wide_n_deep_tutorial.py b/tensorflow/examples/learn/wide_n_deep_tutorial.py
index f80b839156..5a23087b5a 100644
--- a/tensorflow/examples/learn/wide_n_deep_tutorial.py
+++ b/tensorflow/examples/learn/wide_n_deep_tutorial.py
@@ -59,7 +59,7 @@ def maybe_download():
urllib.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
- print("Training data is downlaoded to %s" % train_file_name)
+ print("Training data is downloaded to %s" % train_file_name)
if FLAGS.test_data:
test_file_name = FLAGS.test_data
@@ -68,7 +68,7 @@ def maybe_download():
urllib.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
- print("Test data is downlaoded to %s" % test_file_name)
+ print("Test data is downloaded to %s" % test_file_name)
return train_file_name, test_file_name
diff --git a/tensorflow/examples/skflow/multioutput_regression.py b/tensorflow/examples/skflow/multioutput_regression.py
index ef76a6ce27..cf978e23d4 100644
--- a/tensorflow/examples/skflow/multioutput_regression.py
+++ b/tensorflow/examples/skflow/multioutput_regression.py
@@ -23,7 +23,6 @@ from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
-from sklearn import datasets
from sklearn.metrics import mean_squared_error
from tensorflow.contrib import learn
diff --git a/tensorflow/examples/tutorials/mnist/fully_connected_feed.py b/tensorflow/examples/tutorials/mnist/fully_connected_feed.py
index 5ab6024c2b..cd936d653e 100644
--- a/tensorflow/examples/tutorials/mnist/fully_connected_feed.py
+++ b/tensorflow/examples/tutorials/mnist/fully_connected_feed.py
@@ -83,7 +83,7 @@ def fill_feed_dict(data_set, images_pl, labels_pl):
feed_dict: The feed dictionary mapping from placeholders to values.
"""
# Create the feed_dict for the placeholders filled with the next
- # `batch size ` examples.
+ # `batch size` examples.
images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
FLAGS.fake_data)
feed_dict = {
diff --git a/tensorflow/g3doc/api_docs/cc/ClassEnv.md b/tensorflow/g3doc/api_docs/cc/ClassEnv.md
index 1e5b0ade49..0010c0fbb2 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassEnv.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassEnv.md
@@ -38,7 +38,7 @@ Returns the file system schemes registered for this Env .
-#### `Status tensorflow::Env::NewRandomAccessFile(const string &fname, RandomAccessFile **result)` {#Status_tensorflow_Env_NewRandomAccessFile}
+#### `Status tensorflow::Env::NewRandomAccessFile(const string &fname, std::unique_ptr< RandomAccessFile > *result)` {#Status_tensorflow_Env_NewRandomAccessFile}
Creates a brand new random access read-only file with the specified name.
@@ -48,7 +48,7 @@ The returned file may be concurrently accessed by multiple threads.
The ownership of the returned RandomAccessFile is passed to the caller and the object should be deleted when it is not used. The file object shouldn't live longer than the Env object.
-#### `Status tensorflow::Env::NewWritableFile(const string &fname, WritableFile **result)` {#Status_tensorflow_Env_NewWritableFile}
+#### `Status tensorflow::Env::NewWritableFile(const string &fname, std::unique_ptr< WritableFile > *result)` {#Status_tensorflow_Env_NewWritableFile}
Creates an object that writes to a new file with the specified name.
@@ -58,7 +58,7 @@ The returned file will only be accessed by one thread at a time.
The ownership of the returned WritableFile is passed to the caller and the object should be deleted when it is not used. The file object shouldn't live longer than the Env object.
-#### `Status tensorflow::Env::NewAppendableFile(const string &fname, WritableFile **result)` {#Status_tensorflow_Env_NewAppendableFile}
+#### `Status tensorflow::Env::NewAppendableFile(const string &fname, std::unique_ptr< WritableFile > *result)` {#Status_tensorflow_Env_NewAppendableFile}
Creates an object that either appends to an existing file, or writes to a new file (if the file does not exist to begin with).
@@ -68,7 +68,7 @@ The returned file will only be accessed by one thread at a time.
The ownership of the returned WritableFile is passed to the caller and the object should be deleted when it is not used. The file object shouldn't live longer than the Env object.
-#### `Status tensorflow::Env::NewReadOnlyMemoryRegionFromFile(const string &fname, ReadOnlyMemoryRegion **result)` {#Status_tensorflow_Env_NewReadOnlyMemoryRegionFromFile}
+#### `Status tensorflow::Env::NewReadOnlyMemoryRegionFromFile(const string &fname, std::unique_ptr< ReadOnlyMemoryRegion > *result)` {#Status_tensorflow_Env_NewReadOnlyMemoryRegionFromFile}
Creates a readonly region of memory with the file context.
@@ -96,6 +96,20 @@ Deletes the named file.
+#### `Status tensorflow::Env::DeleteRecursively(const string &dirname, int64 *undeleted_files, int64 *undeleted_dirs)` {#Status_tensorflow_Env_DeleteRecursively}
+
+Deletes the specified directory and all subdirectories and files underneath it. undeleted_files and undeleted_dirs store the number of files and directories that weren't deleted (unspecified if the return status is not OK). REQUIRES: undeleted_files, undeleted_dirs to be not null. Typical return codes.
+
+
+
+OK - dirname exists and we were able to delete everything underneath.
+
+NOT_FOUND - dirname doesn't exist
+
+PERMISSION_DENIED - dirname or some descendant is not writable
+
+UNIMPLEMENTED - Some underlying functions (like Delete) are not implemented
+
#### `Status tensorflow::Env::CreateDir(const string &dirname)` {#Status_tensorflow_Env_CreateDir}
Creates the specified directory.
@@ -108,6 +122,28 @@ Deletes the specified directory.
+#### `Status tensorflow::Env::Stat(const string &fname, FileStatistics *stat)` {#Status_tensorflow_Env_Stat}
+
+Obtains statistics for the given path.
+
+
+
+#### `Status tensorflow::Env::IsDirectory(const string &fname)` {#Status_tensorflow_Env_IsDirectory}
+
+Returns whether the given path is a directory or not. Typical return codes (not guaranteed exhaustive):
+
+
+
+OK - The path exists and is a directory.
+
+FAILED_PRECONDITION - The path exists and is not a directory.
+
+NOT_FOUND - The path entry does not exist.
+
+PERMISSION_DENIED - Insufficient permissions.
+
+UNIMPLEMENTED - The file factory doesn't support directories.
+
#### `Status tensorflow::Env::GetFileSize(const string &fname, uint64 *file_size)` {#Status_tensorflow_Env_GetFileSize}
Stores the size of `fname` in `*file_size`.
@@ -126,7 +162,13 @@ Returns the number of micro-seconds since some fixed point in time. Only useful
-#### `virtual void tensorflow::Env::SleepForMicroseconds(int micros)=0` {#virtual_void_tensorflow_Env_SleepForMicroseconds}
+#### `virtual uint64 tensorflow::Env::NowSeconds()` {#virtual_uint64_tensorflow_Env_NowSeconds}
+
+Returns the number of seconds since some fixed point in time. Only useful for computing deltas of time.
+
+
+
+#### `virtual void tensorflow::Env::SleepForMicroseconds(int64 micros)=0` {#virtual_void_tensorflow_Env_SleepForMicroseconds}
Sleeps/delays the thread for the prescribed number of micro-seconds.
@@ -144,7 +186,7 @@ Caller takes ownership of the result and must delete it eventually (the deletion
-#### `virtual void tensorflow::Env::SchedClosureAfter(int micros, std::function< void()> closure)=0` {#virtual_void_tensorflow_Env_SchedClosureAfter}
+#### `virtual void tensorflow::Env::SchedClosureAfter(int64 micros, std::function< void()> closure)=0` {#virtual_void_tensorflow_Env_SchedClosureAfter}
diff --git a/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md b/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md
index 2e284ac815..f0041f5be9 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md
@@ -48,7 +48,7 @@ Returns the number of micro-seconds since some fixed point in time. Only useful
-#### `void tensorflow::EnvWrapper::SleepForMicroseconds(int micros) override` {#void_tensorflow_EnvWrapper_SleepForMicroseconds}
+#### `void tensorflow::EnvWrapper::SleepForMicroseconds(int64 micros) override` {#void_tensorflow_EnvWrapper_SleepForMicroseconds}
Sleeps/delays the thread for the prescribed number of micro-seconds.
@@ -66,7 +66,7 @@ Caller takes ownership of the result and must delete it eventually (the deletion
-#### `void tensorflow::EnvWrapper::SchedClosureAfter(int micros, std::function< void()> closure) override` {#void_tensorflow_EnvWrapper_SchedClosureAfter}
+#### `void tensorflow::EnvWrapper::SchedClosureAfter(int64 micros, std::function< void()> closure) override` {#void_tensorflow_EnvWrapper_SchedClosureAfter}
diff --git a/tensorflow/g3doc/api_docs/cc/ClassTensor.md b/tensorflow/g3doc/api_docs/cc/ClassTensor.md
index cc271aae37..e221a02693 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassTensor.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassTensor.md
@@ -8,9 +8,13 @@ Represents an n-dimensional array of values.
#### `tensorflow::Tensor::Tensor()` {#tensorflow_Tensor_Tensor}
-Default Tensor constructor. Creates a 1-dimension, 0-element float tensor.
+Creates a 1-dimensional, 0-element float tensor.
+The returned Tensor is not a scalar (shape {}), but is instead an empty one-dimensional Tensor (shape {0}, NumElements() == 0). Since it has no elements, it does not need to be assigned a value and is initialized by default ( IsInitialized() is true). If this is undesirable, consider creating a one-element scalar which does require initialization:
+```c++
+Tensor(DT_FLOAT, TensorShape({}))
+```
#### `tensorflow::Tensor::Tensor(DataType type, const TensorShape &shape)` {#tensorflow_Tensor_Tensor}
@@ -32,9 +36,9 @@ Creates a tensor with the input `type` and `shape`, using the allocator `a` and
#### `tensorflow::Tensor::Tensor(DataType type)` {#tensorflow_Tensor_Tensor}
-Creates an uninitialized Tensor of the given data type.
-
+Creates an empty Tensor of the given data type.
+Like Tensor() , returns a 1-dimensional, 0-element Tensor with IsInitialized() returning True. See the Tensor() documentation for details.
#### `tensorflow::Tensor::Tensor(const Tensor &other)` {#tensorflow_Tensor_Tensor}
@@ -42,12 +46,18 @@ Creates an uninitialized Tensor of the given data type.
-#### `tensorflow::Tensor::~Tensor()` {#tensorflow_Tensor_Tensor}
+#### `tensorflow::Tensor::Tensor(Tensor &&other)` {#tensorflow_Tensor_Tensor}
Copy constructor.
+#### `tensorflow::Tensor::~Tensor()` {#tensorflow_Tensor_Tensor}
+
+
+
+
+
#### `DataType tensorflow::Tensor::dtype() const` {#DataType_tensorflow_Tensor_dtype}
Returns the data type.
@@ -98,9 +108,9 @@ Convenience accessor for the tensor shape.
#### `bool tensorflow::Tensor::IsInitialized() const` {#bool_tensorflow_Tensor_IsInitialized}
-Has this Tensor been initialized?
-
+If necessary, has this Tensor been initialized?
+Zero-element Tensors are always considered initialized, even if they have never been assigned to and do not have any memory allocated.
#### `size_t tensorflow::Tensor::TotalBytes() const` {#size_t_tensorflow_Tensor_TotalBytes}
@@ -120,6 +130,12 @@ Assign operator. This tensor shares other&apos;s underlying storage.
+#### `Tensor & tensorflow::Tensor::operator=(Tensor &&other)` {#Tensor_tensorflow_Tensor_operator_}
+
+Move assignment operator. See the move constructor for details.
+
+
+
#### `bool tensorflow::Tensor::CopyFrom(const Tensor &other, const TensorShape &shape) TF_MUST_USE_RESULT` {#bool_tensorflow_Tensor_CopyFrom}
Copy the other tensor into this tensor and reshape it.
@@ -190,6 +206,12 @@ auto mat = my_mat.matrix<int32>();// CHECK fails as type mismatch.
+#### `TTypes< T, NDIMS >::Tensor tensorflow::Tensor::bit_casted_tensor()` {#TTypes_T_NDIMS_Tensor_tensorflow_Tensor_bit_casted_tensor}
+
+Return the tensor data as an `Eigen::Tensor` with the same size but a bitwise cast to the specified dtype `T`.
+
+Using a bitcast is useful for move and copy operations. NOTE: this is the same as `tensor()` except a bitcast is allowed.
+
#### `TTypes<T>::Flat tensorflow::Tensor::flat()` {#TTypes_T_Flat_tensorflow_Tensor_flat}
Return the tensor data as an `Eigen::Tensor` of the data type and a specified shape.
@@ -239,6 +261,12 @@ Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all Tenso
+#### `TTypes< T, NDIMS >::Tensor tensorflow::Tensor::bit_casted_shaped(gtl::ArraySlice< int64 > new_sizes)` {#TTypes_T_NDIMS_Tensor_tensorflow_Tensor_bit_casted_shaped}
+
+Return the tensor data as an `Eigen::Tensor` with the new shape specified in `new_sizes` and cast to a new dtype `T`.
+
+Using a bitcast is useful for move and copy operations. The allowed bitcast is the only difference from `shaped()`.
+
#### `TTypes< T, NDIMS >::UnalignedTensor tensorflow::Tensor::unaligned_shaped(gtl::ArraySlice< int64 > new_sizes)` {#TTypes_T_NDIMS_UnalignedTensor_tensorflow_Tensor_unaligned_shaped}
@@ -269,6 +297,12 @@ Const versions of all the methods above.
+#### `TTypes< T, NDIMS >::ConstTensor tensorflow::Tensor::bit_casted_tensor() const` {#TTypes_T_NDIMS_ConstTensor_tensorflow_Tensor_bit_casted_tensor}
+
+Return the tensor data as an `Eigen::Tensor` with the same size but a bitwise cast to the specified dtype `T`.
+
+Using a bitcast is useful for move and copy operations. NOTE: this is the same as `tensor()` except a bitcast is allowed.
+
#### `TTypes<T>::ConstFlat tensorflow::Tensor::flat() const` {#TTypes_T_ConstFlat_tensorflow_Tensor_flat}
@@ -287,6 +321,12 @@ Const versions of all the methods above.
+#### `TTypes< T, NDIMS >::ConstTensor tensorflow::Tensor::bit_casted_shaped(gtl::ArraySlice< int64 > new_sizes) const` {#TTypes_T_NDIMS_ConstTensor_tensorflow_Tensor_bit_casted_shaped}
+
+Return the tensor data as an `Eigen::Tensor` with the new shape specified in `new_sizes` and cast to a new dtype `T`.
+
+Using a bitcast is useful for move and copy operations. The allowed bitcast is the only difference from `shaped()`.
+
#### `TTypes< T, NDIMS >::UnalignedConstTensor tensorflow::Tensor::unaligned_shaped(gtl::ArraySlice< int64 > new_sizes) const` {#TTypes_T_NDIMS_UnalignedConstTensor_tensorflow_Tensor_unaligned_shaped}
@@ -337,7 +377,7 @@ The returned ` StringPiece ` may point to memory location on devices that the CP
NOTE: The underlying tensor buffer is refcounted, so the lifetime of the contents mapped by the ` StringPiece ` matches the lifetime of the buffer; callers should arrange to make sure the buffer does not get destroyed while the ` StringPiece ` is still used.
-REQUIRES: `DataTypeCanUseMemcpy( dtype() )`.
+REQUIRES: `DataTypeCanUseMemcpy(dtype())`.
#### `void tensorflow::Tensor::UnsafeCopyFromInternal(const Tensor &, const TensorShape &)` {#void_tensorflow_Tensor_UnsafeCopyFromInternal}
diff --git a/tensorflow/g3doc/api_docs/cc/ClassTensorShape.md b/tensorflow/g3doc/api_docs/cc/ClassTensorShape.md
index d0be205c3b..5eba11a0df 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassTensorShape.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassTensorShape.md
@@ -60,6 +60,18 @@ Copy the specified shape.
+#### `tensorflow::TensorShape::TensorShape(TensorShape &&b)` {#tensorflow_TensorShape_TensorShape}
+
+Move the specified shape. After the move, `b` is safe for destruction and reassignment.
+
+
+
+#### `void tensorflow::TensorShape::operator=(TensorShape &&b)` {#void_tensorflow_TensorShape_operator_}
+
+
+
+
+
#### `void tensorflow::TensorShape::Clear()` {#void_tensorflow_TensorShape_Clear}
Clear a tensor shape.
diff --git a/tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md b/tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md
index 6010dd48b7..761feccae2 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md
@@ -36,13 +36,25 @@ Static helper routines for ` TensorShape `. Includes a few common predicates on
-#### `static Status tensorflow::TensorShapeUtils::MakeShape(const int32 *dims, int n, TensorShape *out)` {#static_Status_tensorflow_TensorShapeUtils_MakeShape}
+#### `static Status tensorflow::TensorShapeUtils::MakeShape(const int32 *dims, int64 n, TensorShape *out)` {#static_Status_tensorflow_TensorShapeUtils_MakeShape}
Returns a ` TensorShape ` whose dimensions are `dims[0]`, `dims[1]`, ..., `dims[n-1]`.
-#### `static Status tensorflow::TensorShapeUtils::MakeShape(const int64 *dims, int n, TensorShape *out)` {#static_Status_tensorflow_TensorShapeUtils_MakeShape}
+#### `static Status tensorflow::TensorShapeUtils::MakeShape(const int64 *dims, int64 n, TensorShape *out)` {#static_Status_tensorflow_TensorShapeUtils_MakeShape}
+
+
+
+
+
+#### `static Status tensorflow::TensorShapeUtils::MakeShape(gtl::ArraySlice< int32 > shape, TensorShape *out)` {#static_Status_tensorflow_TensorShapeUtils_MakeShape}
+
+
+
+
+
+#### `static Status tensorflow::TensorShapeUtils::MakeShape(gtl::ArraySlice< int64 > shape, TensorShape *out)` {#static_Status_tensorflow_TensorShapeUtils_MakeShape}
diff --git a/tensorflow/g3doc/api_docs/cc/StructTF_Buffer.md b/tensorflow/g3doc/api_docs/cc/StructTF_Buffer.md
index c435db8029..084beffe66 100644
--- a/tensorflow/g3doc/api_docs/cc/StructTF_Buffer.md
+++ b/tensorflow/g3doc/api_docs/cc/StructTF_Buffer.md
@@ -18,7 +18,7 @@
-#### `void(* TF_Buffer::data_deallocator) (void *data, size_t length))(void *data, size_t length)` {#void_TF_Buffer_data_deallocator_void_data_size_t_length_}
+#### `void(* TF_Buffer::data_deallocator)(void *data, size_t length))(void *data, size_t length)` {#void_TF_Buffer_data_deallocator_void_data_size_t_length_}
diff --git a/tensorflow/g3doc/api_docs/index.md b/tensorflow/g3doc/api_docs/index.md
index d074c0ece3..311908dca3 100644
--- a/tensorflow/g3doc/api_docs/index.md
+++ b/tensorflow/g3doc/api_docs/index.md
@@ -10,9 +10,9 @@ languages like Go, Java, JavaScript, Lua, R, and perhaps others. With
[SWIG](http://swig.org), it's relatively easy to develop a TensorFlow interface
for your favorite language.
-Note: Many practical aspects of usage are covered in the Mechanics tab, and
-some additional documentation not specific to any particular language API is
-available in the Resources tab.
+Note: Many practical aspects of usage are covered in the TUTORIALS and
+HOW TO tabs, and some additional documentation not specific to any
+particular language API is available in the RESOURCES tab.
* [Python API](python/index.md)
* [C++ API](cc/index.md)
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.rnn.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.rnn.md
index 19caecfb70..d9e935f8fb 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.rnn.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard0/tf.nn.rnn.md
@@ -2,15 +2,15 @@
Creates a recurrent neural network specified by RNNCell `cell`.
-##### The simplest form of RNN network generated is:
-
+The simplest form of RNN network generated is:
+```py
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
-
+```
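As a rough illustration of the loop above (not part of this patch; the cell choice, step count, and sizes are invented for the example), the equivalent call through `tf.nn.rnn` looks roughly like this:

```py
# Hypothetical sketch only: sizes and the cell type are assumptions.
import tensorflow as tf

num_steps, batch_size, input_size, num_units = 5, 32, 8, 16
cell = tf.nn.rnn_cell.BasicRNNCell(num_units)

# tf.nn.rnn takes a Python list with one [batch_size, input_size] tensor per step.
inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
          for _ in range(num_steps)]

# Internally this performs the zero_state / cell(input_, state) loop shown above.
outputs, final_state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
```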
However, a few other options are available:
An initial state can be provided.
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.exponential_decay.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.exponential_decay.md
index d90c8ee726..42d8f10076 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.exponential_decay.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.exponential_decay.md
@@ -28,7 +28,7 @@ learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
100000, 0.96, staircase=True)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
- tf.GradientDescentOptimizer(learning_rate)
+ tf.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
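The snippet above decays a starting rate of 0.1 by a factor of 0.96 every 100000 steps. As a hedged plain-Python restatement of the documented formula (decayed_lr = learning_rate * decay_rate ^ (global_step / decay_steps), with integer division when `staircase=True`), the arithmetic works out like this:

```py
# Sketch of the documented decay formula, not the TensorFlow implementation.
def decayed_lr(learning_rate, global_step, decay_steps, decay_rate, staircase=False):
    if staircase:
        exponent = global_step // decay_steps   # decay in discrete jumps
    else:
        exponent = float(global_step) / decay_steps
    return learning_rate * decay_rate ** exponent

print(decayed_lr(0.1, 100000, 100000, 0.96, staircase=True))  # 0.096
print(decayed_lr(0.1, 50000, 100000, 0.96))                   # ~0.09798
```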
diff --git a/tensorflow/g3doc/api_docs/python/nn.md b/tensorflow/g3doc/api_docs/python/nn.md
index 67f6e0f55e..075f85b249 100644
--- a/tensorflow/g3doc/api_docs/python/nn.md
+++ b/tensorflow/g3doc/api_docs/python/nn.md
@@ -1631,15 +1631,15 @@ automatically performed.
Creates a recurrent neural network specified by RNNCell `cell`.
-##### The simplest form of RNN network generated is:
-
+The simplest form of RNN network generated is:
+```py
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
-
+```
However, a few other options are available:
An initial state can be provided.
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index 92f77b27b0..ef837af395 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -61,31 +61,37 @@ Then, select the correct binary to install:
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py2-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py2-none-any.whl
+
+# Mac OS X, GPU enabled, Python 2.7:
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py3-none-any.whl
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py3-none-any.whl
+
+# Mac OS X, GPU enabled, Python 3.4 or 3.5:
+$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py3-none-any.whl
```
Install TensorFlow:
@@ -151,31 +157,37 @@ Now, install TensorFlow just as you would for a regular Pip installation. First
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py2-none-any.whl
+
+# Mac OS X, GPU enabled, Python 2.7:
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py3-none-any.whl
+
+# Mac OS X, GPU enabled, Python 3.4 or 3.5:
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py3-none-any.whl
```
Finally install TensorFlow:
@@ -228,6 +240,7 @@ packages needed by TensorFlow.
* Activate the conda environment and install TensorFlow in it.
* After the install you will activate the conda environment each time you
want to use TensorFlow.
+* Optionally install IPython and other packages into the conda environment.
Install Anaconda:
@@ -248,6 +261,7 @@ $ conda create -n tensorflow python=3.5
Activate the environment and use conda or pip to install TensorFlow inside it.
+
### Using conda
A community maintained conda package is available [from conda-forge](https://github.com/conda-forge/tensorflow-feedstock).
@@ -275,31 +289,37 @@ Now, install TensorFlow just as you would for a regular Pip installation. First
```bash
# Ubuntu/Linux 64-bit, CPU only, Python 2.7
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 2.7
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only, Python 2.7:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py2-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py2-none-any.whl
+
+# Mac OS X, GPU enabled, Python 2.7:
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py2-none-any.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.4
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.4
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp34-cp34m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp34-cp34m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, CPU only, Python 3.5
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled, Python 3.5
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp35-cp35m-linux_x86_64.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0rc0-cp35-cp35m-linux_x86_64.whl
# Mac OS X, CPU only, Python 3.4 or 3.5:
-(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/tensorflow-0.9.0-py3-none-any.whl
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.10.0rc0-py3-none-any.whl
+
+# Mac OS X, GPU enabled, Python 3.4 or 3.5:
+(tensorflow)$ export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.10.0rc0-py3-none-any.whl
```
Finally install TensorFlow:
@@ -336,6 +356,19 @@ $ source activate tensorflow
(tensorflow)$ source deactivate
```
+### Install IPython
+
+To use TensorFlow with IPython it may be necessary to install IPython into the tensorflow conda environment:
+
+```bash
+$ source activate tensorflow
+(tensorflow)$ conda install ipython
+```
+
+Similarly, other Python packages like pandas may need to be installed into the tensorflow conda environment
+before they can be used together with TensorFlow.
+
+
## Docker installation
[Docker](http://docker.com/) is a system to build self contained versions of a
@@ -352,7 +385,7 @@ code.
* `gcr.io/tensorflow/tensorflow:latest-devel-gpu`: GPU Binary image plus source
code.
-We also have tags with `latest` replaced by a released version (e.g., `0.9.0-gpu`).
+We also have tags with `latest` replaced by a released version (e.g., `0.10.0rc0-gpu`).
With Docker the installation is as follows:
@@ -594,6 +627,8 @@ which you can install as follows:
$ sudo easy_install ipython
```
+#### Optional: Set up GPU for Mac
+
If you plan to build with GPU support you will need to make sure you have
GNU coreutils installed via homebrew:
@@ -634,6 +669,26 @@ $ sudo mv lib/libcudnn* /Developer/NVIDIA/CUDA-7.5/lib
$ sudo ln -s /Developer/NVIDIA/CUDA-7.5/lib/libcudnn* /usr/local/cuda/lib/
```
+To verify the CUDA installation, you can build and run deviceQuery to make sure
+it passes.
+
+```bash
+$ cp -r /usr/local/cuda/samples ~/cuda-samples
+$ pushd ~/cuda-samples
+$ make
+$ popd
+$ ~/cuda-samples/bin/x86_64/darwin/release/deviceQuery
+```
+
+If you want to compile TensorFlow and have Xcode 7.3 installed, note that
+Xcode 7.3 is not yet compatible with CUDA 7.5. You will need to download Xcode
+7.2 and select it as your default:
+
+```bash
+$ sudo xcode-select -s /Applications/Xcode-7.2/Xcode.app
+```
+
+
### Configure the installation
Run the `configure` script at the root of the tree. The configure script
@@ -719,7 +774,7 @@ $ bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_pack
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
# The name of the .whl file will depend on your platform.
-$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.9.0-py2-none-any.whl
+$ sudo pip install /tmp/tensorflow_pkg/tensorflow-0.10.0rc0-py2-none-any.whl
```
## Setting up TensorFlow for Development
diff --git a/tensorflow/g3doc/how_tos/using_gpu/index.md b/tensorflow/g3doc/how_tos/using_gpu/index.md
index e3e16fa575..47f14a9518 100644
--- a/tensorflow/g3doc/how_tos/using_gpu/index.md
+++ b/tensorflow/g3doc/how_tos/using_gpu/index.md
@@ -58,7 +58,7 @@ within that context will have the same device assignment.
with tf.device('/cpu:0'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
-c = tf.matmul(a, b)
+ c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
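For context, a hedged restatement of the corrected example (the placement log output itself is not reproduced here): with the indentation fix above, the matmul is created inside the `/cpu:0` device scope, and running the session reports where each op was placed.

```py
# Restates the documentation example with the corrected indentation.
import tensorflow as tf

with tf.device('/cpu:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)  # inside the device scope, matching the fix

sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
print(sess.run(c))
```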
diff --git a/tensorflow/python/ops/rnn.py b/tensorflow/python/ops/rnn.py
index ed7412ba9d..48d9cab3e4 100644
--- a/tensorflow/python/ops/rnn.py
+++ b/tensorflow/python/ops/rnn.py
@@ -75,13 +75,14 @@ def rnn(cell, inputs, initial_state=None, dtype=None,
"""Creates a recurrent neural network specified by RNNCell `cell`.
The simplest form of RNN network generated is:
+ ```py
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
-
+ ```
However, a few other options are available:
An initial state can be provided.
diff --git a/tensorflow/python/ops/rnn_cell.py b/tensorflow/python/ops/rnn_cell.py
index 33ae2ee30b..1a6ea6fcec 100644
--- a/tensorflow/python/ops/rnn_cell.py
+++ b/tensorflow/python/ops/rnn_cell.py
@@ -87,6 +87,11 @@ def _state_size_with_prefix(state_size, prefix=None):
class RNNCell(object):
"""Abstract object representing an RNN cell.
+ The definition of cell in this package differs from the definition used in the
+ literature. In the literature, cell refers to an object with a single scalar
+ output. The definition in this package refers to a horizontal array of such
+ units.
+
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
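To make the "horizontal array of units" wording concrete, here is a hypothetical sketch of a cell that follows the interface described in this docstring (the class name and update rule are invented for illustration, not taken from this patch):

```py
# Hypothetical cell: state and output are a whole row of units updated at once.
import tensorflow as tf

class AccumulatorCell(tf.nn.rnn_cell.RNNCell):
    """Adds each input row into the state; the output equals the new state."""

    def __init__(self, num_units):
        self._num_units = num_units

    @property
    def state_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    def __call__(self, inputs, state, scope=None):
        # inputs and state both have shape [batch_size, num_units].
        new_state = state + inputs
        return new_state, new_state
```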
diff --git a/tensorflow/python/platform/tf_logging.py b/tensorflow/python/platform/tf_logging.py
index dd3b380581..8a97ab2c9b 100644
--- a/tensorflow/python/platform/tf_logging.py
+++ b/tensorflow/python/platform/tf_logging.py
@@ -53,7 +53,7 @@ error = _logger.error
fatal = _logger.fatal
info = _logger.info
warn = _logger.warn
-warning = _logger.warn
+warning = _logger.warning
_level_names = {
FATAL: 'FATAL',
diff --git a/tensorflow/python/training/learning_rate_decay.py b/tensorflow/python/training/learning_rate_decay.py
index f24f1f4a08..ef369e9095 100644
--- a/tensorflow/python/training/learning_rate_decay.py
+++ b/tensorflow/python/training/learning_rate_decay.py
@@ -54,7 +54,7 @@ def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
100000, 0.96, staircase=True)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
- tf.GradientDescentOptimizer(learning_rate)
+ tf.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
@@ -195,7 +195,7 @@ def polynomial_decay(learning_rate, global_step, decay_steps,
power=0.5)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
- tf.GradientDescentOptimizer(learning_rate)
+ tf.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
@@ -268,7 +268,7 @@ def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate,
# Passing global_step to minimize() will increment it at each step.
learning_step = (
- tf.GradientDescentOptimizer(learning_rate)
+ tf.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
@@ -327,7 +327,7 @@ def inverse_time_decay(learning_rate, global_step, decay_steps, decay_rate,
# Passing global_step to minimize() will increment it at each step.
learning_step = (
- tf.GradientDescentOptimizer(learning_rate)
+ tf.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
diff --git a/tensorflow/tensorboard/README.md b/tensorflow/tensorboard/README.md
index a53a80eb47..49a1656cdd 100644
--- a/tensorflow/tensorboard/README.md
+++ b/tensorflow/tensorboard/README.md
@@ -54,18 +54,18 @@ work, but there may be bugs or performance issues.
The first step in using TensorBoard is acquiring data from your TensorFlow run.
For this, you need [summary
-ops](https://www.tensorflow.org/versions/r0.9/api_docs/python/train.html#summary-operations).
+ops](https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html#summary-operations).
Summary ops are ops, like
-[`tf.matmul`](https://www.tensorflow.org/versions/r0.9/api_docs/python/math_ops.html#matmul)
+[`tf.matmul`](https://www.tensorflow.org/versions/r0.10/api_docs/python/math_ops.html#matmul)
or
-[`tf.nn.relu`](https://www.tensorflow.org/versions/r0.9/api_docs/python/nn.html#relu),
+[`tf.nn.relu`](https://www.tensorflow.org/versions/r0.10/api_docs/python/nn.html#relu),
which means they take in tensors, produce tensors, and are evaluated from within
a TensorFlow graph. However, summary ops have a twist: the Tensors they produce
contain serialized protobufs, which are written to disk and sent to TensorBoard.
To visualize the summary data in TensorBoard, you should evaluate the summary
op, retrieve the result, and then write that result to disk using a
SummaryWriter. A full explanation, with examples, is in [the
-tutorial](https://www.tensorflow.org/versions/r0.9/how_tos/summaries_and_tensorboard/index.html).
+tutorial](https://www.tensorflow.org/versions/r0.10/how_tos/summaries_and_tensorboard/index.html).
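As a hedged sketch of that evaluate-then-write workflow (the op names, the scalar being summarized, and the log directory are invented for the example), the loop described above looks roughly like:

```py
# Hypothetical sketch of producing summaries and writing them for TensorBoard.
import tensorflow as tf

x = tf.placeholder(tf.float32, name='x')
loss = tf.square(x)
tf.scalar_summary('loss', loss)      # summary op producing a serialized protobuf
merged = tf.merge_all_summaries()

sess = tf.Session()
writer = tf.train.SummaryWriter('/tmp/tensorboard_logs', sess.graph)

for step in range(100):
    summary_str = sess.run(merged, feed_dict={x: float(step)})
    writer.add_summary(summary_str, step)   # write the result to disk
writer.close()
```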
### Tags: Giving names to data
@@ -178,7 +178,7 @@ TensorFlow model. To get best use of the graph visualizer, you should use name
scopes to hierarchically group the ops in your graph - otherwise, the graph may
be difficult to decipher. For more information, including examples, see [the
graph visualizer
-tutorial](https://www.tensorflow.org/versions/r0.9/how_tos/graph_viz/index.html#tensorboard-graph-visualization).
+tutorial](https://www.tensorflow.org/versions/r0.10/how_tos/graph_viz/index.html#tensorboard-graph-visualization).
# Frequently Asked Questions
diff --git a/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu b/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu
index 2ed7a30824..fa74320b1e 100644
--- a/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu
+++ b/tensorflow/tools/ci_build/Dockerfile.debian.jessie.cpu
@@ -10,6 +10,9 @@ RUN /install/install_deb_packages.sh
RUN /install/install_pip_packages.sh
RUN /install/install_bazel.sh
+# Fix a virtualenv install issue specific to Debian Jessie.
+RUN pip install --upgrade virtualenv
+
# Set up bazelrc.
COPY install/.bazelrc /root/.bazelrc
ENV BAZELRC /root/.bazelrc
diff --git a/tensorflow/tools/ci_build/builds/pip.sh b/tensorflow/tools/ci_build/builds/pip.sh
index 5ee57da4b3..8dffbfd2d9 100755
--- a/tensorflow/tools/ci_build/builds/pip.sh
+++ b/tensorflow/tools/ci_build/builds/pip.sh
@@ -106,7 +106,8 @@ fi
PIP_BUILD_TARGET="//tensorflow/tools/pip_package:build_pip_package"
GPU_FLAG=""
-if [[ ${CONTAINER_TYPE} == "cpu" ]]; then
+if [[ ${CONTAINER_TYPE} == "cpu" ]] || \
+ [[ ${CONTAINER_TYPE} == "debian.jessie.cpu" ]]; then
bazel build -c opt ${MAVX_FLAG} ${PIP_BUILD_TARGET} || \
die "Build failed."
elif [[ ${CONTAINER_TYPE} == "gpu" ]]; then
diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh
index b231a9c202..73464ffc04 100755
--- a/tensorflow/tools/ci_build/ci_parameterized_build.sh
+++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh
@@ -191,7 +191,7 @@ if [[ -z "$(which docker)" ]]; then
fi
# Process container type
-if [[ ${CTYPE} == "cpu" ]]; then
+if [[ ${CTYPE} == "cpu" ]] || [[ ${CTYPE} == "debian.jessie.cpu" ]]; then
:
elif [[ ${CTYPE} == "gpu" ]]; then
OPT_FLAG="${OPT_FLAG} --config=cuda"
@@ -298,7 +298,9 @@ if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]] ||
BAZEL_TARGET=${TF_BUILD_BAZEL_TARGET}
fi
- if [[ ${CTYPE} == "cpu" ]] || [[ ${CTYPE} == "gpu" ]]; then
+ if [[ ${CTYPE} == "cpu" ]] || \
+ [[ ${CTYPE} == "debian.jessie.cpu" ]] || \
+ [[ ${CTYPE} == "gpu" ]]; then
# Run Bazel
NO_PIP_MAIN_CMD="${MAIN_CMD} ${BAZEL_CMD} ${OPT_FLAG} "\
"${EXTRA_ARGS} ${BAZEL_TARGET}"
diff --git a/tensorflow/tools/dist_test/Dockerfile b/tensorflow/tools/dist_test/Dockerfile
index 66787ca7f8..f39046252b 100644
--- a/tensorflow/tools/dist_test/Dockerfile
+++ b/tensorflow/tools/dist_test/Dockerfile
@@ -20,7 +20,7 @@ RUN /var/gcloud/google-cloud-sdk/bin/gcloud components install kubectl
# Install nightly TensorFlow pip
# TODO(cais): Should we build it locally instead?
RUN pip install \
- http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
+ http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Copy test files
COPY scripts /var/tf-dist-test/scripts
diff --git a/tensorflow/tools/dist_test/server/Dockerfile b/tensorflow/tools/dist_test/server/Dockerfile
index c3bf751735..68bacefaca 100644
--- a/tensorflow/tools/dist_test/server/Dockerfile
+++ b/tensorflow/tools/dist_test/server/Dockerfile
@@ -36,7 +36,7 @@ RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
# Install TensorFlow CPU version from nightly build
RUN pip --no-cache-dir install \
- http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
+ http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Copy files, including the GRPC server binary at
# server/grpc_tensorflow_server.py
diff --git a/tensorflow/tools/dist_test/server/Dockerfile.test b/tensorflow/tools/dist_test/server/Dockerfile.test
index de4411a05c..f0895acc5e 100644
--- a/tensorflow/tools/dist_test/server/Dockerfile.test
+++ b/tensorflow/tools/dist_test/server/Dockerfile.test
@@ -42,7 +42,7 @@ RUN pip install --upgrade pandas==0.18.1
# Install TensorFlow CPU version.
RUN pip --no-cache-dir install \
- http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.9.0-cp27-none-linux_x86_64.whl
+ http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.10.0rc0-cp27-none-linux_x86_64.whl
# Copy files, including the GRPC server binary at
# server/grpc_tensorflow_server.py
diff --git a/tensorflow/tools/docker/Dockerfile b/tensorflow/tools/docker/Dockerfile
index 31c3cd4d30..3bdebd69b9 100644
--- a/tensorflow/tools/docker/Dockerfile
+++ b/tensorflow/tools/docker/Dockerfile
@@ -32,7 +32,7 @@ RUN pip --no-cache-dir install \
&& \
python -m ipykernel.kernelspec
-ENV TENSORFLOW_VERSION 0.9.0
+ENV TENSORFLOW_VERSION 0.10.0rc0
# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
diff --git a/tensorflow/tools/docker/Dockerfile.devel b/tensorflow/tools/docker/Dockerfile.devel
index 5e8693525b..4f994bdbc8 100644
--- a/tensorflow/tools/docker/Dockerfile.devel
+++ b/tensorflow/tools/docker/Dockerfile.devel
@@ -81,7 +81,7 @@ RUN mkdir /bazel && \
RUN git clone --recursive https://github.com/tensorflow/tensorflow.git && \
cd tensorflow && \
- git checkout r0.9
+ git checkout r0.10
WORKDIR /tensorflow
# TODO(craigcitro): Don't install the pip package, since it makes it
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu b/tensorflow/tools/docker/Dockerfile.devel-gpu
index 2be630b48c..e9081d5502 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu
@@ -80,9 +80,9 @@ RUN mkdir /bazel && \
# Download and build TensorFlow.
-RUN git clone -b r0.9 --recursive --recurse-submodules https://github.com/tensorflow/tensorflow.git && \
+RUN git clone -b r0.10 --recursive --recurse-submodules https://github.com/tensorflow/tensorflow.git && \
cd tensorflow && \
- git checkout r0.9
+ git checkout r0.10
WORKDIR /tensorflow
# Configure the build for our CUDA configuration.
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index db91720cd9..e08ef1aa75 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -32,7 +32,7 @@ RUN pip --no-cache-dir install \
&& \
python -m ipykernel.kernelspec
-ENV TENSORFLOW_VERSION 0.9.0
+ENV TENSORFLOW_VERSION 0.10.0rc0
# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
diff --git a/tensorflow/tools/docker/parameterized_docker_build.sh b/tensorflow/tools/docker/parameterized_docker_build.sh
index 5a1324e09f..bfae655076 100755
--- a/tensorflow/tools/docker/parameterized_docker_build.sh
+++ b/tensorflow/tools/docker/parameterized_docker_build.sh
@@ -179,8 +179,10 @@ if [[ "${DO_PIP_BUILD}" == "1" ]]; then
export TF_BUILD_IS_OPT="OPT"
export TF_BUILD_IS_PIP="PIP"
- export TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS=\
-"-e TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2"
+ if [[ "${TF_DOCKER_BUILD_TYPE}" == "gpu" ]]; then
+ export TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS=\
+"${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS} -e TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2"
+ fi
pushd "${SCRIPT_DIR}/../../../"
rm -rf pip_test/whl &&
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index b3787c0edc..1fda9fd49f 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -27,7 +27,7 @@ from setuptools import find_packages, setup, Command, Extension
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
-_VERSION = '0.9.0'
+_VERSION = '0.10.0rc0'
numpy_version = "1.8.2"
if platform.system() == "Darwin":
diff --git a/third_party/gpus/crosstool/CROSSTOOL b/third_party/gpus/crosstool/CROSSTOOL
index 8db81a9603..f72bb9321a 100644
--- a/third_party/gpus/crosstool/CROSSTOOL
+++ b/third_party/gpus/crosstool/CROSSTOOL
@@ -18,6 +18,10 @@ default_toolchain {
cpu: "darwin"
toolchain_identifier: "local_darwin"
}
+default_toolchain {
+ cpu: "ppc"
+ toolchain_identifier: "local_linux"
+}
toolchain {
abi_version: "local"