aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
-rw-r--r--ISSUE_TEMPLATE.md45
-rw-r--r--README.md12
-rw-r--r--RELEASE.md52
-rw-r--r--tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py40
-rw-r--r--tensorflow/contrib/cmake/README.md4
-rw-r--r--tensorflow/contrib/distributions/__init__.py13
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_linear_operator_test.py8
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_test.py74
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/bijector_test.py8
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/chain_test.py25
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/cholesky_outer_product_test.py4
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/conditional_bijector_test.py4
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/exp_test.py15
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/identity_test.py10
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py10
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py4
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/power_transform_test.py15
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_centered_test.py4
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_test.py21
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/softmax_centered_test.py16
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/bijectors/softplus_test.py25
-rw-r--r--tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py4
-rw-r--r--tensorflow/contrib/distributions/python/ops/bijectors/__init__.py4
-rw-r--r--tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py6
-rw-r--r--tensorflow/contrib/distributions/python/ops/transformed_distribution.py76
-rw-r--r--tensorflow/contrib/distributions/python/ops/vector_student_t.py2
-rw-r--r--tensorflow/contrib/keras/python/keras/backend.py20
-rw-r--r--tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py1
-rw-r--r--tensorflow/contrib/learn/python/learn/learn_io/generator_io.py5
-rw-r--r--tensorflow/contrib/slim/README.md8
-rw-r--r--tensorflow/core/kernels/conv_grad_filter_ops.cc11
-rw-r--r--tensorflow/core/kernels/conv_grad_input_ops.cc12
-rw-r--r--tensorflow/core/kernels/maxpooling_op_gpu.cu.cc4
-rw-r--r--tensorflow/core/kernels/xsmm_conv2d.cc16
-rw-r--r--tensorflow/core/ops/ops.pbtxt53
-rw-r--r--tensorflow/core/public/version.h4
-rw-r--r--tensorflow/docs_src/get_started/get_started.md1
-rw-r--r--tensorflow/docs_src/install/install_c.md2
-rw-r--r--tensorflow/docs_src/install/install_go.md2
-rw-r--r--tensorflow/docs_src/install/install_java.md16
-rw-r--r--tensorflow/docs_src/install/install_linux.md26
-rw-r--r--tensorflow/docs_src/install/install_mac.md14
-rw-r--r--tensorflow/docs_src/install/install_sources.md4
-rw-r--r--tensorflow/docs_src/install/install_windows.md4
-rw-r--r--tensorflow/java/README.md40
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/Shape.java27
-rw-r--r--tensorflow/java/src/main/native/operation_jni.h1
-rw-r--r--tensorflow/python/framework/tensor_util_test.py28
-rw-r--r--tensorflow/python/kernel_tests/pooling_ops_test.py8
-rw-r--r--tensorflow/python/ops/nn_grad.py11
-rw-r--r--tensorflow/python/ops/nn_ops.py2
-rw-r--r--tensorflow/stream_executor/cuda/cuda_dnn.cc3
-rw-r--r--tensorflow/tensorboard/plugins/debugger/BUILD1
-rwxr-xr-xtensorflow/tools/ci_build/update_version.sh21
-rw-r--r--tensorflow/tools/docker/Dockerfile2
-rw-r--r--tensorflow/tools/docker/Dockerfile.gpu2
-rw-r--r--tensorflow/tools/graph_transforms/quantize_nodes.cc2
-rw-r--r--tensorflow/tools/pip_package/setup.py4
-rw-r--r--tensorflow/workspace.bzl8
59 files changed, 482 insertions, 382 deletions
diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md
index af76188c2f..50f67963bf 100644
--- a/ISSUE_TEMPLATE.md
+++ b/ISSUE_TEMPLATE.md
@@ -1,36 +1,15 @@
-NOTE: Only file GitHub issues for bugs and feature requests. All other topics will be closed.
+NOTE: Issues that are not bugs or feature requests will be closed. Please ask usage questions on StackOverflow.
-For general support from the community, see [StackOverflow](https://stackoverflow.com/questions/tagged/tensorflow).
-To make bugs and feature requests more easy to find and organize, we close issues that are deemed
-out of scope for GitHub Issues and point people to StackOverflow.
+### You must complete this information or else your issue will be closed
+- *Have I written custom code (as opposed to using a stock example script provided in TensorFlow)?*:
+- *TensorFlow installed from (source or binary)?*:
+- *TensorFlow version*:
+- *Bazel version (if compiling from source)*:
+- *CUDA/cuDNN version*:
+- *GPU Model and Memory*:
+- *Exact command to reproduce*:
-For bugs or installation issues, please provide the following information.
-The more information you provide, the more easily we will be able to offer
-help and advice.
+### Describe the problem clearly
-### What related GitHub issues or StackOverflow threads have you found by searching the web for your problem?
-
-### Environment info
-Operating System:
-
-Installed version of CUDA and cuDNN:
-(please attach the output of `ls -l /path/to/cuda/lib/libcud*`):
-
-If installed from binary pip package, provide:
-
-1. A link to the pip package you installed:
-2. The output from `python -c "import tensorflow; print(tensorflow.__version__)"`.
-
-If installed from source, provide
-
-1. The commit hash (`git rev-parse HEAD`)
-2. The output of `bazel version`
-
-### If possible, provide a minimal reproducible example (We usually don't have time to read hundreds of lines of your code)
-
-
-### What other attempted solutions have you tried?
-
-
-### Logs or other output that would be helpful
-(If logs are large, please upload as attachment or provide link).
+### Source Code / Logs
+Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem.
diff --git a/README.md b/README.md
index d9f05a67e0..cd0bffde79 100644
--- a/README.md
+++ b/README.md
@@ -34,12 +34,12 @@ and discussion.**
People who are a little more adventurous can also try our nightly binaries:
-* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
-* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
-* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
-* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
-* Windows CPU-only: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=cpu,OS=windows/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow-1.1.0rc0-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=cpu,OS=windows/))
-* Windows GPU: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=gpu,OS=windows/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow_gpu-1.1.0rc0-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=gpu,OS=windows/))
+* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
+* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
+* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.1.0rc1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
+* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.1.0rc1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
+* Windows CPU-only: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=cpu,OS=windows/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow-1.1.0rc1-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=cpu,OS=windows/))
+* Windows GPU: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=gpu,OS=windows/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow_gpu-1.1.0rc1-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/DEVICE=gpu,OS=windows/))
* Android: [demo APK](https://ci.tensorflow.org/view/Nightly/job/nightly-android/lastSuccessfulBuild/artifact/out/tensorflow_demo.apk), [native libs](http://ci.tensorflow.org/view/Nightly/job/nightly-android/lastSuccessfulBuild/artifact/out/native/)
([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-android/))
diff --git a/RELEASE.md b/RELEASE.md
index 156cc2e3af..ebb5c28451 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -15,6 +15,7 @@
* Ability to inspect Python source file against TF ops and tensors (command `print_source` / `ps`)
* New navigation bar in Curses-based UI
* NodeStepper (command `invoke_stepper`) now uses intermediate tensor dumps. It also uses `TensorHandles` as direct feeds during successive `cont` calls for improved performance and reduced memory consumption.
+* Initial release of installation guides for Java, C, and Go.
## Deprecations
@@ -68,38 +69,39 @@
* Multiple tfdbg bug fixes:
* Fixed Windows compatibility issues.
* Command history now persists across runs.
+ * Bug fix in graph validation related to `tf.while_loops`.
+* Java Maven fixes for bugs with Windows installation.
## Thanks to our Contributors
This release contains contributions from many people at Google, as well as:
A. Besir Kurtulmus, Adal Chiriliuc, @akash, Alec-Desouza, Alex Rothberg, Alex
-Sergeev, Alexander Heinecke, Allen Guo, Andreas Madsen, Ankesh Anand, Anton
+Sergeev, Alexander Heinecke, Allen Guo, Andreas Madsen, Ankesh Anand, Anton
Loss, @Aravind, @Arie, Ashutosh Das, AuréLien Geron, Bairen Yi, @bakunyo, Ben
-Visser, Brady Zhou, Calpa Liu, Changming Sun, Chi Zeng, Chih Cheng Liang,
-Christopher Berner, Clark Zinzow, @Conchylicultor, Courtial Florian, Dan Ellis,
-Dan J, Dan Jarvis, Daniel Ylitalo, Darren Garvey, David Norman, David Truong,
-@DavidNorman, Dimitar Pavlov, Dmitry Persiyanov, @Eddie, @elirex, Erfan
-Noury, Eron Wright, Evgeny Mazovetskiy, Fabrizio (Misto) Milo, @fanlu, Fisher
-Coder, Franck Dernoncourt, Gagan Goel, Gao, Xiang, @Gautam, Gefu Tang,
-@guilherme, @guschmue, Hannah Provenza, Hans Pabst, @hartb, Hsiao Yi, Huazuo
-Gao, Igor ChorążEwicz, Ivan Smirnov, Jakub Kolodziejczyk, Jason Gavris, Jason
-Morton, Jay Young, Jayaram Bobba, Jeremy Sawruk, Jiaming Liu, Jihun Choi,
-@jiqiu, Joan Thibault, John C F, Jojy G Varghese, Jon Malmaud, Julian Berman,
-Julian Niedermeier, Junpeng Lao, Kai Sasaki, @Kankroc, Karl Lessard, Kyle
-Bostelmann, @Lezcano, Li Yi, Luo Yun, @lurker, Mahmoud-Abuzaina, Mandeep Singh,
-Marek Kolodziej, Mark Szepieniec, Martial Hue, Medhat Omr, Memo Akten, Michael
-Gharbi, MichaëL Defferrard, Milan Straka, @MircoT, @mlucool, Muammar Ibn Faisal,
-Nayana Thorat, @nghiattran, Nicholas Connor, Nikolaas Steenbergen, Niraj Patel,
-Niranjan Hasabnis, @Panmari, Pavel Bulanov, Philip Pries Henningsen, Philipp
-Jund, @polonez, Prayag Verma, Rahul Kavi, Raphael Gontijo Lopes, @rasbt, Raven
-Iqqe, Reid Pryzant, Richard Shin, Rizwan Asif, Russell Kaplan, Ryo Asakura,
-RüDiger Busche, Saisai Shao, Sam Abrahams, @sanosay, Sean Papay, @seaotterman,
-@selay01, Shaurya Sharma, Sriram Narayanamoorthy, Stefano Probst, @taknevski,
-@tbonza, @teldridge11, Yuan (Terry) Tang, Tim Anglade, Tomas Reimers, Tomer Gafner,
-Valentin Iovene, Vamsi Sripathi, Viktor Malyi, Vit Stepanovs, Vivek Rane, Vlad
-Firoiu, @wangg12, @will, Xiaoyu Tao, Yaroslav Bulatov, Yuan (Terry) Tang,
-@Yufeng, Yuming Wang, Yuxin Wu, Zafar Takhirov, Ziming Dong
+Visser, Brady Zhou, Calpa Liu, Changming Sun, Chih Cheng Liang, Christopher
+Berner, Clark Zinzow, @Conchylicultor, Dan Ellis, Dan J, Dan Jarvis, Daniel
+Ylitalo, Darren Garvey, David Norman, David Truong, @DavidNorman, Dimitar
+Pavlov, Dmitry Persiyanov, @Eddie, @elirex, Erfan Noury, Eron Wright, Evgeny
+Mazovetskiy, Fabrizio (Misto) Milo, @fanlu, Fisher Coder, Florian Courtial,
+Franck Dernoncourt, Gagan Goel, Gao, Xiang, @Gautam, Gefu Tang, @guilherme,
+@guschmue, Hannah Provenza, Hans Pabst, @hartb, Hsiao Yi, Huazuo Gao, Igor
+ChorążEwicz, Ivan Smirnov, Jakub Kolodziejczyk, Jason Gavris, Jason Morton, Jay
+Young, Jayaram Bobba, Jeremy Sawruk, Jiaming Liu, Jihun Choi, @jiqiu, Joan Thibault,
+John C F, Jojy George Varghese, Jon Malmaud, Julian Berman, Julian Niedermeier,
+Junpeng Lao, Kai Sasaki, @Kankroc, Karl Lessard, Kyle Bostelmann, @Lezcano, Li
+Yi, Luo Yun, @lurker, Mahmoud-Abuzaina, Mandeep Singh, Marek Kolodziej, Mark
+Szepieniec, Martial Hue, Medhat Omr, Memo Akten, Michael Gharbi, MichaëL Defferrard,
+Milan Straka, @MircoT, @mlucool, Muammar Ibn Faisal, Nayana Thorat, @nghiattran,
+Nicholas Connor, Nikolaas Steenbergen, Niraj Patel, Niranjan Hasabnis, @Panmari,
+Pavel Bulanov, Philip Pries Henningsen, Philipp Jund, @polonez, Prayag Verma, Rahul
+Kavi, Raphael Gontijo Lopes, @rasbt, Raven Iqqe, Reid Pryzant, Richard Shin, Rizwan
+Asif, Russell Kaplan, Ryo Asakura, RüDiger Busche, Saisai Shao, Sam Abrahams, @sanosay,
+Sean Papay, @seaotterman, @selay01, Shaurya Sharma, Sriram Narayanamoorthy, Stefano
+Probst, @taknevski, @tbonza, @teldridge11, Tim Anglade, Tomas Reimers, Tomer Gafner,
+Valentin Iovene, Vamsi Sripathi, Viktor Malyi, Vit Stepanovs, Vivek Rane, Vlad Firoiu,
+@wangg12, @will, Xiaoyu Tao, Yaroslav Bulatov, Yi Liu, Yuan (Terry) Tang, @Yufeng,
+Yuming Wang, Yuxin Wu, Zafar Takhirov, Ziming Dong
We are also grateful to all who filed issues or helped resolve them, asked and
answered questions, and were part of inspiring discussions.
diff --git a/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py b/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py
index 5ce64cbe3d..81e40dbe5e 100644
--- a/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py
+++ b/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py
@@ -19,16 +19,16 @@ from __future__ import division
from __future__ import print_function
import numpy as np
-
+from tensorflow.contrib import distributions as distributions_lib
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor_impl
-from tensorflow.contrib.distributions.python.ops import normal
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
+distributions = distributions_lib
sge = stochastic_gradient_estimators
st = stochastic_tensor_impl
@@ -42,20 +42,20 @@ class StochasticTensorTest(test.TestCase):
sigma2 = constant_op.constant([0.1, 0.2, 0.3])
prior_default = st.StochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma))
self.assertTrue(isinstance(prior_default.value_type, st.SampleValue))
prior_0 = st.StochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma),
dist_value_type=st.SampleValue())
self.assertTrue(isinstance(prior_0.value_type, st.SampleValue))
with st.value_type(st.SampleValue()):
- prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
+ prior = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior.value_type, st.SampleValue))
likelihood = st.StochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=prior, scale=sigma2))
self.assertTrue(isinstance(likelihood.value_type, st.SampleValue))
@@ -85,7 +85,7 @@ class StochasticTensorTest(test.TestCase):
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.MeanValue()):
- prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
+ prior = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior.value_type, st.MeanValue))
prior_mean = prior.mean()
@@ -102,7 +102,7 @@ class StochasticTensorTest(test.TestCase):
with st.value_type(st.SampleValue()):
prior_single = st.StochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma))
prior_single_value = prior_single.value()
@@ -113,7 +113,7 @@ class StochasticTensorTest(test.TestCase):
with st.value_type(st.SampleValue(1)):
prior_single = st.StochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma))
self.assertTrue(isinstance(prior_single.value_type, st.SampleValue))
@@ -125,7 +125,7 @@ class StochasticTensorTest(test.TestCase):
with st.value_type(st.SampleValue(2)):
prior_double = st.StochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma))
prior_double_value = prior_double.value()
@@ -139,10 +139,10 @@ class StochasticTensorTest(test.TestCase):
mu = [0.0, -1.0, 1.0]
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.MeanValue()):
- prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
+ prior = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
entropy = prior.entropy()
deep_entropy = prior.distribution.entropy()
- expected_deep_entropy = normal.Normal(
+ expected_deep_entropy = distributions.Normal(
loc=mu, scale=sigma).entropy()
entropies = sess.run([entropy, deep_entropy, expected_deep_entropy])
self.assertAllEqual(entropies[2], entropies[0])
@@ -155,7 +155,7 @@ class StochasticTensorTest(test.TestCase):
# With default
with st.value_type(st.MeanValue(stop_gradient=True)):
- dt = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
+ dt = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
loss = dt.loss([constant_op.constant(2.0)])
self.assertTrue(loss is not None)
self.assertAllClose(
@@ -163,7 +163,7 @@ class StochasticTensorTest(test.TestCase):
# With passed-in loss_fn.
dt = st.StochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma),
dist_value_type=st.MeanValue(stop_gradient=True),
loss_fn=sge.get_score_function_with_constant_baseline(
@@ -199,7 +199,7 @@ class ObservedStochasticTensorTest(test.TestCase):
sigma = constant_op.constant([1.1, 1.2, 1.3])
obs = array_ops.zeros((2, 3))
z = st.ObservedStochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma), value=obs)
[obs_val, z_val] = sess.run([obs, z.value()])
self.assertAllEqual(obs_val, z_val)
@@ -212,14 +212,14 @@ class ObservedStochasticTensorTest(test.TestCase):
sigma = array_ops.placeholder(dtypes.float32)
obs = array_ops.placeholder(dtypes.float32)
z = st.ObservedStochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma), value=obs)
mu2 = array_ops.placeholder(dtypes.float32, shape=[None])
sigma2 = array_ops.placeholder(dtypes.float32, shape=[None])
obs2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
z2 = st.ObservedStochasticTensor(
- normal.Normal(
+ distributions.Normal(
loc=mu2, scale=sigma2), value=obs2)
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
@@ -231,19 +231,19 @@ class ObservedStochasticTensorTest(test.TestCase):
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma),
value=array_ops.zeros((3,)))
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma),
value=array_ops.zeros((3, 1)))
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
- normal.Normal(
+ distributions.Normal(
loc=mu, scale=sigma),
value=array_ops.zeros(
(1, 2), dtype=dtypes.int32))
diff --git a/tensorflow/contrib/cmake/README.md b/tensorflow/contrib/cmake/README.md
index af949f79fa..14d99a30cd 100644
--- a/tensorflow/contrib/cmake/README.md
+++ b/tensorflow/contrib/cmake/README.md
@@ -68,8 +68,8 @@ bindings.
- [Anaconda 4.1.1 (Python 3.5 64-bit)](https://www.continuum.io/downloads)
- [Git for Windows version 2.9.2.windows.1](https://git-scm.com/download/win)
- [swigwin-3.0.10](http://www.swig.org/download.html)
- - [NVidia CUDA Toolkit 8.0] (https://developer.nvidia.com/cuda-downloads)
- - [NVidia CUDNN 5.1] (https://developer.nvidia.com/cudnn)
+ - [NVidia CUDA Toolkit 8.0](https://developer.nvidia.com/cuda-downloads)
+ - [NVidia CUDNN 5.1](https://developer.nvidia.com/cudnn)
- [CMake 3.6](https://cmake.org/files/v3.6/cmake-3.6.3-win64-x64.msi)
* Ubuntu 14.04
diff --git a/tensorflow/contrib/distributions/__init__.py b/tensorflow/contrib/distributions/__init__.py
index 257aefa857..470b9edb79 100644
--- a/tensorflow/contrib/distributions/__init__.py
+++ b/tensorflow/contrib/distributions/__init__.py
@@ -90,9 +90,10 @@ from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member
-from tensorflow.contrib.distributions.python.ops import bijectors
+
from tensorflow.contrib.distributions.python.ops.bernoulli import *
from tensorflow.contrib.distributions.python.ops.beta import *
+from tensorflow.contrib.distributions.python.ops.bijectors import *
from tensorflow.contrib.distributions.python.ops.binomial import *
from tensorflow.contrib.distributions.python.ops.categorical import *
from tensorflow.contrib.distributions.python.ops.chi2 import *
@@ -134,12 +135,8 @@ from tensorflow.contrib.distributions.python.ops.wishart import *
from tensorflow.python.util.all_util import remove_undocumented
-_allowed_symbols = [
- 'bijectors',
- 'ConditionalDistribution',
- 'ConditionalTransformedDistribution',
- 'FULLY_REPARAMETERIZED',
- 'NOT_REPARAMETERIZED',
-]
+_allowed_symbols = ['ConditionalDistribution',
+ 'ConditionalTransformedDistribution',
+ 'FULLY_REPARAMETERIZED', 'NOT_REPARAMETERIZED']
remove_undocumented(__name__, _allowed_symbols)
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_linear_operator_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_linear_operator_test.py
index 0738754b21..d4f83567e4 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_linear_operator_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_linear_operator_test.py
@@ -21,7 +21,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg
-from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import AffineLinearOperator
+from tensorflow.contrib.distributions.python.ops.bijectors import affine_linear_operator as affine_linear_operator_lib
from tensorflow.python.platform import test
@@ -29,7 +29,7 @@ class AffineLinearOperatorTest(test.TestCase):
def testIdentity(self):
with self.test_session():
- affine = AffineLinearOperator(
+ affine = affine_linear_operator_lib.AffineLinearOperator(
validate_args=True)
x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
y = x
@@ -48,7 +48,7 @@ class AffineLinearOperatorTest(test.TestCase):
diag = np.array([[1, 2, 3],
[2, 5, 6]], dtype=np.float32)
scale = linalg.LinearOperatorDiag(diag, is_non_singular=True)
- affine = AffineLinearOperator(
+ affine = affine_linear_operator_lib.AffineLinearOperator(
shift=shift, scale=scale, validate_args=True)
x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
@@ -73,7 +73,7 @@ class AffineLinearOperatorTest(test.TestCase):
[4, 3, 2]]],
dtype=np.float32)
scale = linalg.LinearOperatorTriL(tril, is_non_singular=True)
- affine = AffineLinearOperator(
+ affine = affine_linear_operator_lib.AffineLinearOperator(
shift=shift, scale=scale, validate_args=True)
x = np.array([[[1, 0, -1],
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_test.py
index 295e64a0d8..71460a1769 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_test.py
@@ -22,8 +22,8 @@ import itertools
import numpy as np
-from tensorflow.contrib.distributions.python.ops.bijectors.affine import Affine
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_scalar_congruency
+from tensorflow.contrib.distributions.python.ops.bijectors import affine as affine_lib
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@@ -36,7 +36,7 @@ class AffineBijectorTest(test.TestCase):
with self.test_session():
mu = -1.
# scale corresponds to 1.
- bijector = Affine(shift=mu, event_ndims=0)
+ bijector = affine_lib.Affine(shift=mu, event_ndims=0)
self.assertEqual("affine", bijector.name)
def testNoBatchScalarViaIdentity(self):
@@ -53,7 +53,7 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu, scale_identity_multiplier=2., event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1., 2, 3] # Three scalar samples (no batches).
@@ -76,7 +76,7 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
- bijector = Affine(shift=mu, scale_diag=[2.], event_ndims=0)
+ bijector = affine_lib.Affine(shift=mu, scale_diag=[2.], event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
@@ -98,7 +98,7 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2.
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu, scale_identity_multiplier=2., event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [[1., 2, 3], [4, 5, 6]] # Weird sample shape.
@@ -126,7 +126,7 @@ class AffineBijectorTest(test.TestCase):
mu = [1.]
# One batch, scalar.
# Corresponds to scale = 1.
- bijector = Affine(shift=mu, event_ndims=0)
+ bijector = affine_lib.Affine(shift=mu, event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1.] # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
@@ -148,7 +148,7 @@ class AffineBijectorTest(test.TestCase):
mu = [1.]
# One batch, scalar.
# Corresponds to scale = 1.
- bijector = Affine(shift=mu, scale_diag=[1.], event_ndims=0)
+ bijector = affine_lib.Affine(shift=mu, scale_diag=[1.], event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1.] # One sample from one batches.
self.assertAllClose([2.], run(bijector.forward, x))
@@ -170,7 +170,7 @@ class AffineBijectorTest(test.TestCase):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
- bijector = Affine(shift=mu, event_ndims=0)
+ bijector = affine_lib.Affine(shift=mu, event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
@@ -192,7 +192,7 @@ class AffineBijectorTest(test.TestCase):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
- bijector = Affine(shift=mu, scale_diag=[1.], event_ndims=0)
+ bijector = affine_lib.Affine(shift=mu, scale_diag=[1.], event_ndims=0)
self.assertEqual(0, bijector.event_ndims.eval()) # "is scalar"
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
@@ -214,7 +214,7 @@ class AffineBijectorTest(test.TestCase):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[1., 0], [0, 1.]]
- bijector = Affine(shift=mu)
+ bijector = affine_lib.Affine(shift=mu)
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 1]
# matmul(sigma, x) + shift
@@ -245,7 +245,7 @@ class AffineBijectorTest(test.TestCase):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[2., 0], [0, 1.]]
- bijector = Affine(shift=mu, scale_diag=[2., 1])
+ bijector = affine_lib.Affine(shift=mu, scale_diag=[2., 1])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 1]
# matmul(sigma, x) + shift
@@ -287,7 +287,7 @@ class AffineBijectorTest(test.TestCase):
event_ndims: event_ndims_value
}
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu, scale_diag=scale_diag, event_ndims=event_ndims)
self.assertEqual(1, sess.run(bijector.event_ndims, feed_dict))
self.assertAllClose([[3., 1]], sess.run(bijector.forward(x), feed_dict))
@@ -311,7 +311,7 @@ class AffineBijectorTest(test.TestCase):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale = 2.
- bijector = Affine(shift=mu, scale_identity_multiplier=scale)
+ bijector = affine_lib.Affine(shift=mu, scale_identity_multiplier=scale)
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
@@ -334,7 +334,7 @@ class AffineBijectorTest(test.TestCase):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale_diag = [[2., 2]]
- bijector = Affine(shift=mu, scale_diag=scale_diag)
+ bijector = affine_lib.Affine(shift=mu, scale_diag=scale_diag)
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
@@ -361,7 +361,7 @@ class AffineBijectorTest(test.TestCase):
event_ndims: event_ndims_value
}
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu, scale_diag=scale_diag, event_ndims=event_ndims)
self.assertEqual(1, sess.run(bijector.event_ndims, feed_dict))
self.assertAllClose([[[3., 1]]], sess.run(bijector.forward(x), feed_dict))
@@ -384,7 +384,7 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu,
scale_identity_multiplier=1.,
scale_diag=[1.],
@@ -410,7 +410,7 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 2]]
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu,
scale_identity_multiplier=1.,
scale_tril=[[1., 0], [2., 1]])
@@ -435,7 +435,7 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 3]]
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu, scale_diag=[1., 2.], scale_tril=[[1., 0], [2., 1]])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [[1., 2]] # One multivariate sample.
@@ -458,7 +458,7 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[3., 0], [2, 4]]
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu,
scale_identity_multiplier=1.0,
scale_diag=[1., 2.],
@@ -484,14 +484,14 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 2, 0], [0, 0, 3]]
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu,
scale_identity_multiplier=2.,
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0],
[0., 0],
[0, 1]])
- bijector_ref = Affine(shift=mu, scale_diag=[10., 2, 3])
+ bijector_ref = affine_lib.Affine(shift=mu, scale_diag=[10., 2, 3])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 2, 3] # Vector.
@@ -522,14 +522,14 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 3, 0], [0, 0, 5]]
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu,
scale_diag=[2., 3, 4],
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0],
[0., 0],
[0, 1]])
- bijector_ref = Affine(shift=mu, scale_diag=[10., 3, 5])
+ bijector_ref = affine_lib.Affine(shift=mu, scale_diag=[10., 3, 5])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
x = [1., 2, 3] # Vector.
@@ -559,7 +559,7 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [1, 3, 0], [2, 3, 5]]
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu,
scale_tril=[[2., 0, 0],
[1, 3, 0],
@@ -568,7 +568,7 @@ class AffineBijectorTest(test.TestCase):
scale_perturb_factor=[[2., 0],
[0., 0],
[0, 1]])
- bijector_ref = Affine(
+ bijector_ref = affine_lib.Affine(
shift=mu, scale_tril=[[10., 0, 0],
[1, 3, 0],
[2, 3, 5]])
@@ -601,12 +601,12 @@ class AffineBijectorTest(test.TestCase):
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[6, 0, 0], [1, 3, 0], [2, 3, 5]]
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu,
scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
scale_perturb_diag=None,
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
- bijector_ref = Affine(
+ bijector_ref = affine_lib.Affine(
shift=mu, scale_tril=[[6., 0, 0], [1, 3, 0], [2, 3, 5]])
self.assertEqual(1, bijector.event_ndims.eval()) # "is vector"
@@ -626,7 +626,7 @@ class AffineBijectorTest(test.TestCase):
def testNoBatchMultivariateRaisesWhenSingular(self):
with self.test_session():
mu = [1., -1]
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu,
# Has zero on the diagonal.
scale_diag=[0., 1],
@@ -638,14 +638,14 @@ class AffineBijectorTest(test.TestCase):
with self.test_session():
mu = [1., -1]
# Scale corresponds to 2x2 identity matrix.
- bijector = Affine(shift=mu, event_ndims=2, validate_args=True)
+ bijector = affine_lib.Affine(shift=mu, event_ndims=2, validate_args=True)
bijector.forward([1., 1.]).eval()
def testScaleZeroScalarRaises(self):
with self.test_session():
mu = -1.
# Check Identity matrix with zero scaling.
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu,
scale_identity_multiplier=0.0,
event_ndims=0,
@@ -654,16 +654,16 @@ class AffineBijectorTest(test.TestCase):
bijector.forward(1.).eval()
# Check Diag matrix with zero scaling.
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=mu, scale_diag=[0.0], event_ndims=0, validate_args=True)
with self.assertRaisesOpError("Condition x > 0"):
bijector.forward(1.).eval()
def testScalarCongruency(self):
with self.test_session():
- bijector = Affine(
+ bijector = affine_lib.Affine(
shift=3.6, scale_identity_multiplier=0.42, event_ndims=0)
- assert_scalar_congruency(
+ bijector_test_util.assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def _makeScale(self,
@@ -743,9 +743,9 @@ class AffineBijectorTest(test.TestCase):
# We haven't specified enough information for the scale.
if scale is None:
with self.assertRaisesRegexp(ValueError, ("must be specified.")):
- bijector = Affine(shift=shift, **bijector_args)
+ bijector = affine_lib.Affine(shift=shift, **bijector_args)
else:
- bijector = Affine(shift=shift, **bijector_args)
+ bijector = affine_lib.Affine(shift=shift, **bijector_args)
np_x = x
# For the case a vector is passed in, we need to make the shape
# match the matrix for matmul to work.
@@ -823,7 +823,7 @@ class AffineBijectorTest(test.TestCase):
def testScalePropertyAssertsCorrectly(self):
with self.test_session():
with self.assertRaises(NotImplementedError):
- scale = Affine( # pylint:disable=unused-variable
+ scale = affine_lib.Affine( # pylint:disable=unused-variable
scale_tril=[[1., 0], [2, 1]],
scale_perturb_factor=[2., 1.]).scale
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/bijector_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/bijector_test.py
index 3ba6aa5293..94f3bc959b 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/bijector_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/bijector_test.py
@@ -22,7 +22,7 @@ import abc
import six
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector import Bijector
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector as bijector_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@@ -36,10 +36,10 @@ class BaseBijectorTest(test.TestCase):
with self.assertRaisesRegexp(TypeError,
("Can't instantiate abstract class Bijector "
"with abstract methods __init__")):
- Bijector()
+ bijector_lib.Bijector()
def testDefaults(self):
- class _BareBonesBijector(Bijector):
+ class _BareBonesBijector(bijector_lib.Bijector):
"""Minimal specification of a `Bijector`."""
def __init__(self):
@@ -80,7 +80,7 @@ class IntentionallyMissingError(Exception):
pass
-class BrokenBijector(Bijector):
+class BrokenBijector(bijector_lib.Bijector):
"""Forward and inverse are not inverses of each other."""
def __init__(self, forward_missing=False, inverse_missing=False):
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/chain_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/chain_test.py
index a647f963ed..ecf068bf6b 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/chain_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/chain_test.py
@@ -20,11 +20,11 @@ from __future__ import print_function
import numpy as np
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_scalar_congruency
-from tensorflow.contrib.distributions.python.ops.bijectors.chain import Chain
-from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
-from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
-from tensorflow.contrib.distributions.python.ops.bijectors.softplus import Softplus
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
+from tensorflow.contrib.distributions.python.ops.bijectors import chain as chain_lib
+from tensorflow.contrib.distributions.python.ops.bijectors import exp as exp_lib
+from tensorflow.contrib.distributions.python.ops.bijectors import softmax_centered as softmax_centered_lib
+from tensorflow.contrib.distributions.python.ops.bijectors import softplus as softplus_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
@@ -34,7 +34,8 @@ class ChainBijectorTest(test.TestCase):
def testBijector(self):
with self.test_session():
- chain = Chain((Exp(event_ndims=1), Softplus(event_ndims=1)))
+ chain = chain_lib.Chain((exp_lib.Exp(event_ndims=1),
+ softplus_lib.Softplus(event_ndims=1)))
self.assertEqual("chain_of_exp_of_softplus", chain.name)
x = np.asarray([[[1., 2.],
[2., 3.]]])
@@ -48,7 +49,7 @@ class ChainBijectorTest(test.TestCase):
def testBijectorIdentity(self):
with self.test_session():
- chain = Chain()
+ chain = chain_lib.Chain()
self.assertEqual("identity", chain.name)
x = np.asarray([[[1., 2.],
[2., 3.]]])
@@ -59,16 +60,16 @@ class ChainBijectorTest(test.TestCase):
def testScalarCongruency(self):
with self.test_session():
- bijector = Chain((Exp(), Softplus()))
- assert_scalar_congruency(
+ bijector = chain_lib.Chain((exp_lib.Exp(), softplus_lib.Softplus()))
+ bijector_test_util.assert_scalar_congruency(
bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)
def testShapeGetters(self):
with self.test_session():
- bijector = Chain([
- SoftmaxCentered(
+ bijector = chain_lib.Chain([
+ softmax_centered_lib.SoftmaxCentered(
event_ndims=1, validate_args=True),
- SoftmaxCentered(
+ softmax_centered_lib.SoftmaxCentered(
event_ndims=0, validate_args=True)])
x = tensor_shape.TensorShape([])
y = tensor_shape.TensorShape([2 + 1])
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/cholesky_outer_product_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/cholesky_outer_product_test.py
index 267e4ad350..7b09553be9 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/cholesky_outer_product_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/cholesky_outer_product_test.py
@@ -21,7 +21,7 @@ from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import gamma as gamma_lib
from tensorflow.contrib.distributions.python.ops import transformed_distribution as transformed_distribution_lib
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_scalar_congruency
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@@ -57,7 +57,7 @@ class InvertBijectorTest(test.TestCase):
def testScalarCongruency(self):
with self.test_session():
bijector = bijectors.Invert(bijectors.Exp())
- assert_scalar_congruency(
+ bijector_test_util.assert_scalar_congruency(
bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)
def testShapeGetters(self):
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/conditional_bijector_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/conditional_bijector_test.py
index 26e0d2a539..9c2a1c8f91 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/conditional_bijector_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/conditional_bijector_test.py
@@ -18,12 +18,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import ConditionalBijector
+from tensorflow.contrib.distributions.python.ops.bijectors import conditional_bijector
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
-class _TestBijector(ConditionalBijector):
+class _TestBijector(conditional_bijector.ConditionalBijector):
def __init__(self):
super(_TestBijector, self).__init__(
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/exp_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/exp_test.py
index c30ce60cac..04ddf09b69 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/exp_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/exp_test.py
@@ -20,9 +20,8 @@ from __future__ import print_function
import numpy as np
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_bijective_and_finite
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_scalar_congruency
-from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
+from tensorflow.contrib.distributions.python.ops.bijectors import exp as exp_lib
from tensorflow.python.platform import test
@@ -31,7 +30,7 @@ class ExpBijectorTest(test.TestCase):
def testBijector(self):
with self.test_session():
- bijector = Exp(event_ndims=1)
+ bijector = exp_lib.Exp(event_ndims=1)
self.assertEqual("exp", bijector.name)
x = [[[1.], [2.]]]
y = np.exp(x)
@@ -45,16 +44,16 @@ class ExpBijectorTest(test.TestCase):
def testScalarCongruency(self):
with self.test_session():
- bijector = Exp()
- assert_scalar_congruency(
+ bijector = exp_lib.Exp()
+ bijector_test_util.assert_scalar_congruency(
bijector, lower_x=-2., upper_x=1.5, rtol=0.05)
def testBijectiveAndFinite(self):
with self.test_session():
- bijector = Exp(event_ndims=0)
+ bijector = exp_lib.Exp(event_ndims=0)
x = np.linspace(-10, 10, num=10).astype(np.float32)
y = np.logspace(-10, 10, num=10).astype(np.float32)
- assert_bijective_and_finite(bijector, x, y)
+ bijector_test_util.assert_bijective_and_finite(bijector, x, y)
if __name__ == "__main__":
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/identity_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/identity_test.py
index 0969c293d4..f6aabe0d63 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/identity_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/identity_test.py
@@ -18,8 +18,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_scalar_congruency
-from tensorflow.contrib.distributions.python.ops.bijectors.identity import Identity
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
+from tensorflow.contrib.distributions.python.ops.bijectors import identity as identity_lib
from tensorflow.python.platform import test
@@ -28,7 +28,7 @@ class IdentityBijectorTest(test.TestCase):
def testBijector(self):
with self.test_session():
- bijector = Identity()
+ bijector = identity_lib.Identity()
self.assertEqual("identity", bijector.name)
x = [[[0.], [1.]]]
self.assertAllEqual(x, bijector.forward(x).eval())
@@ -38,8 +38,8 @@ class IdentityBijectorTest(test.TestCase):
def testScalarCongruency(self):
with self.test_session():
- bijector = Identity()
- assert_scalar_congruency(
+ bijector = identity_lib.Identity()
+ bijector_test_util.assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py
index 739fa6d439..c17c67c6f5 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py
@@ -20,8 +20,8 @@ from __future__ import print_function
import numpy as np
-from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
-from tensorflow.contrib.distributions.python.ops.bijectors.inline import Inline
+from tensorflow.contrib.distributions.python.ops.bijectors import exp as exp_lib
+from tensorflow.contrib.distributions.python.ops.bijectors import inline as inline_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
@@ -33,8 +33,8 @@ class InlineBijectorTest(test.TestCase):
def testBijector(self):
with self.test_session():
- exp = Exp(event_ndims=1)
- inline = Inline(
+ exp = exp_lib.Exp(event_ndims=1)
+ inline = inline_lib.Inline(
forward_fn=math_ops.exp,
inverse_fn=math_ops.log,
inverse_log_det_jacobian_fn=(
@@ -57,7 +57,7 @@ class InlineBijectorTest(test.TestCase):
def testShapeGetters(self):
with self.test_session():
- bijector = Inline(
+ bijector = inline_lib.Inline(
forward_event_shape_tensor_fn=lambda x: array_ops.concat((x, [1]), 0),
forward_event_shape_fn=lambda x: x.as_list() + [1],
inverse_event_shape_tensor_fn=lambda x: x[:-1],
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py
index 267e4ad350..7b09553be9 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py
@@ -21,7 +21,7 @@ from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import gamma as gamma_lib
from tensorflow.contrib.distributions.python.ops import transformed_distribution as transformed_distribution_lib
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_scalar_congruency
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@@ -57,7 +57,7 @@ class InvertBijectorTest(test.TestCase):
def testScalarCongruency(self):
with self.test_session():
bijector = bijectors.Invert(bijectors.Exp())
- assert_scalar_congruency(
+ bijector_test_util.assert_scalar_congruency(
bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)
def testShapeGetters(self):
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/power_transform_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/power_transform_test.py
index b30a3b599b..e95bbdf67e 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/power_transform_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/power_transform_test.py
@@ -20,9 +20,8 @@ from __future__ import print_function
import numpy as np
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_bijective_and_finite
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_scalar_congruency
-from tensorflow.contrib.distributions.python.ops.bijectors.power_transform import PowerTransform
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
+from tensorflow.contrib.distributions.python.ops.bijectors import power_transform as power_transform_lib
from tensorflow.python.platform import test
@@ -32,7 +31,7 @@ class PowerTransformBijectorTest(test.TestCase):
def testBijector(self):
with self.test_session():
c = 0.2
- bijector = PowerTransform(
+ bijector = power_transform_lib.PowerTransform(
power=c, event_ndims=1, validate_args=True)
self.assertEqual("power_transform", bijector.name)
x = np.array([[[-1.], [2.], [-5. + 1e-4]]])
@@ -50,18 +49,18 @@ class PowerTransformBijectorTest(test.TestCase):
def testScalarCongruency(self):
with self.test_session():
- bijector = PowerTransform(
+ bijector = power_transform_lib.PowerTransform(
power=0.2, validate_args=True)
- assert_scalar_congruency(
+ bijector_test_util.assert_scalar_congruency(
bijector, lower_x=-2., upper_x=1.5, rtol=0.05)
def testBijectiveAndFinite(self):
with self.test_session():
- bijector = PowerTransform(
+ bijector = power_transform_lib.PowerTransform(
power=0.2, event_ndims=0, validate_args=True)
x = np.linspace(-4.999, 10, num=10).astype(np.float32)
y = np.logspace(0.001, 10, num=10).astype(np.float32)
- assert_bijective_and_finite(bijector, x, y, rtol=1e-3)
+ bijector_test_util.assert_bijective_and_finite(bijector, x, y, rtol=1e-3)
if __name__ == "__main__":
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_centered_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_centered_test.py
index 4ff3f334cc..8884da146d 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_centered_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_centered_test.py
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
-from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid_centered import SigmoidCentered
+from tensorflow.contrib.distributions.python.ops.bijectors import sigmoid_centered as sigmoid_centered_lib
from tensorflow.python.platform import test
@@ -29,7 +29,7 @@ class SigmoidCenteredBijectorTest(test.TestCase):
def testBijector(self):
with self.test_session():
- sigmoid = SigmoidCentered()
+ sigmoid = sigmoid_centered_lib.SigmoidCentered()
self.assertEqual("sigmoid_centered", sigmoid.name)
x = np.log([[2., 3, 4],
[4., 8, 12]])
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_test.py
index 9a96b91c6d..e16f9dff22 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_test.py
@@ -21,9 +21,8 @@ from __future__ import print_function
import numpy as np
from scipy import special
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_bijective_and_finite
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_scalar_congruency
-from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
+from tensorflow.contrib.distributions.python.ops.bijectors import sigmoid
from tensorflow.python.platform import test
@@ -32,33 +31,35 @@ class SigmoidBijectorTest(test.TestCase):
def testBijector(self):
with self.test_session():
- self.assertEqual("sigmoid", Sigmoid().name)
+ self.assertEqual("sigmoid", sigmoid.Sigmoid().name)
x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)
y = special.expit(x)
ildj = -np.log(y) - np.log1p(-y)
self.assertAllClose(
- y, Sigmoid().forward(x).eval(),
+ y, sigmoid.Sigmoid().forward(x).eval(),
atol=0., rtol=1e-2)
self.assertAllClose(
- x, Sigmoid().inverse(y).eval(),
+ x, sigmoid.Sigmoid().inverse(y).eval(),
atol=0., rtol=1e-4)
self.assertAllClose(
- ildj, Sigmoid().inverse_log_det_jacobian(y).eval(),
+ ildj, sigmoid.Sigmoid().inverse_log_det_jacobian(y).eval(),
atol=0., rtol=1e-6)
self.assertAllClose(
- -ildj, Sigmoid().forward_log_det_jacobian(x).eval(),
+ -ildj, sigmoid.Sigmoid().forward_log_det_jacobian(x).eval(),
atol=0., rtol=1e-4)
def testScalarCongruency(self):
with self.test_session():
- assert_scalar_congruency(Sigmoid(), lower_x=-7., upper_x=7.)
+ bijector_test_util.assert_scalar_congruency(
+ sigmoid.Sigmoid(), lower_x=-7., upper_x=7.)
def testBijectiveAndFinite(self):
with self.test_session():
x = np.linspace(-7., 7., 100).astype(np.float32)
eps = 1e-3
y = np.linspace(eps, 1. - eps, 100).astype(np.float32)
- assert_bijective_and_finite(Sigmoid(), x, y, atol=0., rtol=1e-4)
+ bijector_test_util.assert_bijective_and_finite(
+ sigmoid.Sigmoid(), x, y, atol=0., rtol=1e-4)
if __name__ == "__main__":
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/softmax_centered_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/softmax_centered_test.py
index 173d52686d..97df62ec05 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/softmax_centered_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/softmax_centered_test.py
@@ -20,8 +20,8 @@ from __future__ import print_function
import numpy as np
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_bijective_and_finite
-from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
+from tensorflow.contrib.distributions.python.ops.bijectors import softmax_centered as softmax_centered_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
@@ -34,7 +34,7 @@ class SoftmaxCenteredBijectorTest(test.TestCase):
def testBijectorScalar(self):
with self.test_session():
- softmax = SoftmaxCentered() # scalar by default
+ softmax = softmax_centered_lib.SoftmaxCentered() # scalar by default
self.assertEqual("softmax_centered", softmax.name)
x = np.log([[2., 3, 4],
[4., 8, 12]])
@@ -59,7 +59,7 @@ class SoftmaxCenteredBijectorTest(test.TestCase):
def testBijectorVector(self):
with self.test_session():
- softmax = SoftmaxCentered(event_ndims=1)
+ softmax = softmax_centered_lib.SoftmaxCentered(event_ndims=1)
self.assertEqual("softmax_centered", softmax.name)
x = np.log([[2., 3, 4], [4., 8, 12]])
y = [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]
@@ -80,11 +80,11 @@ class SoftmaxCenteredBijectorTest(test.TestCase):
with self.test_session():
for x, y, b in ((tensor_shape.TensorShape([]),
tensor_shape.TensorShape([2]),
- SoftmaxCentered(
+ softmax_centered_lib.SoftmaxCentered(
event_ndims=0, validate_args=True)),
(tensor_shape.TensorShape([4]),
tensor_shape.TensorShape([5]),
- SoftmaxCentered(
+ softmax_centered_lib.SoftmaxCentered(
event_ndims=1, validate_args=True))):
self.assertAllEqual(y, b.forward_event_shape(x))
self.assertAllEqual(y.as_list(),
@@ -95,7 +95,7 @@ class SoftmaxCenteredBijectorTest(test.TestCase):
def testBijectiveAndFinite(self):
with self.test_session():
- softmax = SoftmaxCentered(event_ndims=1)
+ softmax = softmax_centered_lib.SoftmaxCentered(event_ndims=1)
x = np.linspace(-50, 50, num=10).reshape(5, 2).astype(np.float32)
# Make y values on the simplex with a wide range.
y_0 = np.ones(5).astype(np.float32)
@@ -104,7 +104,7 @@ class SoftmaxCenteredBijectorTest(test.TestCase):
y = np.array([y_0, y_1, y_2])
y /= y.sum(axis=0)
y = y.T # y.shape = [5, 3]
- assert_bijective_and_finite(softmax, x, y)
+ bijector_test_util.assert_bijective_and_finite(softmax, x, y)
if __name__ == "__main__":
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/softplus_test.py b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/softplus_test.py
index 2c58519fda..a7db1dcc6c 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/bijectors/softplus_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/bijectors/softplus_test.py
@@ -20,9 +20,8 @@ from __future__ import print_function
import numpy as np
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_bijective_and_finite
-from tensorflow.contrib.distributions.python.ops.bijectors.bijector_test_util import assert_scalar_congruency
-from tensorflow.contrib.distributions.python.ops.bijectors.softplus import Softplus
+from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
+from tensorflow.contrib.distributions.python.ops.bijectors import softplus as softplus_lib
from tensorflow.python.platform import test
rng = np.random.RandomState(42)
@@ -43,7 +42,7 @@ class SoftplusBijectorTest(test.TestCase):
def testBijectorForwardInverseEventDimsZero(self):
with self.test_session():
- bijector = Softplus(event_ndims=0)
+ bijector = softplus_lib.Softplus(event_ndims=0)
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
@@ -53,7 +52,7 @@ class SoftplusBijectorTest(test.TestCase):
def testBijectorLogDetJacobianEventDimsZero(self):
with self.test_session():
- bijector = Softplus(event_ndims=0)
+ bijector = softplus_lib.Softplus(event_ndims=0)
y = 2 * rng.rand(2, 10)
# No reduction needed if event_dims = 0.
ildj = self._softplus_ildj_before_reduction(y)
@@ -62,7 +61,7 @@ class SoftplusBijectorTest(test.TestCase):
def testBijectorForwardInverseEventDimsOne(self):
with self.test_session():
- bijector = Softplus(event_ndims=1)
+ bijector = softplus_lib.Softplus(event_ndims=1)
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
@@ -72,7 +71,7 @@ class SoftplusBijectorTest(test.TestCase):
def testBijectorLogDetJacobianEventDimsOne(self):
with self.test_session():
- bijector = Softplus(event_ndims=1)
+ bijector = softplus_lib.Softplus(event_ndims=1)
y = 2 * rng.rand(2, 10)
ildj_before = self._softplus_ildj_before_reduction(y)
ildj = np.sum(ildj_before, axis=1)
@@ -81,28 +80,28 @@ class SoftplusBijectorTest(test.TestCase):
def testScalarCongruency(self):
with self.test_session():
- bijector = Softplus(event_ndims=0)
- assert_scalar_congruency(
+ bijector = softplus_lib.Softplus(event_ndims=0)
+ bijector_test_util.assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testBijectiveAndFinite32bit(self):
with self.test_session():
- bijector = Softplus(event_ndims=0)
+ bijector = softplus_lib.Softplus(event_ndims=0)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.logspace(-10, 10, 100).astype(np.float32)
- assert_bijective_and_finite(
+ bijector_test_util.assert_bijective_and_finite(
bijector, x, y, rtol=1e-2, atol=1e-2)
def testBijectiveAndFinite16bit(self):
with self.test_session():
- bijector = Softplus(event_ndims=0)
+ bijector = softplus_lib.Softplus(event_ndims=0)
# softplus(-20) is zero, so we can't use such a large range as in 32bit.
x = np.linspace(-10., 20., 100).astype(np.float16)
# Note that float16 is only in the open set (0, inf) for a smaller
# logspace range. The actual range was (-7, 4), so use something smaller
# for the test.
y = np.logspace(-6, 3, 100).astype(np.float16)
- assert_bijective_and_finite(
+ bijector_test_util.assert_bijective_and_finite(
bijector, x, y, rtol=1e-1, atol=1e-3)
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py b/tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py
index 545471907f..eacdfd9ccc 100644
--- a/tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py
+++ b/tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py
@@ -22,7 +22,7 @@ import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.kernel_tests import transformed_distribution_test
-from tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import ConditionalBijector
+from tensorflow.contrib.distributions.python.ops.bijectors import conditional_bijector
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -32,7 +32,7 @@ from tensorflow.python.platform import test
ds = distributions
-class _ChooseLocation(ConditionalBijector):
+class _ChooseLocation(conditional_bijector.ConditionalBijector):
"""A Bijector which chooses between one of two location parameters."""
def __init__(self, loc, name="ChooseLocation"):
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/__init__.py b/tensorflow/contrib/distributions/python/ops/bijectors/__init__.py
index 40fcb36769..2e83ed6e41 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/__init__.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/__init__.py
@@ -56,7 +56,3 @@ from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered impo
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import *
# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member
-
-from tensorflow.python.util.all_util import remove_undocumented
-
-remove_undocumented(__name__)
diff --git a/tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py b/tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py
index af675787b1..7fee2e1f3a 100644
--- a/tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py
+++ b/tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py
@@ -21,9 +21,7 @@ from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import logistic
from tensorflow.contrib.distributions.python.ops import transformed_distribution
-# Bijectors must be directly imported because `remove_undocumented` prevents
-# individual file imports.
-from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid
+from tensorflow.contrib.distributions.python.ops.bijectors import sigmoid as sigmoid_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -178,7 +176,7 @@ class RelaxedBernoulli(transformed_distribution.TransformedDistribution):
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name + "/Logistic"),
- bijector=Sigmoid(validate_args=validate_args),
+ bijector=sigmoid_lib.Sigmoid(validate_args=validate_args),
validate_args=validate_args,
name=name)
self._parameters = parameters
diff --git a/tensorflow/contrib/distributions/python/ops/transformed_distribution.py b/tensorflow/contrib/distributions/python/ops/transformed_distribution.py
index 1403adbda2..844f78ca96 100644
--- a/tensorflow/contrib/distributions/python/ops/transformed_distribution.py
+++ b/tensorflow/contrib/distributions/python/ops/transformed_distribution.py
@@ -19,11 +19,9 @@ from __future__ import print_function
import numpy as np
-from tensorflow.contrib.distributions.python.ops import distribution as distribution_lib
+from tensorflow.contrib.distributions.python.ops import distribution as distributions
from tensorflow.contrib.distributions.python.ops import distribution_util
-# Bijectors must be directly imported because `remove_undocumented` prevents
-# individual file imports.
-from tensorflow.contrib.distributions.python.ops.bijectors.identity import Identity
+from tensorflow.contrib.distributions.python.ops.bijectors import identity as identity_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@@ -121,7 +119,7 @@ def _is_scalar_from_shape(shape):
return _logical_equal(_ndims_from_shape(shape), 0)
-class TransformedDistribution(distribution_lib.Distribution):
+class TransformedDistribution(distributions.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
@@ -148,19 +146,49 @@ class TransformedDistribution(distribution_lib.Distribution):
A `TransformedDistribution` implements the following operations:
- * `sample`
- Mathematically: `Y = g(X)`
- Programmatically: `bijector.forward(distribution.sample(...))`
+ * `sample`:
- * `log_prob`
- Mathematically: `(log o pdf)(Y=y) = (log o pdf o g^{-1})(y)
- + (log o abs o det o J o g^{-1})(y)`
- Programmatically: `(distribution.log_prob(bijector.inverse(y))
- + bijector.inverse_log_det_jacobian(y))`
+ Mathematically:
- * `log_cdf`
- Mathematically: `(log o cdf)(Y=y) = (log o cdf o g^{-1})(y)`
- Programmatically: `distribution.log_cdf(bijector.inverse(x))`
+ ```none
+ Y = g(X)
+ ```
+
+ Programmatically:
+
+ ```python
+ return bijector.forward(distribution.sample(...))
+ ```
+
+ * `log_prob`:
+
+ Mathematically:
+
+ ```none
+ (log o pdf)(Y=y) = (log o pdf o g^{-1})(y) +
+ (log o abs o det o J o g^{-1})(y)
+ ```
+
+ Programmatically:
+
+ ```python
+ return (distribution.log_prob(bijector.inverse(y)) +
+ bijector.inverse_log_det_jacobian(y))
+ ```
+
+ * `log_cdf`:
+
+ Mathematically:
+
+ ```none
+ (log o cdf)(Y=y) = (log o cdf o g^{-1})(y)
+ ```
+
+ Programmatically:
+
+ ```python
+ return distribution.log_cdf(bijector.inverse(x))
+ ```
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
@@ -171,7 +199,7 @@ class TransformedDistribution(distribution_lib.Distribution):
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
- distribution=ds.Normal(loc=0., scale=1.),
+ distribution=ds.Normal(loc=mu, scale=sigma),
bijector=ds.bijectors.Exp(),
name="LogNormalTransformedDistribution")
```
@@ -181,7 +209,7 @@ class TransformedDistribution(distribution_lib.Distribution):
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
- distribution=ds.Normal(loc=0., scale=1.),
+ distribution=ds.Normal(loc=mu, scale=sigma),
bijector=ds.bijectors.Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
@@ -195,11 +223,8 @@ class TransformedDistribution(distribution_lib.Distribution):
```python
ds = tf.contrib.distributions
normal = ds.TransformedDistribution(
- distribution=ds.Normal(loc=0., scale=1.),
- bijector=ds.bijectors.Affine(
- shift=-1.,
- scale_identity_multiplier=2.,
- event_ndims=0),
+ distribution=ds.Normal(loc=0, scale=1),
+      bijector=ds.bijectors.Affine(shift=mu, scale_identity_multiplier=sigma, event_ndims=0),
name="NormalTransformedDistribution")
```
@@ -212,6 +237,7 @@ class TransformedDistribution(distribution_lib.Distribution):
multivariate Normal as a `TransformedDistribution`.
```python
+  bs = tf.contrib.distributions.bijectors
ds = tf.contrib.distributions
# We will create two MVNs with batch_shape = event_shape = 2.
mean = [[-1., 0], # batch:0
@@ -222,7 +248,7 @@ class TransformedDistribution(distribution_lib.Distribution):
[2, 2]]] # batch:1
mvn1 = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
- bijector=ds.bijectors.Affine(shift=mean, scale_tril=chol_cov),
+      bijector=bs.Affine(shift=mean, scale_tril=chol_cov),
batch_shape=[2], # Valid because base_distribution.batch_shape == [].
event_shape=[2]) # Valid because base_distribution.event_shape == [].
mvn2 = ds.MultivariateNormalTriL(loc=mean, scale_tril=chol_cov)
@@ -265,7 +291,7 @@ class TransformedDistribution(distribution_lib.Distribution):
self._empty = constant_op.constant([], dtype=dtypes.int32, name="empty")
if bijector is None:
- bijector = Identity(validate_args=validate_args)
+ bijector = identity_lib.Identity(validate_args=validate_args)
# We will keep track of a static and dynamic version of
# self._is_{batch,event}_override. This way we can do more prior to graph
diff --git a/tensorflow/contrib/distributions/python/ops/vector_student_t.py b/tensorflow/contrib/distributions/python/ops/vector_student_t.py
index 8d680ab27a..2f4b33b7e7 100644
--- a/tensorflow/contrib/distributions/python/ops/vector_student_t.py
+++ b/tensorflow/contrib/distributions/python/ops/vector_student_t.py
@@ -191,7 +191,7 @@ class _VectorStudentT(transformed_distribution.TransformedDistribution):
```
For more examples of how to construct the `scale` matrix, see the
- `tf.contrib.distributions.bijectors.Affine` docstring.
+ `bijectors.Affine` docstring.
"""
diff --git a/tensorflow/contrib/keras/python/keras/backend.py b/tensorflow/contrib/keras/python/keras/backend.py
index 1882ae35f5..d149138796 100644
--- a/tensorflow/contrib/keras/python/keras/backend.py
+++ b/tensorflow/contrib/keras/python/keras/backend.py
@@ -3625,3 +3625,23 @@ if os.path.exists(_config_path):
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
+
+# Save config file.
+if os.access(_keras_base_dir, os.W_OK):
+ if not os.path.exists(_keras_dir):
+ try:
+ os.makedirs(_keras_dir)
+ except OSError:
+ # Except potential race conditions
+ # in multi-threaded environments.
+ pass
+
+ if not os.path.exists(_config_path):
+ _config = {
+ 'floatx': floatx(),
+ 'epsilon': epsilon(),
+ 'backend': 'tensorflow',
+ 'image_data_format': image_data_format()
+ }
+ with open(_config_path, 'w') as f:
+ f.write(json.dumps(_config, indent=4))
diff --git a/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py b/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
index b891bf2301..dfe08bb863 100644
--- a/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
+++ b/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
@@ -25,4 +25,5 @@ from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue
from tensorflow.python.estimator.inputs.queues.feeding_functions import _GeneratorFeedFn
from tensorflow.python.estimator.inputs.queues.feeding_functions import _OrderedDictNumpyFeedFn
from tensorflow.python.estimator.inputs.queues.feeding_functions import _PandasFeedFn
+from tensorflow.python.estimator.inputs.queues.feeding_functions import _GeneratorFeedFn
# pylint: enable=unused-import
diff --git a/tensorflow/contrib/learn/python/learn/learn_io/generator_io.py b/tensorflow/contrib/learn/python/learn/learn_io/generator_io.py
index 5859bb6b47..c302c7725a 100644
--- a/tensorflow/contrib/learn/python/learn/learn_io/generator_io.py
+++ b/tensorflow/contrib/learn/python/learn/learn_io/generator_io.py
@@ -18,8 +18,9 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from types import FunctionType, GeneratorType
from collections import Container
+from types import FunctionType
+from types import GeneratorType
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
@@ -33,7 +34,7 @@ def generator_input_fn(x,
num_threads=1):
"""Returns input function that would dicts of numpy arrays
yielded from a generator.
-
+
It is assumed that every dict yielded from the dictionary represents
a single sample. The generator should consume a single epoch of the data.
diff --git a/tensorflow/contrib/slim/README.md b/tensorflow/contrib/slim/README.md
index 94b0263ae8..dae50e67c5 100644
--- a/tensorflow/contrib/slim/README.md
+++ b/tensorflow/contrib/slim/README.md
@@ -109,7 +109,7 @@ weights = slim.variable('weights',
Note that in native TensorFlow, there are two types of variables: regular
variables and local (transient) variables. The vast majority of variables are
regular variables: once created, they can be saved to disk using a
-[saver](https://www.tensorflow.org/versions/r0.11/api_docs/python/state_ops.html#Saver).
+[saver](https://www.tensorflow.org/api_docs/python/tf/train/Saver).
Local variables are those variables that only exist for the duration of a
session and are not saved to disk.
@@ -289,10 +289,10 @@ slim.stack(x, slim.conv2d, [(32, [3, 3]), (32, [1, 1]), (64, [3, 3]), (64, [1, 1
### Scopes
In addition to the types of scope mechanisms in TensorFlow
-([name_scope](https://www.tensorflow.org/api_docs/python/framework.html#name_scope),
-[variable_scope](https://www.tensorflow.org/api_docs/python/state_layers.html#variable_scope),
+([name_scope](https://www.tensorflow.org/api_docs/python/tf/name_scope),
+[variable_scope](https://www.tensorflow.org/api_docs/python/tf/variable_scope)),
TF-Slim adds a new scoping mechanism called
-[arg_scope](https://www.tensorflow.org/code/tensorflow/contrib/framework/python/ops/arg_scope.py).
+[arg_scope](https://www.tensorflow.org/api_docs/python/tf/contrib/framework/arg_scope).
This new scope allows a user to specify one or more operations and a set of
arguments which will be passed to each of the operations defined in the
`arg_scope`. This functionality is best illustrated by example. Consider the
diff --git a/tensorflow/core/kernels/conv_grad_filter_ops.cc b/tensorflow/core/kernels/conv_grad_filter_ops.cc
index f88862bfeb..20394cad43 100644
--- a/tensorflow/core/kernels/conv_grad_filter_ops.cc
+++ b/tensorflow/core/kernels/conv_grad_filter_ops.cc
@@ -208,7 +208,6 @@ class Conv2DFastBackpropFilterOp : public OpKernel {
context->allocate_output(0, filter_shape, &filter_backprop));
#if defined TENSORFLOW_USE_LIBXSMM && defined TENSORFLOW_USE_LIBXSMM_BACKWARD
-
int64 pad_top, pad_bottom;
int64 pad_left, pad_right;
OP_REQUIRES_OK(
@@ -229,8 +228,9 @@ class Conv2DFastBackpropFilterOp : public OpKernel {
context, context->eigen_device<Device>(), input.tensor<T, 4>(),
filter_backprop->tensor<T, 4>(), out_backprop.tensor<T, 4>(),
dims.spatial_dims[0].input_size, dims.spatial_dims[1].input_size,
- (int)dims.spatial_dims[0].stride,
- (int)dims.spatial_dims[1].stride, (int)pad_top, (int)pad_left,
+ static_cast<int>(dims.spatial_dims[0].stride),
+ static_cast<int>(dims.spatial_dims[1].stride),
+ static_cast<int>(pad_top), static_cast<int>(pad_left),
data_format_)) {
return;
}
@@ -321,8 +321,9 @@ class Conv2DCustomBackpropFilterOp : public OpKernel {
context, context->eigen_device<Device>(), input.tensor<T, 4>(),
filter_backprop->tensor<T, 4>(), out_backprop.tensor<T, 4>(),
dims.spatial_dims[0].input_size, dims.spatial_dims[1].input_size,
- (int)dims.spatial_dims[0].stride,
- (int)dims.spatial_dims[1].stride, (int)pad_top, (int)pad_left,
+ static_cast<int>(dims.spatial_dims[0].stride),
+ static_cast<int>(dims.spatial_dims[1].stride),
+ static_cast<int>(pad_top), static_cast<int>(pad_left),
data_format_)) {
return;
}
diff --git a/tensorflow/core/kernels/conv_grad_input_ops.cc b/tensorflow/core/kernels/conv_grad_input_ops.cc
index e79c9465cb..9a50431a2f 100644
--- a/tensorflow/core/kernels/conv_grad_input_ops.cc
+++ b/tensorflow/core/kernels/conv_grad_input_ops.cc
@@ -258,8 +258,10 @@ class Conv2DFastBackpropInputOp : public OpKernel {
context, context->eigen_device<Device>(),
in_backprop->tensor<T, 4>(), filter.tensor<T, 4>(),
out_backprop.tensor<T, 4>(), dims.spatial_dims[0].input_size,
- dims.spatial_dims[1].input_size, (int)dims.spatial_dims[0].stride,
- (int)dims.spatial_dims[1].stride, (int)pad_top, (int)pad_left,
+ dims.spatial_dims[1].input_size,
+ static_cast<int>(dims.spatial_dims[0].stride),
+ static_cast<int>(dims.spatial_dims[1].stride),
+ static_cast<int>(pad_top), static_cast<int>(pad_left),
data_format_)) {
return;
}
@@ -352,8 +354,10 @@ class Conv2DCustomBackpropInputOp : public OpKernel {
context, context->eigen_device<Device>(),
in_backprop->tensor<T, 4>(), filter.tensor<T, 4>(),
out_backprop.tensor<T, 4>(), dims.spatial_dims[0].input_size,
- dims.spatial_dims[1].input_size, (int)dims.spatial_dims[0].stride,
- (int)dims.spatial_dims[1].stride, (int)pad_top, (int)pad_left,
+ dims.spatial_dims[1].input_size,
+ static_cast<int>(dims.spatial_dims[0].stride),
+ static_cast<int>(dims.spatial_dims[1].stride),
+ static_cast<int>(pad_top), static_cast<int>(pad_left),
data_format_)) {
return;
}
diff --git a/tensorflow/core/kernels/maxpooling_op_gpu.cu.cc b/tensorflow/core/kernels/maxpooling_op_gpu.cu.cc
index 0c638ca233..32b210ecb7 100644
--- a/tensorflow/core/kernels/maxpooling_op_gpu.cu.cc
+++ b/tensorflow/core/kernels/maxpooling_op_gpu.cu.cc
@@ -379,10 +379,10 @@ bool MaxPoolBackwardWithArgmax<T>::operator()(
T* bottom_diff, const Eigen::GpuDevice& d) {
const int kThreadsPerBlock = 1024;
SetZero<<<(input_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
- kThreadsPerBlock, 0, d.stream()>>>(input_size, bottom_diff);
+ kThreadsPerBlock, 0, d.stream()>>>(input_size, bottom_diff);
MaxPoolBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock, 0, d.stream()>>>(
- output_size, top_diff, mask, top_offset, bottom_offset, bottom_diff);
+ output_size, top_diff, mask, top_offset, bottom_offset, bottom_diff);
return d.ok();
}
diff --git a/tensorflow/core/kernels/xsmm_conv2d.cc b/tensorflow/core/kernels/xsmm_conv2d.cc
index 878abe9712..7936cbcd46 100644
--- a/tensorflow/core/kernels/xsmm_conv2d.cc
+++ b/tensorflow/core/kernels/xsmm_conv2d.cc
@@ -145,8 +145,8 @@ struct HashFunction {
S << w.d.S; u << w.d.u;
v << w.d.v; padh << w.d.pad_h_in;
padw << w.d.pad_w_in;
-
-
+
+
std::string out_ = N.str() + C.str()\
+ H.str() + W.str()\
+ K.str() + R.str()\
@@ -172,8 +172,9 @@ class handles {
chk_libxsmm_err(status, "Create handle");
libxsmm_handles.insert(std::make_pair(w, libxsmm_handle));
return libxsmm_handle;
- } else
+ } else {
return i->second;
+ }
}
~handles() {
std::unordered_map<libxsmm_dnn_conv_desc_wrap, libxsmm_dnn_layer*,
@@ -191,7 +192,7 @@ class handles {
static handles libxsmm_handles;
-//#define LIBXSMM_DETAILED_TIMING
+// #define LIBXSMM_DETAILED_TIMING
template <typename InputPtr, typename FilterPtr, typename OutputPtr>
static bool CallLibxsmmConvGeneric(OpKernelContext* ctx,
@@ -287,9 +288,8 @@ static bool CallLibxsmmConvGeneric(OpKernelContext* ctx,
}
count.Wait();
}
- }
- // Added: for weight update
- else if (kind == LIBXSMM_DNN_COMPUTE_KIND_UPD) {
+ } else if (kind == LIBXSMM_DNN_COMPUTE_KIND_UPD) {
+ // Added: for weight update
libxsmm_filter =
libxsmm_dnn_link_filter(libxsmm_handle, LIBXSMM_DNN_FILTER, filter,
LIBXSMM_DNN_TENSOR_FORMAT_RSCK_PTR, &status);
@@ -352,7 +352,7 @@ static bool CallLibxsmmConvGeneric(OpKernelContext* ctx,
chk_libxsmm_err(libxsmm_dnn_bind_buffer(libxsmm_handle, libxsmm_input,
LIBXSMM_DNN_REGULAR_INPUT),
- "Bind input weight udpate");
+ "Bind input weight update");
chk_libxsmm_err(libxsmm_dnn_bind_buffer(libxsmm_handle, libxsmm_output,
LIBXSMM_DNN_GRADIENT_OUTPUT),
"Bind output weight update");
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 172bdf02e2..4a644b55e6 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -26141,6 +26141,59 @@ op {
description: "Read [the section on\nSegmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation\nof segments.\n\nComputes a tensor such that\n`(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such\nthat `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`\nneed not be sorted and need not cover all values in the full\nrange of valid values.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\n\n`num_segments` should equal the number of distinct segment IDs.\n\n<div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n<img style=\"width:100%\" src=\"../../images/UnsortedSegmentSum.png\" alt>\n</div>"
}
op {
+ name: "UnsortedSegmentSum"
+ input_arg {
+ name: "data"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "segment_ids"
+ description: "A tensor whose shape is a prefix of `data.shape`."
+ type_attr: "Tindices"
+ }
+ input_arg {
+ name: "num_segments"
+ type: DT_INT32
+ }
+ output_arg {
+ name: "output"
+ description: "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`."
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "Tindices"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+  summary: "Computes the sum along segments of a tensor."
+ description: "Read [the section on\nSegmentation](../../api_docs/python/math_ops.md#segmentation) for an explanation\nof segments.\n\nComputes a tensor such that\n\\\\(output_i = \\sum_j data_j\\\\) where sum is over `j` such\nthat `segment_ids[j] == i`. Unlike `SegmentSum`, `segment_ids`\nneed not be sorted and need not cover all values in the full\n range of valid values.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\n\n`num_segments` should equal the number of distinct segment IDs.\n\n<div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n<img style=\"width:100%\" src=\"../../images/UnsortedSegmentSum.png\" alt>\n</div>"
+}
+op {
name: "Unstage"
output_arg {
name: "values"
diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h
index d175107be0..14e45c37d9 100644
--- a/tensorflow/core/public/version.h
+++ b/tensorflow/core/public/version.h
@@ -19,8 +19,8 @@ limitations under the License.
// TensorFlow uses semantic versioning, see http://semver.org/.
#define TF_MAJOR_VERSION 1
-#define TF_MINOR_VERSION 0
-#define TF_PATCH_VERSION 1
+#define TF_MINOR_VERSION 1
+#define TF_PATCH_VERSION 0-rc1
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
// "-beta", "-rc", "-rc.1")
diff --git a/tensorflow/docs_src/get_started/get_started.md b/tensorflow/docs_src/get_started/get_started.md
index 81426bbb92..6116c7d87f 100644
--- a/tensorflow/docs_src/get_started/get_started.md
+++ b/tensorflow/docs_src/get_started/get_started.md
@@ -1,4 +1,3 @@
-
# Getting Started With TensorFlow
This guide gets you started programming in TensorFlow. Before using this guide,
diff --git a/tensorflow/docs_src/install/install_c.md b/tensorflow/docs_src/install/install_c.md
index c1c7b66546..0f3914d52d 100644
--- a/tensorflow/docs_src/install/install_c.md
+++ b/tensorflow/docs_src/install/install_c.md
@@ -35,7 +35,7 @@ enable TensorFlow for C:
OS="linux" # Change to "darwin" for Mac OS
TARGET_DIRECTORY="/usr/local"
curl -L \
- "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-${OS}-x86_64-1.1.0.tar.gz" |
+ "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-${OS}-x86_64-1.1.0-rc1.tar.gz" |
sudo tar -C $TARGET_DIRECTORY -xz
The `tar` command extracts the TensorFlow C library into the `lib`
diff --git a/tensorflow/docs_src/install/install_go.md b/tensorflow/docs_src/install/install_go.md
index c9abaf2aca..6874a1f03f 100644
--- a/tensorflow/docs_src/install/install_go.md
+++ b/tensorflow/docs_src/install/install_go.md
@@ -35,7 +35,7 @@ steps to install this library and enable TensorFlow for Go:
TF_TYPE="cpu" # Change to "gpu" for GPU support
TARGET_DIRECTORY='/usr/local'
curl -L \
- "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-$(go env GOOS)-x86_64-1.1.0.tar.gz" |
+ "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-$(go env GOOS)-x86_64-1.1.0-rc1.tar.gz" |
sudo tar -C $TARGET_DIRECTORY -xz
The `tar` command extracts the TensorFlow C library into the `lib`
diff --git a/tensorflow/docs_src/install/install_java.md b/tensorflow/docs_src/install/install_java.md
index 111b046689..127d8fd029 100644
--- a/tensorflow/docs_src/install/install_java.md
+++ b/tensorflow/docs_src/install/install_java.md
@@ -34,7 +34,7 @@ following to the project's `pom.xml` to use the TensorFlow Java APIs:
<dependency>
<groupId>org.tensorflow</groupId>
<artifactId>tensorflow</artifactId>
- <version>1.1.0</version>
+ <version>1.1.0-rc1</version>
</dependency>
```
@@ -63,7 +63,7 @@ As an example, these steps will create a Maven project that uses TensorFlow:
<dependency>
<groupId>org.tensorflow</groupId>
<artifactId>tensorflow</artifactId>
- <version>1.1.0</version>
+ <version>1.1.0-rc1</version>
</dependency>
</dependencies>
</project>
@@ -122,7 +122,7 @@ refer to the simpler instructions above instead.
Take the following steps to install TensorFlow for Java on Linux or Mac OS:
1. Download
- [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.1.0.jar),
+ [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.1.0-rc1.jar),
which is the TensorFlow Java Archive (JAR).
2. Decide whether you will run TensorFlow for Java on CPU(s) only or with
@@ -141,7 +141,7 @@ Take the following steps to install TensorFlow for Java on Linux or Mac OS:
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
mkdir -p ./jni
curl -L \
- "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-${TF_TYPE}-${OS}-x86_64-1.1.0.tar.gz" |
+ "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-${TF_TYPE}-${OS}-x86_64-1.1.0-rc1.tar.gz" |
tar -xz -C ./jni
### Install on Windows
@@ -149,10 +149,10 @@ Take the following steps to install TensorFlow for Java on Linux or Mac OS:
Take the following steps to install TensorFlow for Java on Windows:
1. Download
- [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.1.0.jar),
+ [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.1.0-rc1.jar),
which is the TensorFlow Java Archive (JAR).
2. Download the following Java Native Interface (JNI) file appropriate for
- [TensorFlow for Java on Windows](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-cpu-windows-x86_64-1.1.0.zip).
+ [TensorFlow for Java on Windows](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-cpu-windows-x86_64-1.1.0-rc1.zip).
3. Extract this .zip file.
@@ -200,7 +200,7 @@ must be part of your `classpath`. For example, you can include the
downloaded `.jar` in your `classpath` by using the `-cp` compilation flag
as follows:
-<pre><b>javac -cp libtensorflow-1.1.0.jar HelloTF.java</b></pre>
+<pre><b>javac -cp libtensorflow-1.1.0-rc1.jar HelloTF.java</b></pre>
### Running
@@ -213,7 +213,7 @@ two files are available to the JVM:
For example, the following command line executes the `HelloTF` program:
-<pre><b>java -cp libtensorflow-1.1.0.jar:. -Djava.library.path=./jni HelloTF</b></pre>
+<pre><b>java -cp libtensorflow-1.1.0-rc1.jar:. -Djava.library.path=./jni HelloTF</b></pre>
If the program prints <tt>Hello from <i>version</i></tt>, you've successfully
installed TensorFlow for Java and are ready to use the API. If the program
diff --git a/tensorflow/docs_src/install/install_linux.md b/tensorflow/docs_src/install/install_linux.md
index 4a537135b6..4a5d63f337 100644
--- a/tensorflow/docs_src/install/install_linux.md
+++ b/tensorflow/docs_src/install/install_linux.md
@@ -166,7 +166,7 @@ Take the following steps to install TensorFlow with Virtualenv:
virtualenv environment:
<pre> (tensorflow)$ <b>pip install --upgrade \\
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp27-none-linux_x86_64.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.1.0rc1-cp27-none-linux_x86_64.whl</b></pre>
If you encounter installation problems, see
[Common Installation Problems](#common_installation_problems).
@@ -270,7 +270,7 @@ take the following steps:
the following command:
<pre> $ <b>sudo pip install --upgrade \
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp27-none-linux_x86_64.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.1.0rc1-cp27-none-linux_x86_64.whl</b></pre>
If this step fails, see
[Common Installation Problems](#common_installation_problems).
@@ -336,9 +336,9 @@ where:
* <tt>gcr.io/tensorflow/tensorflow:latest-devel</tt>, which is the latest
TensorFlow CPU Binary image plus source code.
* <tt>gcr.io/tensorflow/tensorflow:<i>version</i></tt>, which is the
- specified version (for example, 1.0.1) of TensorFlow CPU binary image.
+ specified version (for example, 1.1.0rc1) of TensorFlow CPU binary image.
* <tt>gcr.io/tensorflow/tensorflow:<i>version</i>-devel</tt>, which is
- the specified version (for example, 1.0.1) of the TensorFlow GPU
+ the specified version (for example, 1.1.0rc1) of the TensorFlow GPU
binary image plus source code.
<tt>gcr.io</tt> is the Google Container Registry. Note that some
@@ -456,7 +456,7 @@ Take the following steps to install TensorFlow in an Anaconda environment:
<pre>
(tensorflow)$ <b>pip install --ignore-installed --upgrade \
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp27-none-linux_x86_64.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.1.0rc1-cp27-none-linux_x86_64.whl</b></pre>
<a name="ValidateYourInstallation"></a>
@@ -624,14 +624,14 @@ This section documents the relevant values for Linux installations.
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.1.0rc1-cp27-none-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.1.0rc1-cp27-none-linux_x86_64.whl
</pre>
Note that GPU support requires the NVIDIA hardware and software described in
@@ -643,14 +643,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.1.0rc1-cp34-cp34m-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.1.0rc1-cp34-cp34m-linux_x86_64.whl
</pre>
Note that GPU support requires the NVIDIA hardware and software described in
@@ -662,14 +662,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.1.0rc1-cp35-cp35m-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.1.0rc1-cp35-cp35m-linux_x86_64.whl
</pre>
@@ -681,14 +681,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.0.1-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.1.0rc1-cp36-cp36m-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.1.0rc1-cp36-cp36m-linux_x86_64.whl
</pre>
diff --git a/tensorflow/docs_src/install/install_mac.md b/tensorflow/docs_src/install/install_mac.md
index 68ad3ceba9..c989b31ca4 100644
--- a/tensorflow/docs_src/install/install_mac.md
+++ b/tensorflow/docs_src/install/install_mac.md
@@ -163,7 +163,7 @@ Take the following steps to install TensorFlow with Virtualenv:
TensorFlow in the active Virtualenv is as follows:
<pre> $ <b>pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py2-none-any.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.1.0rc1-py2-none-any.whl</b></pre>
If you encounter installation problems, see
[Common Installation Problems](#CommonInstallationProblems).
@@ -286,7 +286,7 @@ take the following steps:
support, issue the following command:
<pre> $ <b>sudo pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py2-none-any.whl</b> </pre>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.1.0rc1-py2-none-any.whl</b> </pre>
If the preceding command fails, see
[Common installation problems](#CommonInstallationProblems).
@@ -398,7 +398,7 @@ Take the following steps to install TensorFlow in an Anaconda environment:
TensorFlow for Python 2.7:
<pre> (tensorflow)$ <b>pip install --ignore-installed --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py2-none-any.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.1.0rc1-py2-none-any.whl</b></pre>
<a name="ValidateYourInstallation"></a>
@@ -604,13 +604,13 @@ This section documents the relevant values for Mac OS installations.
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py2-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.1.0rc1-py2-none-any.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-1.0.1-py2-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-1.1.0rc1-py2-none-any.whl
</pre>
Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see
@@ -622,13 +622,13 @@ Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.0.1-py3-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.1.0rc1-py3-none-any.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-1.0.1-py3-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow_gpu-1.1.0rc1-py3-none-any.whl
</pre>
Requires CUDA toolkit 8.0 and CuDNN v5. For other versions, see
diff --git a/tensorflow/docs_src/install/install_sources.md b/tensorflow/docs_src/install/install_sources.md
index 25e428f574..7d452feafa 100644
--- a/tensorflow/docs_src/install/install_sources.md
+++ b/tensorflow/docs_src/install/install_sources.md
@@ -319,10 +319,10 @@ $ <b>bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pk
Invoke `pip install` to install that pip package.
The filename of the `.whl` file depends on your platform.
For example, the following command will install the pip package
-for TensorFlow 1.0.1 on Linux:
+for TensorFlow 1.1.0rc1 on Linux:
<pre>
-$ <b>sudo pip install /tmp/tensorflow_pkg/tensorflow-1.0.1-py2-none-any.whl</b>
+$ <b>sudo pip install /tmp/tensorflow_pkg/tensorflow-1.1.0rc1-py2-none-any.whl</b>
</pre>
## Validate your installation
diff --git a/tensorflow/docs_src/install/install_windows.md b/tensorflow/docs_src/install/install_windows.md
index 7a6f579341..7d3d13c34a 100644
--- a/tensorflow/docs_src/install/install_windows.md
+++ b/tensorflow/docs_src/install/install_windows.md
@@ -114,12 +114,12 @@ Take the following steps to install TensorFlow in an Anaconda environment:
environment. To install the CPU-only version of TensorFlow, enter the
following command:
- <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-1.0.1-cp35-cp35m-win_amd64.whl</b> </pre>
+ <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-1.1.0rc1-cp35-cp35m-win_amd64.whl</b> </pre>
To install the GPU version of TensorFlow, enter the following command
(on a single line):
- <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-1.0.1-cp35-cp35m-win_amd64.whl</b> </pre>
+ <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-1.1.0rc1-cp35-cp35m-win_amd64.whl</b> </pre>
## Validate your installation
diff --git a/tensorflow/java/README.md b/tensorflow/java/README.md
index fd24016884..42cd24a7df 100644
--- a/tensorflow/java/README.md
+++ b/tensorflow/java/README.md
@@ -34,26 +34,26 @@ That's all. As an example, to create a Maven project for the
1. Create a `pom.xml`:
```xml
-<project>
- <modelVersion>4.0.0</modelVersion>
- <groupId>org.myorg</groupId>
- <artifactId>label-image</artifactId>
- <version>1.0-SNAPSHOT</version>
- <properties>
- <exec.mainClass>org.tensorflow.examples.LabelImage</exec.mainClass>
- <!-- The LabelImage example code requires at least JDK 1.7. -->
- <!-- The maven compiler plugin defaults to a lower version -->
- <maven.compiler.source>1.7</maven.compiler.source>
- <maven.compiler.target>1.7</maven.compiler.target>
- </properties>
- <dependencies>
- <dependency>
- <groupId>org.tensorflow</groupId>
- <artifactId>tensorflow</artifactId>
- <version>1.1.0-rc1</version>
- </dependency>
- </dependencies>
-</project>
+ <project>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.myorg</groupId>
+ <artifactId>label-image</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ <properties>
+ <exec.mainClass>org.tensorflow.examples.LabelImage</exec.mainClass>
+ <!-- The LabelImage example code requires at least JDK 1.7. -->
+ <!-- The maven compiler plugin defaults to a lower version -->
+ <maven.compiler.source>1.7</maven.compiler.source>
+ <maven.compiler.target>1.7</maven.compiler.target>
+ </properties>
+ <dependencies>
+ <dependency>
+ <groupId>org.tensorflow</groupId>
+ <artifactId>tensorflow</artifactId>
+ <version>1.1.0-rc1</version>
+ </dependency>
+ </dependencies>
+ </project>
```
2. Download the [example source](https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/java/src/main/java/org/tensorflow/examples/LabelImage.java)
diff --git a/tensorflow/java/src/main/java/org/tensorflow/Shape.java b/tensorflow/java/src/main/java/org/tensorflow/Shape.java
index f6677e9a15..90d6cf7b85 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/Shape.java
+++ b/tensorflow/java/src/main/java/org/tensorflow/Shape.java
@@ -1,16 +1,17 @@
-// Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
package org.tensorflow;
diff --git a/tensorflow/java/src/main/native/operation_jni.h b/tensorflow/java/src/main/native/operation_jni.h
index fe14882dde..6292a48069 100644
--- a/tensorflow/java/src/main/native/operation_jni.h
+++ b/tensorflow/java/src/main/native/operation_jni.h
@@ -1,4 +1,3 @@
-
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/python/framework/tensor_util_test.py b/tensorflow/python/framework/tensor_util_test.py
index dfefc27f99..5eb5230404 100644
--- a/tensorflow/python/framework/tensor_util_test.py
+++ b/tensorflow/python/framework/tensor_util_test.py
@@ -18,8 +18,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import numpy as np
import sys
+import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
@@ -49,7 +49,7 @@ class TensorUtilTest(test.TestCase):
def testFloatN(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
@@ -67,7 +67,7 @@ class TensorUtilTest(test.TestCase):
def testFloatTyped(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
@@ -85,7 +85,7 @@ class TensorUtilTest(test.TestCase):
def testFloatTypeCoerce(self):
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtypes.float32)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
@@ -104,7 +104,7 @@ class TensorUtilTest(test.TestCase):
arr = np.asarray([10, 20, 30], dtype="int")
t = tensor_util.make_tensor_proto(arr, dtype=dtypes.float32)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
@@ -122,7 +122,7 @@ class TensorUtilTest(test.TestCase):
def testFloatSizes(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
@@ -140,7 +140,7 @@ class TensorUtilTest(test.TestCase):
def testFloatSizes2(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
@@ -168,7 +168,7 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "@$\000\000\000\000\000\000@4\000\000\000\000\000\000@>\000\000\000\000\000\000"
@@ -259,7 +259,7 @@ class TensorUtilTest(test.TestCase):
def testIntNDefaultType(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\000\000\000\\n\000\000\000\024\000\000\000\036\000\000\000("
@@ -329,7 +329,7 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(
[10, 20, 30], shape=[1, 3], dtype=dtypes.int64)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
@@ -347,7 +347,7 @@ class TensorUtilTest(test.TestCase):
def testLongNpArray(self):
t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
@@ -368,7 +368,7 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint32)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\025\000\000\000\026\000\000\000\027"
@@ -405,7 +405,7 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint16)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
@@ -422,7 +422,7 @@ class TensorUtilTest(test.TestCase):
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint16)
if sys.byteorder == "big":
- self.assertProtoEquals("""
+ self.assertProtoEquals("""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
diff --git a/tensorflow/python/kernel_tests/pooling_ops_test.py b/tensorflow/python/kernel_tests/pooling_ops_test.py
index 03291bbb0d..53a0d7d806 100644
--- a/tensorflow/python/kernel_tests/pooling_ops_test.py
+++ b/tensorflow/python/kernel_tests/pooling_ops_test.py
@@ -97,7 +97,7 @@ class PoolingTest(test.TestCase):
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
- with self.test_session(use_gpu=use_gpu) as sess:
+ with self.test_session(use_gpu=use_gpu):
t = constant_op.constant(x, shape=input_sizes, dtype=data_type)
if data_format == "NCHW":
t = test_util.NHWCToNCHW(t)
@@ -497,7 +497,7 @@ class PoolingTest(test.TestCase):
strides,
error_msg,
use_gpu=False):
- with self.test_session(use_gpu=use_gpu) as sess:
+ with self.test_session(use_gpu=use_gpu):
t = constant_op.constant(1.0, shape=in_size)
with self.assertRaisesRegexp(errors_impl.UnimplementedError, error_msg):
t = nn_ops.max_pool(
@@ -620,7 +620,7 @@ class PoolingTest(test.TestCase):
orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
tensor_input = [11.0, 12.0, 13.0, 14.0]
tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64))
- with self.test_session(use_gpu=True) as sess:
+ with self.test_session(use_gpu=True):
orig_in = constant_op.constant(orig_input, shape=[1, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[1, 2, 2, 1])
argmax = constant_op.constant(
@@ -952,7 +952,7 @@ class PoolingTest(test.TestCase):
expected_input_backprop, input_sizes, output_sizes,
window_rows, window_cols, row_stride, col_stride,
padding, use_gpu):
- with self.test_session(use_gpu=use_gpu) as sess:
+ with self.test_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(input_data, shape=input_sizes)
output_tensor = nn_ops.max_pool(input_tensor,
[1, window_rows, window_cols, 1],
diff --git a/tensorflow/python/ops/nn_grad.py b/tensorflow/python/ops/nn_grad.py
index f5e9550b97..9b765390b3 100644
--- a/tensorflow/python/ops/nn_grad.py
+++ b/tensorflow/python/ops/nn_grad.py
@@ -21,10 +21,10 @@ from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
-from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
@@ -330,7 +330,7 @@ def _EluGradGrad(op, grad):
return (gen_nn_ops._elu_grad(grad, op.outputs[0]),
array_ops.where(
x < 0., gen_nn_ops._elu_grad(grad, op.outputs[0] + 1),
- array_ops.zeros(shape = array_ops.shape(x), dtype = x.dtype)))
+ array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype)))
@ops.RegisterGradient("Relu6")
@@ -387,12 +387,13 @@ def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
- if grad_grad.op.type not in ('ZerosLike', 'Zeros'):
+ if grad_grad.op.type not in ("ZerosLike", "Zeros"):
logits = op.inputs[0]
softmax = nn_ops.softmax(logits)
- grad += ((grad_grad - array_ops.squeeze(math_ops.matmul(grad_grad[:, None, :],
- softmax[:, :, None]), axis=1)) * softmax)
+ grad += ((grad_grad - array_ops.squeeze(
+ math_ops.matmul(grad_grad[:, None, :],
+ softmax[:, :, None]), axis=1)) * softmax)
return grad, None
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index 2d44f55e2c..901d6a12f6 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -2044,7 +2044,7 @@ def conv1d(value, filters, stride, padding,
[1, filter_width, in_channels, out_channels].
The result is then reshaped back to
[batch, out_width, out_channels]
- (where out_width is a function of the stride and padding as in conv2d) and
+ \(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc
index 6c06a73943..b6d841f365 100644
--- a/tensorflow/stream_executor/cuda/cuda_dnn.cc
+++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc
@@ -1202,7 +1202,8 @@ class CudnnRnnSequenceTensorDescriptor
// Only the first one needs to be destroyed. All others are the same.
cudnnStatus_t status =
wrap::cudnnDestroyTensorDescriptor(parent_, handles_[0]);
- CUDNN_RETURN_IF_FAIL(status, "Failed to destroy sequence tensor descriptor");
+ CUDNN_RETURN_IF_FAIL(status,
+ "Failed to destroy sequence tensor descriptor");
}
const cudnnTensorDescriptor_t* handles() const {
diff --git a/tensorflow/tensorboard/plugins/debugger/BUILD b/tensorflow/tensorboard/plugins/debugger/BUILD
index 86254dc3aa..38aa719b9b 100644
--- a/tensorflow/tensorboard/plugins/debugger/BUILD
+++ b/tensorflow/tensorboard/plugins/debugger/BUILD
@@ -30,6 +30,7 @@ py_test(
srcs = ["debugger_plugin_test.py"],
main = "debugger_plugin_test.py",
srcs_version = "PY2AND3",
+ tags = ["no_pip"],
deps = [
":debugger_plugin",
"//tensorflow/core:protos_all_py",
diff --git a/tensorflow/tools/ci_build/update_version.sh b/tensorflow/tools/ci_build/update_version.sh
index cde0ab7909..429437f807 100755
--- a/tensorflow/tools/ci_build/update_version.sh
+++ b/tensorflow/tools/ci_build/update_version.sh
@@ -73,6 +73,7 @@ OLD_MINOR=$(cat ${VERSION_H} | grep -E "^#define TF_MINOR_VERSION [0-9]+" | \
cut -d ' ' -f 3)
OLD_PATCH=$(cat ${VERSION_H} | grep -E "^#define TF_PATCH_VERSION [[:alnum:]-]+" | \
cut -d ' ' -f 3)
+OLD_PIP_PATCH="${OLD_PATCH//-}"
sed -i -e "s/^#define TF_MAJOR_VERSION ${OLD_MAJOR}/#define TF_MAJOR_VERSION ${MAJOR}/g" ${VERSION_H}
sed -i -e "s/^#define TF_MINOR_VERSION ${OLD_MINOR}/#define TF_MINOR_VERSION ${MINOR}/g" ${VERSION_H}
@@ -92,6 +93,26 @@ check_existence file "${README_MD}"
sed -i -r -e "s/${OLD_MAJOR}\.${OLD_MINOR}\.([[:alnum:]]+)-/${MAJOR}.${MINOR}.${PIP_PATCH}-/g" "${README_MD}"
+# Update the install md files
+NEW_PIP_TAG=$MAJOR.$MINOR.$PIP_PATCH
+OLD_PIP_TAG=$OLD_MAJOR.$OLD_MINOR.$OLD_PIP_PATCH
+
+for file in ${TF_SRC_DIR}/docs_src/install/install_{linux,mac,windows,sources}.md
+do
+ sed -i "s/tensorflow-${OLD_PIP_TAG}/tensorflow-${NEW_PIP_TAG}/g" $file
+ sed -i "s/tensorflow_gpu-${OLD_PIP_TAG}/tensorflow_gpu-${NEW_PIP_TAG}/g" $file
+ sed -i "s/TensorFlow ${OLD_PIP_TAG}/TensorFlow ${NEW_PIP_TAG}/g" $file
+done
+
+NEW_TAG=$MAJOR.$MINOR.$PATCH
+OLD_TAG=$OLD_MAJOR.$OLD_MINOR.$OLD_PATCH
+
+for file in ${TF_SRC_DIR}/docs_src/install/install_{java,go,c}.md
+do
+ sed -i "s/x86_64-${OLD_TAG}/x86_64-${NEW_TAG}/g" $file
+ sed -i "s/libtensorflow-${OLD_TAG}.jar/libtensorflow-${NEW_TAG}.jar/g" $file
+ sed -i "s/<version>${OLD_TAG}<\/version>/<version>${NEW_TAG}<\/version>/g" $file
+done
# Updates to be made if there are major / minor version changes
MAJOR_MINOR_CHANGE=0
diff --git a/tensorflow/tools/docker/Dockerfile b/tensorflow/tools/docker/Dockerfile
index b61eb5db37..5b3f1f936a 100644
--- a/tensorflow/tools/docker/Dockerfile
+++ b/tensorflow/tools/docker/Dockerfile
@@ -66,4 +66,4 @@ EXPOSE 8888
WORKDIR "/notebooks"
-CMD ["/run_jupyter.sh"]
+CMD ["/run_jupyter.sh", "--allow-root"]
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index b2b39501cd..88876421f5 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -69,4 +69,4 @@ EXPOSE 8888
WORKDIR "/notebooks"
-CMD ["/run_jupyter.sh"]
+CMD ["/run_jupyter.sh", "--allow-root"]
diff --git a/tensorflow/tools/graph_transforms/quantize_nodes.cc b/tensorflow/tools/graph_transforms/quantize_nodes.cc
index 5d1c76834f..78078ab6ab 100644
--- a/tensorflow/tools/graph_transforms/quantize_nodes.cc
+++ b/tensorflow/tools/graph_transforms/quantize_nodes.cc
@@ -941,7 +941,7 @@ Status QuantizeNodes(const GraphDef& input_graph_def,
// keep interoperability with float ops.
TF_RETURN_IF_ERROR(RemoveRedundantQuantizations(deduped_graph_def, context,
output_graph_def));
- TF_RETURN_IF_ERROR(IsGraphValid(merged_graph_def));
+ TF_RETURN_IF_ERROR(IsGraphValid(*output_graph_def));
return Status::OK();
}
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index f591e50ac9..3253440a23 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -29,14 +29,14 @@ from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
-_VERSION = '1.1.0-rc0'
+_VERSION = '1.1.0-rc1'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
'six >= 1.10.0',
'protobuf >= 3.2.0',
'werkzeug >= 0.11.10',
- 'html5lib == 1.0b8',
+ 'html5lib == 0.9999999', # identical to 1.0b8
'markdown == 2.2.0',
'bleach == 1.5.0',
]
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 7bcdb1613d..93d4605ad8 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -271,11 +271,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
native.new_http_archive(
name="org_html5lib",
urls=[
- "http://bazel-mirror.storage.googleapis.com/github.com/html5lib/html5lib-python/archive/1.0b8.tar.gz",
- "https://github.com/html5lib/html5lib-python/archive/1.0b8.tar.gz",
+ "http://bazel-mirror.storage.googleapis.com/github.com/html5lib/html5lib-python/archive/0.9999999.tar.gz",
+ "https://github.com/html5lib/html5lib-python/archive/0.9999999.tar.gz", # identical to 1.0b8
],
- sha256="adb36c879264e8880b92589c4c4fe0814cd9d157b73328b14d728f48a6bab0a4",
- strip_prefix="html5lib-python-1.0b8",
+ sha256="184257f98539159a433e2a2197309657ae1283b4c44dbd9c87b2f02ff36adce8",
+ strip_prefix="html5lib-python-0.9999999",
build_file=str(Label("//third_party:html5lib.BUILD")),)
native.new_http_archive(