aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
-rw-r--r--CODEOWNERS2
-rw-r--r--README.md11
-rw-r--r--RELEASE.md21
-rw-r--r--tensorflow/compiler/tests/build_defs.bzl3
-rw-r--r--tensorflow/compiler/tests/plugin.bzl9
-rw-r--r--tensorflow/contrib/cmake/external/grpc.cmake6
-rw-r--r--tensorflow/contrib/cmake/tf_tests.cmake2
-rw-r--r--tensorflow/contrib/rnn/BUILD4
-rw-r--r--tensorflow/contrib/rnn/python/tools/checkpoint_convert.py8
-rw-r--r--tensorflow/contrib/rnn/python/tools/checkpoint_convert_test.py18
-rw-r--r--tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py2
-rw-r--r--tensorflow/contrib/signal/BUILD1
-rw-r--r--tensorflow/core/common_runtime/sycl/sycl_allocator.cc36
-rw-r--r--tensorflow/core/common_runtime/sycl/sycl_allocator.h13
-rw-r--r--tensorflow/core/kernels/mkl_conv_ops.cc7
-rw-r--r--tensorflow/core/ops/linalg_ops.cc2
-rw-r--r--tensorflow/core/platform/default/gpu_tracer.cc12
-rw-r--r--tensorflow/core/public/version.h6
-rw-r--r--tensorflow/docs_src/install/install_c.md2
-rw-r--r--tensorflow/docs_src/install/install_go.md2
-rw-r--r--tensorflow/docs_src/install/install_java.md18
-rw-r--r--tensorflow/docs_src/install/install_linux.md22
-rw-r--r--tensorflow/docs_src/install/install_mac.md10
-rw-r--r--tensorflow/docs_src/install/install_sources.md4
-rw-r--r--tensorflow/docs_src/install/install_windows.md4
-rw-r--r--tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java207
-rw-r--r--tensorflow/examples/android/src/org/tensorflow/demo/CameraConnectionFragment.java80
-rw-r--r--tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java100
-rw-r--r--tensorflow/examples/android/src/org/tensorflow/demo/DetectorActivity.java25
-rw-r--r--tensorflow/examples/android/src/org/tensorflow/demo/LegacyCameraConnectionFragment.java208
-rw-r--r--tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java155
-rw-r--r--tensorflow/go/example_inception_inference_test.go2
-rw-r--r--tensorflow/python/debug/lib/debug_data.py24
-rw-r--r--tensorflow/stream_executor/lib/demangle.cc2
-rw-r--r--tensorflow/tools/api/lib/python_object_to_proto_visitor.py2
-rwxr-xr-xtensorflow/tools/ci_build/builds/pip.sh2
-rw-r--r--tensorflow/tools/docker/notebooks/3_mnist_from_scratch.ipynb2
-rw-r--r--tensorflow/tools/gcs_test/python/gcs_smoke.py138
-rw-r--r--tensorflow/tools/pip_package/setup.py7
-rw-r--r--third_party/boringssl/add_boringssl_s390x.patch120
40 files changed, 930 insertions, 369 deletions
diff --git a/CODEOWNERS b/CODEOWNERS
index 69393c3775..1401951b86 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -17,7 +17,7 @@ tensorflow/contrib/data/* @mrry
tensorflow/contrib/distributions/* @jvdillon @langmore @rsepassi
tensorflow/contrib/factorization/* @agarwal-ashish @xavigonzalvo
tensorflow/contrib/ffmpeg/* @fredbertsch
-# NEED OWNERT: tensorflow/contrib/framework/*
+# NEED OWNER: tensorflow/contrib/framework/*
tensorflow/contrib/graph_editor/* @purpledog
# NEED OWNER: tensorflow/contrib/grid_rnn/*
tensorflow/contrib/hvx/* @satok16
diff --git a/README.md b/README.md
index 4e17182f81..a1f9dad22c 100644
--- a/README.md
+++ b/README.md
@@ -35,12 +35,11 @@ and discussion, and please direct specific questions to [Stack Overflow](https:/
People who are a little more adventurous can also try our nightly binaries:
-* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
-* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.3.0rc0-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.3.0rc0-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.3.0rc0-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
-* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
-* Mac GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.3.0rc0-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-mac/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.3.0rc0-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-mac-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-mac/))
-* Windows CPU-only: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows,PY=35/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow-1.3.0rc0-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows,PY=35/)) / [Python 3.6 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows,PY=36/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow-1.3.0rc0-cp36-cp36m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows,PY=36/))
-* Windows GPU: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows-gpu,PY=35/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow_gpu-1.3.0rc0-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows-gpu,PY=35/)) / [Python 3.6 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows-gpu,PY=36/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow_gpu-1.3.0rc0-cp36-cp36m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows-gpu,PY=36/))
+* Linux CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=cpu-slave/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/))
+* Linux GPU: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.3.0rc1-cp27-none-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=gpu-linux/)) / [Python 3.4](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.3.0rc1-cp34-cp34m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=gpu-linux/)) / [Python 3.5](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow_gpu-1.3.0rc1-cp35-cp35m-linux_x86_64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-linux-gpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3.5,label=gpu-linux/))
+* Mac CPU-only: [Python 2](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc1-py2-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/)) / [Python 3](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-1.3.0rc1-py3-none-any.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON3,label=mac-slave/))
+* Windows CPU-only: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows,PY=35/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow-1.3.0rc1-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows,PY=35/)) / [Python 3.6 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows,PY=36/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow-1.3.0rc1-cp36-cp36m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows,PY=36/))
+* Windows GPU: [Python 3.5 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows-gpu,PY=35/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow_gpu-1.3.0rc1-cp35-cp35m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows-gpu,PY=35/)) / [Python 3.6 64-bit](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows-gpu,PY=36/lastSuccessfulBuild/artifact/cmake_build/tf_python/dist/tensorflow_gpu-1.3.0rc1-cp36-cp36m-win_amd64.whl) ([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-win/M=windows-gpu,PY=36/))
* Android: [demo APK](https://ci.tensorflow.org/view/Nightly/job/nightly-android/lastSuccessfulBuild/artifact/out/tensorflow_demo.apk), [native libs](http://ci.tensorflow.org/view/Nightly/job/nightly-android/lastSuccessfulBuild/artifact/out/native/)
([build history](https://ci.tensorflow.org/view/Nightly/job/nightly-android/))
diff --git a/RELEASE.md b/RELEASE.md
index e7c086164a..da297b2e86 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,7 +1,13 @@
# Release 1.3.0
## Major Features and Improvements
-* Added canned estimators to Tensorflow library. List of added estimators: `DNNClassifier`, `DNNRegressor`, `LinearClassifer`, `LinearRegressor`, `DNNLinearCombinedClassifier`, `DNNLinearCombinedRegressor`.
+* Added canned estimators to Tensorflow library. List of added estimators:
+ * `DNNClassifier`
+ * `DNNRegressor`
+ * `LinearClassifier`
+ * `LinearRegressor`
+ * `DNNLinearCombinedClassifier`
+ * `DNNLinearCombinedRegressor`.
* All our prebuilt binaries have been built with cuDNN 6.
* Adds a file cache to the GCS filesystem with configurable max staleness for file contents. This permits caching of file contents across close/open boundaries.
* Added an axis parameter to `tf.gather`.
@@ -17,10 +23,14 @@
* Introduces base implementations of ClusterResolvers.
* Unify memory representations of TensorShape and PartialTensorShape. As a consequence, tensors now have a maximum of 254 dimensions, not 255.
* Changed references to LIBXSMM to use version 1.8.1.
-* TensorFlow Debugger (tfdbg): Display summaries of numeric tensor values with the `-s` flag to command `print_tensor` or `pt`.
+* TensorFlow Debugger (tfdbg):
+ * Display summaries of numeric tensor values with the `-s` flag to command `print_tensor` or `pt`.
+ * Display feed values with the `print_feed` or `pf` command and clickable links in the curses UI.
+ * Runtime profiler at the op level and the Python source line level with the `run -p` command.
* Initial release of the statistical distribution library `tf.distributions`.
* GPU kernels and speed improvements for for unary `tf.where` and `tf.nn.top_k`.
* Monotonic Attention wrappers added to `tf.contrib.seq2seq`.
+* Added `tf.contrib.signal`, a library for signal processing primitives.
## Breaking Changes to the API
* `tf.RewriterConfig` was removed from the Python API after being available in 1.2 release candidates (it was never in an actual release). Graph rewriting is still available, just not as `tf.RewriterConfig`. Instead add an explicit import.
@@ -33,7 +43,7 @@
* Adds FULLY_CONNECTED Op to tensorflow/contrib/lite/schema.fbs
## Bug Fixes and Other Changes
-* Fixes 'strides' and 'begin' dtype mismatch when slicing using int64 Tensor index in python.
+* Fixes `strides` and `begin` dtype mismatch when slicing using int64 Tensor index in python.
* Improved convolution padding documentation.
* Add a tag constant, gpu, to present graph with GPU support.
* `saved_model.utils` now support SparseTensors transparently.
@@ -65,8 +75,9 @@
* Framework now supports armv7, cocoapods.org now displays correct page.
* Script to create iOS framework for CocoaPods.
* Android releases of TensorFlow are now pushed to jcenter for easier integration into apps. See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/android/README.md for more details.
-* Fixed a bug that prevented tfdbg from functioning with multi-GPU setups.
-* Fixed a bug that prevented tfdbg from working with `tf.Session.make_callable`.
+* TensorFlow Debugger (tfdbg):
+ * Fixed a bug that prevented tfdbg from functioning with multi-GPU setups.
+ * Fixed a bug that prevented tfdbg from working with `tf.Session.make_callable`.
## Thanks to our Contributors
diff --git a/tensorflow/compiler/tests/build_defs.bzl b/tensorflow/compiler/tests/build_defs.bzl
index 0bde616521..a56c53de0f 100644
--- a/tensorflow/compiler/tests/build_defs.bzl
+++ b/tensorflow/compiler/tests/build_defs.bzl
@@ -59,6 +59,9 @@ def tf_xla_py_test(name, srcs=[], deps=[], tags=[], data=[], main=None,
backend_args += ["--test_device=" + plugins[backend]["device"],
"--types=" + plugins[backend]["types"]]
backend_tags += plugins[backend]["tags"]
+ backend_args += plugins[backend]["args"]
+ backend_deps += plugins[backend]["deps"]
+ backend_data += plugins[backend]["data"]
else:
fail("Unknown backend {}".format(backend))
diff --git a/tensorflow/compiler/tests/plugin.bzl b/tensorflow/compiler/tests/plugin.bzl
index b6eb7a9e39..fbc8781a3e 100644
--- a/tensorflow/compiler/tests/plugin.bzl
+++ b/tensorflow/compiler/tests/plugin.bzl
@@ -18,6 +18,13 @@
# git update-index --assume-unchanged tensorflow/compiler/tests/plugin.bzl
plugins = {
- #"poplar": {"device":"XLA_IPU", "types":"DT_FLOAT,DT_INT32", "tags":[]},
+ #"example": {
+ # "device":"XLA_MY_DEVICE",
+ # "types":"DT_FLOAT,DT_HALF,DT_INT32",
+ # "tags":[],
+ # "args":["--disabled_manifest=tensorflow/compiler/plugin/example/disabled_manifest.txt"],
+ # "data":["//tensorflow/compiler/plugin/example:disabled_manifest.txt"],
+ # "deps":[],
+ #},
}
diff --git a/tensorflow/contrib/cmake/external/grpc.cmake b/tensorflow/contrib/cmake/external/grpc.cmake
index 0740c38dd3..b06755afc2 100644
--- a/tensorflow/contrib/cmake/external/grpc.cmake
+++ b/tensorflow/contrib/cmake/external/grpc.cmake
@@ -21,9 +21,9 @@ set(GRPC_TAG 781fd6f6ea03645a520cd5c675da67ab61f87e4b)
if(WIN32)
set(grpc_STATIC_LIBRARIES
- ${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/$(Configuration)/grpc++_unsecure.lib
- ${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/$(Configuration)/grpc_unsecure.lib
- ${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/$(Configuration)/gpr.lib)
+ ${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/Release/grpc++_unsecure.lib
+ ${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/Release/grpc_unsecure.lib
+ ${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/Release/gpr.lib)
else()
set(grpc_STATIC_LIBRARIES
${CMAKE_CURRENT_BINARY_DIR}/grpc/src/grpc/libgrpc++_unsecure.a
diff --git a/tensorflow/contrib/cmake/tf_tests.cmake b/tensorflow/contrib/cmake/tf_tests.cmake
index 216c10dc32..a4ee010fce 100644
--- a/tensorflow/contrib/cmake/tf_tests.cmake
+++ b/tensorflow/contrib/cmake/tf_tests.cmake
@@ -183,6 +183,8 @@ if (tensorflow_BUILD_PYTHON_TESTS)
# Loading resources in contrib doesn't seem to work on Windows
"${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/client/random_forest_test.py"
"${tensorflow_source_dir}/tensorflow/contrib/tensor_forest/python/tensor_forest_test.py"
+ # Test is flaky on Windows GPU builds (b/38283730).
+ "${tensorflow_source_dir}/tensorflow/contrib/factorization/python/ops/gmm_test.py"
)
if (WIN32)
set(tf_test_src_py_exclude
diff --git a/tensorflow/contrib/rnn/BUILD b/tensorflow/contrib/rnn/BUILD
index 784ac96149..1232e056f6 100644
--- a/tensorflow/contrib/rnn/BUILD
+++ b/tensorflow/contrib/rnn/BUILD
@@ -26,7 +26,9 @@ load(
tf_custom_op_py_library(
name = "rnn_py",
- srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
+ srcs = ["__init__.py"] + glob(["python/ops/*.py"]) + [
+ "python/tools/checkpoint_convert.py",
+ ],
dso = [
":python/ops/_gru_ops.so",
":python/ops/_lstm_ops.so",
diff --git a/tensorflow/contrib/rnn/python/tools/checkpoint_convert.py b/tensorflow/contrib/rnn/python/tools/checkpoint_convert.py
index da129b68a6..5536a01328 100644
--- a/tensorflow/contrib/rnn/python/tools/checkpoint_convert.py
+++ b/tensorflow/contrib/rnn/python/tools/checkpoint_convert.py
@@ -53,7 +53,9 @@ from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
-_RNN_NAME_REPLACEMENTS = collections.OrderedDict([
+# Mapping between old <=> new names. Externalized so that user scripts that
+# may need to consume multiple checkpoint formats can use this metadata.
+RNN_NAME_REPLACEMENTS = collections.OrderedDict([
############################################################################
# contrib/rnn/python/ops/core_rnn_cell_impl.py
# BasicRNNCell
@@ -149,10 +151,10 @@ _RNN_SHARDED_NAME_REPLACEMENTS = collections.OrderedDict([
def _rnn_name_replacement(var_name):
- for pattern in _RNN_NAME_REPLACEMENTS:
+ for pattern in RNN_NAME_REPLACEMENTS:
if pattern in var_name:
old_var_name = var_name
- var_name = var_name.replace(pattern, _RNN_NAME_REPLACEMENTS[pattern])
+ var_name = var_name.replace(pattern, RNN_NAME_REPLACEMENTS[pattern])
logging.info('Converted: %s --> %s' % (old_var_name, var_name))
break
return var_name
diff --git a/tensorflow/contrib/rnn/python/tools/checkpoint_convert_test.py b/tensorflow/contrib/rnn/python/tools/checkpoint_convert_test.py
index e2fc2fa80e..a9e7949463 100644
--- a/tensorflow/contrib/rnn/python/tools/checkpoint_convert_test.py
+++ b/tensorflow/contrib/rnn/python/tools/checkpoint_convert_test.py
@@ -43,8 +43,8 @@ class CheckpointConvertTest(test.TestCase):
os.remove(file_name)
def testReplacementDictsContainUniqueAndNonEmptyVariableNames(self):
- for old_name in checkpoint_convert._RNN_NAME_REPLACEMENTS:
- new_name = checkpoint_convert._RNN_NAME_REPLACEMENTS[old_name]
+ for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS:
+ new_name = checkpoint_convert.RNN_NAME_REPLACEMENTS[old_name]
self.assertTrue(old_name)
self.assertTrue(new_name)
self.assertNotEqual(old_name, new_name)
@@ -56,7 +56,7 @@ class CheckpointConvertTest(test.TestCase):
def testConversionFromV2WithConvertedVariableNamesSucceeds(self):
variables.Variable(10.0, name="a")
- for old_name in checkpoint_convert._RNN_NAME_REPLACEMENTS:
+ for old_name in checkpoint_convert.RNN_NAME_REPLACEMENTS:
variables.Variable(20.0, name=old_name)
with session.Session() as sess:
saver = saver_lib.Saver()
@@ -67,9 +67,9 @@ class CheckpointConvertTest(test.TestCase):
self._old_ckpt_path, self._new_ckpt_path)
self.assertTrue(glob.glob(self._new_ckpt_path + "*"))
self.assertItemsEqual(
- ["a"] + list(checkpoint_convert._RNN_NAME_REPLACEMENTS.values()),
+ ["a"] + list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values()),
new_var_map.keys())
- self.assertEqual(checkpoint_convert._RNN_NAME_REPLACEMENTS, conversion_map)
+ self.assertEqual(checkpoint_convert.RNN_NAME_REPLACEMENTS, conversion_map)
def testConversionFromV2WithoutConvertedVariableNamesSucceeds(self):
variables.Variable(10.0, name="a")
@@ -86,7 +86,7 @@ class CheckpointConvertTest(test.TestCase):
def testConversionToV1Succeeds(self):
variables.Variable(10.0, name="a")
variables.Variable(
- 20.0, name=list(checkpoint_convert._RNN_NAME_REPLACEMENTS.keys())[-1])
+ 20.0, name=list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1])
with session.Session() as sess:
saver = saver_lib.Saver()
@@ -96,11 +96,11 @@ class CheckpointConvertTest(test.TestCase):
new_var_map, conversion_map = checkpoint_convert.convert_names(
self._old_ckpt_path, self._new_ckpt_path, write_v1_checkpoint=True)
self.assertItemsEqual(
- ["a", list(checkpoint_convert._RNN_NAME_REPLACEMENTS.values())[-1]],
+ ["a", list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]],
new_var_map.keys())
self.assertEqual(
- {list(checkpoint_convert._RNN_NAME_REPLACEMENTS.keys())[-1]:
- list(checkpoint_convert._RNN_NAME_REPLACEMENTS.values())[-1]},
+ {list(checkpoint_convert.RNN_NAME_REPLACEMENTS.keys())[-1]:
+ list(checkpoint_convert.RNN_NAME_REPLACEMENTS.values())[-1]},
conversion_map)
diff --git a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
index a162a919cf..c434113520 100644
--- a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
+++ b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
@@ -299,7 +299,7 @@ def _luong_score(query, keys, scale):
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_time, 1, max_time].
- # we then squeee out the center singleton dimension.
+ # we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
diff --git a/tensorflow/contrib/signal/BUILD b/tensorflow/contrib/signal/BUILD
index c3982cb169..118d72f020 100644
--- a/tensorflow/contrib/signal/BUILD
+++ b/tensorflow/contrib/signal/BUILD
@@ -73,6 +73,7 @@ cuda_py_tests(
"//tensorflow/python:platform_test",
"//tensorflow/python:spectral_ops_test_util",
],
+ tags = ["nomac"],
)
cuda_py_tests(
diff --git a/tensorflow/core/common_runtime/sycl/sycl_allocator.cc b/tensorflow/core/common_runtime/sycl/sycl_allocator.cc
index 485e5397e8..0ddd4dce51 100644
--- a/tensorflow/core/common_runtime/sycl/sycl_allocator.cc
+++ b/tensorflow/core/common_runtime/sycl/sycl_allocator.cc
@@ -19,6 +19,14 @@ limitations under the License.
namespace tensorflow {
+SYCLAllocator::SYCLAllocator(Eigen::QueueInterface *queue)
+ : sycl_device_(new Eigen::SyclDevice(queue)) {
+ cl::sycl::queue& sycl_queue = sycl_device_->sycl_queue();
+ const cl::sycl::device& device = sycl_queue.get_device();
+ stats_.bytes_limit =
+ device.get_info<cl::sycl::info::device::max_mem_alloc_size>();
+}
+
SYCLAllocator::~SYCLAllocator() {
if(sycl_device_) {
delete sycl_device_;
@@ -30,18 +38,44 @@ string SYCLAllocator::Name() { return "device:SYCL"; }
void *SYCLAllocator::AllocateRaw(size_t alignment, size_t num_bytes) {
assert(sycl_device_);
if (num_bytes == 0) {
- return sycl_device_->allocate(1);
+ // Cannot allocate no bytes in SYCL, so instead allocate a single byte
+ num_bytes = 1;
}
auto p = sycl_device_->allocate(num_bytes);
+ const auto& allocated_buffer = sycl_device_->get_sycl_buffer(p);
+ const std::size_t bytes_allocated = allocated_buffer.get_range().size();
+
+ mutex_lock lock(mu_);
+ ++stats_.num_allocs;
+ stats_.bytes_in_use += bytes_allocated;
+ stats_.max_bytes_in_use =
+ std::max<int64>(stats_.max_bytes_in_use, stats_.bytes_in_use);
+ stats_.max_alloc_size =
+ std::max<int64>(stats_.max_alloc_size, bytes_allocated);
+
return p;
}
void SYCLAllocator::DeallocateRaw(void *ptr) {
+ const auto& buffer_to_delete = sycl_device_->get_sycl_buffer(ptr);
+ const std::size_t dealloc_size = buffer_to_delete.get_range().size();
+ mutex_lock lock(mu_);
+ stats_.bytes_in_use -= dealloc_size;
if (sycl_device_) {
sycl_device_->deallocate(ptr);
}
}
+void SYCLAllocator::GetStats(AllocatorStats* stats) {
+ mutex_lock lock(mu_);
+ *stats = stats_;
+}
+
+size_t SYCLAllocator::RequestedSize(void* ptr) {
+ const auto& buffer = sycl_device_->get_sycl_buffer(ptr);
+ return buffer.get_size();
+}
+
} // namespace tensorflow
#endif // TENSORFLOW_USE_SYCL
diff --git a/tensorflow/core/common_runtime/sycl/sycl_allocator.h b/tensorflow/core/common_runtime/sycl/sycl_allocator.h
index 8668cba06a..3597afa5ba 100644
--- a/tensorflow/core/common_runtime/sycl/sycl_allocator.h
+++ b/tensorflow/core/common_runtime/sycl/sycl_allocator.h
@@ -21,6 +21,7 @@ limitations under the License.
#define TENSORFLOW_COMMON_RUNTIME_SYCL_SYCL_ALLOCATOR_H_
#include "tensorflow/core/framework/allocator.h"
+#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
@@ -28,7 +29,7 @@ namespace tensorflow {
class SYCLAllocator : public Allocator {
public:
- SYCLAllocator(Eigen::QueueInterface *queue) : sycl_device_(new Eigen::SyclDevice(queue)) {}
+ SYCLAllocator(Eigen::QueueInterface *queue);
virtual ~SYCLAllocator() override;
string Name() override;
void *AllocateRaw(size_t alignment, size_t num_bytes) override;
@@ -37,10 +38,20 @@ class SYCLAllocator : public Allocator {
virtual bool ShouldAllocateEmptyTensors() override final { return true; }
void Synchronize() { sycl_device_->synchronize(); }
bool Ok() { return sycl_device_->ok(); }
+ void GetStats(AllocatorStats* stats) override;
+ // The SYCL buffers keep track of their size, so we already have tracking.
+ bool TracksAllocationSizes() override { return true; }
+ // Get the size of the corresponding SYCL buffer.
+ // Implementing this also provides an implementation of
+ // AllocatedSize(void* ptr) by default.
+ size_t RequestedSize(void* ptr) override;
Eigen::SyclDevice* getSyclDevice() { return sycl_device_; }
private:
Eigen::SyclDevice *sycl_device_; // owned
+ mutable mutex mu_;
+ AllocatorStats stats_ GUARDED_BY(mu_);
+
TF_DISALLOW_COPY_AND_ASSIGN(SYCLAllocator);
};
diff --git a/tensorflow/core/kernels/mkl_conv_ops.cc b/tensorflow/core/kernels/mkl_conv_ops.cc
index 135dd254a4..45d22556aa 100644
--- a/tensorflow/core/kernels/mkl_conv_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_ops.cc
@@ -165,8 +165,11 @@ class MklConv2DOp : public OpKernel {
// If there is nothing to compute, return.
if (out_shape.num_elements() == 0) {
- // TODO(jbobba): Verify correctness here
- // Need semantics for Null MKL tensor
+ // Nothing to do, allocate output tensor and return
+ MklShape mkl_output_mkl_shape;
+ mkl_output_mkl_shape.SetMklTensor(false);
+ AllocateOutputSetMklShape(context, 0, &output, input.shape(),
+ mkl_output_mkl_shape);
return;
}
diff --git a/tensorflow/core/ops/linalg_ops.cc b/tensorflow/core/ops/linalg_ops.cc
index b0f95c91fd..52f69f76a4 100644
--- a/tensorflow/core/ops/linalg_ops.cc
+++ b/tensorflow/core/ops/linalg_ops.cc
@@ -204,7 +204,7 @@ REGISTER_OP("MatrixDeterminant")
return Status::OK();
})
.Doc(R"doc(
-Computes the determinant of one ore more square matrices.
+Computes the determinant of one or more square matrices.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
form square matrices. The output is a tensor containing the determinants
diff --git a/tensorflow/core/platform/default/gpu_tracer.cc b/tensorflow/core/platform/default/gpu_tracer.cc
index 7375287e1b..86ab70afdd 100644
--- a/tensorflow/core/platform/default/gpu_tracer.cc
+++ b/tensorflow/core/platform/default/gpu_tracer.cc
@@ -54,6 +54,8 @@ const char *getMemcpyKindString(CUpti_ActivityMemcpyKind kind) {
return "DtoD";
case CUPTI_ACTIVITY_MEMCPY_KIND_HTOH:
return "HtoH";
+ case CUPTI_ACTIVITY_MEMCPY_KIND_PTOP:
+ return "PtoP";
default:
break;
}
@@ -544,6 +546,15 @@ void GPUTracerImpl::ActivityCallback(const CUpti_Activity &record) {
memcpy->dstKind, memcpy->bytes});
break;
}
+ case CUPTI_ACTIVITY_KIND_MEMCPY2: {
+ if (memcpy_records_.size() >= kMaxRecords) return;
+ auto *memcpy = reinterpret_cast<const CUpti_ActivityMemcpy2 *>(&record);
+ memcpy_records_.push_back(MemcpyRecord{
+ memcpy->start, memcpy->end, memcpy->deviceId, memcpy->streamId,
+ memcpy->correlationId, memcpy->copyKind, memcpy->srcKind,
+ memcpy->dstKind, memcpy->bytes});
+ break;
+ }
case CUPTI_ACTIVITY_KIND_KERNEL:
case CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL: {
if (kernel_records_.size() >= kMaxRecords) return;
@@ -554,6 +565,7 @@ void GPUTracerImpl::ActivityCallback(const CUpti_Activity &record) {
break;
}
default:
+ VLOG(1) << "ActivityCallback unhandled kind";
break;
}
}
diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h
index 9ba6b0ed5a..4626ab8ea5 100644
--- a/tensorflow/core/public/version.h
+++ b/tensorflow/core/public/version.h
@@ -19,12 +19,12 @@ limitations under the License.
// TensorFlow uses semantic versioning, see http://semver.org/.
#define TF_MAJOR_VERSION 1
-#define TF_MINOR_VERSION 3
-#define TF_PATCH_VERSION 0
+#define TF_MINOR_VERSION 2
+#define TF_PATCH_VERSION 1
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
// "-beta", "-rc", "-rc.1")
-#define TF_VERSION_SUFFIX "-rc0"
+#define TF_VERSION_SUFFIX "-rc1"
#define TF_STR_HELPER(x) #x
#define TF_STR(x) TF_STR_HELPER(x)
diff --git a/tensorflow/docs_src/install/install_c.md b/tensorflow/docs_src/install/install_c.md
index b83113438c..1426fb3e02 100644
--- a/tensorflow/docs_src/install/install_c.md
+++ b/tensorflow/docs_src/install/install_c.md
@@ -35,7 +35,7 @@ enable TensorFlow for C:
OS="linux" # Change to "darwin" for Mac OS
TARGET_DIRECTORY="/usr/local"
curl -L \
- "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-${OS}-x86_64-1.3.0-rc0.tar.gz" |
+ "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-${OS}-x86_64-1.3.0-rc1.tar.gz" |
sudo tar -C $TARGET_DIRECTORY -xz
The `tar` command extracts the TensorFlow C library into the `lib`
diff --git a/tensorflow/docs_src/install/install_go.md b/tensorflow/docs_src/install/install_go.md
index 068a42d16b..f0299f516d 100644
--- a/tensorflow/docs_src/install/install_go.md
+++ b/tensorflow/docs_src/install/install_go.md
@@ -35,7 +35,7 @@ steps to install this library and enable TensorFlow for Go:
TF_TYPE="cpu" # Change to "gpu" for GPU support
TARGET_DIRECTORY='/usr/local'
curl -L \
- "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-$(go env GOOS)-x86_64-1.3.0-rc0.tar.gz" |
+ "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-$(go env GOOS)-x86_64-1.3.0-rc1.tar.gz" |
sudo tar -C $TARGET_DIRECTORY -xz
The `tar` command extracts the TensorFlow C library into the `lib`
diff --git a/tensorflow/docs_src/install/install_java.md b/tensorflow/docs_src/install/install_java.md
index bf0d03903d..2d177d7ffd 100644
--- a/tensorflow/docs_src/install/install_java.md
+++ b/tensorflow/docs_src/install/install_java.md
@@ -34,7 +34,7 @@ following to the project's `pom.xml` to use the TensorFlow Java APIs:
<dependency>
<groupId>org.tensorflow</groupId>
<artifactId>tensorflow</artifactId>
- <version>1.3.0-rc0</version>
+ <version>1.3.0-rc1</version>
</dependency>
```
@@ -63,7 +63,7 @@ As an example, these steps will create a Maven project that uses TensorFlow:
<dependency>
<groupId>org.tensorflow</groupId>
<artifactId>tensorflow</artifactId>
- <version>1.3.0-rc0</version>
+ <version>1.3.0-rc1</version>
</dependency>
</dependencies>
</project>
@@ -122,7 +122,7 @@ refer to the simpler instructions above instead.
Take the following steps to install TensorFlow for Java on Linux or Mac OS:
1. Download
- [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.3.0-rc0.jar),
+ [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.3.0-rc1.jar),
which is the TensorFlow Java Archive (JAR).
2. Decide whether you will run TensorFlow for Java on CPU(s) only or with
@@ -141,7 +141,7 @@ Take the following steps to install TensorFlow for Java on Linux or Mac OS:
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
mkdir -p ./jni
curl -L \
- "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-${TF_TYPE}-${OS}-x86_64-1.3.0-rc0.tar.gz" |
+ "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-${TF_TYPE}-${OS}-x86_64-1.3.0-rc1.tar.gz" |
tar -xz -C ./jni
### Install on Windows
@@ -149,10 +149,10 @@ Take the following steps to install TensorFlow for Java on Linux or Mac OS:
Take the following steps to install TensorFlow for Java on Windows:
1. Download
- [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.3.0-rc0.jar),
+ [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.3.0-rc1.jar),
which is the TensorFlow Java Archive (JAR).
2. Download the following Java Native Interface (JNI) file appropriate for
- [TensorFlow for Java on Windows](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-cpu-windows-x86_64-1.3.0-rc0.zip).
+ [TensorFlow for Java on Windows](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-cpu-windows-x86_64-1.3.0-rc1.zip).
3. Extract this .zip file.
@@ -200,7 +200,7 @@ must be part of your `classpath`. For example, you can include the
downloaded `.jar` in your `classpath` by using the `-cp` compilation flag
as follows:
-<pre><b>javac -cp libtensorflow-1.3.0-rc0.jar HelloTF.java</b></pre>
+<pre><b>javac -cp libtensorflow-1.3.0-rc1.jar HelloTF.java</b></pre>
### Running
@@ -214,11 +214,11 @@ two files are available to the JVM:
For example, the following command line executes the `HelloTF` program on Linux
and Mac OS X:
-<pre><b>java -cp libtensorflow-1.3.0-rc0.jar:. -Djava.library.path=./jni HelloTF</b></pre>
+<pre><b>java -cp libtensorflow-1.3.0-rc1.jar:. -Djava.library.path=./jni HelloTF</b></pre>
And the following command line executes the `HelloTF` program on Windows:
-<pre><b>java -cp libtensorflow-1.3.0-rc0.jar;. -Djava.library.path=jni HelloTF</b></pre>
+<pre><b>java -cp libtensorflow-1.3.0-rc1.jar;. -Djava.library.path=jni HelloTF</b></pre>
If the program prints <tt>Hello from <i>version</i></tt>, you've successfully
installed TensorFlow for Java and are ready to use the API. If the program
diff --git a/tensorflow/docs_src/install/install_linux.md b/tensorflow/docs_src/install/install_linux.md
index 55e1a35a4d..4885bb12c5 100644
--- a/tensorflow/docs_src/install/install_linux.md
+++ b/tensorflow/docs_src/install/install_linux.md
@@ -172,7 +172,7 @@ Take the following steps to install TensorFlow with Virtualenv:
virtualenv environment:
<pre>(tensorflow)$ <b>pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc0-cp34-cp34m-linux_x86_64.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc1-cp34-cp34m-linux_x86_64.whl</b></pre>
If you encounter installation problems, see
[Common Installation Problems](#common_installation_problems).
@@ -277,7 +277,7 @@ take the following steps:
<pre>
$ <b>sudo pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc0-cp34-cp34m-linux_x86_64.whl</b>
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc1-cp34-cp34m-linux_x86_64.whl</b>
</pre>
If this step fails, see
@@ -464,7 +464,7 @@ Take the following steps to install TensorFlow in an Anaconda environment:
<pre>
(tensorflow)$ <b>pip install --ignore-installed --upgrade \
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc0-cp34-cp34m-linux_x86_64.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc1-cp34-cp34m-linux_x86_64.whl</b></pre>
<a name="ValidateYourInstallation"></a>
@@ -632,14 +632,14 @@ This section documents the relevant values for Linux installations.
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc0-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc1-cp27-none-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.3.0rc0-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.3.0rc1-cp27-none-linux_x86_64.whl
</pre>
Note that GPU support requires the NVIDIA hardware and software described in
@@ -651,14 +651,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc0-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc1-cp34-cp34m-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.3.0rc0-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.3.0rc1-cp34-cp34m-linux_x86_64.whl
</pre>
Note that GPU support requires the NVIDIA hardware and software described in
@@ -670,14 +670,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc0-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc1-cp35-cp35m-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.3.0rc0-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.3.0rc1-cp35-cp35m-linux_x86_64.whl
</pre>
@@ -689,14 +689,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc0-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.3.0rc1-cp36-cp36m-linux_x86_64.whl
</pre>
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.3.0rc0-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.3.0rc1-cp36-cp36m-linux_x86_64.whl
</pre>
diff --git a/tensorflow/docs_src/install/install_mac.md b/tensorflow/docs_src/install/install_mac.md
index 0a17b6bf67..6fa63dd14c 100644
--- a/tensorflow/docs_src/install/install_mac.md
+++ b/tensorflow/docs_src/install/install_mac.md
@@ -109,7 +109,7 @@ Take the following steps to install TensorFlow with Virtualenv:
TensorFlow in the active Virtualenv is as follows:
<pre> $ <b>pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc0-py2-none-any.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc1-py2-none-any.whl</b></pre>
If you encounter installation problems, see
[Common Installation Problems](#common-installation-problems).
@@ -230,7 +230,7 @@ take the following steps:
issue the following command:
<pre> $ <b>sudo pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc0-py2-none-any.whl</b> </pre>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc1-py2-none-any.whl</b> </pre>
If the preceding command fails, see
[installation problems](#common-installation-problems).
@@ -339,7 +339,7 @@ Take the following steps to install TensorFlow in an Anaconda environment:
TensorFlow for Python 2.7:
<pre> (tensorflow)$ <b>pip install --ignore-installed --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc0-py2-none-any.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc1-py2-none-any.whl</b></pre>
<a name="ValidateYourInstallation"></a>
@@ -512,7 +512,7 @@ This section documents the relevant values for Mac OS installations.
<pre>
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc0-py2-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc1-py2-none-any.whl
</pre>
@@ -520,7 +520,7 @@ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc0-py2-none-a
<pre>
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc0-py3-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.3.0rc1-py3-none-any.whl
</pre>
diff --git a/tensorflow/docs_src/install/install_sources.md b/tensorflow/docs_src/install/install_sources.md
index 63c8c625b0..4ac5fbc143 100644
--- a/tensorflow/docs_src/install/install_sources.md
+++ b/tensorflow/docs_src/install/install_sources.md
@@ -342,10 +342,10 @@ Invoke `pip install` to install that pip package.
The filename of the `.whl` file depends on your platform.
For example, the following command will install the pip package
-for TensorFlow 1.3.0rc0 on Linux:
+for TensorFlow 1.3.0rc1 on Linux:
<pre>
-$ <b>sudo pip install /tmp/tensorflow_pkg/tensorflow-1.3.0rc0-py2-none-any.whl</b>
+$ <b>sudo pip install /tmp/tensorflow_pkg/tensorflow-1.3.0rc1-py2-none-any.whl</b>
</pre>
## Validate your installation
diff --git a/tensorflow/docs_src/install/install_windows.md b/tensorflow/docs_src/install/install_windows.md
index 58749a53f1..2895438f46 100644
--- a/tensorflow/docs_src/install/install_windows.md
+++ b/tensorflow/docs_src/install/install_windows.md
@@ -115,12 +115,12 @@ Take the following steps to install TensorFlow in an Anaconda environment:
environment. To install the CPU-only version of TensorFlow, enter the
following command:
- <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-1.3.0rc0-cp35-cp35m-win_amd64.whl</b> </pre>
+ <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-1.3.0rc1-cp35-cp35m-win_amd64.whl</b> </pre>
To install the GPU version of TensorFlow, enter the following command
(on a single line):
- <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-1.3.0rc0-cp35-cp35m-win_amd64.whl</b> </pre>
+ <pre>(tensorflow)C:\> <b>pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/gpu/tensorflow_gpu-1.3.0rc1-cp35-cp35m-win_amd64.whl</b> </pre>
## Validate your installation
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
index 27d7e41487..b9542582e6 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
@@ -19,22 +19,37 @@ package org.tensorflow.demo;
import android.Manifest;
import android.app.Activity;
import android.app.Fragment;
+import android.content.Context;
import android.content.pm.PackageManager;
+import android.graphics.Bitmap;
+import android.hardware.Camera;
+import android.hardware.camera2.CameraAccessException;
+import android.hardware.camera2.CameraCharacteristics;
+import android.hardware.camera2.CameraManager;
+import android.hardware.camera2.params.StreamConfigurationMap;
+import android.media.Image;
import android.media.Image.Plane;
+import android.media.ImageReader;
import android.media.ImageReader.OnImageAvailableListener;
import android.os.Build;
import android.os.Bundle;
import android.os.Handler;
import android.os.HandlerThread;
+import android.os.Trace;
import android.util.Size;
import android.view.KeyEvent;
import android.view.WindowManager;
import android.widget.Toast;
import java.nio.ByteBuffer;
+
+import org.tensorflow.demo.env.ImageUtils;
import org.tensorflow.demo.env.Logger;
+
+// Explicit import needed for internal Google builds.
import org.tensorflow.demo.R;
-public abstract class CameraActivity extends Activity implements OnImageAvailableListener {
+public abstract class CameraActivity extends Activity implements OnImageAvailableListener, Camera.
+ PreviewCallback {
private static final Logger LOGGER = new Logger();
private static final int PERMISSIONS_REQUEST = 1;
@@ -46,6 +61,20 @@ public abstract class CameraActivity extends Activity implements OnImageAvailabl
private Handler handler;
private HandlerThread handlerThread;
+ private boolean useCamera2API;
+ protected Bitmap rgbFrameBitmap = null;
+ private int[] rgbBytes = null;
+ protected int previewWidth = 0;
+ protected int previewHeight = 0;
+ protected Bitmap croppedBitmap = null;
+ protected static final boolean SAVE_PREVIEW_BITMAP = false;
+ protected long lastProcessingTimeMs;
+ protected Bitmap cropCopyBitmap;
+ protected ResultsView resultsView;
+ protected boolean computing = false;
+ protected Runnable postInferenceCallback;
+ protected byte[][] yuvBytes=new byte[3][];
+ protected int yRowStride;
@Override
protected void onCreate(final Bundle savedInstanceState) {
@@ -62,6 +91,93 @@ public abstract class CameraActivity extends Activity implements OnImageAvailabl
}
}
+ /**
+ * Callback for android.hardware.Camera API
+ */
+ @Override
+ public void onPreviewFrame(final byte[] bytes, final Camera camera) {
+ if (computing) {
+ return;
+ }
+ computing = true;
+ yuvBytes[0] = bytes;
+ try {
+ // Initialize the storage bitmaps once when the resolution is known.
+ if (rgbBytes == null) {
+ Camera.Size previewSize = camera.getParameters().getPreviewSize();
+ previewHeight = previewSize.height;
+ previewWidth = previewSize.width;
+ rgbBytes = new int[previewWidth * previewHeight];
+ onPreviewSizeChosen(new Size(previewSize.width, previewSize.height), 90);
+ }
+ ImageUtils.convertYUV420SPToARGB8888(bytes, rgbBytes, previewWidth, previewHeight, false);
+ } catch (final Exception e) {
+ LOGGER.e(e, "Exception!");
+ return;
+ }
+ postInferenceCallback = new Runnable() {
+ @Override
+ public void run() {
+ camera.addCallbackBuffer(bytes);
+ }
+ };
+ processImageRGBbytes(rgbBytes);
+ }
+
+ /**
+ * Callback for Camera2 API
+ */
+ @Override
+ public void onImageAvailable(final ImageReader reader) {
+ Image image = null;
+ // We need to wait until we have some size from onPreviewSizeChosen
+ if (previewWidth == 0 || previewHeight == 0) {
+ return;
+ }
+ rgbBytes = new int[previewWidth * previewHeight];
+ try {
+ image = reader.acquireLatestImage();
+
+ if (image == null) {
+ return;
+ }
+
+ if (computing) {
+ image.close();
+ return;
+ }
+ computing = true;
+ Trace.beginSection("imageAvailable");
+ final Plane[] planes = image.getPlanes();
+ fillBytes(planes, yuvBytes);
+ yRowStride = planes[0].getRowStride();
+ final int uvRowStride = planes[1].getRowStride();
+ final int uvPixelStride = planes[1].getPixelStride();
+ ImageUtils.convertYUV420ToARGB8888(
+ yuvBytes[0],
+ yuvBytes[1],
+ yuvBytes[2],
+ rgbBytes,
+ previewWidth,
+ previewHeight,
+ yRowStride,
+ uvRowStride,
+ uvPixelStride,
+ false);
+ image.close();
+
+ } catch (final Exception e) {
+ if (image != null) {
+ image.close();
+ }
+ LOGGER.e(e, "Exception!");
+ Trace.endSection();
+ return;
+ }
+ processImageRGBbytes(rgbBytes);
+ Trace.endSection();
+ }
+
@Override
public synchronized void onStart() {
LOGGER.d("onStart " + this);
@@ -123,8 +239,8 @@ public abstract class CameraActivity extends Activity implements OnImageAvailabl
switch (requestCode) {
case PERMISSIONS_REQUEST: {
if (grantResults.length > 0
- && grantResults[0] == PackageManager.PERMISSION_GRANTED
- && grantResults[1] == PackageManager.PERMISSION_GRANTED) {
+ && grantResults[0] == PackageManager.PERMISSION_GRANTED
+ && grantResults[1] == PackageManager.PERMISSION_GRANTED) {
setFragment();
} else {
requestPermission();
@@ -135,7 +251,8 @@ public abstract class CameraActivity extends Activity implements OnImageAvailabl
private boolean hasPermission() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
- return checkSelfPermission(PERMISSION_CAMERA) == PackageManager.PERMISSION_GRANTED && checkSelfPermission(PERMISSION_STORAGE) == PackageManager.PERMISSION_GRANTED;
+ return checkSelfPermission(PERMISSION_CAMERA) == PackageManager.PERMISSION_GRANTED &&
+ checkSelfPermission(PERMISSION_STORAGE) == PackageManager.PERMISSION_GRANTED;
} else {
return true;
}
@@ -143,25 +260,80 @@ public abstract class CameraActivity extends Activity implements OnImageAvailabl
private void requestPermission() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
- if (shouldShowRequestPermissionRationale(PERMISSION_CAMERA) || shouldShowRequestPermissionRationale(PERMISSION_STORAGE)) {
- Toast.makeText(CameraActivity.this, "Camera AND storage permission are required for this demo", Toast.LENGTH_LONG).show();
+ if (shouldShowRequestPermissionRationale(PERMISSION_CAMERA) ||
+ shouldShowRequestPermissionRationale(PERMISSION_STORAGE)) {
+ Toast.makeText(CameraActivity.this,
+ "Camera AND storage permission are required for this demo", Toast.LENGTH_LONG).show();
}
requestPermissions(new String[] {PERMISSION_CAMERA, PERMISSION_STORAGE}, PERMISSIONS_REQUEST);
}
}
+ // Returns true if the device supports the required hardware level, or better.
+ boolean isHardwareLevelSupported(CameraCharacteristics characteristics, int requiredLevel) {
+ int deviceLevel = characteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL);
+ if (deviceLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) {
+ return requiredLevel == deviceLevel;
+ }
+ // deviceLevel is not LEGACY, so we can use a simple numerical comparison
+ return requiredLevel <= deviceLevel;
+ }
+
+ private String chooseCamera() {
+ final CameraManager manager = (CameraManager) getSystemService(Context.CAMERA_SERVICE);
+ try {
+ for (final String cameraId : manager.getCameraIdList()) {
+ final CameraCharacteristics characteristics = manager.getCameraCharacteristics(cameraId);
+
+ // We don't use a front facing camera in this sample.
+ final Integer facing = characteristics.get(CameraCharacteristics.LENS_FACING);
+ if (facing != null && facing == CameraCharacteristics.LENS_FACING_FRONT) {
+ continue;
+ }
+
+ final StreamConfigurationMap map =
+ characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
+
+ if (map == null) {
+ continue;
+ }
+
+ useCamera2API = isHardwareLevelSupported(characteristics,
+ CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_FULL);
+ LOGGER.i("Camera API lv2?: %s", useCamera2API);
+ return cameraId;
+ }
+ } catch (CameraAccessException e) {
+ LOGGER.e(e, "Not allowed to access camera");
+ }
+
+ return null;
+ }
+
protected void setFragment() {
- final Fragment fragment =
- CameraConnectionFragment.newInstance(
- new CameraConnectionFragment.ConnectionCallback() {
- @Override
- public void onPreviewSizeChosen(final Size size, final int rotation) {
- CameraActivity.this.onPreviewSizeChosen(size, rotation);
- }
- },
- this,
- getLayoutId(),
- getDesiredPreviewFrameSize());
+ String cameraId = chooseCamera();
+
+ Fragment fragment;
+ if (useCamera2API) {
+ CameraConnectionFragment camera2Fragment =
+ CameraConnectionFragment.newInstance(
+ new CameraConnectionFragment.ConnectionCallback() {
+ @Override
+ public void onPreviewSizeChosen(final Size size, final int rotation) {
+ previewHeight = size.getHeight();
+ previewWidth = size.getWidth();
+ CameraActivity.this.onPreviewSizeChosen(size, rotation);
+ }
+ },
+ this,
+ getLayoutId(),
+ getDesiredPreviewFrameSize());
+
+ camera2Fragment.setCamera(cameraId);
+ fragment = camera2Fragment;
+ } else {
+ fragment = new LegacyCameraConnectionFragment(this, getLayoutId());
+ }
getFragmentManager()
.beginTransaction()
@@ -213,6 +385,7 @@ public abstract class CameraActivity extends Activity implements OnImageAvailabl
return super.onKeyDown(keyCode, event);
}
+ protected abstract void processImageRGBbytes(int[] rgbBytes ) ;
protected abstract void onPreviewSizeChosen(final Size size, final int rotation);
protected abstract int getLayoutId();
protected abstract Size getDesiredPreviewFrameSize();
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/CameraConnectionFragment.java b/tensorflow/examples/android/src/org/tensorflow/demo/CameraConnectionFragment.java
index 76bd61d00f..986f2777b2 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/CameraConnectionFragment.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/CameraConnectionFragment.java
@@ -353,58 +353,44 @@ public class CameraConnectionFragment extends Fragment {
super.onPause();
}
+ public void setCamera(String cameraId) {
+ this.cameraId = cameraId;
+ }
+
/**
* Sets up member variables related to camera.
- *
- * @param width The width of available size for camera preview
- * @param height The height of available size for camera preview
*/
- private void setUpCameraOutputs(final int width, final int height) {
+ private void setUpCameraOutputs() {
final Activity activity = getActivity();
final CameraManager manager = (CameraManager) activity.getSystemService(Context.CAMERA_SERVICE);
try {
- for (final String cameraId : manager.getCameraIdList()) {
- final CameraCharacteristics characteristics = manager.getCameraCharacteristics(cameraId);
-
- // We don't use a front facing camera in this sample.
- final Integer facing = characteristics.get(CameraCharacteristics.LENS_FACING);
- if (facing != null && facing == CameraCharacteristics.LENS_FACING_FRONT) {
- continue;
- }
-
- final StreamConfigurationMap map =
- characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
-
- if (map == null) {
- continue;
- }
-
- // For still image captures, we use the largest available size.
- final Size largest =
- Collections.max(
- Arrays.asList(map.getOutputSizes(ImageFormat.YUV_420_888)),
- new CompareSizesByArea());
-
- sensorOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
-
- // Danger, W.R.! Attempting to use too large a preview size could exceed the camera
- // bus' bandwidth limitation, resulting in gorgeous previews but the storage of
- // garbage capture data.
- previewSize =
- chooseOptimalSize(
- map.getOutputSizes(SurfaceTexture.class),
- inputSize.getWidth(),
- inputSize.getHeight());
-
- // We fit the aspect ratio of TextureView to the size of preview we picked.
- final int orientation = getResources().getConfiguration().orientation;
- if (orientation == Configuration.ORIENTATION_LANDSCAPE) {
- textureView.setAspectRatio(previewSize.getWidth(), previewSize.getHeight());
- } else {
- textureView.setAspectRatio(previewSize.getHeight(), previewSize.getWidth());
- }
-
- CameraConnectionFragment.this.cameraId = cameraId;
+ final CameraCharacteristics characteristics = manager.getCameraCharacteristics(cameraId);
+
+ final StreamConfigurationMap map =
+ characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
+
+ // For still image captures, we use the largest available size.
+ final Size largest =
+ Collections.max(
+ Arrays.asList(map.getOutputSizes(ImageFormat.YUV_420_888)),
+ new CompareSizesByArea());
+
+ sensorOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
+
+ // Danger, W.R.! Attempting to use too large a preview size could exceed the camera
+ // bus' bandwidth limitation, resulting in gorgeous previews but the storage of
+ // garbage capture data.
+ previewSize =
+ chooseOptimalSize(map.getOutputSizes(SurfaceTexture.class),
+ inputSize.getWidth(),
+ inputSize.getHeight());
+
+ // We fit the aspect ratio of TextureView to the size of preview we picked.
+ final int orientation = getResources().getConfiguration().orientation;
+ if (orientation == Configuration.ORIENTATION_LANDSCAPE) {
+ textureView.setAspectRatio(previewSize.getWidth(), previewSize.getHeight());
+ } else {
+ textureView.setAspectRatio(previewSize.getHeight(), previewSize.getWidth());
}
} catch (final CameraAccessException e) {
LOGGER.e(e, "Exception!");
@@ -425,7 +411,7 @@ public class CameraConnectionFragment extends Fragment {
* Opens the camera specified by {@link CameraConnectionFragment#cameraId}.
*/
private void openCamera(final int width, final int height) {
- setUpCameraOutputs(width, height);
+ setUpCameraOutputs();
configureTransform(width, height);
final Activity activity = getActivity();
final CameraManager manager = (CameraManager) activity.getSystemService(Context.CAMERA_SERVICE);
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java
index bc39126925..ab48e2265b 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/ClassifierActivity.java
@@ -22,21 +22,21 @@ import android.graphics.Canvas;
import android.graphics.Matrix;
import android.graphics.Paint;
import android.graphics.Typeface;
-import android.media.Image;
-import android.media.Image.Plane;
-import android.media.ImageReader;
+
import android.media.ImageReader.OnImageAvailableListener;
import android.os.SystemClock;
-import android.os.Trace;
import android.util.Size;
import android.util.TypedValue;
import android.view.Display;
+
import java.util.List;
import java.util.Vector;
import org.tensorflow.demo.OverlayView.DrawCallback;
import org.tensorflow.demo.env.BorderedText;
import org.tensorflow.demo.env.ImageUtils;
import org.tensorflow.demo.env.Logger;
+
+// Explicit import needed for internal Google builds.
import org.tensorflow.demo.R;
public class ClassifierActivity extends CameraActivity implements OnImageAvailableListener {
@@ -64,39 +64,25 @@ public class ClassifierActivity extends CameraActivity implements OnImageAvailab
private static final String INPUT_NAME = "input";
private static final String OUTPUT_NAME = "output";
+
private static final String MODEL_FILE = "file:///android_asset/tensorflow_inception_graph.pb";
private static final String LABEL_FILE =
"file:///android_asset/imagenet_comp_graph_label_strings.txt";
- private static final boolean SAVE_PREVIEW_BITMAP = false;
private static final boolean MAINTAIN_ASPECT = true;
private static final Size DESIRED_PREVIEW_SIZE = new Size(640, 480);
- private Classifier classifier;
private Integer sensorOrientation;
-
- private int previewWidth = 0;
- private int previewHeight = 0;
- private byte[][] yuvBytes;
- private int[] rgbBytes = null;
- private Bitmap rgbFrameBitmap = null;
- private Bitmap croppedBitmap = null;
-
- private Bitmap cropCopyBitmap;
-
- private boolean computing = false;
-
+ private Classifier classifier;
private Matrix frameToCropTransform;
private Matrix cropToFrameTransform;
- private ResultsView resultsView;
private BorderedText borderedText;
- private long lastProcessingTimeMs;
@Override
protected int getLayoutId() {
@@ -112,9 +98,8 @@ public class ClassifierActivity extends CameraActivity implements OnImageAvailab
@Override
public void onPreviewSizeChosen(final Size size, final int rotation) {
- final float textSizePx =
- TypedValue.applyDimension(
- TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics());
+ final float textSizePx = TypedValue.applyDimension(
+ TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics());
borderedText = new BorderedText(textSizePx);
borderedText.setTypeface(Typeface.MONOSPACE);
@@ -129,7 +114,6 @@ public class ClassifierActivity extends CameraActivity implements OnImageAvailab
INPUT_NAME,
OUTPUT_NAME);
- resultsView = (ResultsView) findViewById(R.id.results);
previewWidth = size.getWidth();
previewHeight = size.getHeight();
@@ -141,15 +125,13 @@ public class ClassifierActivity extends CameraActivity implements OnImageAvailab
sensorOrientation = rotation + screenOrientation;
LOGGER.i("Initializing at size %dx%d", previewWidth, previewHeight);
- rgbBytes = new int[previewWidth * previewHeight];
rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Config.ARGB_8888);
croppedBitmap = Bitmap.createBitmap(INPUT_SIZE, INPUT_SIZE, Config.ARGB_8888);
- frameToCropTransform =
- ImageUtils.getTransformationMatrix(
- previewWidth, previewHeight,
- INPUT_SIZE, INPUT_SIZE,
- sensorOrientation, MAINTAIN_ASPECT);
+ frameToCropTransform = ImageUtils.getTransformationMatrix(
+ previewWidth, previewHeight,
+ INPUT_SIZE, INPUT_SIZE,
+ sensorOrientation, MAINTAIN_ASPECT);
cropToFrameTransform = new Matrix();
frameToCropTransform.invert(cropToFrameTransform);
@@ -165,52 +147,7 @@ public class ClassifierActivity extends CameraActivity implements OnImageAvailab
});
}
- @Override
- public void onImageAvailable(final ImageReader reader) {
- Image image = null;
-
- try {
- image = reader.acquireLatestImage();
-
- if (image == null) {
- return;
- }
-
- if (computing) {
- image.close();
- return;
- }
- computing = true;
-
- Trace.beginSection("imageAvailable");
-
- final Plane[] planes = image.getPlanes();
- fillBytes(planes, yuvBytes);
-
- final int yRowStride = planes[0].getRowStride();
- final int uvRowStride = planes[1].getRowStride();
- final int uvPixelStride = planes[1].getPixelStride();
- ImageUtils.convertYUV420ToARGB8888(
- yuvBytes[0],
- yuvBytes[1],
- yuvBytes[2],
- previewWidth,
- previewHeight,
- yRowStride,
- uvRowStride,
- uvPixelStride,
- rgbBytes);
-
- image.close();
- } catch (final Exception e) {
- if (image != null) {
- image.close();
- }
- LOGGER.e(e, "Exception!");
- Trace.endSection();
- return;
- }
-
+ protected void processImageRGBbytes(int[] rgbBytes ) {
rgbFrameBitmap.setPixels(rgbBytes, 0, previewWidth, 0, 0, previewWidth, previewHeight);
final Canvas canvas = new Canvas(croppedBitmap);
canvas.drawBitmap(rgbFrameBitmap, frameToCropTransform, null);
@@ -219,7 +156,6 @@ public class ClassifierActivity extends CameraActivity implements OnImageAvailab
if (SAVE_PREVIEW_BITMAP) {
ImageUtils.saveBitmap(croppedBitmap);
}
-
runInBackground(
new Runnable() {
@Override
@@ -227,15 +163,19 @@ public class ClassifierActivity extends CameraActivity implements OnImageAvailab
final long startTime = SystemClock.uptimeMillis();
final List<Classifier.Recognition> results = classifier.recognizeImage(croppedBitmap);
lastProcessingTimeMs = SystemClock.uptimeMillis() - startTime;
-
+ LOGGER.i("Detect: %s", results);
cropCopyBitmap = Bitmap.createBitmap(croppedBitmap);
+ if (resultsView==null) {
+ resultsView = (ResultsView) findViewById(R.id.results);
+ }
resultsView.setResults(results);
requestRender();
computing = false;
+ if (postInferenceCallback != null) {
+ postInferenceCallback.run();
+ }
}
});
-
- Trace.endSection();
}
@Override
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/DetectorActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/DetectorActivity.java
index 5800f80651..acace0eace 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/DetectorActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/DetectorActivity.java
@@ -66,7 +66,7 @@ public class DetectorActivity extends CameraActivity implements OnImageAvailable
// must be manually placed in the assets/ directory by the user.
// Graphs and models downloaded from http://pjreddie.com/darknet/yolo/ may be converted e.g. via
// DarkFlow (https://github.com/thtrieu/darkflow). Sample command:
- // ./flow --model cfg/tiny-yolo-voc.cfg --load bin/tiny-yolo-voc.weights --savepb --verbalise=True
+ // ./flow --model cfg/tiny-yolo-voc.cfg --load bin/tiny-yolo-voc.weights --savepb --verbalise
private static final String YOLO_MODEL_FILE = "file:///android_asset/graph-tiny-yolo-voc.pb";
private static final int YOLO_INPUT_SIZE = 416;
private static final String YOLO_INPUT_NAME = "input";
@@ -126,6 +126,7 @@ public class DetectorActivity extends CameraActivity implements OnImageAvailable
tracker = new MultiBoxTracker(this);
+
if (USE_YOLO) {
detector =
TensorFlowYoloDetector.create(
@@ -270,15 +271,17 @@ public class DetectorActivity extends CameraActivity implements OnImageAvailable
final int uvRowStride = planes[1].getRowStride();
final int uvPixelStride = planes[1].getPixelStride();
ImageUtils.convertYUV420ToARGB8888(
- yuvBytes[0],
- yuvBytes[1],
- yuvBytes[2],
- previewWidth,
- previewHeight,
- yRowStride,
- uvRowStride,
- uvPixelStride,
- rgbBytes);
+ yuvBytes[0],
+ yuvBytes[1],
+ yuvBytes[2],
+ rgbBytes,
+ previewWidth,
+ previewHeight,
+ yRowStride,
+ uvRowStride,
+ uvPixelStride,
+ false);
+
image.close();
} catch (final Exception e) {
@@ -344,6 +347,8 @@ public class DetectorActivity extends CameraActivity implements OnImageAvailable
Trace.endSection();
}
+ protected void processImageRGBbytes(int[] rgbBytes ) {}
+
@Override
protected int getLayoutId() {
return R.layout.camera_connection_fragment_tracking;
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/LegacyCameraConnectionFragment.java b/tensorflow/examples/android/src/org/tensorflow/demo/LegacyCameraConnectionFragment.java
new file mode 100644
index 0000000000..e5b3eeeceb
--- /dev/null
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/LegacyCameraConnectionFragment.java
@@ -0,0 +1,208 @@
+package org.tensorflow.demo;
+
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import android.app.Fragment;
+import android.graphics.SurfaceTexture;
+import android.os.Bundle;
+import android.os.Handler;
+import android.os.HandlerThread;
+import android.util.SparseIntArray;
+import android.view.LayoutInflater;
+import android.view.Surface;
+import android.view.TextureView;
+import android.view.View;
+import android.view.ViewGroup;
+
+import java.io.IOException;
+
+import android.hardware.Camera;
+import android.hardware.Camera.CameraInfo;
+
+import org.tensorflow.demo.env.Logger;
+
+// Explicit import needed for internal Google builds.
+import org.tensorflow.demo.R;
+
+public class LegacyCameraConnectionFragment extends Fragment {
+
+ private Camera camera;
+ private static final Logger LOGGER = new Logger();
+ private Camera.PreviewCallback imageListener;
+
+ /**
+ * The layout identifier to inflate for this Fragment.
+ */
+ private int layout;
+
+ public LegacyCameraConnectionFragment(
+ final Camera.PreviewCallback imageListener,
+ final int layout) {
+ this.imageListener = imageListener;
+ this.layout = layout;
+ }
+
+ /**
+ * Conversion from screen rotation to JPEG orientation.
+ */
+ private static final SparseIntArray ORIENTATIONS = new SparseIntArray();
+
+ static {
+ ORIENTATIONS.append(Surface.ROTATION_0, 90);
+ ORIENTATIONS.append(Surface.ROTATION_90, 0);
+ ORIENTATIONS.append(Surface.ROTATION_180, 270);
+ ORIENTATIONS.append(Surface.ROTATION_270, 180);
+ }
+
+ /**
+ * {@link android.view.TextureView.SurfaceTextureListener} handles several lifecycle events on a
+ * {@link TextureView}.
+ */
+ private final TextureView.SurfaceTextureListener surfaceTextureListener =
+ new TextureView.SurfaceTextureListener() {
+ @Override
+ public void onSurfaceTextureAvailable(
+ final SurfaceTexture texture, final int width, final int height) {
+
+ int index = getCameraId();
+ camera = Camera.open(index);
+
+ try {
+ Camera.Parameters parameters = camera.getParameters();
+ parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE);
+
+ camera.setDisplayOrientation(90);
+ camera.setParameters(parameters);
+ camera.setPreviewTexture(texture);
+ } catch (IOException exception) {
+ camera.release();
+ }
+
+ camera.setPreviewCallbackWithBuffer(imageListener);
+ Camera.Size s = camera.getParameters().getPreviewSize();
+ int bufferSize = s.height * s.width * 3 / 2;
+ camera.addCallbackBuffer(new byte[bufferSize]);
+
+ textureView.setAspectRatio(s.height, s.width);
+
+ camera.startPreview();
+ }
+
+ @Override
+ public void onSurfaceTextureSizeChanged(
+ final SurfaceTexture texture, final int width, final int height) {
+ }
+
+ @Override
+ public boolean onSurfaceTextureDestroyed(final SurfaceTexture texture) {
+ return true;
+ }
+
+ @Override
+ public void onSurfaceTextureUpdated(final SurfaceTexture texture) {
+ }
+ };
+
+ /**
+ * An {@link AutoFitTextureView} for camera preview.
+ */
+ private AutoFitTextureView textureView;
+
+ /**
+ * An additional thread for running tasks that shouldn't block the UI.
+ */
+ private HandlerThread backgroundThread;
+
+ @Override
+ public View onCreateView(
+ final LayoutInflater inflater, final ViewGroup container, final Bundle savedInstanceState) {
+ return inflater.inflate(layout, container, false);
+ }
+
+ @Override
+ public void onViewCreated(final View view, final Bundle savedInstanceState) {
+ textureView = (AutoFitTextureView) view.findViewById(R.id.texture);
+ }
+
+ @Override
+ public void onActivityCreated(final Bundle savedInstanceState) {
+ super.onActivityCreated(savedInstanceState);
+ }
+
+ @Override
+ public void onResume() {
+ super.onResume();
+ startBackgroundThread();
+ // When the screen is turned off and turned back on, the SurfaceTexture is already
+ // available, and "onSurfaceTextureAvailable" will not be called. In that case, we can open
+ // a camera and start preview from here (otherwise, we wait until the surface is ready in
+ // the SurfaceTextureListener).
+
+ if (textureView.isAvailable()) {
+ camera.startPreview();
+ } else {
+ textureView.setSurfaceTextureListener(surfaceTextureListener);
+ }
+ }
+
+ @Override
+ public void onPause() {
+ stopCamera();
+ stopBackgroundThread();
+ super.onPause();
+ }
+
+ /**
+ * Starts a background thread and its {@link Handler}.
+ */
+ private void startBackgroundThread() {
+ backgroundThread = new HandlerThread("CameraBackground");
+ backgroundThread.start();
+ }
+
+ /**
+ * Stops the background thread and its {@link Handler}.
+ */
+ private void stopBackgroundThread() {
+ backgroundThread.quitSafely();
+ try {
+ backgroundThread.join();
+ backgroundThread = null;
+ } catch (final InterruptedException e) {
+ LOGGER.e(e, "Exception!");
+ }
+ }
+
+ protected void stopCamera() {
+ if (camera != null) {
+ camera.stopPreview();
+ camera.setPreviewCallback(null);
+ camera.release();
+ camera = null;
+ }
+ }
+
+ private int getCameraId() {
+ CameraInfo ci = new CameraInfo();
+ for (int i = 0; i < Camera.getNumberOfCameras(); i++) {
+ Camera.getCameraInfo(i, ci);
+ if (ci.facing == CameraInfo.CAMERA_FACING_BACK)
+ return i;
+ }
+ return -1; // No camera found
+ }
+}
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java
index 7afe2bf541..58dd5c6069 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/StylizeActivity.java
@@ -28,6 +28,7 @@ import android.graphics.Paint;
import android.graphics.Paint.Style;
import android.graphics.Rect;
import android.graphics.Typeface;
+import android.hardware.Camera;
import android.media.Image;
import android.media.Image.Plane;
import android.media.ImageReader;
@@ -58,6 +59,8 @@ import org.tensorflow.demo.OverlayView.DrawCallback;
import org.tensorflow.demo.env.BorderedText;
import org.tensorflow.demo.env.ImageUtils;
import org.tensorflow.demo.env.Logger;
+
+// Explicit import needed for internal Google builds.
import org.tensorflow.demo.R;
/**
@@ -97,10 +100,6 @@ public class StylizeActivity extends CameraActivity implements OnImageAvailableL
private int previewWidth = 0;
private int previewHeight = 0;
- private byte[][] yuvBytes;
- private int[] rgbBytes = null;
- private Bitmap rgbFrameBitmap = null;
- private Bitmap croppedBitmap = null;
private final float[] styleVals = new float[NUM_STYLES];
private int[] intValues;
@@ -108,18 +107,13 @@ public class StylizeActivity extends CameraActivity implements OnImageAvailableL
private int frameNum = 0;
- private Bitmap cropCopyBitmap;
private Bitmap textureCopyBitmap;
- private boolean computing = false;
-
private Matrix frameToCropTransform;
private Matrix cropToFrameTransform;
private BorderedText borderedText;
- private long lastProcessingTimeMs;
-
private TensorFlowInferenceInterface inferenceInterface;
private int lastOtherStyle = 1;
@@ -363,9 +357,8 @@ public class StylizeActivity extends CameraActivity implements OnImageAvailableL
@Override
public void onPreviewSizeChosen(final Size size, final int rotation) {
- final float textSizePx =
- TypedValue.applyDimension(
- TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics());
+ final float textSizePx = TypedValue.applyDimension(
+ TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics());
borderedText = new BorderedText(textSizePx);
borderedText.setTypeface(Typeface.MONOSPACE);
@@ -393,7 +386,6 @@ public class StylizeActivity extends CameraActivity implements OnImageAvailableL
grid = (GridView) findViewById(R.id.grid_layout);
grid.setAdapter(adapter);
grid.setOnTouchListener(gridTouchAdapter);
-
setStyle(adapter.items[0], 1.0f);
}
@@ -455,78 +447,42 @@ public class StylizeActivity extends CameraActivity implements OnImageAvailableL
}
}
- @Override
- public void onImageAvailable(final ImageReader reader) {
- Image image = null;
-
- try {
- image = reader.acquireLatestImage();
-
- if (image == null) {
- return;
- }
-
- if (computing) {
- image.close();
- return;
- }
-
- if (desiredSize != initializedSize) {
- LOGGER.i(
- "Initializing at size preview size %dx%d, stylize size %d",
- previewWidth, previewHeight, desiredSize);
- rgbBytes = new int[previewWidth * previewHeight];
- rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Config.ARGB_8888);
- croppedBitmap = Bitmap.createBitmap(desiredSize, desiredSize, Config.ARGB_8888);
-
- frameToCropTransform =
- ImageUtils.getTransformationMatrix(
- previewWidth, previewHeight,
- desiredSize, desiredSize,
- sensorOrientation, true);
-
- cropToFrameTransform = new Matrix();
- frameToCropTransform.invert(cropToFrameTransform);
-
- yuvBytes = new byte[3][];
-
- intValues = new int[desiredSize * desiredSize];
- floatValues = new float[desiredSize * desiredSize * 3];
- initializedSize = desiredSize;
- }
-
- computing = true;
-
- Trace.beginSection("imageAvailable");
-
- final Plane[] planes = image.getPlanes();
- fillBytes(planes, yuvBytes);
+ private void resetPreviewBuffers() {
+ croppedBitmap = Bitmap.createBitmap(desiredSize, desiredSize, Config.ARGB_8888);
- final int yRowStride = planes[0].getRowStride();
- final int uvRowStride = planes[1].getRowStride();
- final int uvPixelStride = planes[1].getPixelStride();
+ frameToCropTransform = ImageUtils.getTransformationMatrix(
+ previewWidth, previewHeight,
+ desiredSize, desiredSize,
+ sensorOrientation, true);
- ImageUtils.convertYUV420ToARGB8888(
- yuvBytes[0],
- yuvBytes[1],
- yuvBytes[2],
- previewWidth,
- previewHeight,
- yRowStride,
- uvRowStride,
- uvPixelStride,
- rgbBytes);
+ cropToFrameTransform = new Matrix();
+ frameToCropTransform.invert(cropToFrameTransform);
+ yuvBytes = new byte[3][];
+ intValues = new int[desiredSize * desiredSize];
+ floatValues = new float[desiredSize * desiredSize * 3];
+ initializedSize = desiredSize;
+ }
- image.close();
- } catch (final Exception e) {
- if (image != null) {
- image.close();
- }
- LOGGER.e(e, "Exception!");
- Trace.endSection();
- return;
+ protected void processImageRGBbytes(int[] rgbBytes ) {
+ if (desiredSize != initializedSize) {
+ LOGGER.i(
+ "Initializing at size preview size %dx%d, stylize size %d",
+ previewWidth, previewHeight, desiredSize);
+
+ rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Config.ARGB_8888);
+ croppedBitmap = Bitmap.createBitmap(desiredSize, desiredSize, Config.ARGB_8888);
+ frameToCropTransform = ImageUtils.getTransformationMatrix(
+ previewWidth, previewHeight,
+ desiredSize, desiredSize,
+ sensorOrientation, true);
+
+ cropToFrameTransform = new Matrix();
+ frameToCropTransform.invert(cropToFrameTransform);
+ yuvBytes = new byte[3][];
+ intValues = new int[desiredSize * desiredSize];
+ floatValues = new float[desiredSize * desiredSize * 3];
+ initializedSize = desiredSize;
}
-
rgbFrameBitmap.setPixels(rgbBytes, 0, previewWidth, 0, 0, previewWidth, previewHeight);
final Canvas canvas = new Canvas(croppedBitmap);
canvas.drawBitmap(rgbFrameBitmap, frameToCropTransform, null);
@@ -536,24 +492,24 @@ public class StylizeActivity extends CameraActivity implements OnImageAvailableL
ImageUtils.saveBitmap(croppedBitmap);
}
- runInBackground(
- new Runnable() {
- @Override
- public void run() {
- cropCopyBitmap = Bitmap.createBitmap(croppedBitmap);
-
- final long startTime = SystemClock.uptimeMillis();
- stylizeImage(croppedBitmap);
- lastProcessingTimeMs = SystemClock.uptimeMillis() - startTime;
-
- textureCopyBitmap = Bitmap.createBitmap(croppedBitmap);
-
- requestRender();
- computing = false;
- }
- });
-
- Trace.endSection();
+ runInBackground(new Runnable() {
+ @Override
+ public void run() {
+ cropCopyBitmap = Bitmap.createBitmap(croppedBitmap);
+ final long startTime = SystemClock.uptimeMillis();
+ stylizeImage(croppedBitmap);
+ lastProcessingTimeMs = SystemClock.uptimeMillis() - startTime;
+ textureCopyBitmap = Bitmap.createBitmap(croppedBitmap);
+ requestRender();
+ computing = false;
+ if (postInferenceCallback != null) {
+ postInferenceCallback.run();
+ }
+ }
+ });
+ if (desiredSize != initializedSize) {
+ resetPreviewBuffers();
+ }
}
private void stylizeImage(final Bitmap bitmap) {
@@ -584,6 +540,7 @@ public class StylizeActivity extends CameraActivity implements OnImageAvailableL
}
// Copy the input data into TensorFlow.
+ LOGGER.i("Width: %s , Height: %s",bitmap.getWidth(),bitmap.getHeight());
inferenceInterface.feed(
INPUT_NODE, floatValues, 1, bitmap.getWidth(), bitmap.getHeight(), 3);
inferenceInterface.feed(STYLE_NODE, styleVals, NUM_STYLES);
diff --git a/tensorflow/go/example_inception_inference_test.go b/tensorflow/go/example_inception_inference_test.go
index 682bd245cc..2162fbe484 100644
--- a/tensorflow/go/example_inception_inference_test.go
+++ b/tensorflow/go/example_inception_inference_test.go
@@ -63,7 +63,7 @@ func Example() {
// this example:
// - Constructs another TensorFlow graph to normalize the image into a
// form suitable for the model (for example, resizing the image)
- // - Creates an executes a Session to obtain a Tensor in this normalized form.
+ // - Creates and executes a Session to obtain a Tensor in this normalized form.
modeldir := flag.String("dir", "", "Directory containing the trained model files. The directory will be created and the model downloaded into it if necessary")
imagefile := flag.String("image", "", "Path of a JPEG-image to extract labels for")
flag.Parse()
diff --git a/tensorflow/python/debug/lib/debug_data.py b/tensorflow/python/debug/lib/debug_data.py
index 3335657a61..a51d8a7774 100644
--- a/tensorflow/python/debug/lib/debug_data.py
+++ b/tensorflow/python/debug/lib/debug_data.py
@@ -1397,7 +1397,7 @@ class DebugDumpDir(object):
Args:
node_name: Name of the node in question.
device_name: (`str`) name of the device. If there is only one device or if
- node_name exists on only one device, this argumnet is optional.
+ node_name exists on only one device, this argument is optional.
Returns:
Attributes of the node.
@@ -1419,7 +1419,7 @@ class DebugDumpDir(object):
is_control: (`bool`) Whether control inputs, rather than non-control
inputs, are to be returned.
device_name: (`str`) name of the device. If there is only one device or if
- node_name exists on only one device, this argumnet is optional.
+ node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) inputs to the node, as a list of node names.
@@ -1455,7 +1455,7 @@ class DebugDumpDir(object):
the source (e.g., A in this case). So the reverse direction of the ref
edge reflects the direction of information flow.
device_name: (`str`) name of the device. If there is only one device or if
- node_name exists on only one device, this argumnet is optional.
+ node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all transitive inputs to the node, as a list of node
@@ -1524,7 +1524,7 @@ class DebugDumpDir(object):
the source (e.g., A in this case). So the reverse direction of the ref
edge reflects the direction of information flow.
device_name: (`str`) name of the device. If there is only one device or if
- node_name exists on only one device, this argumnet is optional.
+ node_name exists on only one device, this argument is optional.
Returns:
A path from the src_node_name to dst_node_name, as a `list` of `str`, if
@@ -1581,7 +1581,7 @@ class DebugDumpDir(object):
is_control: (`bool`) whether control outputs, rather than non-control
outputs, are to be returned.
device_name: (`str`) name of the device. If there is only one device or if
- node_name exists on only one device, this argumnet is optional.
+ node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all inputs to the node, as a list of node names.
@@ -1675,7 +1675,7 @@ class DebugDumpDir(object):
Args:
node_name: (`str`) name of the node.
device_name: (`str`) name of the device. If there is only one device or if
- node_name exists on only one device, this argumnet is optional.
+ node_name exists on only one device, this argument is optional.
Returns:
(`str`) op type of the node.
@@ -1698,7 +1698,7 @@ class DebugDumpDir(object):
Args:
node_name: (`str`) name of the node.
device_name: (`str`) name of the device. If there is only one device or if
- node_name exists on only one device, this argumnet is optional.
+ node_name exists on only one device, this argument is optional.
Returns:
(`list` of `str`) all debug tensor watch keys. Returns an empty list if
@@ -1732,7 +1732,7 @@ class DebugDumpDir(object):
Args:
debug_watch_key: (`str`) debug watch key.
device_name: (`str`) name of the device. If there is only one device or if
- the specified debug_watch_key exists on only one device, this argumnet
+ the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
@@ -1813,7 +1813,7 @@ class DebugDumpDir(object):
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
- the specified debug_watch_key exists on only one device, this argumnet
+ the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
@@ -1846,7 +1846,7 @@ class DebugDumpDir(object):
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
- the specified debug_watch_key exists on only one device, this argumnet
+ the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
@@ -1884,7 +1884,7 @@ class DebugDumpDir(object):
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
- the specified debug_watch_key exists on only one device, this argumnet
+ the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
@@ -1918,7 +1918,7 @@ class DebugDumpDir(object):
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
- the specified debug_watch_key exists on only one device, this argumnet
+ the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
diff --git a/tensorflow/stream_executor/lib/demangle.cc b/tensorflow/stream_executor/lib/demangle.cc
index 8dea7534e5..fa2b4fa005 100644
--- a/tensorflow/stream_executor/lib/demangle.cc
+++ b/tensorflow/stream_executor/lib/demangle.cc
@@ -41,7 +41,7 @@ string Demangle(const char *mangled) {
#if HAS_CXA_DEMANGLE
result = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
#endif
- if (status == 0 && result != nullptr) { // Demangling succeeeded.
+ if (status == 0 && result != nullptr) { // Demangling succeeded.
demangled.append(result);
free(result);
}
diff --git a/tensorflow/tools/api/lib/python_object_to_proto_visitor.py b/tensorflow/tools/api/lib/python_object_to_proto_visitor.py
index 43ba52f983..0b30f7b4d1 100644
--- a/tensorflow/tools/api/lib/python_object_to_proto_visitor.py
+++ b/tensorflow/tools/api/lib/python_object_to_proto_visitor.py
@@ -13,7 +13,7 @@
# limitations under the License.
#
# ==============================================================================
-"""A visitor class that generates protobufs for each pyton object."""
+"""A visitor class that generates protobufs for each python object."""
from __future__ import absolute_import
from __future__ import division
diff --git a/tensorflow/tools/ci_build/builds/pip.sh b/tensorflow/tools/ci_build/builds/pip.sh
index b5cfa1e185..3fc61a66f5 100755
--- a/tensorflow/tools/ci_build/builds/pip.sh
+++ b/tensorflow/tools/ci_build/builds/pip.sh
@@ -354,7 +354,7 @@ do_virtualenv_pip_test() {
# Create virtualenv directory for install test
VENV_DIR="${PIP_TEST_ROOT}/venv"
create_activate_virtualenv_and_install_tensorflow \
- "${CLEAN_VENV_DIR}" "${WHL_PATH}"
+ "${VENV_DIR}" "${WHL_PATH}"
# Install extra pip packages required by the test-on-install
for PACKAGE in ${INSTALL_EXTRA_PIP_PACKAGES}; do
diff --git a/tensorflow/tools/docker/notebooks/3_mnist_from_scratch.ipynb b/tensorflow/tools/docker/notebooks/3_mnist_from_scratch.ipynb
index c9f2b1ab9e..fddb624853 100644
--- a/tensorflow/tools/docker/notebooks/3_mnist_from_scratch.ipynb
+++ b/tensorflow/tools/docker/notebooks/3_mnist_from_scratch.ipynb
@@ -1149,7 +1149,7 @@
"\n",
"Here, we'll do some customizations depending on which graph we're constructing. `train_prediction` holds the training graph, for which we use cross-entropy loss and weight regularization. We'll adjust the learning rate during training -- that's handled by the `exponential_decay` operation, which is itself an argument to the `MomentumOptimizer` that performs the actual training.\n",
"\n",
- "The vaildation and prediction graphs are much simpler the generate -- we need only create copies of the model with the validation and test inputs and a softmax classifier as the output."
+ "The validation and prediction graphs are much simpler to generate -- we need only create copies of the model with the validation and test inputs and a softmax classifier as the output."
]
},
{
diff --git a/tensorflow/tools/gcs_test/python/gcs_smoke.py b/tensorflow/tools/gcs_test/python/gcs_smoke.py
index 51933a52a6..9882f75a8a 100644
--- a/tensorflow/tools/gcs_test/python/gcs_smoke.py
+++ b/tensorflow/tools/gcs_test/python/gcs_smoke.py
@@ -50,55 +50,135 @@ def create_examples(num_examples, input_mean):
return examples
def create_dir_test():
- """Verifies file_io directory handling methods ."""
+ """Verifies file_io directory handling methods."""
- starttime = int(round(time.time() * 1000))
- dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime)
+ # Test directory creation.
+ starttime_ms = int(round(time.time() * 1000))
+ dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime_ms)
print("Creating dir %s" % dir_name)
file_io.create_dir(dir_name)
- elapsed = int(round(time.time() * 1000)) - starttime
- print("Created directory in: %d milliseconds" % elapsed)
+ elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
+ print("Created directory in: %d milliseconds" % elapsed_ms)
+
# Check that the directory exists.
dir_exists = file_io.is_directory(dir_name)
+ assert dir_exists
print("%s directory exists: %s" % (dir_name, dir_exists))
- # List contents of just created directory.
- print("Listing directory %s." % dir_name)
- starttime = int(round(time.time() * 1000))
- print(file_io.list_directory(dir_name))
- elapsed = int(round(time.time() * 1000)) - starttime
- print("Listed directory %s in %s milliseconds" % (dir_name, elapsed))
+ # Test recursive directory creation.
+ starttime_ms = int(round(time.time() * 1000))
+ recursive_dir_name = "%s/%s/%s" % (dir_name,
+ "nested_dir1",
+ "nested_dir2")
+ print("Creating recursive dir %s" % recursive_dir_name)
+ file_io.recursive_create_dir(recursive_dir_name)
+ elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
+ print("Created directory recursively in: %d milliseconds" % elapsed_ms)
- # Delete directory.
- print("Deleting directory %s." % dir_name)
- starttime = int(round(time.time() * 1000))
+ # Check that the directory exists.
+ recursive_dir_exists = file_io.is_directory(recursive_dir_name)
+ assert recursive_dir_exists
+ print("%s directory exists: %s" % (recursive_dir_name, recursive_dir_exists))
+
+  # Create some files in the newly created directory and list the contents.
+ num_files = 10
+ files_to_create = ["file_%d.txt" % n for n in range(num_files)]
+ for file_num in files_to_create:
+ file_name = "%s/%s" % (dir_name, file_num)
+ print("Creating file %s." % file_name)
+ file_io.write_string_to_file(file_name, "test file.")
+
+ print("Listing directory %s." % dir_name)
+ starttime_ms = int(round(time.time() * 1000))
+ directory_contents = file_io.list_directory(dir_name)
+ print(directory_contents)
+ elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
+ print("Listed directory %s in %s milliseconds" % (dir_name, elapsed_ms))
+ assert set(directory_contents) == set(files_to_create + ["nested_dir1/"])
+
+ # Test directory renaming.
+ dir_to_rename = "%s/old_dir" % dir_name
+ new_dir_name = "%s/new_dir" % dir_name
+ file_io.create_dir(dir_to_rename)
+ assert file_io.is_directory(dir_to_rename)
+ assert not file_io.is_directory(new_dir_name)
+
+ starttime_ms = int(round(time.time() * 1000))
+ print("Will try renaming directory %s to %s" % (dir_to_rename, new_dir_name))
+ file_io.rename(dir_to_rename, new_dir_name)
+ elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
+ print("Renamed directory %s to %s in %s milliseconds" % (
+ dir_to_rename, new_dir_name, elapsed_ms))
+ assert not file_io.is_directory(dir_to_rename)
+ assert file_io.is_directory(new_dir_name)
+
+  # Test deleting the directory recursively.
+ print("Deleting directory recursively %s." % dir_name)
+ starttime_ms = int(round(time.time() * 1000))
file_io.delete_recursively(dir_name)
- elapsed = int(round(time.time() * 1000)) - starttime
- print("Deleted directory %s in %s milliseconds" % (dir_name, elapsed))
+ elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
+ dir_exists = file_io.is_directory(dir_name)
+ assert not dir_exists
+ print("Deleted directory recursively %s in %s milliseconds" % (
+ dir_name, elapsed_ms))
def create_object_test():
"""Verifies file_io's object manipulation methods ."""
- starttime = int(round(time.time() * 1000))
- dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime)
+ starttime_ms = int(round(time.time() * 1000))
+ dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime_ms)
print("Creating dir %s." % dir_name)
file_io.create_dir(dir_name)
- # Create a file in this directory.
- file_name = "%s/test_file.txt" % dir_name
- print("Creating file %s." % file_name)
- file_io.write_string_to_file(file_name, "test file creation.")
-
+ num_files = 5
+ # Create files of 2 different patterns in this directory.
+ files_pattern_1 = ["%s/test_file_%d.txt" % (dir_name, n)
+ for n in range(num_files)]
+ files_pattern_2 = ["%s/testfile%d.txt" % (dir_name, n)
+ for n in range(num_files)]
+
+ starttime_ms = int(round(time.time() * 1000))
+ files_to_create = files_pattern_1 + files_pattern_2
+ for file_name in files_to_create:
+ print("Creating file %s." % file_name)
+ file_io.write_string_to_file(file_name, "test file creation.")
+ elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
+ print("Created %d files in %s milliseconds" % (len(files_to_create), elapsed_ms))
+
+ # Listing files of pattern1.
list_files_pattern = "%s/test_file*.txt" % dir_name
print("Getting files matching pattern %s." % list_files_pattern)
+ starttime_ms = int(round(time.time() * 1000))
files_list = file_io.get_matching_files(list_files_pattern)
+ elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
+ print("Listed files in %s milliseconds" % elapsed_ms)
print(files_list)
+ assert set(files_list) == set(files_pattern_1)
- assert len(files_list) == 1
- assert files_list[0] == file_name
-
- # Cleanup test files.
- print("Deleting file %s." % file_name)
- file_io.delete_file(file_name)
+ # Listing files of pattern2.
+ list_files_pattern = "%s/testfile*.txt" % dir_name
+ print("Getting files matching pattern %s." % list_files_pattern)
+ starttime_ms = int(round(time.time() * 1000))
+ files_list = file_io.get_matching_files(list_files_pattern)
+ elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
+ print("Listed files in %s milliseconds" % elapsed_ms)
+ print(files_list)
+ assert set(files_list) == set(files_pattern_2)
+
+ # Test renaming file.
+ file_to_rename = "%s/oldname.txt" % dir_name
+ file_new_name = "%s/newname.txt" % dir_name
+ file_io.write_string_to_file(file_to_rename, "test file.")
+ assert file_io.file_exists(file_to_rename)
+ assert not file_io.file_exists(file_new_name)
+
+ print("Will try renaming file %s to %s" % (file_to_rename, file_new_name))
+ starttime_ms = int(round(time.time() * 1000))
+ file_io.rename(file_to_rename, file_new_name)
+ elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
+ print("File %s renamed to %s in %s milliseconds" % (
+ file_to_rename, file_new_name, elapsed_ms))
+ assert not file_io.file_exists(file_to_rename)
+ assert file_io.file_exists(file_new_name)
# Delete directory.
print("Deleting directory %s." % dir_name)
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 3dc2c5d63c..5b6ab44b59 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -29,7 +29,7 @@ from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
-_VERSION = '1.3.0-rc0'
+_VERSION = '1.3.0-rc1'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
@@ -60,6 +60,11 @@ if sys.version_info < (3, 4):
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
+ # We need to keep the TensorBoard command, even though the console script
+ # is now declared by the tensorboard pip package. If we removed it here,
+ # pip would inappropriately delete the command during install, even though
+ # it has not actually gone away — it merely moved to a different wheel.
+ 'tensorboard = tensorboard.main:main',
]
# pylint: enable=line-too-long
diff --git a/third_party/boringssl/add_boringssl_s390x.patch b/third_party/boringssl/add_boringssl_s390x.patch
index 0b41a4aa96..9a34a59a1d 100644
--- a/third_party/boringssl/add_boringssl_s390x.patch
+++ b/third_party/boringssl/add_boringssl_s390x.patch
@@ -11,3 +11,123 @@ index 7a3adfb..88012ad 100644
#else
#error "Unknown target CPU"
#endif
+diff --git a/BUILD b/BUILD
+index 6b645e61..c90b7beb 100644
+--- a/BUILD
++++ b/BUILD
+@@ -40,29 +40,46 @@ config_setting(
+ values = {"cpu": "darwin"},
+ )
+
+-boringssl_copts = [
+- # Assembler option --noexecstack adds .note.GNU-stack to each object to
+- # ensure that binaries can be built with non-executable stack.
+- "-Wa,--noexecstack",
+-
+- # This is needed on Linux systems (at least) to get rwlock in pthread.
+- "-D_XOPEN_SOURCE=700",
+-
+- # This list of warnings should match those in the top-level CMakeLists.txt.
+- "-Wall",
+- "-Werror",
+- "-Wformat=2",
+- "-Wsign-compare",
+- "-Wmissing-field-initializers",
+- "-Wwrite-strings",
+- "-Wshadow",
+- "-fno-common",
+-
+- # Modern build environments should be able to set this to use atomic
+- # operations for reference counting rather than locks. However, it's
+- # known not to work on some Android builds.
+- # "-DOPENSSL_C11_ATOMIC",
+-] + select({
++config_setting(
++ name = "windows",
++ values = {"cpu": "x64_windows"},
++ visibility = ["//visibility:public"],
++)
++
++config_setting(
++ name = "windows_msvc",
++ values = {"cpu": "x64_windows_msvc"},
++ visibility = ["//visibility:public"],
++)
++
++boringssl_copts = select({
++ ":windows": [
++ "-DWIN32_LEAN_AND_MEAN",
++ ],
++ "//conditions:default": [
++ # Assembler option --noexecstack adds .note.GNU-stack to each object to
++ # ensure that binaries can be built with non-executable stack.
++ "-Wa,--noexecstack",
++
++ # This is needed on Linux systems (at least) to get rwlock in pthread.
++ "-D_XOPEN_SOURCE=700",
++
++ # This list of warnings should match those in the top-level CMakeLists.txt.
++ "-Wall",
++ "-Werror",
++ "-Wformat=2",
++ "-Wsign-compare",
++ "-Wmissing-field-initializers",
++ "-Wwrite-strings",
++ "-Wshadow",
++ "-fno-common",
++
++ # Modern build environments should be able to set this to use atomic
++ # operations for reference counting rather than locks. However, it's
++ # known not to work on some Android builds.
++ # "-DOPENSSL_C11_ATOMIC",
++ ],
++}) + select({
+ ":linux_x86_64": [],
+ ":mac_x86_64": [],
+ "//conditions:default": ["-DOPENSSL_NO_ASM"],
+@@ -75,18 +92,26 @@ crypto_sources_asm = select({
+ })
+
+ # For C targets only (not C++), compile with C11 support.
+-boringssl_copts_c11 = boringssl_copts + [
+- "-std=c11",
+- "-Wmissing-prototypes",
+- "-Wold-style-definition",
+- "-Wstrict-prototypes",
+-]
++boringssl_copts_c11 = boringssl_copts + select({
++ ":windows": [],
++ ":windows_msvc": [],
++ "//conditions:default": [
++ "-std=c11",
++ "-Wmissing-prototypes",
++ "-Wold-style-definition",
++ "-Wstrict-prototypes",
++ ],
++})
+
+ # For C targets only (not C++), compile with C11 support.
+-boringssl_copts_cxx = boringssl_copts + [
+- "-std=c++11",
+- "-Wmissing-declarations",
+-]
++boringssl_copts_cxx = boringssl_copts + select({
++ ":windows": [],
++ ":windows_msvc": [],
++ "//conditions:default": [
++ "-std=c++11",
++ "-Wmissing-declarations",
++ ],
++})
+
+ cc_library(
+ name = "crypto",
+@@ -96,6 +121,8 @@ cc_library(
+ includes = ["src/include"],
+ linkopts = select({
+ ":mac_x86_64": [],
++ ":windows": [],
++ ":windows_msvc": [],
+ "//conditions:default": ["-lpthread"],
+ }),
+ visibility = ["//visibility:public"],