Diffstat (limited to 'tensorflow/tools')
-rw-r--r--  tensorflow/tools/api/tests/api_compatibility_test.py           |   2
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.cmake                      |   5
-rw-r--r--  tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh   |   3
-rw-r--r--  tensorflow/tools/def_file_filter/BUILD                          |   0
-rw-r--r--  tensorflow/tools/def_file_filter/BUILD.tpl                      |  15
-rw-r--r--  tensorflow/tools/def_file_filter/def_file_filter.py.tpl         | 168
-rw-r--r--  tensorflow/tools/def_file_filter/def_file_filter_configure.bzl  |  56
-rw-r--r--  tensorflow/tools/dist_test/README.md                            |   8
-rwxr-xr-x  tensorflow/tools/dist_test/local_test.sh                        |  22
-rwxr-xr-x  tensorflow/tools/git/gen_git_source.py                          |   7
-rw-r--r--  tensorflow/tools/graph_transforms/BUILD                         |   1
-rw-r--r--  tensorflow/tools/graph_transforms/fold_old_batch_norms.cc       |  67
-rw-r--r--  tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc  |  95
-rw-r--r--  tensorflow/tools/pip_package/BUILD                              | 129
-rw-r--r--  tensorflow/tools/pip_package/setup.py                           |   4
-rw-r--r--  tensorflow/tools/test/upload_test_benchmarks.py                 |   9
16 files changed, 494 insertions(+), 97 deletions(-)
diff --git a/tensorflow/tools/api/tests/api_compatibility_test.py b/tensorflow/tools/api/tests/api_compatibility_test.py
index 5268bba3cc..baa7a0889d 100644
--- a/tensorflow/tools/api/tests/api_compatibility_test.py
+++ b/tensorflow/tools/api/tests/api_compatibility_test.py
@@ -247,6 +247,8 @@ class ApiCompatibilityTest(test.TestCase):
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.do_not_descend_map['tf'].append('contrib')
public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
+ # TODO(annarev): Make slide_dataset available in API.
+ public_api_visitor.private_map['tf'] = ['slide_dataset']
traverse.traverse(api, public_api_visitor)
proto_dict = visitor.GetProtos()
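For context: `PublicAPIVisitor.private_map` maps a dotted module path to child names the API traversal should skip as private. A minimal sketch of the assumed lookup (simplified from `tensorflow/tools/common/public_api.py`; the helper name is ours):

```python
# Sketch of how private_map is assumed to be consulted during traversal.
private_map = {'tf': ['slide_dataset']}

def is_private(parent_path, child_name):
  # A child is treated as private if its parent module lists it.
  return child_name in private_map.get(parent_path, [])

assert is_private('tf', 'slide_dataset')   # excluded from the API goldens
assert not is_private('tf', 'constant')    # still traversed and checked
```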
diff --git a/tensorflow/tools/ci_build/Dockerfile.cmake b/tensorflow/tools/ci_build/Dockerfile.cmake
index ec90c83aac..d5dea4f3e4 100644
--- a/tensorflow/tools/ci_build/Dockerfile.cmake
+++ b/tensorflow/tools/ci_build/Dockerfile.cmake
@@ -23,11 +23,12 @@ RUN /install/install_deb_packages.sh
RUN apt-get update
RUN apt-get install -y --no-install-recommends python-pip
+RUN pip install --upgrade wheel
RUN pip install --upgrade astor
RUN pip install --upgrade gast
RUN pip install --upgrade numpy
RUN pip install --upgrade termcolor
# Install golang
-RUN add-apt-repository -y ppa:ubuntu-lxc/lxd-stable
-RUN apt-get install -y golang
+RUN apt-get install -t xenial-backports -y golang-1.9
+ENV PATH=${PATH}:/usr/lib/go-1.9/bin
diff --git a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
index 8b8ba31a0d..40189a6d1b 100644
--- a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
+++ b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
@@ -65,4 +65,5 @@ bazel test -c opt $BUILD_OPTS -k --test_output=errors \
--define=no_tensorflow_py_deps=true --test_lang_filters=py \
--test_tag_filters=-no_pip,-no_windows,-no_oss \
--build_tag_filters=-no_pip,-no_windows,-no_oss --build_tests_only \
- //${PY_TEST_DIR}/tensorflow/python/...
+ //${PY_TEST_DIR}/tensorflow/python/... \
+ //${PY_TEST_DIR}/tensorflow/contrib/...
diff --git a/tensorflow/tools/def_file_filter/BUILD b/tensorflow/tools/def_file_filter/BUILD
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tensorflow/tools/def_file_filter/BUILD
diff --git a/tensorflow/tools/def_file_filter/BUILD.tpl b/tensorflow/tools/def_file_filter/BUILD.tpl
new file mode 100644
index 0000000000..3cb72f4979
--- /dev/null
+++ b/tensorflow/tools/def_file_filter/BUILD.tpl
@@ -0,0 +1,15 @@
+# Description:
+# Tools for filtering the DEF file for TensorFlow on Windows.
+#
+# On Windows, we use a DEF file generated by Bazel to export
+# symbols from the tensorflow dynamic library (_pywrap_tensorflow.dll).
+# The maximum number of symbols that can be exported per DLL is 64K,
+# so we have to filter out unneeded symbols with this Python script.
+
+package(default_visibility = ["//visibility:public"])
+
+py_binary(
+ name = "def_file_filter",
+ srcs = ["def_file_filter.py"],
+ srcs_version = "PY2AND3",
+)
diff --git a/tensorflow/tools/def_file_filter/def_file_filter.py.tpl b/tensorflow/tools/def_file_filter/def_file_filter.py.tpl
new file mode 100644
index 0000000000..8bdc03eb0f
--- /dev/null
+++ b/tensorflow/tools/def_file_filter/def_file_filter.py.tpl
@@ -0,0 +1,168 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""def_file_filter.py - tool to filter a windows def file.
+
+The def file can be used to export symbols from the tensorflow dll to enable
+tf.load_library().
+
+Because the linker allows only 64K symbols to be exported per dll
+we filter the symbols down to the essentials. The regular expressions
+we use for this are specific to tensorflow.
+
+TODO: this works fine but there is an issue with exporting
+'const char * const' and importing it from a user_ops. The problem is
+on the importing end and using __declspec(dllimport) works around it.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import io
+import os
+import re
+import subprocess
+import sys
+import tempfile
+
+# External tools we use that come with the Visual Studio SDK
+UNDNAME = "%{undname_bin_path}"
+
+# Exclude if matched
+EXCLUDE_RE = re.compile(r"RTTI|deleting destructor|::internal::")
+
+# Include if matched before exclude
+INCLUDEPRE_RE = re.compile(r"google::protobuf::internal::ExplicitlyConstructed|"
+ r"google::protobuf::internal::ArenaImpl::AllocateAligned|" # for contrib/data/_prefetching_ops
+ r"google::protobuf::internal::ArenaImpl::AddCleanup|" # for contrib/data/_prefetching_ops
+ r"google::protobuf::Arena::OnArenaAllocation|" # for contrib/data/_prefetching_ops
+ r"tensorflow::internal::LogMessage|"
+ r"tensorflow::internal::LogString|"
+ r"tensorflow::internal::CheckOpMessageBuilder|"
+ r"tensorflow::internal::MakeCheckOpValueString|"
+ r"tensorflow::internal::PickUnusedPortOrDie|"
+ r"tensorflow::internal::ValidateDevice|"
+ r"tensorflow::ops::internal::Enter|"
+ r"tensorflow::strings::internal::AppendPieces|"
+ r"tensorflow::strings::internal::CatPieces|"
+ r"tensorflow::io::internal::JoinPathImpl")
+
+# Include if matched after exclude
+INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
+ r"^(TFE_\w*)$|"
+ r"nsync::|"
+ r"tensorflow::|"
+ r"functor::|"
+ r"perftools::gputools")
+
+# We want to identify data members explicitly in the DEF file, so that no one
+# can implicitly link against the DLL if they use one of the variables exported
+# from the DLL and the header they use does not decorate the symbol with
+# __declspec(dllimport). It is easier to detect what a data symbol does
+# NOT look like, so the regex below matches symbols that are not data.
+DATA_EXCLUDE_RE = re.compile(r"[)(]|"
+ r"vftable|"
+ r"vbtable|"
+ r"vcall|"
+ r"RTTI|"
+ r"protobuf::internal::ExplicitlyConstructed")
+
+def get_args():
+ """Parse command line."""
+ filename_list = lambda x: x.split(";")
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--input", type=filename_list,
+ help="paths to input def file",
+ required=True)
+ parser.add_argument("--output", help="output deffile", required=True)
+ parser.add_argument("--target", help="name of the target", required=True)
+ args = parser.parse_args()
+ return args
+
+
+def main():
+ """main."""
+ args = get_args()
+
+ # Read the linkable symbols from the input def files. Good symbols are
+ # collected in candidates and also written to a temp file that is later
+ # fed to undname.
+ candidates = []
+ tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
+ for def_file_path in args.input:
+ def_file = open(def_file_path, 'r')
+ for line in def_file:
+ cols = line.split()
+ sym = cols[0]
+ tmpfile.file.write(sym + "\n")
+ candidates.append(sym)
+ tmpfile.file.close()
+
+ # Run the symbols through undname to get their undecorated name
+ # so we can filter on something readable.
+ with open(args.output, "w") as def_fp:
+ # track dupes
+ taken = set()
+
+ # Header for the def file.
+ def_fp.write("LIBRARY " + args.target + "\n")
+ def_fp.write("EXPORTS\n")
+ def_fp.write("\t ??1OpDef@tensorflow@@UEAA@XZ\n")
+
+ # Each symbol returned by undname matches the same position in candidates.
+ # We filter on the undecorated name but write the decorated name from candidates.
+ dupes = 0
+ proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
+ for idx, line in enumerate(io.TextIOWrapper(proc.stdout, encoding="utf-8")):
+ decorated = candidates[idx]
+ if decorated in taken:
+ # Symbol is already in output, done.
+ dupes += 1
+ continue
+
+ if not INCLUDEPRE_RE.search(line):
+ if EXCLUDE_RE.search(line):
+ continue
+ if not INCLUDE_RE.search(line):
+ continue
+
+ if "deleting destructor" in line:
+ # Some of the symbols covered by INCLUDEPRE_RE export deleting
+ # destructor symbols, which is a bad idea.
+ # So we filter out such symbols here.
+ continue
+
+ if DATA_EXCLUDE_RE.search(line):
+ def_fp.write("\t" + decorated + "\n")
+ else:
+ def_fp.write("\t" + decorated + " DATA\n")
+ taken.add(decorated)
+ def_fp.close()
+
+ exit_code = proc.wait()
+ if exit_code != 0:
+ print("{} failed, exit={}".format(UNDNAME, exit_code))
+ return exit_code
+
+ os.unlink(tmpfile.name)
+
+ print("symbols={}, taken={}, dupes={}"
+ .format(len(candidates), len(taken), dupes))
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
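Taken together, the loop above applies a three-stage cascade per demangled symbol: force-include, exclude, include, with a final veto on deleting destructors. A condensed restatement (a sketch that reuses the regexes defined in this template; the helper name is ours):

```python
def should_export(undecorated_line):
  """Condensed form of the filtering logic in main() above."""
  if not INCLUDEPRE_RE.search(undecorated_line):
    if EXCLUDE_RE.search(undecorated_line):
      return False        # RTTI, deleting destructors, ::internal::
    if not INCLUDE_RE.search(undecorated_line):
      return False        # not in an exported namespace or prefix
  # Even force-included symbols must not be deleting destructors.
  return "deleting destructor" not in undecorated_line
```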
diff --git a/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl b/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl
new file mode 100644
index 0000000000..47539b2423
--- /dev/null
+++ b/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl
@@ -0,0 +1,56 @@
+"""Repository rule for def file filter autoconfiguration.
+
+This repository rule reuses Bazel's Visual C++ detection mechanism to find
+undname.exe, a tool used by def_file_filter.py.
+
+def_file_filter.py filters the DEF file for TensorFlow on Windows.
+On Windows, we use a DEF file generated by Bazel to export symbols from the
+tensorflow dynamic library (_pywrap_tensorflow.dll). The maximum number of
+symbols that can be exported per DLL is 64K, so we have to filter out
+unneeded symbols with this Python script.
+
+`def_file_filter_config` depends on the following environment variables:
+ * `BAZEL_VC`
+ * `BAZEL_VS`
+ * `VS90COMNTOOLS`
+ * `VS100COMNTOOLS`
+ * `VS110COMNTOOLS`
+ * `VS120COMNTOOLS`
+ * `VS140COMNTOOLS`
+"""
+
+load("@bazel_tools//tools/cpp:windows_cc_configure.bzl", "find_vc_path")
+load("@bazel_tools//tools/cpp:windows_cc_configure.bzl", "find_msvc_tool")
+load("@bazel_tools//tools/cpp:lib_cc_configure.bzl", "auto_configure_fail")
+
+def _def_file_filter_configure_impl(repository_ctx):
+ if repository_ctx.os.name.lower().find("windows") == -1:
+ repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
+ repository_ctx.file("def_file_filter.py", "")
+ return
+ vc_path = find_vc_path(repository_ctx)
+ if vc_path == "visual-studio-not-found":
+ auto_configure_fail("Visual C++ build tools not found on your machine")
+ undname_bin_path = find_msvc_tool(repository_ctx, vc_path, "undname.exe").replace("\\", "\\\\")
+
+ repository_ctx.template(
+ "def_file_filter.py",
+ Label("//tensorflow/tools/def_file_filter:def_file_filter.py.tpl"),
+ {
+ "%{undname_bin_path}": undname_bin_path,
+ })
+ repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
+
+
+def_file_filter_configure = repository_rule(
+ implementation = _def_file_filter_configure_impl,
+ environ = [
+ "BAZEL_VC",
+ "BAZEL_VS",
+ "VS90COMNTOOLS",
+ "VS100COMNTOOLS",
+ "VS110COMNTOOLS",
+ "VS120COMNTOOLS",
+ "VS140COMNTOOLS"
+ ],
+)
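A hypothetical WORKSPACE wiring for this rule (Starlark, which shares Python syntax; the repository name below is our invention, not taken from this patch):

```python
# WORKSPACE -- hypothetical usage of def_file_filter_configure.
load("//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
     "def_file_filter_configure")

# On non-Windows hosts the implementation above writes an empty
# def_file_filter.py, so the rule is safe to invoke unconditionally.
def_file_filter_configure(name = "local_config_def_file_filter")
```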
diff --git a/tensorflow/tools/dist_test/README.md b/tensorflow/tools/dist_test/README.md
index c1b1f79bbd..228d5ee35d 100644
--- a/tensorflow/tools/dist_test/README.md
+++ b/tensorflow/tools/dist_test/README.md
@@ -17,6 +17,14 @@ census model:
./local_test.sh --model_name CENSUS_WIDENDEEP
+You can also test a specific version of TensorFlow:
+
+```shell
+./local_test.sh ${whl_file_url}
+```
+
+For example, TensorFlow Python package URLs for Ubuntu are listed [here](https://www.tensorflow.org/install/install_linux#the_url_of_the_tensorflow_python_package).
+
**2) Launch a remote k8s cluster on Google Kubernetes Engine (GKE) and run the
test suite on it**
diff --git a/tensorflow/tools/dist_test/local_test.sh b/tensorflow/tools/dist_test/local_test.sh
index 435f9d0dc9..caae7fd530 100755
--- a/tensorflow/tools/dist_test/local_test.sh
+++ b/tensorflow/tools/dist_test/local_test.sh
@@ -16,12 +16,11 @@
#
# Tests distributed TensorFlow on a locally running TF GRPC cluster.
#
-# This script peforms the following steps:
-# 1) Build the docker-in-docker (dind) image capable of running docker and
-# Kubernetes (k8s) cluster inside.
+# This script performs the following steps:
+# 1) Build the docker image capable of running distributed TensorFlow in docker.
# 2) Run a container from the aforementioned image and start docker service
# in it
-# 3) Call a script to launch a k8s TensorFlow GRPC cluster inside the container
+# 3) Call a script to launch a distributed TensorFlow GRPC cluster inside the container
# and run the distributed test suite.
#
# Usage: local_test.sh <whl_file_location>
@@ -64,15 +63,9 @@ die() {
# Configurations
DOCKER_IMG_NAME="tensorflow/tf-dist-test-local-cluster"
-LOCAL_K8S_CACHE=${HOME}/kubernetes
-# Helper function
-get_container_id_by_image_name() {
- # Get the id of a container by image name
- # Usage: get_docker_container_id_by_image_name <img_name>
-
- docker ps | grep $1 | awk '{print $1}'
-}
+# Use TensorFlow v1.5.0 for Python 2.7 and CPU only, since we set num_gpus to 0 below.
+DEFAULT_WHL_FILE_LOCATION="https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.5.0-cp27-none-linux_x86_64.whl"
# Parse input arguments
LEAVE_CONTAINER_RUNNING=0
@@ -84,7 +77,8 @@ SYNC_REPLICAS_FLAG=""
WHL_FILE_LOCATION=${1}
if [[ -z "${WHL_FILE_LOCATION}" ]]; then
- die "whl file location is not specified"
+ WHL_FILE_LOCATION=${DEFAULT_WHL_FILE_LOCATION}
+ echo "use default whl file location"
fi
while true; do
@@ -121,7 +115,7 @@ DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Get utility functions
source ${DIR}/scripts/utils.sh
-# Build docker-in-docker image for local k8s cluster.
+# Build docker image for local distributed TensorFlow cluster.
NO_CACHE_FLAG=""
if [[ ! -z "${TF_DIST_DOCKER_NO_CACHE}" ]] &&
[[ "${TF_DIST_DOCKER_NO_CACHE}" != "0" ]]; then
diff --git a/tensorflow/tools/git/gen_git_source.py b/tensorflow/tools/git/gen_git_source.py
index 3630dbd740..cbcdbf5b80 100755
--- a/tensorflow/tools/git/gen_git_source.py
+++ b/tensorflow/tools/git/gen_git_source.py
@@ -114,6 +114,13 @@ def configure(src_base_path, gen_path, debug=False):
for target, src in link_map.items():
if src is None:
open(os.path.join(gen_path, target), "w").write("")
+ elif not os.path.exists(src):
+ # The git repo is configured in a way we don't support, such as using
+ # packed refs. In that case, even though we are in a git repo,
+ # tf.__git_version__ will not be accurate.
+ # TODO(mikecase): Support grabbing git info when using packed refs.
+ open(os.path.join(gen_path, target), "w").write("")
+ spec["git"] = False
else:
try:
# In python 3.5, symlink function exists even on Windows. But requires
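The situation the new branch guards against, in isolation (a sketch; the ref path is illustrative): after `git pack-refs`, a branch ref lives in `.git/packed-refs` rather than as a loose file, so the per-ref path the generator expects does not exist.

```python
import os

# Illustrative only: with packed refs there is no loose file per branch.
ref_path = os.path.join(".git", "refs", "heads", "master")
if not os.path.exists(ref_path):
  # Mirrors the fallback above: emit an empty file and mark git info
  # as unavailable so tf.__git_version__ degrades gracefully.
  spec = {"git": False}
```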
diff --git a/tensorflow/tools/graph_transforms/BUILD b/tensorflow/tools/graph_transforms/BUILD
index b7d7fac315..6e21aa2846 100644
--- a/tensorflow/tools/graph_transforms/BUILD
+++ b/tensorflow/tools/graph_transforms/BUILD
@@ -178,6 +178,7 @@ tf_cc_test(
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"//tensorflow/core:testlib",
+ "//tensorflow/core/kernels:quantization_utils",
"//tensorflow/core/kernels:quantized_ops",
"//tensorflow/core/util/tensor_bundle",
],
diff --git a/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc b/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
index d89afe85c7..d86f65325b 100644
--- a/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
+++ b/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
@@ -182,6 +182,36 @@ Status FuseBatchNormWithConv(const NodeMatch& match,
return Status::OK();
}
+Status FuseBatchNormWithBatchToSpace(const NodeMatch& match,
+ std::vector<NodeDef>* new_nodes) {
+ // Calculate the scale and offset values to apply.
+ std::vector<float> scale_values;
+ std::vector<float> offset_values;
+ TF_RETURN_IF_ERROR(
+ GetScaleAndOffsetValues(match, &scale_values, &offset_values));
+
+ // Fuse conv weights, and set the final output node name as batch_norm_node.
+ const NodeDef& batch_norm_node = match.node;
+ const NodeMatch& batch_to_space_node_match = match.inputs[0];
+ const NodeMatch& conv_node_match = batch_to_space_node_match.inputs[0];
+ const NodeDef& batch_to_space_node = batch_to_space_node_match.node;
+ const NodeDef& conv_node = conv_node_match.node;
+
+ string biasadd_name = conv_node.name() + "/biasadd";
+ TF_RETURN_IF_ERROR(
+ FuseScaleOffsetToConvWeights(scale_values, offset_values, conv_node_match,
+ biasadd_name, new_nodes));
+
+ NodeDef new_batch_to_space_node = batch_to_space_node;
+ // Reuse the batch_norm node name so the fused graph keeps the same output name.
+ new_batch_to_space_node.set_name(batch_norm_node.name());
+ new_batch_to_space_node.set_input(0, biasadd_name);
+ new_nodes->push_back(batch_to_space_node_match.inputs[1].node);
+ new_nodes->push_back(batch_to_space_node_match.inputs[2].node);
+ new_nodes->push_back(new_batch_to_space_node);
+ return Status::OK();
+}
+
Status FuseBatchNormWithConvConcat(const NodeMatch& match,
std::vector<NodeDef>* new_nodes) {
// Calculate the scale and offset values to apply.
@@ -287,6 +317,43 @@ Status FoldOldBatchNorms(const GraphDef& input_graph_def,
do {
did_graph_change = false;
GraphDef replaced_graph_def;
+ TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
+ current_graph_def, // clang-format off
+ {"BatchNormWithGlobalNormalization|FusedBatchNorm", // batch_norm_node
+ {
+ {"BatchToSpaceND", // batch_to_space_node
+ {
+ {"Conv2D", // conv_node
+ {
+ {"*"}, // input_node
+ {"Const"}, // weights_node
+ }
+ },
+ {"Const"}, // block_shape
+ {"Const"}, // crops
+ }
+ },
+ {"Const"}, // mean_node
+ {"Const"}, // variance_node
+ {"Const"}, // beta_node
+ {"Const"}, // gamma_node
+ }
+ }, // clang-format on
+ [&did_graph_change](const NodeMatch& match,
+ const std::set<string>& input_nodes,
+ const std::set<string>& output_nodes,
+ std::vector<NodeDef>* new_nodes) {
+ TF_RETURN_IF_ERROR(FuseBatchNormWithBatchToSpace(match, new_nodes));
+ did_graph_change = true;
+ return Status::OK();
+ },
+ {}, &replaced_graph_def));
+ current_graph_def = replaced_graph_def;
+ } while (did_graph_change);
+
+ do {
+ did_graph_change = false;
+ GraphDef replaced_graph_def;
// Replace BatchNorm with concat as input.
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
current_graph_def, // clang-format off
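Both fusion paths above rest on the standard batch-norm folding identities, which is what `GetScaleAndOffsetValues` computes per output channel c:

```latex
\mathrm{scale}_c = \frac{\gamma_c}{\sqrt{\sigma_c^2 + \epsilon}}, \qquad
\mathrm{offset}_c = \beta_c - \mu_c \cdot \mathrm{scale}_c
```

Because BatchToSpaceND only rearranges batch and spatial positions and leaves channels untouched, this per-channel affine transform commutes with it: BN(B2S(Conv(x))) = B2S(scale * Conv(x) + offset). That is why the scale can be folded into the convolution weights and the offset emitted as a BiasAdd feeding the relocated BatchToSpaceND node.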
diff --git a/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc b/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
index b30ba9ac8b..272410c693 100644
--- a/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
+++ b/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
+#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
@@ -298,6 +299,96 @@ class FoldOldBatchNormsTest : public ::testing::Test {
}
};
+void TestFoldFusedBatchNormsWithBatchToSpace() {
+ auto root = tensorflow::Scope::NewRootScope();
+ using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
+
+ Tensor input_data(DT_FLOAT, TensorShape({2, 1, 3, 2}));
+ test::FillValues<float>(
+ &input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
+ -5.0f, -3.0f, -6.0f});
+ Output input_op =
+ Const(root.WithOpName("input_op"), Input::Initializer(input_data));
+
+ Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
+ test::FillValues<float>(&weights_data,
+ {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
+ Output weights_op =
+ Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
+
+ Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op,
+ {1, 1, 1, 1}, "VALID");
+
+ Tensor block_shape_data(DT_INT32, TensorShape({2}));
+ test::FillValues<int32>(&block_shape_data, {1, 2});
+ Output block_shape_op =
+ Const(root.WithOpName("block_shape_op"), Input::Initializer(block_shape_data));
+
+ Tensor crops_data(DT_INT32, TensorShape({2, 2}));
+ test::FillValues<int32>(&crops_data, {0, 0, 0, 1});
+ Output crops_op =
+ Const(root.WithOpName("crops_op"), Input::Initializer(crops_data));
+
+ Output batch_to_space_op = BatchToSpaceND(root.WithOpName("batch_to_space_op"),
+ conv_op, block_shape_op, crops_op);
+
+ Tensor mean_data(DT_FLOAT, TensorShape({2}));
+ test::FillValues<float>(&mean_data, {10.0f, 20.0f});
+ Output mean_op =
+ Const(root.WithOpName("mean_op"), Input::Initializer(mean_data));
+
+ Tensor variance_data(DT_FLOAT, TensorShape({2}));
+ test::FillValues<float>(&variance_data, {0.25f, 0.5f});
+ Output variance_op = Const(root.WithOpName("variance_op"),
+ Input::Initializer(variance_data));
+
+ Tensor beta_data(DT_FLOAT, TensorShape({2}));
+ test::FillValues<float>(&beta_data, {0.1f, 0.6f});
+ Output beta_op =
+ Const(root.WithOpName("beta_op"), Input::Initializer(beta_data));
+
+ Tensor gamma_data(DT_FLOAT, TensorShape({2}));
+ test::FillValues<float>(&gamma_data, {1.0f, 2.0f});
+ Output gamma_op =
+ Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data));
+
+ GraphDef original_graph_def;
+ TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
+
+ NodeDef batch_norm_node;
+ batch_norm_node.set_op("FusedBatchNorm");
+ batch_norm_node.set_name("output");
+ AddNodeInput("batch_to_space_op", &batch_norm_node);
+ AddNodeInput("gamma_op", &batch_norm_node);
+ AddNodeInput("beta_op", &batch_norm_node);
+ AddNodeInput("mean_op", &batch_norm_node);
+ AddNodeInput("variance_op", &batch_norm_node);
+ SetNodeAttr("T", DT_FLOAT, &batch_norm_node);
+ SetNodeAttr("epsilon", 0.00001f, &batch_norm_node);
+ SetNodeAttr("is_training", false, &batch_norm_node);
+ *(original_graph_def.mutable_node()->Add()) = batch_norm_node;
+
+ std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
+ TF_ASSERT_OK(original_session->Create(original_graph_def));
+ std::vector<Tensor> original_outputs;
+ TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
+
+ GraphDef fused_graph_def;
+ TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}},
+ &fused_graph_def));
+
+ std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
+ TF_ASSERT_OK(fused_session->Create(fused_graph_def));
+ std::vector<Tensor> fused_outputs;
+ TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
+
+ test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
+
+ for (const NodeDef& node : fused_graph_def.node()) {
+ EXPECT_NE("FusedBatchNorm", node.op());
+ }
+}
+
TEST_F(FoldOldBatchNormsTest, TestFoldOldBatchNorms) {
TestFoldOldBatchNorms();
}
@@ -315,5 +406,9 @@ TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNormsWithConcat) {
TestFoldFusedBatchNormsWithConcat(/*split=*/false);
}
+TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNormsWithBatchToSpace) {
+ TestFoldFusedBatchNormsWithBatchToSpace();
+}
+
} // namespace graph_transforms
} // namespace tensorflow
diff --git a/tensorflow/tools/pip_package/BUILD b/tensorflow/tools/pip_package/BUILD
index 1833d67d82..2607b9d704 100644
--- a/tensorflow/tools/pip_package/BUILD
+++ b/tensorflow/tools/pip_package/BUILD
@@ -48,36 +48,65 @@ py_binary(
deps = ["//tensorflow:tensorflow_py"],
)
+COMMON_PIP_DEPS = [
+ ":licenses",
+ "MANIFEST.in",
+ "README",
+ "setup.py",
+ ":included_headers",
+ "//tensorflow:tensorflow_py",
+ "//tensorflow/contrib/boosted_trees:boosted_trees_pip",
+ "//tensorflow/contrib/cluster_resolver:cluster_resolver_pip",
+ "//tensorflow/contrib/data/python/kernel_tests:dataset_serialization_test",
+ "//tensorflow/contrib/data/python/ops:contrib_op_loader",
+ "//tensorflow/contrib/eager/python/examples:examples_pip",
+ "//tensorflow/contrib/eager/python:checkpointable_utils",
+ "//tensorflow/contrib/eager/python:evaluator",
+ "//tensorflow/contrib/gan:gan",
+ "//tensorflow/contrib/graph_editor:graph_editor_pip",
+ "//tensorflow/contrib/keras:keras",
+ "//tensorflow/contrib/labeled_tensor:labeled_tensor_pip",
+ "//tensorflow/contrib/nn:nn_py",
+ "//tensorflow/contrib/predictor:predictor_pip",
+ "//tensorflow/contrib/py2tf:py2tf",
+ "//tensorflow/contrib/py2tf/converters:converters",
+ "//tensorflow/contrib/py2tf/converters:test_lib",
+ "//tensorflow/contrib/py2tf/impl:impl",
+ "//tensorflow/contrib/py2tf/pyct:pyct",
+ "//tensorflow/contrib/py2tf/pyct/static_analysis:static_analysis",
+ "//tensorflow/contrib/receptive_field:receptive_field_pip",
+ "//tensorflow/contrib/session_bundle:session_bundle_pip",
+ "//tensorflow/contrib/signal:signal_py",
+ "//tensorflow/contrib/signal:test_util",
+ "//tensorflow/contrib/slim:slim",
+ "//tensorflow/contrib/slim/python/slim/data:data_pip",
+ "//tensorflow/contrib/slim/python/slim/nets:nets_pip",
+ "//tensorflow/contrib/specs:specs",
+ "//tensorflow/contrib/summary:summary_test_util",
+ "//tensorflow/contrib/tensor_forest:init_py",
+ "//tensorflow/contrib/tensor_forest/hybrid:hybrid_pip",
+ "//tensorflow/contrib/timeseries:timeseries_pip",
+ "//tensorflow/contrib/tpu",
+ "//tensorflow/examples/tutorials/mnist:package",
+ "//tensorflow/python:distributed_framework_test_lib",
+ "//tensorflow/python:meta_graph_testdata",
+ "//tensorflow/python:spectral_ops_test_util",
+ "//tensorflow/python:util_example_parser_configuration",
+ "//tensorflow/python/debug:debug_pip",
+ "//tensorflow/python/eager:eager_pip",
+ "//tensorflow/python/saved_model:saved_model",
+ "//tensorflow/python/tools:tools_pip",
+ "//tensorflow/python:test_ops",
+ "//tensorflow/tools/dist_test/server:grpc_tensorflow_server",
+]
+
# On Windows, python binary is a zip file of runfiles tree.
# Add everything to its data dependency for generating a runfiles tree
# for building the pip package on Windows.
py_binary(
name = "simple_console_for_windows",
srcs = ["simple_console_for_windows.py"],
- data = [
- "MANIFEST.in",
- "README",
- "setup.py",
- ":included_headers",
- "//tensorflow/contrib/nn:nn_py",
- "//tensorflow/contrib/session_bundle:session_bundle_pip",
- "//tensorflow/contrib/signal:signal_py",
- "//tensorflow/contrib/slim/python/slim/data:data_pip",
- "//tensorflow/python:util_example_parser_configuration",
- "//tensorflow/python/debug:debug_pip",
- "//tensorflow/python/saved_model",
- "//tensorflow/python:spectral_ops_test_util",
- "//tensorflow/python/tools:tools_pip",
- "//tensorflow/python/eager:eager_pip",
- "//tensorflow/contrib/summary:summary_test_util",
- # These targets don't build on Windows yet. Exclude them for now.
- # "//tensorflow/contrib/slim",
- # "//tensorflow/contrib/slim/python/slim/nets:nets_pip",
- # "//tensorflow/contrib/specs",
- # "//tensorflow/contrib/tensor_forest:init_py",
- # "//tensorflow/contrib/tensor_forest/hybrid:hybrid_pip",
- # "//tensorflow/examples/tutorials/mnist:package",
- ],
+ data = COMMON_PIP_DEPS,
srcs_version = "PY2AND3",
deps = ["//tensorflow:tensorflow_py"],
)
@@ -108,6 +137,7 @@ filegroup(
"@highwayhash//:LICENSE",
"@jemalloc//:COPYING",
"@jpeg//:LICENSE.md",
+ "@kafka//:LICENSE",
"@libxsmm_archive//:LICENSE",
"@lmdb//:LICENSE",
"@local_config_sycl//sycl:LICENSE.text",
@@ -137,61 +167,12 @@ sh_binary(
data = select({
"//tensorflow:windows": [":simple_console_for_windows"],
"//tensorflow:windows_msvc": [":simple_console_for_windows"],
- "//conditions:default": [
- ":licenses",
- "MANIFEST.in",
- "README",
- "setup.py",
- ":included_headers",
+ "//conditions:default": COMMON_PIP_DEPS + [
":simple_console",
- "//tensorflow:tensorflow_py",
- "//tensorflow/contrib/boosted_trees:boosted_trees_pip",
- "//tensorflow/contrib/cluster_resolver:cluster_resolver_pip",
- "//tensorflow/contrib/data/python/kernel_tests:dataset_serialization_test",
- "//tensorflow/contrib/data/python/ops:contrib_op_loader",
- "//tensorflow/contrib/eager/python/examples:examples_pip",
- "//tensorflow/contrib/eager/python:checkpointable_utils",
- "//tensorflow/contrib/eager/python:evaluator",
- "//tensorflow/contrib/gan:gan",
- "//tensorflow/contrib/graph_editor:graph_editor_pip",
- "//tensorflow/contrib/keras:keras",
- "//tensorflow/contrib/labeled_tensor:labeled_tensor_pip",
"//tensorflow/contrib/lite/python:interpreter_test_data",
"//tensorflow/contrib/lite/toco:toco",
"//tensorflow/contrib/lite/toco/python:toco_wrapper",
"//tensorflow/contrib/lite/toco/python:toco_from_protos",
- "//tensorflow/contrib/nn:nn_py",
- "//tensorflow/contrib/predictor:predictor_pip",
- "//tensorflow/contrib/py2tf:py2tf",
- "//tensorflow/contrib/py2tf/converters:converters",
- "//tensorflow/contrib/py2tf/converters:test_lib",
- "//tensorflow/contrib/py2tf/impl:impl",
- "//tensorflow/contrib/py2tf/pyct:pyct",
- "//tensorflow/contrib/py2tf/pyct/static_analysis:static_analysis",
- "//tensorflow/contrib/receptive_field:receptive_field_pip",
- "//tensorflow/contrib/session_bundle:session_bundle_pip",
- "//tensorflow/contrib/signal:signal_py",
- "//tensorflow/contrib/signal:test_util",
- "//tensorflow/contrib/slim:slim",
- "//tensorflow/contrib/slim/python/slim/data:data_pip",
- "//tensorflow/contrib/slim/python/slim/nets:nets_pip",
- "//tensorflow/contrib/specs:specs",
- "//tensorflow/contrib/summary:summary_test_util",
- "//tensorflow/contrib/tensor_forest:init_py",
- "//tensorflow/contrib/tensor_forest/hybrid:hybrid_pip",
- "//tensorflow/contrib/timeseries:timeseries_pip",
- "//tensorflow/contrib/tpu",
- "//tensorflow/examples/tutorials/mnist:package",
- "//tensorflow/python:distributed_framework_test_lib",
- "//tensorflow/python:meta_graph_testdata",
- "//tensorflow/python:spectral_ops_test_util",
- "//tensorflow/python:util_example_parser_configuration",
- "//tensorflow/python/debug:debug_pip",
- "//tensorflow/python/eager:eager_pip",
- "//tensorflow/python/saved_model:saved_model",
- "//tensorflow/python/tools:tools_pip",
- "//tensorflow/python:test_ops",
- "//tensorflow/tools/dist_test/server:grpc_tensorflow_server",
],
}) + if_mkl(["//third_party/mkl:intel_binary_blob"]) + if_tensorrt([
"//tensorflow/contrib/tensorrt:init_py",
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index e1a5f091ba..e0152da4df 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -29,7 +29,7 @@ from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
-_VERSION = '1.6.0-rc1'
+_VERSION = '1.6.0'
REQUIRED_PACKAGES = [
'absl-py >= 0.1.6',
@@ -72,7 +72,7 @@ if sys.version_info < (3, 4):
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
- 'freeze_graph = tensorflow.python.tools.freeze_graph:main',
+ 'freeze_graph = tensorflow.python.tools.freeze_graph:run_main',
'toco_from_protos = tensorflow.contrib.lite.toco.python.toco_from_protos:main',
'toco = tensorflow.contrib.lite.toco.python.toco_wrapper:main',
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
diff --git a/tensorflow/tools/test/upload_test_benchmarks.py b/tensorflow/tools/test/upload_test_benchmarks.py
index 77cc9f75f7..edd093510e 100644
--- a/tensorflow/tools/test/upload_test_benchmarks.py
+++ b/tensorflow/tools/test/upload_test_benchmarks.py
@@ -88,6 +88,7 @@ import os
import shutil
from google.cloud import datastore
+from six import text_type
def is_real_file(dirpath, fname):
@@ -150,7 +151,7 @@ def upload_benchmark_data(client, data):
"""
test_result = json.loads(data)
- test_name = unicode(test_result["name"])
+ test_name = text_type(test_result["name"])
start_time = datetime.datetime.utcfromtimestamp(
float(test_result["startTime"]))
batch = []
@@ -162,7 +163,7 @@ def upload_benchmark_data(client, data):
t_val.update({
"test": test_name,
"start": start_time,
- "info": unicode(data)
+ "info": text_type(data)
})
batch.append(t_val)
@@ -170,7 +171,7 @@ def upload_benchmark_data(client, data):
# the attribute to be fetched and displayed. The full entry information is
# also stored as a non-indexed JSON blob.
for ent in test_result["entries"].get("entry", []):
- ent_name = unicode(ent["name"])
+ ent_name = text_type(ent["name"])
e_key = client.key("Entry")
e_val = datastore.Entity(e_key, exclude_from_indexes=["info"])
e_val.update({
@@ -178,7 +179,7 @@ def upload_benchmark_data(client, data):
"start": start_time,
"entry": ent_name,
"timing": ent["wallTime"],
- "info": unicode(json.dumps(ent))
+ "info": text_type(json.dumps(ent))
})
batch.append(e_val)
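The reason for the `unicode` to `six.text_type` swap throughout this file: `unicode` is a Python 2 builtin that no longer exists in Python 3, while `six.text_type` aliases `unicode` on Python 2 and `str` on Python 3. A quick illustration:

```python
from six import text_type  # unicode on Python 2, str on Python 3

# The same call now normalizes values identically under both interpreters,
# e.g. test_name = text_type(test_result["name"]) above.
assert text_type("wallTime") == u"wallTime"
```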