-rw-r--r--  tensorflow/compiler/aot/tfcompile.bzl | 640
-rw-r--r--  tensorflow/compiler/tests/build_defs.bzl | 165
-rw-r--r--  tensorflow/compiler/tests/plugin.bzl | 17
-rw-r--r--  tensorflow/compiler/xla/service/cpu/build_defs.bzl | 17
-rw-r--r--  tensorflow/compiler/xla/tests/build_defs.bzl | 484
-rw-r--r--  tensorflow/compiler/xla/tests/plugin.bzl | 1
-rw-r--r--  tensorflow/compiler/xla/xla.bzl | 49
-rw-r--r--  tensorflow/contrib/lite/build_def.bzl | 407
-rw-r--r--  tensorflow/contrib/lite/java/aar_with_jni.bzl | 52
-rw-r--r--  tensorflow/contrib/lite/special_rules.bzl | 6
-rw-r--r--  tensorflow/contrib/proto/python/kernel_tests/build_defs.bzl | 126
-rw-r--r--  tensorflow/core/kernels/fuzzing/tf_ops_fuzz_target_lib.bzl | 20
-rw-r--r--  tensorflow/core/platform/default/build_config.bzl | 1183
-rw-r--r--  tensorflow/core/platform/default/build_config_root.bzl | 78
-rw-r--r--  tensorflow/core/platform/default/platform.bzl | 71
-rw-r--r--  tensorflow/java/build_defs.bzl | 2
-rw-r--r--  tensorflow/java/src/gen/gen_ops.bzl | 80
-rw-r--r--  tensorflow/python/build_defs.bzl | 42
-rw-r--r--  tensorflow/tensorflow.bzl | 2684
-rw-r--r--  tensorflow/tools/api/generator/api_gen.bzl | 7
-rw-r--r--  tensorflow/tools/def_file_filter/def_file_filter_configure.bzl | 42
-rw-r--r--  tensorflow/tools/test/performance.bzl | 110
-rw-r--r--  tensorflow/version_check.bzl | 64
-rw-r--r--  tensorflow/workspace.bzl | 1729
-rw-r--r--  third_party/android/android_configure.bzl | 54
-rw-r--r--  third_party/clang_toolchain/cc_configure_clang.bzl | 18
-rw-r--r--  third_party/clang_toolchain/download_clang.bzl | 104
-rw-r--r--  third_party/common.bzl | 10
-rw-r--r--  third_party/flatbuffers/build_defs.bzl | 347
-rw-r--r--  third_party/llvm/llvm.bzl | 193
-rw-r--r--  third_party/mkl/build_defs.bzl | 53
-rw-r--r--  third_party/mpi/mpi.bzl | 10
-rw-r--r--  third_party/repo.bzl | 126
-rw-r--r--  third_party/sycl/sycl_configure.bzl | 331
-rw-r--r--  third_party/toolchains/clang6/repo.bzl | 49
-rw-r--r--  third_party/toolchains/cpus/arm/arm_compiler_configure.bzl | 52
-rw-r--r--  third_party/toolchains/gpus/cuda/build_defs.bzl | 7
37 files changed, 4590 insertions(+), 4840 deletions(-)
diff --git a/tensorflow/compiler/aot/tfcompile.bzl b/tensorflow/compiler/aot/tfcompile.bzl
index 3b260f82e6..5c57fee326 100644
--- a/tensorflow/compiler/aot/tfcompile.bzl
+++ b/tensorflow/compiler/aot/tfcompile.bzl
@@ -16,355 +16,339 @@ tf_library(
)
"""
-load(
- "//tensorflow:tensorflow.bzl",
- "if_android",
- "tf_cc_test",
- "tf_copts",
-)
+load("//tensorflow:tensorflow.bzl",
+ "if_android", "tf_cc_test", "tf_copts")
+
+def tf_library(name, graph, config,
+ freeze_checkpoint=None, freeze_saver=None,
+ cpp_class=None, gen_test=True, gen_benchmark=True,
+ visibility=None, testonly=None,
+ tfcompile_flags=None,
+ tfcompile_tool="//tensorflow/compiler/aot:tfcompile",
+ include_standard_runtime_deps=True,
+ enable_xla_hlo_profiling=False, deps=None, tags=None):
+ """Runs tfcompile to compile a TensorFlow graph into executable code.
+
+ Given an invocation of tf_library(name="foo", ...), generates the following
+ build targets:
+ foo: A cc_library containing the generated header and computation.
+ foo_test: A cc_test with simple tests and benchmarks. Only created if
+ gen_test=True.
+ foo_benchmark: A cc_binary that runs a minimal-dependency benchmark, useful
+ for mobile devices or other platforms that can't compile the
+ full test libraries. Only created if gen_benchmark=True.
+
+ Args:
+ name: The name of the build rule.
+ graph: The TensorFlow GraphDef to compile. If the file ends in '.pbtxt' it
+ is expected to be in the human-readable proto text format, otherwise it is
+ expected to be in the proto binary format.
+ config: File containing tensorflow.tf2xla.Config proto. If the file ends
+ in '.pbtxt' it is expected to be in the human-readable proto text format,
+ otherwise it is expected to be in the proto binary format.
+ freeze_checkpoint: If provided, run freeze_graph with this checkpoint to
+ convert variables into constants.
+ freeze_saver: If provided, run freeze_graph with this saver, in SaverDef
+ binary form, to convert variables into constants.
+ cpp_class: The name of the generated C++ class, wrapping the generated
+ function. The syntax of this flag is
+ [[<optional_namespace>::],...]<class_name>. This mirrors the C++ syntax
+ for referring to a class, where multiple namespaces may precede the class
+ name, separated by double-colons. The class will be generated in the
+ given namespace(s), or if no namespaces are given, within the global
+ namespace.
+ gen_test: If True, also generate a cc_test rule that builds a simple
+ test and benchmark.
+ gen_benchmark: If True, also generate a binary with a simple benchmark.
+ Unlike the output of gen_test, this benchmark can be run on android.
+ visibility: Bazel build visibility.
+ testonly: Bazel testonly attribute.
+ tfcompile_flags: Extra flags to pass to tfcompile to control compilation.
+ tfcompile_tool: The tfcompile binary. A non-default can be passed to
+ use a tfcompile built with extra dependencies.
+ include_standard_runtime_deps: If True, the standard list of kernel/runtime
+ deps is added to deps. If False, deps must contain the full set of deps
+ needed by the generated library.
+ enable_xla_hlo_profiling: Enable XLA HLO profiling in the generated program,
+ and emit metadata that lets us pretty-print the gathered profile counters.
+ deps: a list of deps to include on the build rules for the generated
+ library, added to the standard deps if standard_runtime_deps is True.
+ tags: tags to apply to subsidiary build rules.
-def tf_library(
- name,
- graph,
- config,
- freeze_checkpoint = None,
- freeze_saver = None,
- cpp_class = None,
- gen_test = True,
- gen_benchmark = True,
- visibility = None,
- testonly = None,
- tfcompile_flags = None,
- tfcompile_tool = "//tensorflow/compiler/aot:tfcompile",
- include_standard_runtime_deps = True,
- enable_xla_hlo_profiling = False,
- deps = None,
- tags = None):
- """Runs tfcompile to compile a TensorFlow graph into executable code.
+ The output header is called <name>.h.
+ """
+ if not cpp_class:
+ fail("cpp_class must be specified")
- Given an invocation of tf_library(name="foo", ...), generates the following
- build targets:
- foo: A cc_library containing the generated header and computation.
- foo_test: A cc_test with simple tests and benchmarks. Only created if
- gen_test=True.
- foo_benchmark: A cc_binary that runs a minimal-dependency benchmark, useful
- for mobile devices or other platforms that can't compile the
- full test libraries. Only created if gen_benchmark=True.
+ tfcompile_graph = graph
+ if freeze_checkpoint or freeze_saver:
+ if not freeze_checkpoint:
+ fail("freeze_checkpoint must be specified when freeze_saver is specified")
- Args:
- name: The name of the build rule.
- graph: The TensorFlow GraphDef to compile. If the file ends in '.pbtxt' it
- is expected to be in the human-readable proto text format, otherwise it is
- expected to be in the proto binary format.
- config: File containing tensorflow.tf2xla.Config proto. If the file ends
- in '.pbtxt' it is expected to be in the human-readable proto text format,
- otherwise it is expected to be in the proto binary format.
- freeze_checkpoint: If provided, run freeze_graph with this checkpoint to
- convert variables into constants.
- freeze_saver: If provided, run freeze_graph with this saver, in SaverDef
- binary form, to convert variables into constants.
- cpp_class: The name of the generated C++ class, wrapping the generated
- function. The syntax of this flag is
- [[<optional_namespace>::],...]<class_name>. This mirrors the C++ syntax
- for referring to a class, where multiple namespaces may precede the class
- name, separated by double-colons. The class will be generated in the
- given namespace(s), or if no namespaces are given, within the global
- namespace.
- gen_test: If True, also generate a cc_test rule that builds a simple
- test and benchmark.
- gen_benchmark: If True, also generate a binary with a simple benchmark.
- Unlike the output of gen_test, this benchmark can be run on android.
- visibility: Bazel build visibility.
- testonly: Bazel testonly attribute.
- tfcompile_flags: Extra flags to pass to tfcompile to control compilation.
- tfcompile_tool: The tfcompile binary. A non-default can be passed to
- use a tfcompile built with extra dependencies.
- include_standard_runtime_deps: If True, the standard list of kernel/runtime
- deps is added to deps. If False, deps must contain the full set of deps
- needed by the generated library.
- enable_xla_hlo_profiling: Enable XLA HLO profiling in the generated program,
- and emit metadata that lets us pretty-print the gathered profile counters.
- deps: a list of deps to include on the build rules for the generated
- library, added to the standard deps if standard_runtime_deps is True.
- tags: tags to apply to subsidiary build rules.
+ freeze_name = "freeze_" + name
+ freeze_file = freeze_name + ".pb"
- The output header is called <name>.h.
- """
- if not cpp_class:
- fail("cpp_class must be specified")
+ # First run tfcompile to generate the list of out_nodes.
+ out_nodes_file = "out_nodes_" + freeze_name
+ native.genrule(
+ name=("gen_" + out_nodes_file),
+ srcs=[config],
+ outs=[out_nodes_file],
+ cmd=("$(location " + tfcompile_tool + ")" +
+ " --config=$(location " + config + ")" +
+ " --dump_fetch_nodes > $@"),
+ tools=[tfcompile_tool],
+ # Run tfcompile on the build host, rather than forge, since it's
+ # typically way faster on the local machine.
+ local=1,
+ tags=tags,
+ )
- tfcompile_graph = graph
- if freeze_checkpoint or freeze_saver:
- if not freeze_checkpoint:
- fail("freeze_checkpoint must be specified when freeze_saver is specified")
+ # Now run freeze_graph to convert variables into constants.
+ freeze_args = (" --input_graph=$(location " + graph + ")" +
+ " --checkpoint_version=1" +
+ " --input_binary=" + str(not graph.endswith(".pbtxt")) +
+ " --input_checkpoint=$(location " + freeze_checkpoint + ")" +
+ " --output_graph=$(location " + freeze_file + ")" +
+ " --output_node_names=$$(<$(location " + out_nodes_file +
+ "))")
+ freeze_saver_srcs = []
+ if freeze_saver:
+ freeze_args += " --input_saver=$(location " + freeze_saver + ")"
+ freeze_saver_srcs += [freeze_saver]
+ native.genrule(
+ name=freeze_name,
+ srcs=[
+ graph,
+ freeze_checkpoint,
+ out_nodes_file,
+ ] + freeze_saver_srcs,
+ outs=[freeze_file],
+ cmd=("$(location //tensorflow/python/tools:freeze_graph)" +
+ freeze_args),
+ tools=["//tensorflow/python/tools:freeze_graph"],
+ tags=tags,
+ )
+ tfcompile_graph = freeze_file
- freeze_name = "freeze_" + name
- freeze_file = freeze_name + ".pb"
+ # Rule that runs tfcompile to produce the header and object file.
+ header_file = name + ".h"
+ metadata_object_file = name + "_tfcompile_metadata.o"
+ function_object_file = name + "_tfcompile_function.o"
+ ep = ("__" + native.package_name() + "__" + name).replace("/", "_")
+ if type(tfcompile_flags) == type(""):
+ flags = tfcompile_flags
+ else:
+ flags = " ".join(["'" + arg.replace("'", "'\\''") + "'" for arg in (tfcompile_flags or [])])
+ if enable_xla_hlo_profiling:
+ profiling_flag = "--xla_hlo_profile"
+ else:
+ profiling_flag = ""
+ native.genrule(
+ name=("gen_" + name),
+ srcs=[
+ tfcompile_graph,
+ config,
+ ],
+ outs=[
+ header_file,
+ metadata_object_file,
+ function_object_file,
+ ],
+ cmd=("$(location " + tfcompile_tool + ")" +
+ " --graph=$(location " + tfcompile_graph + ")" +
+ " --config=$(location " + config + ")" +
+ " --entry_point=" + ep +
+ " --cpp_class=" + cpp_class +
+ " --target_triple=" + target_llvm_triple() +
+ " --out_header=$(@D)/" + header_file +
+ " --out_metadata_object=$(@D)/" + metadata_object_file +
+ " --out_function_object=$(@D)/" + function_object_file +
+ " " + flags + " " + profiling_flag),
+ tools=[tfcompile_tool],
+ visibility=visibility,
+ testonly=testonly,
+ # Run tfcompile on the build host since it's typically faster on the local
+ # machine.
+ #
+ # Note that setting the local=1 attribute on a *test target* causes the
+ # test infrastructure to skip that test. However this is a genrule, not a
+ # test target, and runs with --genrule_strategy=forced_forge, meaning the
+ # local=1 attribute is ignored, and the genrule is still run.
+ #
+ # https://www.bazel.io/versions/master/docs/be/general.html#genrule
+ local=1,
+ tags=tags,
+ )
- # First run tfcompile to generate the list of out_nodes.
- out_nodes_file = "out_nodes_" + freeze_name
- native.genrule(
- name = ("gen_" + out_nodes_file),
- srcs = [config],
- outs = [out_nodes_file],
- cmd = ("$(location " + tfcompile_tool + ")" +
- " --config=$(location " + config + ")" +
- " --dump_fetch_nodes > $@"),
- tools = [tfcompile_tool],
- # Run tfcompile on the build host, rather than forge, since it's
- # typically way faster on the local machine.
- local = 1,
- tags = tags,
- )
+ # Rule that runs tfcompile to produce the SessionModule proto, useful for
+ # debugging. TODO(b/64813587): Once the SessionModule proto is
+ # deterministic, move this into the main rule above.
+ session_module_pb = name + "_session_module.pb"
+ native.genrule(
+ name=(name + "_session_module"),
+ srcs=[
+ tfcompile_graph,
+ config,
+ ],
+ outs=[
+ session_module_pb,
+ ],
+ cmd=("$(location " + tfcompile_tool + ")" +
+ " --graph=$(location " + tfcompile_graph + ")" +
+ " --config=$(location " + config + ")" +
+ " --entry_point=" + ep +
+ " --cpp_class=" + cpp_class +
+ " --target_triple=" + target_llvm_triple() +
+ " --out_session_module=$(@D)/" + session_module_pb +
+ " " + flags),
+ tools=[tfcompile_tool],
+ visibility=visibility,
+ testonly=testonly,
+ local=1,
+ tags=tags,
+ )
- # Now run freeze_graph to convert variables into constants.
- freeze_args = (" --input_graph=$(location " + graph + ")" +
- " --checkpoint_version=1" +
- " --input_binary=" + str(not graph.endswith(".pbtxt")) +
- " --input_checkpoint=$(location " + freeze_checkpoint + ")" +
- " --output_graph=$(location " + freeze_file + ")" +
- " --output_node_names=$$(<$(location " + out_nodes_file +
- "))")
- freeze_saver_srcs = []
- if freeze_saver:
- freeze_args += " --input_saver=$(location " + freeze_saver + ")"
- freeze_saver_srcs += [freeze_saver]
- native.genrule(
- name = freeze_name,
- srcs = [
- graph,
- freeze_checkpoint,
- out_nodes_file,
- ] + freeze_saver_srcs,
- outs = [freeze_file],
- cmd = ("$(location //tensorflow/python/tools:freeze_graph)" +
- freeze_args),
- tools = ["//tensorflow/python/tools:freeze_graph"],
- tags = tags,
- )
- tfcompile_graph = freeze_file
+ # The cc_library rule packaging up the header and object file, and needed
+ # kernel implementations.
+ need_xla_data_proto = (flags and flags.find("--gen_program_shape") != -1)
+ native.cc_library(
+ name=name,
+ srcs=[function_object_file, metadata_object_file],
+ hdrs=[header_file],
+ visibility=visibility,
+ testonly=testonly,
+ deps = [
+ # These deps are required by all tf_library targets even if
+ # include_standard_runtime_deps is False. Without them, the
+ # generated code will fail to compile.
+ "//tensorflow/compiler/tf2xla:xla_compiled_cpu_function",
+ "//tensorflow/core:framework_lite",
+ ] + (need_xla_data_proto and [
+ # If we're generating the program shape, we must depend on the proto.
+ "//tensorflow/compiler/xla:xla_data_proto",
+ ] or []) + (enable_xla_hlo_profiling and [
+ "//tensorflow/compiler/xla/service:hlo_profile_printer_data"
+ ] or []) + (include_standard_runtime_deps and [
+ # TODO(cwhipkey): only depend on kernel code that the model actually needed.
+ "//tensorflow/compiler/tf2xla/kernels:index_ops_kernel_argmax_float_1d",
+ "//tensorflow/compiler/tf2xla/kernels:index_ops_kernel_argmax_float_2d",
+ "//tensorflow/compiler/xla/service/cpu:runtime_conv2d",
+ "//tensorflow/compiler/xla/service/cpu:runtime_matmul",
+ "//tensorflow/compiler/xla/service/cpu:runtime_single_threaded_conv2d",
+ "//tensorflow/compiler/xla/service/cpu:runtime_single_threaded_matmul",
+ "//third_party/eigen3",
+ ] or []) + (deps or []),
+ tags=tags,
+ )
- # Rule that runs tfcompile to produce the header and object file.
- header_file = name + ".h"
- metadata_object_file = name + "_tfcompile_metadata.o"
- function_object_file = name + "_tfcompile_function.o"
- ep = ("__" + native.package_name() + "__" + name).replace("/", "_")
- if type(tfcompile_flags) == type(""):
- flags = tfcompile_flags
- else:
- flags = " ".join(["'" + arg.replace("'", "'\\''") + "'" for arg in (tfcompile_flags or [])])
- if enable_xla_hlo_profiling:
- profiling_flag = "--xla_hlo_profile"
- else:
- profiling_flag = ""
+ # Variables used for gen_test and gen_benchmark.
+ no_ns_name = ""
+ cpp_class_split = cpp_class.rsplit("::", maxsplit=2)
+ if len(cpp_class_split) == 1:
+ no_ns_name = cpp_class_split[0]
+ else:
+ no_ns_name = cpp_class_split[1]
+ sed_replace = (
+ "-e \"s|{{TFCOMPILE_HEADER}}|$(location " + header_file + ")|g\" " +
+ "-e \"s|{{TFCOMPILE_CPP_CLASS}}|" + cpp_class + "|g\" " +
+ "-e \"s|{{TFCOMPILE_NAME}}|" + no_ns_name + "|g\" ")
+
+ if gen_test:
+ test_name = name + "_test"
+ test_file = test_name + ".cc"
+ # Rule to rewrite test.cc to produce the test_file.
native.genrule(
- name = ("gen_" + name),
- srcs = [
- tfcompile_graph,
- config,
- ],
- outs = [
+ name=("gen_" + test_name),
+ testonly=1,
+ srcs=[
+ "//tensorflow/compiler/aot:test.cc",
header_file,
- metadata_object_file,
- function_object_file,
],
- cmd = ("$(location " + tfcompile_tool + ")" +
- " --graph=$(location " + tfcompile_graph + ")" +
- " --config=$(location " + config + ")" +
- " --entry_point=" + ep +
- " --cpp_class=" + cpp_class +
- " --target_triple=" + target_llvm_triple() +
- " --out_header=$(@D)/" + header_file +
- " --out_metadata_object=$(@D)/" + metadata_object_file +
- " --out_function_object=$(@D)/" + function_object_file +
- " " + flags + " " + profiling_flag),
- tools = [tfcompile_tool],
- visibility = visibility,
- testonly = testonly,
- # Run tfcompile on the build host since it's typically faster on the local
- # machine.
- #
- # Note that setting the local=1 attribute on a *test target* causes the
- # test infrastructure to skip that test. However this is a genrule, not a
- # test target, and runs with --genrule_strategy=forced_forge, meaning the
- # local=1 attribute is ignored, and the genrule is still run.
- #
- # https://www.bazel.io/versions/master/docs/be/general.html#genrule
- local = 1,
- tags = tags,
+ outs=[test_file],
+ cmd=("sed " + sed_replace +
+ " $(location //tensorflow/compiler/aot:test.cc) " +
+ "> $(OUTS)"),
+ tags=tags,
+ )
+
+ # The cc_test rule for the generated code. To ensure that this works
+ # reliably across build configurations, we must use tf_cc_test instead of
+ # native.cc_test. This is related to how we build
+ # //tensorflow/core:lib -- see the note in tensorflow/core/BUILD
+ # for more details.
+ tf_cc_test(
+ name=test_name,
+ srcs=[test_file],
+ deps=[
+ ":" + name,
+ "//tensorflow/compiler/aot:runtime",
+ "//tensorflow/compiler/aot:tf_library_test_main",
+ "//tensorflow/compiler/xla:executable_run_options",
+ "//third_party/eigen3",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:test",
+ ],
+ tags=tags,
)
- # Rule that runs tfcompile to produce the SessionModule proto, useful for
- # debugging. TODO(b/64813587): Once the SessionModule proto is
- # deterministic, move this into the main rule above.
- session_module_pb = name + "_session_module.pb"
+ if gen_benchmark:
+ benchmark_name = name + "_benchmark"
+ benchmark_file = benchmark_name + ".cc"
+ benchmark_main = ("//tensorflow/compiler/aot:" +
+ "benchmark_main.template")
+
+ # Rule to rewrite benchmark.cc to produce the benchmark_file.
native.genrule(
- name = (name + "_session_module"),
- srcs = [
- tfcompile_graph,
- config,
- ],
- outs = [
- session_module_pb,
+ name=("gen_" + benchmark_name),
+ srcs=[
+ benchmark_main,
+ header_file,
],
- cmd = ("$(location " + tfcompile_tool + ")" +
- " --graph=$(location " + tfcompile_graph + ")" +
- " --config=$(location " + config + ")" +
- " --entry_point=" + ep +
- " --cpp_class=" + cpp_class +
- " --target_triple=" + target_llvm_triple() +
- " --out_session_module=$(@D)/" + session_module_pb +
- " " + flags),
- tools = [tfcompile_tool],
- visibility = visibility,
testonly = testonly,
- local = 1,
- tags = tags,
+ outs=[benchmark_file],
+ cmd=("sed " + sed_replace +
+ " $(location " + benchmark_main + ") " +
+ "> $(OUTS)"),
+ tags=tags,
)
- # The cc_library rule packaging up the header and object file, and needed
- # kernel implementations.
- need_xla_data_proto = (flags and flags.find("--gen_program_shape") != -1)
- native.cc_library(
- name = name,
- srcs = [function_object_file, metadata_object_file],
- hdrs = [header_file],
- visibility = visibility,
+ # The cc_benchmark rule for the generated code. This does not need the
+ # tf_cc_binary since we (by deliberate design) do not depend on
+ # //tensorflow/core:lib.
+ #
+ # Note: to get smaller size on android for comparison, compile with:
+ # --copt=-fvisibility=hidden
+ # --copt=-D_LIBCPP_TYPE_VIS=_LIBCPP_HIDDEN
+ # --copt=-D_LIBCPP_EXCEPTION_ABI=_LIBCPP_HIDDEN
+ native.cc_binary(
+ name=benchmark_name,
+ srcs=[benchmark_file],
testonly = testonly,
- deps = [
- # These deps are required by all tf_library targets even if
- # include_standard_runtime_deps is False. Without them, the
- # generated code will fail to compile.
- "//tensorflow/compiler/tf2xla:xla_compiled_cpu_function",
- "//tensorflow/core:framework_lite",
- ] + (need_xla_data_proto and [
- # If we're generating the program shape, we must depend on the proto.
- "//tensorflow/compiler/xla:xla_data_proto",
- ] or []) + (enable_xla_hlo_profiling and [
- "//tensorflow/compiler/xla/service:hlo_profile_printer_data",
- ] or []) + (include_standard_runtime_deps and [
- # TODO(cwhipkey): only depend on kernel code that the model actually needed.
- "//tensorflow/compiler/tf2xla/kernels:index_ops_kernel_argmax_float_1d",
- "//tensorflow/compiler/tf2xla/kernels:index_ops_kernel_argmax_float_2d",
- "//tensorflow/compiler/xla/service/cpu:runtime_conv2d",
- "//tensorflow/compiler/xla/service/cpu:runtime_matmul",
- "//tensorflow/compiler/xla/service/cpu:runtime_single_threaded_conv2d",
- "//tensorflow/compiler/xla/service/cpu:runtime_single_threaded_matmul",
+ copts = tf_copts(),
+ linkopts = if_android(["-pie", "-s"]),
+ deps=[
+ ":" + name,
+ "//tensorflow/compiler/aot:benchmark",
+ "//tensorflow/compiler/aot:runtime",
+ "//tensorflow/compiler/xla:executable_run_options",
"//third_party/eigen3",
- ] or []) + (deps or []),
- tags = tags,
- )
-
- # Variables used for gen_test and gen_benchmark.
- no_ns_name = ""
- cpp_class_split = cpp_class.rsplit("::", maxsplit = 2)
- if len(cpp_class_split) == 1:
- no_ns_name = cpp_class_split[0]
- else:
- no_ns_name = cpp_class_split[1]
- sed_replace = (
- "-e \"s|{{TFCOMPILE_HEADER}}|$(location " + header_file + ")|g\" " +
- "-e \"s|{{TFCOMPILE_CPP_CLASS}}|" + cpp_class + "|g\" " +
- "-e \"s|{{TFCOMPILE_NAME}}|" + no_ns_name + "|g\" "
+ ] + if_android([
+ "//tensorflow/compiler/aot:benchmark_extra_android",
+ ]),
+ tags=tags,
)
- if gen_test:
- test_name = name + "_test"
- test_file = test_name + ".cc"
-
- # Rule to rewrite test.cc to produce the test_file.
- native.genrule(
- name = ("gen_" + test_name),
- testonly = 1,
- srcs = [
- "//tensorflow/compiler/aot:test.cc",
- header_file,
- ],
- outs = [test_file],
- cmd = ("sed " + sed_replace +
- " $(location //tensorflow/compiler/aot:test.cc) " +
- "> $(OUTS)"),
- tags = tags,
- )
-
- # The cc_test rule for the generated code. To ensure that this works
- # reliably across build configurations, we must use tf_cc_test instead of
- # native.cc_test. This is related to how we build
- # //tensorflow/core:lib -- see the note in tensorflow/core/BUILD
- # for more details.
- tf_cc_test(
- name = test_name,
- srcs = [test_file],
- deps = [
- ":" + name,
- "//tensorflow/compiler/aot:runtime",
- "//tensorflow/compiler/aot:tf_library_test_main",
- "//tensorflow/compiler/xla:executable_run_options",
- "//third_party/eigen3",
- "//tensorflow/core:lib",
- "//tensorflow/core:test",
- ],
- tags = tags,
- )
-
- if gen_benchmark:
- benchmark_name = name + "_benchmark"
- benchmark_file = benchmark_name + ".cc"
- benchmark_main = ("//tensorflow/compiler/aot:" +
- "benchmark_main.template")
-
- # Rule to rewrite benchmark.cc to produce the benchmark_file.
- native.genrule(
- name = ("gen_" + benchmark_name),
- srcs = [
- benchmark_main,
- header_file,
- ],
- testonly = testonly,
- outs = [benchmark_file],
- cmd = ("sed " + sed_replace +
- " $(location " + benchmark_main + ") " +
- "> $(OUTS)"),
- tags = tags,
- )
-
- # The cc_benchmark rule for the generated code. This does not need the
- # tf_cc_binary since we (by deliberate design) do not depend on
- # //tensorflow/core:lib.
- #
- # Note: to get smaller size on android for comparison, compile with:
- # --copt=-fvisibility=hidden
- # --copt=-D_LIBCPP_TYPE_VIS=_LIBCPP_HIDDEN
- # --copt=-D_LIBCPP_EXCEPTION_ABI=_LIBCPP_HIDDEN
- native.cc_binary(
- name = benchmark_name,
- srcs = [benchmark_file],
- testonly = testonly,
- copts = tf_copts(),
- linkopts = if_android(["-pie", "-s"]),
- deps = [
- ":" + name,
- "//tensorflow/compiler/aot:benchmark",
- "//tensorflow/compiler/aot:runtime",
- "//tensorflow/compiler/xla:executable_run_options",
- "//third_party/eigen3",
- ] + if_android([
- "//tensorflow/compiler/aot:benchmark_extra_android",
- ]),
- tags = tags,
- )
-
def target_llvm_triple():
- """Returns the target LLVM triple to be used for compiling the target."""
-
- # TODO(toddw): Add target_triple for other targets. For details see:
- # http://llvm.org/docs/doxygen/html/Triple_8h_source.html
- return select({
- "//tensorflow:android_armeabi": "armv5-none-android",
- "//tensorflow:android_arm": "armv7-none-android",
- "//tensorflow:android_arm64": "aarch64-none-android",
- "//tensorflow:android_x86": "i686-none-android",
- "//tensorflow:linux_ppc64le": "ppc64le-ibm-linux-gnu",
- "//tensorflow:darwin": "x86_64-none-darwin",
- "//conditions:default": "x86_64-pc-linux",
- })
+ """Returns the target LLVM triple to be used for compiling the target."""
+ # TODO(toddw): Add target_triple for other targets. For details see:
+ # http://llvm.org/docs/doxygen/html/Triple_8h_source.html
+ return select({
+ "//tensorflow:android_armeabi": "armv5-none-android",
+ "//tensorflow:android_arm": "armv7-none-android",
+ "//tensorflow:android_arm64": "aarch64-none-android",
+ "//tensorflow:android_x86": "i686-none-android",
+ "//tensorflow:linux_ppc64le": "ppc64le-ibm-linux-gnu",
+ "//tensorflow:darwin": "x86_64-none-darwin",
+ "//conditions:default": "x86_64-pc-linux",
+ })
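For reference, a minimal BUILD sketch of a tf_library invocation as the docstring above describes; the target, graph, config, and class names here are hypothetical, not taken from this diff.

    load("//tensorflow/compiler/aot:tfcompile.bzl", "tf_library")

    tf_library(
        name = "matmul_comp",               # hypothetical target name
        graph = "matmul_graph.pb",          # binary GraphDef ('.pbtxt' would mean text format)
        config = "matmul_config.pbtxt",     # tensorflow.tf2xla.Config proto, text format
        cpp_class = "foo::bar::MatMulComp", # class MatMulComp in namespace foo::bar
    )

Per the docstring, this would emit the header matmul_comp.h plus the targets matmul_comp (cc_library), matmul_comp_test, and matmul_comp_benchmark.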
diff --git a/tensorflow/compiler/tests/build_defs.bzl b/tensorflow/compiler/tests/build_defs.bzl
index a76f136736..7b114d4f85 100644
--- a/tensorflow/compiler/tests/build_defs.bzl
+++ b/tensorflow/compiler/tests/build_defs.bzl
@@ -4,97 +4,88 @@ load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
load("//tensorflow/compiler/tests:plugin.bzl", "plugins")
def all_backends():
- b = ["cpu"] + plugins.keys()
- if cuda_is_configured():
- return b + ["gpu"]
- else:
- return b
+ b = ["cpu"] + plugins.keys()
+ if cuda_is_configured():
+ return b + ["gpu"]
+ else:
+ return b
-def tf_xla_py_test(
- name,
- srcs = [],
- deps = [],
- tags = [],
- data = [],
- main = None,
- disabled_backends = None,
- **kwargs):
- """Generates py_test targets, one per XLA backend.
+def tf_xla_py_test(name, srcs=[], deps=[], tags=[], data=[], main=None,
+ disabled_backends=None, **kwargs):
+ """Generates py_test targets, one per XLA backend.
- This rule generates py_test() targets named name_backend, for each backend
- in all_backends(). The rule also generates a test suite with named `name` that
- tests all backends for the test.
+ This rule generates py_test() targets named name_backend, for each backend
+ in all_backends(). The rule also generates a test suite with named `name` that
+ tests all backends for the test.
- For example, the following rule generates test cases foo_test_cpu,
- foo_test_gpu, and a test suite name foo_test that tests both.
- tf_xla_py_test(
- name="foo_test",
- srcs="foo_test.py",
- deps=[...],
- )
+ For example, the following rule generates test cases foo_test_cpu,
+ foo_test_gpu, and a test suite name foo_test that tests both.
+ tf_xla_py_test(
+ name="foo_test",
+ srcs="foo_test.py",
+ deps=[...],
+ )
- Args:
- name: Name of the target.
- srcs: Sources for the target.
- deps: Dependencies of the target.
- tags: Tags to apply to the generated targets.
- data: Data dependencies of the target.
- main: Same as py_test's main attribute.
- disabled_backends: A list of backends that should not be tested. Supported
- values include "cpu" and "gpu". If not specified, defaults to None.
- **kwargs: keyword arguments passed onto the generated py_test() rules.
- """
- if disabled_backends == None:
- disabled_backends = []
+ Args:
+ name: Name of the target.
+ srcs: Sources for the target.
+ deps: Dependencies of the target.
+ tags: Tags to apply to the generated targets.
+ data: Data dependencies of the target.
+ main: Same as py_test's main attribute.
+ disabled_backends: A list of backends that should not be tested. Supported
+ values include "cpu" and "gpu". If not specified, defaults to None.
+ **kwargs: keyword arguments passed onto the generated py_test() rules.
+ """
+ if disabled_backends == None:
+ disabled_backends = []
- enabled_backends = [b for b in all_backends() if b not in disabled_backends]
- test_names = []
- for backend in enabled_backends:
- test_name = "{}_{}".format(name, backend)
- backend_tags = ["tf_xla_{}".format(backend)]
- backend_args = []
- backend_deps = []
- backend_data = []
- if backend == "cpu":
- backend_args += [
- "--test_device=XLA_CPU",
- "--types=DT_HALF,DT_FLOAT,DT_DOUBLE,DT_INT32,DT_INT64,DT_BOOL,DT_COMPLEX64",
- ]
- elif backend == "gpu":
- backend_args += [
- "--test_device=XLA_GPU",
- "--types=DT_HALF,DT_FLOAT,DT_DOUBLE,DT_INT32,DT_INT64,DT_BOOL,DT_COMPLEX64,DT_BFLOAT16",
- ]
- backend_tags += ["requires-gpu-sm35"]
- elif backend in plugins:
- backend_args += [
- "--test_device=" + plugins[backend]["device"],
- "--types=" + plugins[backend]["types"],
- ]
- backend_tags += plugins[backend]["tags"]
- backend_args += plugins[backend]["args"]
- backend_deps += plugins[backend]["deps"]
- backend_data += plugins[backend]["data"]
- else:
- fail("Unknown backend {}".format(backend))
+ enabled_backends = [b for b in all_backends() if b not in disabled_backends]
+ test_names = []
+ for backend in enabled_backends:
+ test_name = "{}_{}".format(name, backend)
+ backend_tags = ["tf_xla_{}".format(backend)]
+ backend_args = []
+ backend_deps = []
+ backend_data = []
+ if backend == "cpu":
+ backend_args += [
+ "--test_device=XLA_CPU",
+ "--types=DT_HALF,DT_FLOAT,DT_DOUBLE,DT_INT32,DT_INT64,DT_BOOL,DT_COMPLEX64"
+ ]
+ elif backend == "gpu":
+ backend_args += [
+ "--test_device=XLA_GPU",
+ "--types=DT_HALF,DT_FLOAT,DT_DOUBLE,DT_INT32,DT_INT64,DT_BOOL,DT_COMPLEX64,DT_BFLOAT16"
+ ]
+ backend_tags += ["requires-gpu-sm35"]
+ elif backend in plugins:
+ backend_args += ["--test_device=" + plugins[backend]["device"],
+ "--types=" + plugins[backend]["types"]]
+ backend_tags += plugins[backend]["tags"]
+ backend_args += plugins[backend]["args"]
+ backend_deps += plugins[backend]["deps"]
+ backend_data += plugins[backend]["data"]
+ else:
+ fail("Unknown backend {}".format(backend))
- native.py_test(
- name = test_name,
- srcs = srcs,
- srcs_version = "PY2AND3",
- args = backend_args,
- main = "{}.py".format(name) if main == None else main,
- data = data + backend_data,
- deps = deps + backend_deps,
- tags = tags + backend_tags,
- **kwargs
- )
- test_names.append(test_name)
- native.test_suite(name = name, tests = test_names)
+ native.py_test(
+ name=test_name,
+ srcs=srcs,
+ srcs_version="PY2AND3",
+ args=backend_args,
+ main="{}.py".format(name) if main == None else main,
+ data=data + backend_data,
+ deps=deps + backend_deps,
+ tags=tags + backend_tags,
+ **kwargs
+ )
+ test_names.append(test_name)
+ native.test_suite(name=name, tests=test_names)
-def generate_backend_suites(backends = []):
- """Generates per-backend test_suites that run all tests for a backend."""
- if not backends:
- backends = all_backends()
- for backend in backends:
- native.test_suite(name = "%s_tests" % backend, tags = ["tf_xla_%s" % backend])
+def generate_backend_suites(backends=[]):
+ """Generates per-backend test_suites that run all tests for a backend."""
+ if not backends:
+ backends = all_backends()
+ for backend in backends:
+ native.test_suite(name="%s_tests" % backend, tags=["tf_xla_%s" % backend])
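A hypothetical BUILD sketch combining the two macros above; the test name and sources are illustrative only.

    load(
        "//tensorflow/compiler/tests:build_defs.bzl",
        "generate_backend_suites",
        "tf_xla_py_test",
    )

    tf_xla_py_test(
        name = "unary_ops_test",
        srcs = ["unary_ops_test.py"],
        deps = [],  # a real test lists its Python deps here
    )

    # With CUDA configured this yields unary_ops_test_cpu and unary_ops_test_gpu,
    # plus a test_suite named unary_ops_test covering both.

    generate_backend_suites()  # per-backend suites: cpu_tests, gpu_tests, ...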
diff --git a/tensorflow/compiler/tests/plugin.bzl b/tensorflow/compiler/tests/plugin.bzl
index 46a854d145..fbc8781a3e 100644
--- a/tensorflow/compiler/tests/plugin.bzl
+++ b/tensorflow/compiler/tests/plugin.bzl
@@ -18,12 +18,13 @@
# git update-index --assume-unchanged tensorflow/compiler/tests/plugin.bzl
plugins = {
- #"example": {
- # "device":"XLA_MY_DEVICE",
- # "types":"DT_FLOAT,DT_HALF,DT_INT32",
- # "tags":[],
- # "args":["--disabled_manifest=tensorflow/compiler/plugin/example/disabled_manifest.txt"],
- # "data":["//tensorflow/compiler/plugin/example:disabled_manifest.txt"],
- # "deps":[],
- #},
+ #"example": {
+ # "device":"XLA_MY_DEVICE",
+ # "types":"DT_FLOAT,DT_HALF,DT_INT32",
+ # "tags":[],
+ # "args":["--disabled_manifest=tensorflow/compiler/plugin/example/disabled_manifest.txt"],
+ # "data":["//tensorflow/compiler/plugin/example:disabled_manifest.txt"],
+ # "deps":[],
+ #},
}
+
diff --git a/tensorflow/compiler/xla/service/cpu/build_defs.bzl b/tensorflow/compiler/xla/service/cpu/build_defs.bzl
index ffa1cd4ec8..e78330b216 100644
--- a/tensorflow/compiler/xla/service/cpu/build_defs.bzl
+++ b/tensorflow/compiler/xla/service/cpu/build_defs.bzl
@@ -1,11 +1,12 @@
"""build_defs for service/cpu."""
+
def runtime_copts():
- """Returns copts used for CPU runtime libraries."""
- return (["-DEIGEN_AVOID_STL_ARRAY"] + select({
- "//tensorflow:android_arm": ["-mfpu=neon"],
- "//conditions:default": [],
- }) + select({
- "//tensorflow:android": ["-O2"],
- "//conditions:default": [],
- }))
+ """Returns copts used for CPU runtime libraries."""
+ return (["-DEIGEN_AVOID_STL_ARRAY"] + select({
+ "//tensorflow:android_arm": ["-mfpu=neon"],
+ "//conditions:default": []
+ }) + select({
+ "//tensorflow:android": ["-O2"],
+ "//conditions:default": []
+ }))
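As an illustration of how runtime_copts() is consumed, a hypothetical cc_library; the target and source file are made up.

    load("//tensorflow/compiler/xla/service/cpu:build_defs.bzl", "runtime_copts")

    cc_library(
        name = "runtime_example",
        srcs = ["runtime_example.cc"],
        # -DEIGEN_AVOID_STL_ARRAY everywhere, plus -mfpu=neon / -O2 on Android.
        copts = runtime_copts(),
    )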
diff --git a/tensorflow/compiler/xla/tests/build_defs.bzl b/tensorflow/compiler/xla/tests/build_defs.bzl
index caa89b5725..53f2c3bfbf 100644
--- a/tensorflow/compiler/xla/tests/build_defs.bzl
+++ b/tensorflow/compiler/xla/tests/build_defs.bzl
@@ -7,258 +7,252 @@ load("//tensorflow:tensorflow.bzl", "tf_cc_test")
all_backends = ["cpu", "gpu"] + plugins.keys()
def filter_backends(backends):
- """Removes "gpu" from a backend list if CUDA is not enabled.
+ """Removes "gpu" from a backend list if CUDA is not enabled.
+
+ This allows us to simply hardcode lists including "gpu" here and in the
+ BUILD file, without causing failures when CUDA isn't enabled.'
+
+ Args:
+ backends: A list of backends to filter.
+
+ Returns:
+ The filtered list of backends.
+ """
+ if cuda_is_configured():
+ return backends
+ else:
+ return [backend for backend in backends if backend != "gpu"]
+
+
+def xla_test(name,
+ srcs,
+ deps,
+ xla_test_library_deps=[],
+ backends=[],
+ blacklisted_backends=[],
+ args=[],
+ tags=[],
+ copts=[],
+ data=[],
+ backend_tags={},
+ backend_args={},
+ **kwargs):
+ """Generates cc_test targets for the given XLA backends.
+
+ This rule generates a cc_test target for one or more XLA backends and also a
+ platform-agnostic cc_library rule. The arguments are identical to cc_test with
+ two additions: 'backends' and 'backend_args'. 'backends' specifies the
+ backends to generate tests for ("cpu", "gpu"), and
+ 'backend_args'/'backend_tags' specifies backend-specific args parameters to
+ use when generating the cc_test.
+
+ The name of the cc_tests are the provided name argument with the backend name
+ appended, and the cc_library target name is the provided name argument with
+ "_lib" appended. For example, if name parameter is "foo_test", then the cpu
+ test target will be "foo_test_cpu" and the cc_library target is "foo_lib".
+
+ The cc_library target can be used to link with other plugins outside of
+ xla_test.
+
+ The build rule also defines a test suite ${name} which includes the tests for
+ each of the supported backends.
+
+ Each generated cc_test target has a tag indicating which backend the test is
+ for. This tag is of the form "xla_${BACKEND}" (eg, "xla_cpu"). These
+ tags can be used to gather tests for a particular backend into a test_suite.
+
+ Examples:
+
+ # Generates the targets: foo_test_cpu and foo_test_gpu.
+ xla_test(
+ name = "foo_test",
+ srcs = ["foo_test.cc"],
+ backends = ["cpu", "gpu"],
+ deps = [...],
+ )
- This allows us to simply hardcode lists including "gpu" here and in the
- BUILD file, without causing failures when CUDA isn't enabled.'
+ # Generates the targets: bar_test_cpu and bar_test_gpu. bar_test_cpu
+ # includes the additional arg "--special_cpu_flag".
+ xla_test(
+ name = "bar_test",
+ srcs = ["bar_test.cc"],
+ backends = ["cpu", "gpu"],
+ backend_args = {"cpu": ["--special_cpu_flag"]}
+ deps = [...],
+ )
- Args:
- backends: A list of backends to filter.
+ The build rule defines the preprocessor macro XLA_TEST_BACKEND_${BACKEND}
+ to the value 1 where ${BACKEND} is the uppercase name of the backend.
+
+ Args:
+ name: Name of the target.
+ srcs: Sources for the target.
+ deps: Dependencies of the target.
+ xla_test_library_deps: If set, the generated test targets will depend on the
+ respective cc_libraries generated by the xla_test_library rule.
+ backends: A list of backends to generate tests for. Supported values: "cpu",
+ "gpu". If this list is empty, the test will be generated for all supported
+ backends.
+ blacklisted_backends: A list of backends to NOT generate tests for.
+ args: Test arguments for the target.
+ tags: Tags for the target.
+ copts: Additional copts to pass to the build.
+ data: Additional data to pass to the build.
+ backend_tags: A dict mapping backend name to list of additional tags to
+ use for that target.
+ backend_args: A dict mapping backend name to list of additional args to
+ use for that target.
+ **kwargs: Additional keyword arguments to pass to native.cc_test.
+ """
+ test_names = []
+ if not backends:
+ backends = all_backends
+
+ backends = [backend for backend in backends
+ if backend not in blacklisted_backends]
+
+ native.cc_library(
+ name="%s_lib" % name,
+ srcs=srcs,
+ copts=copts,
+ testonly=True,
+ deps=deps + ["//tensorflow/compiler/xla/tests:test_macros_header"],
+ )
+
+ for backend in filter_backends(backends):
+ test_name = "%s_%s" % (name, backend)
+ this_backend_tags = ["xla_%s" % backend]
+ this_backend_copts = []
+ this_backend_args = backend_args.get(backend, [])
+ this_backend_data = []
+ if backend == "cpu":
+ backend_deps = ["//tensorflow/compiler/xla/service:cpu_plugin"]
+ backend_deps += ["//tensorflow/compiler/xla/tests:test_macros_cpu"]
+ elif backend == "gpu":
+ backend_deps = ["//tensorflow/compiler/xla/service:gpu_plugin"]
+ backend_deps += ["//tensorflow/compiler/xla/tests:test_macros_gpu"]
+ this_backend_tags += ["requires-gpu-sm35"]
+ elif backend in plugins:
+ backend_deps = []
+ backend_deps += plugins[backend]["deps"]
+ this_backend_copts += plugins[backend]["copts"]
+ this_backend_tags += plugins[backend]["tags"]
+ this_backend_args += plugins[backend]["args"]
+ this_backend_data += plugins[backend]["data"]
+ else:
+ fail("Unknown backend %s" % backend)
+
+ if xla_test_library_deps:
+ for lib_dep in xla_test_library_deps:
+ backend_deps += ["%s_%s" % (lib_dep, backend)]
+
+ tf_cc_test(
+ name=test_name,
+ srcs=srcs,
+ tags=tags + backend_tags.get(backend, []) + this_backend_tags,
+ extra_copts=copts + ["-DXLA_TEST_BACKEND_%s=1" % backend.upper()] +
+ this_backend_copts,
+ args=args + this_backend_args,
+ deps=deps + backend_deps,
+ data=data + this_backend_data,
+ **kwargs)
+
+ test_names.append(test_name)
+
+ native.test_suite(name=name, tests=test_names)
+
+def xla_test_library(name,
+ srcs,
+ hdrs=[],
+ deps=[],
+ backends=[]):
+ """Generates cc_library targets for the given XLA backends.
+
+ This rule forces the sources to be compiled for each backend so that the
+ backend specific macros could expand correctly. It's useful when test targets
+ in different directories referring to the same sources but test with different
+ arguments.
+
+ Examples:
+
+ # Generates the targets: foo_test_library_cpu and foo_test_gpu.
+ xla_test_library(
+ name = "foo_test_library",
+ srcs = ["foo_test.cc"],
+ backends = ["cpu", "gpu"],
+ deps = [...],
+ )
+ # Then use the xla_test rule to generate test targets:
+ xla_test(
+ name = "foo_test",
+ srcs = [],
+ backends = ["cpu", "gpu"],
+ deps = [...],
+ xla_test_library_deps = [":foo_test_library"],
+ )
- Returns:
- The filtered list of backends.
- """
- if cuda_is_configured():
- return backends
+ Args:
+ name: Name of the target.
+ srcs: Sources for the target.
+ hdrs: Headers for the target.
+ deps: Dependencies of the target.
+ backends: A list of backends to generate libraries for.
+ Supported values: "cpu", "gpu". If this list is empty, the
+ library will be generated for all supported backends.
+ """
+
+ if not backends:
+ backends = all_backends
+
+ for backend in filter_backends(backends):
+ this_backend_copts = []
+ if backend in ["cpu", "gpu"]:
+ backend_deps = ["//tensorflow/compiler/xla/tests:test_macros_%s" % backend]
+ elif backend in plugins:
+ backend_deps = plugins[backend]["deps"]
+ this_backend_copts += plugins[backend]["copts"]
else:
- return [backend for backend in backends if backend != "gpu"]
-
-def xla_test(
- name,
- srcs,
- deps,
- xla_test_library_deps = [],
- backends = [],
- blacklisted_backends = [],
- args = [],
- tags = [],
- copts = [],
- data = [],
- backend_tags = {},
- backend_args = {},
- **kwargs):
- """Generates cc_test targets for the given XLA backends.
-
- This rule generates a cc_test target for one or more XLA backends and also a
- platform-agnostic cc_library rule. The arguments are identical to cc_test with
- two additions: 'backends' and 'backend_args'. 'backends' specifies the
- backends to generate tests for ("cpu", "gpu"), and
- 'backend_args'/'backend_tags' specifies backend-specific args parameters to
- use when generating the cc_test.
-
- The name of the cc_tests are the provided name argument with the backend name
- appended, and the cc_library target name is the provided name argument with
- "_lib" appended. For example, if name parameter is "foo_test", then the cpu
- test target will be "foo_test_cpu" and the cc_library target is "foo_lib".
-
- The cc_library target can be used to link with other plugins outside of
- xla_test.
-
- The build rule also defines a test suite ${name} which includes the tests for
- each of the supported backends.
-
- Each generated cc_test target has a tag indicating which backend the test is
- for. This tag is of the form "xla_${BACKEND}" (eg, "xla_cpu"). These
- tags can be used to gather tests for a particular backend into a test_suite.
-
- Examples:
-
- # Generates the targets: foo_test_cpu and foo_test_gpu.
- xla_test(
- name = "foo_test",
- srcs = ["foo_test.cc"],
- backends = ["cpu", "gpu"],
- deps = [...],
- )
-
- # Generates the targets: bar_test_cpu and bar_test_gpu. bar_test_cpu
- # includes the additional arg "--special_cpu_flag".
- xla_test(
- name = "bar_test",
- srcs = ["bar_test.cc"],
- backends = ["cpu", "gpu"],
- backend_args = {"cpu": ["--special_cpu_flag"]}
- deps = [...],
- )
-
- The build rule defines the preprocessor macro XLA_TEST_BACKEND_${BACKEND}
- to the value 1 where ${BACKEND} is the uppercase name of the backend.
-
- Args:
- name: Name of the target.
- srcs: Sources for the target.
- deps: Dependencies of the target.
- xla_test_library_deps: If set, the generated test targets will depend on the
- respective cc_libraries generated by the xla_test_library rule.
- backends: A list of backends to generate tests for. Supported values: "cpu",
- "gpu". If this list is empty, the test will be generated for all supported
- backends.
- blacklisted_backends: A list of backends to NOT generate tests for.
- args: Test arguments for the target.
- tags: Tags for the target.
- copts: Additional copts to pass to the build.
- data: Additional data to pass to the build.
- backend_tags: A dict mapping backend name to list of additional tags to
- use for that target.
- backend_args: A dict mapping backend name to list of additional args to
- use for that target.
- **kwargs: Additional keyword arguments to pass to native.cc_test.
- """
- test_names = []
- if not backends:
- backends = all_backends
-
- backends = [
- backend
- for backend in backends
- if backend not in blacklisted_backends
- ]
+ fail("Unknown backend %s" % backend)
native.cc_library(
- name = "%s_lib" % name,
+ name = "%s_%s" % (name, backend),
srcs = srcs,
- copts = copts,
testonly = True,
- deps = deps + ["//tensorflow/compiler/xla/tests:test_macros_header"],
+ hdrs = hdrs,
+ copts = ["-DXLA_TEST_BACKEND_%s=1" % backend.upper()]
+ + this_backend_copts,
+ deps = deps + backend_deps,
)
- for backend in filter_backends(backends):
- test_name = "%s_%s" % (name, backend)
- this_backend_tags = ["xla_%s" % backend]
- this_backend_copts = []
- this_backend_args = backend_args.get(backend, [])
- this_backend_data = []
- if backend == "cpu":
- backend_deps = ["//tensorflow/compiler/xla/service:cpu_plugin"]
- backend_deps += ["//tensorflow/compiler/xla/tests:test_macros_cpu"]
- elif backend == "gpu":
- backend_deps = ["//tensorflow/compiler/xla/service:gpu_plugin"]
- backend_deps += ["//tensorflow/compiler/xla/tests:test_macros_gpu"]
- this_backend_tags += ["requires-gpu-sm35"]
- elif backend in plugins:
- backend_deps = []
- backend_deps += plugins[backend]["deps"]
- this_backend_copts += plugins[backend]["copts"]
- this_backend_tags += plugins[backend]["tags"]
- this_backend_args += plugins[backend]["args"]
- this_backend_data += plugins[backend]["data"]
- else:
- fail("Unknown backend %s" % backend)
-
- if xla_test_library_deps:
- for lib_dep in xla_test_library_deps:
- backend_deps += ["%s_%s" % (lib_dep, backend)]
-
- tf_cc_test(
- name = test_name,
- srcs = srcs,
- tags = tags + backend_tags.get(backend, []) + this_backend_tags,
- extra_copts = copts + ["-DXLA_TEST_BACKEND_%s=1" % backend.upper()] +
- this_backend_copts,
- args = args + this_backend_args,
- deps = deps + backend_deps,
- data = data + this_backend_data,
- **kwargs
- )
-
- test_names.append(test_name)
-
- native.test_suite(name = name, tests = test_names)
-
-def xla_test_library(
- name,
- srcs,
- hdrs = [],
- deps = [],
- backends = []):
- """Generates cc_library targets for the given XLA backends.
-
- This rule forces the sources to be compiled for each backend so that the
- backend specific macros could expand correctly. It's useful when test targets
- in different directories referring to the same sources but test with different
- arguments.
-
- Examples:
-
- # Generates the targets: foo_test_library_cpu and foo_test_gpu.
- xla_test_library(
- name = "foo_test_library",
- srcs = ["foo_test.cc"],
- backends = ["cpu", "gpu"],
- deps = [...],
- )
- # Then use the xla_test rule to generate test targets:
- xla_test(
- name = "foo_test",
- srcs = [],
- backends = ["cpu", "gpu"],
- deps = [...],
- xla_test_library_deps = [":foo_test_library"],
- )
-
- Args:
- name: Name of the target.
- srcs: Sources for the target.
- hdrs: Headers for the target.
- deps: Dependencies of the target.
- backends: A list of backends to generate libraries for.
- Supported values: "cpu", "gpu". If this list is empty, the
- library will be generated for all supported backends.
- """
-
- if not backends:
- backends = all_backends
-
- for backend in filter_backends(backends):
- this_backend_copts = []
- if backend in ["cpu", "gpu"]:
- backend_deps = ["//tensorflow/compiler/xla/tests:test_macros_%s" % backend]
- elif backend in plugins:
- backend_deps = plugins[backend]["deps"]
- this_backend_copts += plugins[backend]["copts"]
- else:
- fail("Unknown backend %s" % backend)
-
- native.cc_library(
- name = "%s_%s" % (name, backend),
- srcs = srcs,
- testonly = True,
- hdrs = hdrs,
- copts = ["-DXLA_TEST_BACKEND_%s=1" % backend.upper()] +
- this_backend_copts,
- deps = deps + backend_deps,
- )
-
-def generate_backend_suites(backends = []):
- if not backends:
- backends = all_backends
- for backend in filter_backends(backends):
- native.test_suite(
- name = "%s_tests" % backend,
- tags = ["xla_%s" % backend],
- )
-
-def generate_backend_test_macros(backends = []):
- if not backends:
- backends = all_backends
- for backend in filter_backends(backends):
- manifest = ""
- if backend in plugins:
- manifest = plugins[backend]["disabled_manifest"]
-
- native.cc_library(
- name = "test_macros_%s" % backend,
- testonly = True,
- srcs = ["test_macros.cc"],
- hdrs = ["test_macros.h"],
- copts = [
- "-DXLA_PLATFORM=\\\"%s\\\"" % backend.upper(),
- "-DXLA_DISABLED_MANIFEST=\\\"%s\\\"" % manifest,
- ],
- deps = [
- "//tensorflow/compiler/xla:types",
- "//tensorflow/core:lib",
- "//tensorflow/core:regexp_internal",
- "//tensorflow/core:test",
- ],
- )
+
+def generate_backend_suites(backends=[]):
+ if not backends:
+ backends = all_backends
+ for backend in filter_backends(backends):
+ native.test_suite(name="%s_tests" % backend,
+ tags = ["xla_%s" % backend])
+
+
+def generate_backend_test_macros(backends=[]):
+ if not backends:
+ backends = all_backends
+ for backend in filter_backends(backends):
+ manifest = ""
+ if backend in plugins:
+ manifest = plugins[backend]["disabled_manifest"]
+
+ native.cc_library(
+ name="test_macros_%s" % backend,
+ testonly = True,
+ srcs = ["test_macros.cc"],
+ hdrs = ["test_macros.h"],
+ copts = [
+ "-DXLA_PLATFORM=\\\"%s\\\"" % backend.upper(),
+ "-DXLA_DISABLED_MANIFEST=\\\"%s\\\"" % manifest,
+ ],
+ deps = [
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:regexp_internal",
+ "//tensorflow/core:test",
+ ])
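A sketch of the two no-argument helpers at the end of this file as they might be invoked from the tests package's BUILD file (which must provide the test_macros.cc and test_macros.h sources that generate_backend_test_macros compiles).

    load(
        "//tensorflow/compiler/xla/tests:build_defs.bzl",
        "generate_backend_suites",
        "generate_backend_test_macros",
    )

    generate_backend_suites()       # test_suites cpu_tests, gpu_tests, ... tagged xla_<backend>
    generate_backend_test_macros()  # cc_libraries test_macros_cpu, test_macros_gpu, ...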
diff --git a/tensorflow/compiler/xla/tests/plugin.bzl b/tensorflow/compiler/xla/tests/plugin.bzl
index 107869fe59..8a5d91363b 100644
--- a/tensorflow/compiler/xla/tests/plugin.bzl
+++ b/tensorflow/compiler/xla/tests/plugin.bzl
@@ -33,3 +33,4 @@
# }
plugins = {}
+
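For reference, the entry shape this dict is expected to hold, inferred from the keys that xla/tests/build_defs.bzl reads; every value below is hypothetical.

    # plugins = {
    #     "example": {
    #         "deps": ["//tensorflow/compiler/plugin/example:example_backend"],
    #         "copts": [],
    #         "tags": [],
    #         "args": [],
    #         "data": [],
    #         "disabled_manifest": "tensorflow/compiler/plugin/example/disabled_manifest.txt",
    #     },
    # }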
diff --git a/tensorflow/compiler/xla/xla.bzl b/tensorflow/compiler/xla/xla.bzl
index cd64b4289c..1439f1bcc5 100644
--- a/tensorflow/compiler/xla/xla.bzl
+++ b/tensorflow/compiler/xla/xla.bzl
@@ -1,35 +1,30 @@
"""Wrapper around cc_proto_library used inside the XLA codebase."""
-load(
- "//tensorflow/core:platform/default/build_config.bzl",
- "cc_proto_library",
-)
-load(
- "//tensorflow/core:platform/default/build_config_root.bzl",
- "if_static",
-)
+load("//tensorflow/core:platform/default/build_config.bzl",
+ "cc_proto_library")
+load("//tensorflow/core:platform/default/build_config_root.bzl",
+ "if_static")
# xla_proto_library() is a convenience wrapper around cc_proto_library.
-def xla_proto_library(name, srcs = [], deps = [], visibility = None, testonly = 0, **kwargs):
- if kwargs.get("use_grpc_plugin"):
- kwargs["use_grpc_namespace"] = True
- cc_proto_library(
- name = name,
- srcs = srcs,
- deps = deps,
- cc_libs = if_static(
- ["@protobuf_archive//:protobuf"],
- otherwise = ["@protobuf_archive//:protobuf_headers"],
- ),
- protoc = "@protobuf_archive//:protoc",
- testonly = testonly,
- visibility = visibility,
- **kwargs
- )
+def xla_proto_library(name, srcs=[], deps=[], visibility=None, testonly=0, **kwargs):
+ if kwargs.get('use_grpc_plugin'):
+ kwargs['use_grpc_namespace'] = True
+ cc_proto_library(name=name,
+ srcs=srcs,
+ deps=deps,
+ cc_libs = if_static(
+ ["@protobuf_archive//:protobuf"],
+ otherwise=["@protobuf_archive//:protobuf_headers"],
+ ),
+ protoc="@protobuf_archive//:protoc",
+ testonly=testonly,
+ visibility=visibility,
+ **kwargs)
def xla_py_grpc_library(**kwargs):
- # Note: we don't currently define any special targets for Python GRPC in OSS.
- _ignore = kwargs
- pass
+ # Note: we don't currently define any special targets for Python GRPC in OSS.
+ _ignore = kwargs
+ pass
+
ORC_JIT_MEMORY_MAPPER_TARGETS = []
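A minimal hypothetical use of the wrapper above; the proto and target names are illustrative. Passing use_grpc_plugin = True would additionally set use_grpc_namespace, as the macro shows.

    load("//tensorflow/compiler/xla:xla.bzl", "xla_proto_library")

    xla_proto_library(
        name = "example_proto",
        srcs = ["example.proto"],
        visibility = ["//visibility:public"],
    )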
diff --git a/tensorflow/contrib/lite/build_def.bzl b/tensorflow/contrib/lite/build_def.bzl
index 6a0ec896a4..5543acc1f5 100644
--- a/tensorflow/contrib/lite/build_def.bzl
+++ b/tensorflow/contrib/lite/build_def.bzl
@@ -1,196 +1,193 @@
"""Generate Flatbuffer binary from json."""
-
load(
"//tensorflow:tensorflow.bzl",
"tf_cc_test",
)
def tflite_copts():
- """Defines compile time flags."""
- copts = [
- "-DFARMHASH_NO_CXX_STRING",
- ] + select({
- str(Label("//tensorflow:android_arm64")): [
- "-std=c++11",
- "-O3",
- ],
- str(Label("//tensorflow:android_arm")): [
- "-mfpu=neon",
- "-mfloat-abi=softfp",
- "-std=c++11",
- "-O3",
- ],
- str(Label("//tensorflow:android_x86")): [
- "-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK",
- ],
- str(Label("//tensorflow:ios_x86_64")): [
- "-msse4.1",
- ],
- "//conditions:default": [],
- }) + select({
- str(Label("//tensorflow:with_default_optimizations")): [],
- "//conditions:default": ["-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK"],
- })
+ """Defines compile time flags."""
+ copts = [
+ "-DFARMHASH_NO_CXX_STRING",
+ ] + select({
+ str(Label("//tensorflow:android_arm64")): [
+ "-std=c++11",
+ "-O3",
+ ],
+ str(Label("//tensorflow:android_arm")): [
+ "-mfpu=neon",
+ "-mfloat-abi=softfp",
+ "-std=c++11",
+ "-O3",
+ ],
+ str(Label("//tensorflow:android_x86")): [
+ "-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK",
+ ],
+ str(Label("//tensorflow:ios_x86_64")): [
+ "-msse4.1",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ str(Label("//tensorflow:with_default_optimizations")): [],
+ "//conditions:default": ["-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK"],
+ })
- return copts
+ return copts
LINKER_SCRIPT = "//tensorflow/contrib/lite/java/src/main/native:version_script.lds"
def tflite_linkopts_unstripped():
- """Defines linker flags to reduce size of TFLite binary.
+ """Defines linker flags to reduce size of TFLite binary.
- These are useful when trying to investigate the relative size of the
- symbols in TFLite.
+ These are useful when trying to investigate the relative size of the
+ symbols in TFLite.
- Returns:
- a select object with proper linkopts
- """
- return select({
- "//tensorflow:android": [
- "-Wl,--no-export-dynamic", # Only inc syms referenced by dynamic obj.
- "-Wl,--exclude-libs,ALL", # Exclude syms in all libs from auto export.
- "-Wl,--gc-sections", # Eliminate unused code and data.
- "-Wl,--as-needed", # Don't link unused libs.
- ],
- "//tensorflow/contrib/lite:mips": [],
- "//tensorflow/contrib/lite:mips64": [],
- "//conditions:default": [
- "-Wl,--icf=all", # Identical code folding.
- ],
- })
+ Returns:
+ a select object with proper linkopts
+ """
+ return select({
+ "//tensorflow:android": [
+ "-Wl,--no-export-dynamic", # Only inc syms referenced by dynamic obj.
+ "-Wl,--exclude-libs,ALL", # Exclude syms in all libs from auto export.
+ "-Wl,--gc-sections", # Eliminate unused code and data.
+ "-Wl,--as-needed", # Don't link unused libs.
+ ],
+ "//tensorflow/contrib/lite:mips": [],
+ "//tensorflow/contrib/lite:mips64": [],
+ "//conditions:default": [
+ "-Wl,--icf=all", # Identical code folding.
+ ],
+ })
def tflite_jni_linkopts_unstripped():
- """Defines linker flags to reduce size of TFLite binary with JNI.
+ """Defines linker flags to reduce size of TFLite binary with JNI.
- These are useful when trying to investigate the relative size of the
- symbols in TFLite.
+ These are useful when trying to investigate the relative size of the
+ symbols in TFLite.
- Returns:
- a select object with proper linkopts
- """
- return select({
- "//tensorflow:android": [
- "-Wl,--gc-sections", # Eliminate unused code and data.
- "-Wl,--as-needed", # Don't link unused libs.
- ],
- "//tensorflow/contrib/lite:mips": [],
- "//tensorflow/contrib/lite:mips64": [],
- "//conditions:default": [
- "-Wl,--icf=all", # Identical code folding.
- ],
- })
+ Returns:
+ a select object with proper linkopts
+ """
+ return select({
+ "//tensorflow:android": [
+ "-Wl,--gc-sections", # Eliminate unused code and data.
+ "-Wl,--as-needed", # Don't link unused libs.
+ ],
+ "//tensorflow/contrib/lite:mips": [],
+ "//tensorflow/contrib/lite:mips64": [],
+ "//conditions:default": [
+ "-Wl,--icf=all", # Identical code folding.
+ ],
+ })
def tflite_linkopts():
- """Defines linker flags to reduce size of TFLite binary."""
- return tflite_linkopts_unstripped() + select({
- "//tensorflow:android": [
- "-s", # Omit symbol table.
- ],
- "//conditions:default": [],
- })
+ """Defines linker flags to reduce size of TFLite binary."""
+ return tflite_linkopts_unstripped() + select({
+ "//tensorflow:android": [
+ "-s", # Omit symbol table.
+ ],
+ "//conditions:default": [],
+ })
def tflite_jni_linkopts():
- """Defines linker flags to reduce size of TFLite binary with JNI."""
- return tflite_jni_linkopts_unstripped() + select({
- "//tensorflow:android": [
- "-s", # Omit symbol table.
- "-latomic", # Required for some uses of ISO C++11 <atomic> in x86.
- ],
- "//conditions:default": [],
- })
+ """Defines linker flags to reduce size of TFLite binary with JNI."""
+ return tflite_jni_linkopts_unstripped() + select({
+ "//tensorflow:android": [
+ "-s", # Omit symbol table.
+ "-latomic", # Required for some uses of ISO C++11 <atomic> in x86.
+ ],
+ "//conditions:default": [],
+ })
-def tflite_jni_binary(
- name,
- copts = tflite_copts(),
- linkopts = tflite_jni_linkopts(),
- linkscript = LINKER_SCRIPT,
- linkshared = 1,
- linkstatic = 1,
- deps = []):
- """Builds a jni binary for TFLite."""
- linkopts = linkopts + [
- "-Wl,--version-script", # Export only jni functions & classes.
- "$(location {})".format(linkscript),
- ]
- native.cc_binary(
- name = name,
- copts = copts,
- linkshared = linkshared,
- linkstatic = linkstatic,
- deps = deps + [linkscript],
- linkopts = linkopts,
- )
+def tflite_jni_binary(name,
+ copts=tflite_copts(),
+ linkopts=tflite_jni_linkopts(),
+ linkscript=LINKER_SCRIPT,
+ linkshared=1,
+ linkstatic=1,
+ deps=[]):
+ """Builds a jni binary for TFLite."""
+ linkopts = linkopts + [
+ "-Wl,--version-script", # Export only jni functions & classes.
+ "$(location {})".format(linkscript),
+ ]
+ native.cc_binary(
+ name=name,
+ copts=copts,
+ linkshared=linkshared,
+ linkstatic=linkstatic,
+ deps= deps + [linkscript],
+ linkopts=linkopts)
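A minimal usage sketch for the macro above; the output name and dependency are assumptions, and the version script is attached automatically via LINKER_SCRIPT:

    load("//tensorflow/contrib/lite:build_def.bzl", "tflite_jni_binary")

    tflite_jni_binary(
        name = "libtensorflowlite_jni.so",   # hypothetical shared-object name
        deps = [":my_jni_sources"],          # hypothetical cc_library with the JNI code
    )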
def tf_to_tflite(name, src, options, out):
- """Convert a frozen tensorflow graphdef to TF Lite's flatbuffer.
+ """Convert a frozen tensorflow graphdef to TF Lite's flatbuffer.
- Args:
- name: Name of rule.
- src: name of the input graphdef file.
- options: options passed to TOCO.
- out: name of the output flatbuffer file.
- """
+ Args:
+ name: Name of rule.
+ src: name of the input graphdef file.
+ options: options passed to TOCO.
+ out: name of the output flatbuffer file.
+ """
- toco_cmdline = " ".join([
- "//tensorflow/contrib/lite/toco:toco",
- "--input_format=TENSORFLOW_GRAPHDEF",
- "--output_format=TFLITE",
- ("--input_file=$(location %s)" % src),
- ("--output_file=$(location %s)" % out),
- ] + options)
- native.genrule(
- name = name,
- srcs = [src],
- outs = [out],
- cmd = toco_cmdline,
- tools = ["//tensorflow/contrib/lite/toco:toco"],
- )
+ toco_cmdline = " ".join([
+ "//tensorflow/contrib/lite/toco:toco",
+ "--input_format=TENSORFLOW_GRAPHDEF",
+ "--output_format=TFLITE",
+ ("--input_file=$(location %s)" % src),
+ ("--output_file=$(location %s)" % out),
+ ] + options )
+ native.genrule(
+ name = name,
+ srcs=[src],
+ outs=[out],
+ cmd = toco_cmdline,
+ tools= ["//tensorflow/contrib/lite/toco:toco"],
+ )
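A hedged example of invoking the conversion rule; the file names and TOCO flags here are assumptions, not taken from this change:

    load("//tensorflow/contrib/lite:build_def.bzl", "tf_to_tflite")

    tf_to_tflite(
        name = "mobilenet_tflite",        # hypothetical rule name
        src = "mobilenet_frozen.pb",      # hypothetical frozen GraphDef
        options = [
            "--input_arrays=input",       # assumed TOCO flags
            "--output_arrays=output",
        ],
        out = "mobilenet.tflite",
    )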
def tflite_to_json(name, src, out):
- """Convert a TF Lite flatbuffer to JSON.
+ """Convert a TF Lite flatbuffer to JSON.
- Args:
- name: Name of rule.
- src: name of the input flatbuffer file.
- out: name of the output JSON file.
- """
+ Args:
+ name: Name of rule.
+ src: name of the input flatbuffer file.
+ out: name of the output JSON file.
+ """
- flatc = "@flatbuffers//:flatc"
- schema = "//tensorflow/contrib/lite/schema:schema.fbs"
- native.genrule(
- name = name,
- srcs = [schema, src],
- outs = [out],
- cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.bin &&" +
- "$(location %s) --raw-binary --strict-json -t" +
- " -o /tmp $(location %s) -- $${TMP}.bin &&" +
- "cp $${TMP}.json $(location %s)") %
- (src, flatc, schema, out),
- tools = [flatc],
- )
+ flatc = "@flatbuffers//:flatc"
+ schema = "//tensorflow/contrib/lite/schema:schema.fbs"
+ native.genrule(
+ name = name,
+ srcs = [schema, src],
+ outs = [out],
+ cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.bin &&" +
+ "$(location %s) --raw-binary --strict-json -t" +
+ " -o /tmp $(location %s) -- $${TMP}.bin &&" +
+ "cp $${TMP}.json $(location %s)")
+ % (src, flatc, schema, out),
+ tools = [flatc],
+ )
def json_to_tflite(name, src, out):
- """Convert a JSON file to TF Lite's flatbuffer.
+ """Convert a JSON file to TF Lite's flatbuffer.
- Args:
- name: Name of rule.
- src: name of the input JSON file.
- out: name of the output flatbuffer file.
- """
+ Args:
+ name: Name of rule.
+ src: name of the input JSON file.
+ out: name of the output flatbuffer file.
+ """
- flatc = "@flatbuffers//:flatc"
- schema = "//tensorflow/contrib/lite/schema:schema_fbs"
- native.genrule(
- name = name,
- srcs = [schema, src],
- outs = [out],
- cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.json &&" +
- "$(location %s) --raw-binary --unknown-json --allow-non-utf8 -b" +
- " -o /tmp $(location %s) $${TMP}.json &&" +
- "cp $${TMP}.bin $(location %s)") %
- (src, flatc, schema, out),
- tools = [flatc],
- )
+ flatc = "@flatbuffers//:flatc"
+ schema = "//tensorflow/contrib/lite/schema:schema_fbs"
+ native.genrule(
+ name = name,
+ srcs = [schema, src],
+ outs = [out],
+ cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.json &&" +
+ "$(location %s) --raw-binary --unknown-json --allow-non-utf8 -b" +
+ " -o /tmp $(location %s) $${TMP}.json &&" +
+ "cp $${TMP}.bin $(location %s)")
+ % (src, flatc, schema, out),
+ tools = [flatc],
+ )
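Taken together, the two rules above allow a round trip through JSON, which is handy for inspecting or hand-editing a model. A sketch with assumed file names:

    load("//tensorflow/contrib/lite:build_def.bzl", "tflite_to_json", "json_to_tflite")

    tflite_to_json(
        name = "model_as_json",        # hypothetical
        src = "model.tflite",
        out = "model.json",
    )

    json_to_tflite(
        name = "model_from_json",      # hypothetical
        src = "model.json",
        out = "model_roundtrip.tflite",
    )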
# This is the master list of generated examples that will be made into tests. A
# function called make_XXX_tests() must also appear in generate_examples.py.
@@ -265,58 +262,58 @@ def generated_test_models():
]
def gen_zip_test(name, test_name, **kwargs):
- """Generate a zipped-example test and its dependent zip files.
+ """Generate a zipped-example test and its dependent zip files.
- Args:
- name: Resulting cc_test target name
- test_name: Test targets this model. Comes from the list above.
- **kwargs: tf_cc_test kwargs.
- """
- gen_zipped_test_file(
- name = "zip_%s" % test_name,
- file = "%s.zip" % test_name,
- )
- tf_cc_test(name, **kwargs)
+ Args:
+ name: Resulting cc_test target name
+ test_name: Test targets this model. Comes from the list above.
+ **kwargs: tf_cc_test kwargs.
+ """
+ gen_zipped_test_file(
+ name = "zip_%s" % test_name,
+ file = "%s.zip" % test_name,
+ )
+ tf_cc_test(name, **kwargs)
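A sketch of one entry; "transpose" is the example test name used in the docstring below, and the remaining arguments are forwarded to tf_cc_test as-is (the srcs/deps shown are assumptions):

    load("//tensorflow/contrib/lite:build_def.bzl", "gen_zip_test")

    gen_zip_test(
        name = "zip_test_transpose",                  # resulting cc_test
        test_name = "transpose",                      # needs make_transpose_tests() in generate_examples.py
        srcs = ["generated_examples_zip_test.cc"],    # hypothetical tf_cc_test kwargs
        deps = [":test_util"],                        # hypothetical
    )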
def gen_zipped_test_file(name, file):
- """Generate a zip file of tests by using :generate_examples.
+ """Generate a zip file of tests by using :generate_examples.
- Args:
- name: Name of output. We will produce "`file`.files" as a target.
- file: The name of one of the generated_examples targets, e.g. "transpose"
- """
- toco = "//tensorflow/contrib/lite/toco:toco"
- native.genrule(
- name = file + ".files",
- cmd = ("$(locations :generate_examples) --toco $(locations %s) " % toco +
- " --zip_to_output " + file + " $(@D)"),
- outs = [file],
- tools = [
- ":generate_examples",
- toco,
- ],
- )
+ Args:
+ name: Name of output. We will produce "`file`.files" as a target.
+ file: The name of one of the generated_examples targets, e.g. "transpose"
+ """
+ toco = "//tensorflow/contrib/lite/toco:toco"
+ native.genrule(
+ name = file + ".files",
+ cmd = ("$(locations :generate_examples) --toco $(locations %s) " % toco
+ + " --zip_to_output " + file + " $(@D)"),
+ outs = [file],
+ tools = [
+ ":generate_examples",
+ toco,
+ ],
+ )
- native.filegroup(
- name = name,
- srcs = [file],
- )
+ native.filegroup(
+ name = name,
+ srcs = [file],
+ )
def gen_selected_ops(name, model):
- """Generate the library that includes only used ops.
+ """Generate the library that includes only used ops.
- Args:
- name: Name of the generated library.
- model: TFLite model to interpret.
- """
- out = name + "_registration.cc"
- tool = "//tensorflow/contrib/lite/tools:generate_op_registrations"
- tflite_path = "//tensorflow/contrib/lite"
- native.genrule(
- name = name,
- srcs = [model],
- outs = [out],
- cmd = ("$(location %s) --input_model=$(location %s) --output_registration=$(location %s) --tflite_path=%s") %
- (tool, model, out, tflite_path[2:]),
- tools = [tool],
- )
+ Args:
+ name: Name of the generated library.
+ model: TFLite model to interpret.
+ """
+ out = name + "_registration.cc"
+ tool = "//tensorflow/contrib/lite/tools:generate_op_registrations"
+ tflite_path = "//tensorflow/contrib/lite"
+ native.genrule(
+ name = name,
+ srcs = [model],
+ outs = [out],
+ cmd = ("$(location %s) --input_model=$(location %s) --output_registration=$(location %s) --tflite_path=%s")
+ % (tool, model, out, tflite_path[2:]),
+ tools = [tool],
+ )
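A usage sketch for selective registration; the rule emits <name>_registration.cc, which is then compiled into the binary (the model name is assumed):

    load("//tensorflow/contrib/lite:build_def.bzl", "gen_selected_ops")

    gen_selected_ops(
        name = "my_model_ops",         # produces my_model_ops_registration.cc
        model = "my_model.tflite",     # hypothetical model file
    )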
diff --git a/tensorflow/contrib/lite/java/aar_with_jni.bzl b/tensorflow/contrib/lite/java/aar_with_jni.bzl
index 88e1a10f98..db837cf29e 100644
--- a/tensorflow/contrib/lite/java/aar_with_jni.bzl
+++ b/tensorflow/contrib/lite/java/aar_with_jni.bzl
@@ -3,12 +3,12 @@
load("@build_bazel_rules_android//android:rules.bzl", "android_binary")
def aar_with_jni(name, android_library):
- # Generate dummy AndroidManifest.xml for dummy apk usage
- # (dummy apk is generated by <name>_dummy_app_for_so target below)
- native.genrule(
- name = name + "_binary_manifest_generator",
- outs = [name + "_generated_AndroidManifest.xml"],
- cmd = """
+ # Generate dummy AndroidManifest.xml for dummy apk usage
+ # (dummy apk is generated by <name>_dummy_app_for_so target below)
+ native.genrule(
+ name = name + "_binary_manifest_generator",
+ outs = [name + "_generated_AndroidManifest.xml"],
+ cmd = """
cat > $(OUTS) <<EOF
<manifest
xmlns:android="http://schemas.android.com/apk/res/android"
@@ -17,27 +17,27 @@ cat > $(OUTS) <<EOF
</manifest>
EOF
""",
- )
+ )
-  # Generate a dummy apk that includes the .so files; later we extract
-  # the .so files and throw away the apk.
- android_binary(
- name = name + "_dummy_app_for_so",
- manifest = name + "_generated_AndroidManifest.xml",
- custom_package = "dummy.package.for.so",
- deps = [android_library],
-  # On some platforms we don't have an Android SDK/NDK, so this target
- # can't be built. We need to prevent the build system from trying to
- # use the target in that case.
- tags = ["manual"],
- )
+  # Generate a dummy apk that includes the .so files; later we extract
+  # the .so files and throw away the apk.
+ android_binary(
+ name = name + "_dummy_app_for_so",
+ manifest = name + "_generated_AndroidManifest.xml",
+ custom_package = "dummy.package.for.so",
+ deps = [android_library],
+  # On some platforms we don't have an Android SDK/NDK, so this target
+ # can't be built. We need to prevent the build system from trying to
+ # use the target in that case.
+ tags = ["manual"],
+ )
- native.genrule(
- name = name,
- srcs = [android_library + ".aar", name + "_dummy_app_for_so_unsigned.apk"],
- outs = [name + ".aar"],
- tags = ["manual"],
- cmd = """
+ native.genrule(
+ name = name,
+ srcs = [android_library + ".aar", name + "_dummy_app_for_so_unsigned.apk"],
+ outs = [name + ".aar"],
+ tags = ["manual"],
+ cmd = """
cp $(location {}.aar) $(location :{}.aar)
chmod +w $(location :{}.aar)
origdir=$$PWD
@@ -46,4 +46,4 @@ unzip $$origdir/$(location :{}_dummy_app_for_so_unsigned.apk) "lib/*"
cp -r lib jni
zip -r $$origdir/$(location :{}.aar) jni/*/*.so
""".format(android_library, name, name, name, name),
- )
+ )
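A hedged sketch of wiring the macro above to an android_library; the library target name is an assumption:

    load("//tensorflow/contrib/lite/java:aar_with_jni.bzl", "aar_with_jni")

    aar_with_jni(
        name = "tensorflow-lite",                 # produces tensorflow-lite.aar
        android_library = ":tensorflowlite_jni",  # hypothetical android_library
    )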
diff --git a/tensorflow/contrib/lite/special_rules.bzl b/tensorflow/contrib/lite/special_rules.bzl
index e10af3d240..54083c4918 100644
--- a/tensorflow/contrib/lite/special_rules.bzl
+++ b/tensorflow/contrib/lite/special_rules.bzl
@@ -1,6 +1,6 @@
"""External versions of build rules that differ outside of Google."""
def tflite_portable_test_suite(**kwargs):
- """This is a no-op outside of Google."""
- _ignore = [kwargs]
- pass
+ """This is a no-op outside of Google."""
+ _ignore = [kwargs]
+ pass
diff --git a/tensorflow/contrib/proto/python/kernel_tests/build_defs.bzl b/tensorflow/contrib/proto/python/kernel_tests/build_defs.bzl
index 1e4db57253..f425601691 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/build_defs.bzl
+++ b/tensorflow/contrib/proto/python/kernel_tests/build_defs.bzl
@@ -9,87 +9,81 @@ load("//tensorflow:tensorflow.bzl", "register_extension_info")
load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
def _test_name(test, path):
- return "%s_%s_test" % (test, path.split("/")[-1].split(".")[0])
+ return "%s_%s_test" % (test, path.split("/")[-1].split(".")[0])
def decode_proto_test_suite(name, examples):
- """Build the decode_proto py_test for each test filename."""
- for test_filename in examples:
- tf_py_test(
- name = _test_name("decode_proto", test_filename),
- srcs = ["decode_proto_op_test.py"],
- size = "small",
- data = [test_filename] + if_static(
- [],
- otherwise = [":libtestexample.so"],
- ),
- main = "decode_proto_op_test.py",
- args = [
- "--message_text_file=\"%s/%s\"" % (native.package_name(), test_filename),
- ],
- additional_deps = [
- ":py_test_deps",
- "//third_party/py/numpy",
- "//tensorflow/contrib/proto:proto",
- "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
- ],
- tags = [
- "no_pip", # TODO(b/78026780)
- "no_windows", # TODO(b/78028010)
- ],
- )
- native.test_suite(
- name = name,
- tests = [
- ":" + _test_name("decode_proto", test_filename)
- for test_filename in examples
+ """Build the decode_proto py_test for each test filename."""
+ for test_filename in examples:
+ tf_py_test(
+ name = _test_name("decode_proto", test_filename),
+ srcs = ["decode_proto_op_test.py"],
+ size = "small",
+ data = [test_filename] + if_static(
+ [],
+ otherwise = [":libtestexample.so"],
+ ),
+ main = "decode_proto_op_test.py",
+ args = [
+ "--message_text_file=\"%s/%s\"" % (native.package_name(), test_filename),
+ ],
+ additional_deps = [
+ ":py_test_deps",
+ "//third_party/py/numpy",
+ "//tensorflow/contrib/proto:proto",
+ "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
+ ],
+ tags = [
+ "no_pip", # TODO(b/78026780)
+ "no_windows", # TODO(b/78028010)
],
)
+ native.test_suite(
+ name = name,
+ tests = [":" + _test_name("decode_proto", test_filename)
+ for test_filename in examples],
+ )
def encode_proto_test_suite(name, examples):
- """Build the encode_proto py_test for each test filename."""
- for test_filename in examples:
- tf_py_test(
- name = _test_name("encode_proto", test_filename),
- srcs = ["encode_proto_op_test.py"],
- size = "small",
- data = [test_filename] + if_static(
- [],
- otherwise = [":libtestexample.so"],
- ),
- main = "encode_proto_op_test.py",
- args = [
- "--message_text_file=\"%s/%s\"" % (native.package_name(), test_filename),
- ],
- additional_deps = [
- ":py_test_deps",
- "//third_party/py/numpy",
- "//tensorflow/contrib/proto:proto",
- "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
- "//tensorflow/contrib/proto/python/ops:encode_proto_op_py",
- ],
- tags = [
- "no_pip", # TODO(b/78026780)
- "no_windows", # TODO(b/78028010)
- ],
- )
- native.test_suite(
- name = name,
- tests = [
- ":" + _test_name("encode_proto", test_filename)
- for test_filename in examples
+ """Build the encode_proto py_test for each test filename."""
+ for test_filename in examples:
+ tf_py_test(
+ name = _test_name("encode_proto", test_filename),
+ srcs = ["encode_proto_op_test.py"],
+ size = "small",
+ data = [test_filename] + if_static(
+ [],
+ otherwise = [":libtestexample.so"],
+ ),
+ main = "encode_proto_op_test.py",
+ args = [
+ "--message_text_file=\"%s/%s\"" % (native.package_name(), test_filename),
+ ],
+ additional_deps = [
+ ":py_test_deps",
+ "//third_party/py/numpy",
+ "//tensorflow/contrib/proto:proto",
+ "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
+ "//tensorflow/contrib/proto/python/ops:encode_proto_op_py",
+ ],
+ tags = [
+ "no_pip", # TODO(b/78026780)
+ "no_windows", # TODO(b/78028010)
],
)
+ native.test_suite(
+ name = name,
+ tests = [":" + _test_name("encode_proto", test_filename)
+ for test_filename in examples],
+ )
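Both suite macros are invoked the same way; a sketch with assumed example files (the package must also provide the test .py sources and :libtestexample.so referenced above):

    load("//tensorflow/contrib/proto/python/kernel_tests:build_defs.bzl", "decode_proto_test_suite")

    decode_proto_test_suite(
        name = "decode_proto_tests",
        examples = ["simple.pbtxt", "nested.pbtxt"],   # hypothetical .pbtxt inputs
    )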
register_extension_info(
extension_name = "decode_proto_test_suite",
label_regex_map = {
"deps": "deps:decode_example_.*",
- },
-)
+ })
register_extension_info(
extension_name = "encode_proto_test_suite",
label_regex_map = {
"deps": "deps:encode_example_.*",
- },
-)
+ })
diff --git a/tensorflow/core/kernels/fuzzing/tf_ops_fuzz_target_lib.bzl b/tensorflow/core/kernels/fuzzing/tf_ops_fuzz_target_lib.bzl
index 806db32122..f752b59568 100644
--- a/tensorflow/core/kernels/fuzzing/tf_ops_fuzz_target_lib.bzl
+++ b/tensorflow/core/kernels/fuzzing/tf_ops_fuzz_target_lib.bzl
@@ -1,13 +1,13 @@
"""Fuzzing template for TensorFlow ops."""
def tf_ops_fuzz_target_lib(name):
- native.cc_library(
- name = name + "_fuzz_lib",
- srcs = [name + "_fuzz.cc"],
- deps = [
- "//tensorflow/core/kernels/fuzzing:fuzz_session",
- "//tensorflow/cc:cc_ops",
- ],
- tags = ["no_windows"],
- alwayslink = 1,
- )
+ native.cc_library(
+ name = name + "_fuzz_lib",
+ srcs = [name + "_fuzz.cc"],
+ deps = [
+ "//tensorflow/core/kernels/fuzzing:fuzz_session",
+ "//tensorflow/cc:cc_ops",
+ ],
+ tags = ["no_windows"],
+ alwayslink = 1,
+ )
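A usage sketch; it assumes a decode_png_fuzz.cc file sits next to the BUILD file, per the name + "_fuzz.cc" convention above:

    load("//tensorflow/core/kernels/fuzzing:tf_ops_fuzz_target_lib.bzl", "tf_ops_fuzz_target_lib")

    tf_ops_fuzz_target_lib("decode_png")   # builds :decode_png_fuzz_lib from decode_png_fuzz.cc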
diff --git a/tensorflow/core/platform/default/build_config.bzl b/tensorflow/core/platform/default/build_config.bzl
index d915210e9d..66ccd81e41 100644
--- a/tensorflow/core/platform/default/build_config.bzl
+++ b/tensorflow/core/platform/default/build_config.bzl
@@ -13,224 +13,219 @@ load(
# Appends a suffix to a list of deps.
def tf_deps(deps, suffix):
- tf_deps = []
+ tf_deps = []
-    # If the package name is in shorthand form (i.e. it does not contain a ':'),
- # expand it to the full name.
- for dep in deps:
- tf_dep = dep
+  # If the package name is in shorthand form (i.e. it does not contain a ':'),
+ # expand it to the full name.
+ for dep in deps:
+ tf_dep = dep
- if not ":" in dep:
- dep_pieces = dep.split("/")
- tf_dep += ":" + dep_pieces[len(dep_pieces) - 1]
+ if not ":" in dep:
+ dep_pieces = dep.split("/")
+ tf_dep += ":" + dep_pieces[len(dep_pieces) - 1]
- tf_deps += [tf_dep + suffix]
+ tf_deps += [tf_dep + suffix]
- return tf_deps
+ return tf_deps
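A worked example of the expansion logic above (sketch): a shorthand dep gains a target named after its last path component before the suffix is appended, while a dep that already names a target just gets the suffix:

    tf_deps(["//tensorflow/core/lib/core"], "_proto")
    # => ["//tensorflow/core/lib/core:core_proto"]
    tf_deps(["//tensorflow/core:lib"], "_proto")
    # => ["//tensorflow/core:lib_proto"]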
# Modified from @cython//:Tools/rules.bzl
def pyx_library(
- name,
- deps = [],
- py_deps = [],
- srcs = [],
- **kwargs):
- """Compiles a group of .pyx / .pxd / .py files.
-
- First runs Cython to create .cpp files for each input .pyx or .py + .pxd
- pair. Then builds a shared object for each, passing "deps" to each cc_binary
- rule (includes Python headers by default). Finally, creates a py_library rule
- with the shared objects and any pure Python "srcs", with py_deps as its
- dependencies; the shared objects can be imported like normal Python files.
-
- Args:
- name: Name for the rule.
- deps: C/C++ dependencies of the Cython (e.g. Numpy headers).
- py_deps: Pure Python dependencies of the final library.
- srcs: .py, .pyx, or .pxd files to either compile or pass through.
- **kwargs: Extra keyword arguments passed to the py_library.
- """
-
-    # First filter out files that should be compiled vs. passed through.
- py_srcs = []
- pyx_srcs = []
- pxd_srcs = []
- for src in srcs:
- if src.endswith(".pyx") or (src.endswith(".py") and
- src[:-3] + ".pxd" in srcs):
- pyx_srcs.append(src)
- elif src.endswith(".py"):
- py_srcs.append(src)
- else:
- pxd_srcs.append(src)
- if src.endswith("__init__.py"):
- pxd_srcs.append(src)
-
- # Invoke cython to produce the shared object libraries.
- for filename in pyx_srcs:
- native.genrule(
- name = filename + "_cython_translation",
- srcs = [filename],
- outs = [filename.split(".")[0] + ".cpp"],
- # Optionally use PYTHON_BIN_PATH on Linux platforms so that python 3
- # works. Windows has issues with cython_binary so skip PYTHON_BIN_PATH.
- cmd = "PYTHONHASHSEED=0 $(location @cython//:cython_binary) --cplus $(SRCS) --output-file $(OUTS)",
- tools = ["@cython//:cython_binary"] + pxd_srcs,
- )
-
- shared_objects = []
- for src in pyx_srcs:
- stem = src.split(".")[0]
- shared_object_name = stem + ".so"
- native.cc_binary(
- name = shared_object_name,
- srcs = [stem + ".cpp"],
- deps = deps + ["//third_party/python_runtime:headers"],
- linkshared = 1,
- )
- shared_objects.append(shared_object_name)
-
- # Now create a py_library with these shared objects as data.
- native.py_library(
- name = name,
- srcs = py_srcs,
- deps = py_deps,
- srcs_version = "PY2AND3",
- data = shared_objects,
- **kwargs
+ name,
+ deps=[],
+ py_deps=[],
+ srcs=[],
+ **kwargs):
+ """Compiles a group of .pyx / .pxd / .py files.
+
+ First runs Cython to create .cpp files for each input .pyx or .py + .pxd
+ pair. Then builds a shared object for each, passing "deps" to each cc_binary
+ rule (includes Python headers by default). Finally, creates a py_library rule
+ with the shared objects and any pure Python "srcs", with py_deps as its
+ dependencies; the shared objects can be imported like normal Python files.
+
+ Args:
+ name: Name for the rule.
+ deps: C/C++ dependencies of the Cython (e.g. Numpy headers).
+ py_deps: Pure Python dependencies of the final library.
+ srcs: .py, .pyx, or .pxd files to either compile or pass through.
+ **kwargs: Extra keyword arguments passed to the py_library.
+ """
+  # First filter out files that should be compiled vs. passed through.
+ py_srcs = []
+ pyx_srcs = []
+ pxd_srcs = []
+ for src in srcs:
+ if src.endswith(".pyx") or (src.endswith(".py")
+ and src[:-3] + ".pxd" in srcs):
+ pyx_srcs.append(src)
+ elif src.endswith(".py"):
+ py_srcs.append(src)
+ else:
+ pxd_srcs.append(src)
+ if src.endswith("__init__.py"):
+ pxd_srcs.append(src)
+
+ # Invoke cython to produce the shared object libraries.
+ for filename in pyx_srcs:
+ native.genrule(
+ name = filename + "_cython_translation",
+ srcs = [filename],
+ outs = [filename.split(".")[0] + ".cpp"],
+ # Optionally use PYTHON_BIN_PATH on Linux platforms so that python 3
+ # works. Windows has issues with cython_binary so skip PYTHON_BIN_PATH.
+ cmd = "PYTHONHASHSEED=0 $(location @cython//:cython_binary) --cplus $(SRCS) --output-file $(OUTS)",
+ tools = ["@cython//:cython_binary"] + pxd_srcs,
)
-def _proto_cc_hdrs(srcs, use_grpc_plugin = False):
- ret = [s[:-len(".proto")] + ".pb.h" for s in srcs]
- if use_grpc_plugin:
- ret += [s[:-len(".proto")] + ".grpc.pb.h" for s in srcs]
- return ret
-
-def _proto_cc_srcs(srcs, use_grpc_plugin = False):
- ret = [s[:-len(".proto")] + ".pb.cc" for s in srcs]
- if use_grpc_plugin:
- ret += [s[:-len(".proto")] + ".grpc.pb.cc" for s in srcs]
- return ret
-
-def _proto_py_outs(srcs, use_grpc_plugin = False):
- ret = [s[:-len(".proto")] + "_pb2.py" for s in srcs]
- if use_grpc_plugin:
- ret += [s[:-len(".proto")] + "_pb2_grpc.py" for s in srcs]
- return ret
+ shared_objects = []
+ for src in pyx_srcs:
+ stem = src.split(".")[0]
+ shared_object_name = stem + ".so"
+ native.cc_binary(
+ name=shared_object_name,
+ srcs=[stem + ".cpp"],
+ deps=deps + ["//third_party/python_runtime:headers"],
+ linkshared = 1,
+ )
+ shared_objects.append(shared_object_name)
+
+ # Now create a py_library with these shared objects as data.
+ native.py_library(
+ name=name,
+ srcs=py_srcs,
+ deps=py_deps,
+ srcs_version = "PY2AND3",
+ data=shared_objects,
+ **kwargs
+ )
+
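A hedged sketch of calling the macro; the source files and dependency labels are assumptions:

    load("//tensorflow/core:platform/default/build_config.bzl", "pyx_library")

    pyx_library(
        name = "fast_ops",
        srcs = ["fast_ops.pyx", "helpers.py"],
        deps = ["//third_party/py/numpy:headers"],   # hypothetical C/C++ dep
        py_deps = ["//third_party/py/numpy"],        # hypothetical Python dep
    )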
+def _proto_cc_hdrs(srcs, use_grpc_plugin=False):
+ ret = [s[:-len(".proto")] + ".pb.h" for s in srcs]
+ if use_grpc_plugin:
+ ret += [s[:-len(".proto")] + ".grpc.pb.h" for s in srcs]
+ return ret
+
+def _proto_cc_srcs(srcs, use_grpc_plugin=False):
+ ret = [s[:-len(".proto")] + ".pb.cc" for s in srcs]
+ if use_grpc_plugin:
+ ret += [s[:-len(".proto")] + ".grpc.pb.cc" for s in srcs]
+ return ret
+
+def _proto_py_outs(srcs, use_grpc_plugin=False):
+ ret = [s[:-len(".proto")] + "_pb2.py" for s in srcs]
+ if use_grpc_plugin:
+ ret += [s[:-len(".proto")] + "_pb2_grpc.py" for s in srcs]
+ return ret
# Re-defined protocol buffer rule to allow building "header only" protocol
# buffers, to avoid duplicate registrations. Also allows non-iterable cc_libs
# containing select() statements.
def cc_proto_library(
- name,
- srcs = [],
- deps = [],
- cc_libs = [],
- include = None,
- protoc = "@protobuf_archive//:protoc",
- internal_bootstrap_hack = False,
- use_grpc_plugin = False,
- use_grpc_namespace = False,
- default_header = False,
- **kargs):
- """Bazel rule to create a C++ protobuf library from proto source files.
-
- Args:
- name: the name of the cc_proto_library.
- srcs: the .proto files of the cc_proto_library.
- deps: a list of dependency labels; must be cc_proto_library.
-    cc_libs: a list of other cc_library targets depended on by the generated
- cc_library.
- include: a string indicating the include path of the .proto files.
- protoc: the label of the protocol compiler to generate the sources.
-    internal_bootstrap_hack: a flag indicating the cc_proto_library is used only
-      for bootstrapping. When it is set to True, no files will be generated.
- The rule will simply be a provider for .proto files, so that other
- cc_proto_library can depend on it.
- use_grpc_plugin: a flag to indicate whether to call the grpc C++ plugin
- when processing the proto files.
- default_header: Controls the naming of generated rules. If True, the `name`
- rule will be header-only, and an _impl rule will contain the
- implementation. Otherwise the header-only rule (name + "_headers_only")
- must be referred to explicitly.
- **kargs: other keyword arguments that are passed to cc_library.
- """
-
- includes = []
- if include != None:
- includes = [include]
-
- if internal_bootstrap_hack:
- # For pre-checked-in generated files, we add the internal_bootstrap_hack
- # which will skip the codegen action.
- proto_gen(
- name = name + "_genproto",
- srcs = srcs,
- deps = [s + "_genproto" for s in deps],
- includes = includes,
- protoc = protoc,
- visibility = ["//visibility:public"],
- )
-
- # An empty cc_library to make rule dependency consistent.
- native.cc_library(
- name = name,
- **kargs
- )
- return
-
- grpc_cpp_plugin = None
- plugin_options = []
- if use_grpc_plugin:
- grpc_cpp_plugin = "//external:grpc_cpp_plugin"
- if use_grpc_namespace:
- plugin_options = ["services_namespace=grpc"]
-
- gen_srcs = _proto_cc_srcs(srcs, use_grpc_plugin)
- gen_hdrs = _proto_cc_hdrs(srcs, use_grpc_plugin)
- outs = gen_srcs + gen_hdrs
-
+ name,
+ srcs=[],
+ deps=[],
+ cc_libs=[],
+ include=None,
+ protoc="@protobuf_archive//:protoc",
+ internal_bootstrap_hack=False,
+ use_grpc_plugin=False,
+ use_grpc_namespace=False,
+ default_header=False,
+ **kargs):
+ """Bazel rule to create a C++ protobuf library from proto source files.
+
+ Args:
+ name: the name of the cc_proto_library.
+ srcs: the .proto files of the cc_proto_library.
+ deps: a list of dependency labels; must be cc_proto_library.
+    cc_libs: a list of other cc_library targets depended on by the generated
+ cc_library.
+ include: a string indicating the include path of the .proto files.
+ protoc: the label of the protocol compiler to generate the sources.
+    internal_bootstrap_hack: a flag indicating the cc_proto_library is used only
+      for bootstrapping. When it is set to True, no files will be generated.
+ The rule will simply be a provider for .proto files, so that other
+ cc_proto_library can depend on it.
+ use_grpc_plugin: a flag to indicate whether to call the grpc C++ plugin
+ when processing the proto files.
+ default_header: Controls the naming of generated rules. If True, the `name`
+ rule will be header-only, and an _impl rule will contain the
+ implementation. Otherwise the header-only rule (name + "_headers_only")
+ must be referred to explicitly.
+ **kargs: other keyword arguments that are passed to cc_library.
+ """
+
+ includes = []
+ if include != None:
+ includes = [include]
+
+ if internal_bootstrap_hack:
+ # For pre-checked-in generated files, we add the internal_bootstrap_hack
+ # which will skip the codegen action.
proto_gen(
- name = name + "_genproto",
- srcs = srcs,
- deps = [s + "_genproto" for s in deps],
- includes = includes,
- protoc = protoc,
- plugin = grpc_cpp_plugin,
- plugin_language = "grpc",
- plugin_options = plugin_options,
- gen_cc = 1,
- outs = outs,
- visibility = ["//visibility:public"],
+ name=name + "_genproto",
+ srcs=srcs,
+ deps=[s + "_genproto" for s in deps],
+ includes=includes,
+ protoc=protoc,
+ visibility=["//visibility:public"],
)
-
- if use_grpc_plugin:
- cc_libs += select({
- "//tensorflow:linux_s390x": ["//external:grpc_lib_unsecure"],
- "//conditions:default": ["//external:grpc_lib"],
- })
-
- if default_header:
- header_only_name = name
- impl_name = name + "_impl"
- else:
- header_only_name = name + "_headers_only"
- impl_name = name
-
+ # An empty cc_library to make rule dependency consistent.
native.cc_library(
- name = impl_name,
- srcs = gen_srcs,
- hdrs = gen_hdrs,
- deps = cc_libs + deps,
- includes = includes,
- **kargs
- )
- native.cc_library(
- name = header_only_name,
- deps = ["@protobuf_archive//:protobuf_headers"] + if_static([impl_name]),
- hdrs = gen_hdrs,
- **kargs
- )
+ name=name,
+ **kargs)
+ return
+
+ grpc_cpp_plugin = None
+ plugin_options = []
+ if use_grpc_plugin:
+ grpc_cpp_plugin = "//external:grpc_cpp_plugin"
+ if use_grpc_namespace:
+ plugin_options = ["services_namespace=grpc"]
+
+ gen_srcs = _proto_cc_srcs(srcs, use_grpc_plugin)
+ gen_hdrs = _proto_cc_hdrs(srcs, use_grpc_plugin)
+ outs = gen_srcs + gen_hdrs
+
+ proto_gen(
+ name=name + "_genproto",
+ srcs=srcs,
+ deps=[s + "_genproto" for s in deps],
+ includes=includes,
+ protoc=protoc,
+ plugin=grpc_cpp_plugin,
+ plugin_language="grpc",
+ plugin_options=plugin_options,
+ gen_cc=1,
+ outs=outs,
+ visibility=["//visibility:public"],
+ )
+
+ if use_grpc_plugin:
+ cc_libs += select({
+ "//tensorflow:linux_s390x": ["//external:grpc_lib_unsecure"],
+ "//conditions:default": ["//external:grpc_lib"],
+ })
+
+ if default_header:
+ header_only_name = name
+ impl_name = name + "_impl"
+ else:
+ header_only_name = name + "_headers_only"
+ impl_name = name
+
+ native.cc_library(
+ name=impl_name,
+ srcs=gen_srcs,
+ hdrs=gen_hdrs,
+ deps=cc_libs + deps,
+ includes=includes,
+ **kargs)
+ native.cc_library(
+ name=header_only_name,
+ deps=["@protobuf_archive//:protobuf_headers"] + if_static([impl_name]),
+ hdrs=gen_hdrs,
+ **kargs)
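A sketch of the default_header behavior described in the docstring above (the proto file name is assumed):

    load("//tensorflow/core:platform/default/build_config.bzl", "cc_proto_library")

    cc_proto_library(
        name = "my_proto",
        srcs = ["my.proto"],      # hypothetical
        default_header = True,    # :my_proto is header-only; :my_proto_impl holds the code
    )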
# Re-defined protocol buffer rule to bring in the change introduced in commit
# https://github.com/google/protobuf/commit/294b5758c373cbab4b72f35f4cb62dc1d8332b68
@@ -239,505 +234,477 @@ def cc_proto_library(
# to include the above commit.
def py_proto_library(
name,
- srcs = [],
- deps = [],
- py_libs = [],
- py_extra_srcs = [],
- include = None,
- default_runtime = "@protobuf_archive//:protobuf_python",
- protoc = "@protobuf_archive//:protoc",
- use_grpc_plugin = False,
+ srcs=[],
+ deps=[],
+ py_libs=[],
+ py_extra_srcs=[],
+ include=None,
+ default_runtime="@protobuf_archive//:protobuf_python",
+ protoc="@protobuf_archive//:protoc",
+ use_grpc_plugin=False,
**kargs):
- """Bazel rule to create a Python protobuf library from proto source files
-
- NOTE: the rule is only an internal workaround to generate protos. The
- interface may change and the rule may be removed when bazel has introduced
- the native rule.
-
- Args:
- name: the name of the py_proto_library.
- srcs: the .proto files of the py_proto_library.
- deps: a list of dependency labels; must be py_proto_library.
-    py_libs: a list of other py_library targets depended on by the generated
- py_library.
- py_extra_srcs: extra source files that will be added to the output
- py_library. This attribute is used for internal bootstrapping.
- include: a string indicating the include path of the .proto files.
-    default_runtime: the implicit default runtime which will be depended on by
- the generated py_library target.
- protoc: the label of the protocol compiler to generate the sources.
- use_grpc_plugin: a flag to indicate whether to call the Python C++ plugin
- when processing the proto files.
-    **kargs: other keyword arguments that are passed to py_library.
- """
- outs = _proto_py_outs(srcs, use_grpc_plugin)
-
- includes = []
- if include != None:
- includes = [include]
-
- grpc_python_plugin = None
- if use_grpc_plugin:
- grpc_python_plugin = "//external:grpc_python_plugin"
- # Note: Generated grpc code depends on Python grpc module. This dependency
-    # is not explicitly listed in py_libs. Instead, the host system is assumed to
- # have grpc installed.
-
+ """Bazel rule to create a Python protobuf library from proto source files
+
+ NOTE: the rule is only an internal workaround to generate protos. The
+ interface may change and the rule may be removed when bazel has introduced
+ the native rule.
+
+ Args:
+ name: the name of the py_proto_library.
+ srcs: the .proto files of the py_proto_library.
+ deps: a list of dependency labels; must be py_proto_library.
+    py_libs: a list of other py_library targets depended on by the generated
+ py_library.
+ py_extra_srcs: extra source files that will be added to the output
+ py_library. This attribute is used for internal bootstrapping.
+ include: a string indicating the include path of the .proto files.
+    default_runtime: the implicit default runtime which will be depended on by
+ the generated py_library target.
+ protoc: the label of the protocol compiler to generate the sources.
+ use_grpc_plugin: a flag to indicate whether to call the Python C++ plugin
+ when processing the proto files.
+    **kargs: other keyword arguments that are passed to py_library.
+ """
+ outs = _proto_py_outs(srcs, use_grpc_plugin)
+
+ includes = []
+ if include != None:
+ includes = [include]
+
+ grpc_python_plugin = None
+ if use_grpc_plugin:
+ grpc_python_plugin = "//external:grpc_python_plugin"
+ # Note: Generated grpc code depends on Python grpc module. This dependency
+  # is not explicitly listed in py_libs. Instead, the host system is assumed to
+ # have grpc installed.
+
+ proto_gen(
+ name=name + "_genproto",
+ srcs=srcs,
+ deps=[s + "_genproto" for s in deps],
+ includes=includes,
+ protoc=protoc,
+ gen_py=1,
+ outs=outs,
+ visibility=["//visibility:public"],
+ plugin=grpc_python_plugin,
+ plugin_language="grpc"
+ )
+
+ if default_runtime and not default_runtime in py_libs + deps:
+ py_libs = py_libs + [default_runtime]
+
+ native.py_library(
+ name=name,
+ srcs=outs+py_extra_srcs,
+ deps=py_libs+deps,
+ imports=includes,
+ **kargs)
+
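A minimal invocation sketch (names assumed); with use_grpc_plugin the generated code additionally expects the Python grpc module on the host, per the note above:

    load("//tensorflow/core:platform/default/build_config.bzl", "py_proto_library")

    py_proto_library(
        name = "my_service_py",
        srcs = ["my_service.proto"],   # hypothetical
        use_grpc_plugin = True,
    )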
+def tf_proto_library_cc(name, srcs = [], has_services = None,
+ protodeps = [],
+ visibility = [], testonly = 0,
+ cc_libs = [],
+ cc_stubby_versions = None,
+ cc_grpc_version = None,
+ j2objc_api_version = 1,
+ cc_api_version = 2,
+ dart_api_version = 2,
+ java_api_version = 2, py_api_version = 2,
+ js_api_version = 2, js_codegen = "jspb",
+ default_header = False):
+ js_codegen = js_codegen # unused argument
+ js_api_version = js_api_version # unused argument
+ native.filegroup(
+ name = name + "_proto_srcs",
+ srcs = srcs + tf_deps(protodeps, "_proto_srcs"),
+ testonly = testonly,
+ visibility = visibility,
+ )
+
+ use_grpc_plugin = None
+ if cc_grpc_version:
+ use_grpc_plugin = True
+
+ cc_deps = tf_deps(protodeps, "_cc")
+ cc_name = name + "_cc"
+ if not srcs:
+ # This is a collection of sub-libraries. Build header-only and impl
+ # libraries containing all the sources.
proto_gen(
- name = name + "_genproto",
- srcs = srcs,
- deps = [s + "_genproto" for s in deps],
- includes = includes,
- protoc = protoc,
- gen_py = 1,
- outs = outs,
- visibility = ["//visibility:public"],
- plugin = grpc_python_plugin,
- plugin_language = "grpc",
- )
-
- if default_runtime and not default_runtime in py_libs + deps:
- py_libs = py_libs + [default_runtime]
-
- native.py_library(
- name = name,
- srcs = outs + py_extra_srcs,
- deps = py_libs + deps,
- imports = includes,
- **kargs
+ name = cc_name + "_genproto",
+ deps = [s + "_genproto" for s in cc_deps],
+ protoc = "@protobuf_archive//:protoc",
+ visibility=["//visibility:public"],
)
-
-def tf_proto_library_cc(
- name,
- srcs = [],
- has_services = None,
- protodeps = [],
- visibility = [],
- testonly = 0,
- cc_libs = [],
- cc_stubby_versions = None,
- cc_grpc_version = None,
- j2objc_api_version = 1,
- cc_api_version = 2,
- dart_api_version = 2,
- java_api_version = 2,
- py_api_version = 2,
- js_api_version = 2,
- js_codegen = "jspb",
- default_header = False):
- js_codegen = js_codegen # unused argument
- js_api_version = js_api_version # unused argument
- native.filegroup(
- name = name + "_proto_srcs",
- srcs = srcs + tf_deps(protodeps, "_proto_srcs"),
+ native.cc_library(
+ name = cc_name,
+ deps = cc_deps + ["@protobuf_archive//:protobuf_headers"] +
+ if_static([name + "_cc_impl"]),
testonly = testonly,
visibility = visibility,
)
+ native.cc_library(
+ name = cc_name + "_impl",
+ deps = [s + "_impl" for s in cc_deps] + ["@protobuf_archive//:cc_wkt_protos"],
+ )
- use_grpc_plugin = None
- if cc_grpc_version:
- use_grpc_plugin = True
-
- cc_deps = tf_deps(protodeps, "_cc")
- cc_name = name + "_cc"
- if not srcs:
- # This is a collection of sub-libraries. Build header-only and impl
- # libraries containing all the sources.
- proto_gen(
- name = cc_name + "_genproto",
- deps = [s + "_genproto" for s in cc_deps],
- protoc = "@protobuf_archive//:protoc",
- visibility = ["//visibility:public"],
- )
- native.cc_library(
- name = cc_name,
- deps = cc_deps + ["@protobuf_archive//:protobuf_headers"] +
- if_static([name + "_cc_impl"]),
- testonly = testonly,
- visibility = visibility,
- )
- native.cc_library(
- name = cc_name + "_impl",
- deps = [s + "_impl" for s in cc_deps] + ["@protobuf_archive//:cc_wkt_protos"],
- )
-
- return
-
- cc_proto_library(
- name = cc_name,
- srcs = srcs,
- deps = cc_deps + ["@protobuf_archive//:cc_wkt_protos"],
- cc_libs = cc_libs + if_static(
- ["@protobuf_archive//:protobuf"],
- ["@protobuf_archive//:protobuf_headers"],
- ),
- copts = if_not_windows([
- "-Wno-unknown-warning-option",
- "-Wno-unused-but-set-variable",
- "-Wno-sign-compare",
- ]),
+ return
+
+ cc_proto_library(
+ name = cc_name,
+ srcs = srcs,
+ deps = cc_deps + ["@protobuf_archive//:cc_wkt_protos"],
+ cc_libs = cc_libs + if_static(
+ ["@protobuf_archive//:protobuf"],
+ ["@protobuf_archive//:protobuf_headers"]
+ ),
+ copts = if_not_windows([
+ "-Wno-unknown-warning-option",
+ "-Wno-unused-but-set-variable",
+ "-Wno-sign-compare",
+ ]),
+ protoc = "@protobuf_archive//:protoc",
+ use_grpc_plugin = use_grpc_plugin,
+ testonly = testonly,
+ visibility = visibility,
+ default_header = default_header,
+ )
+
+def tf_proto_library_py(name, srcs=[], protodeps=[], deps=[], visibility=[],
+ testonly=0, srcs_version="PY2AND3", use_grpc_plugin=False):
+ py_deps = tf_deps(protodeps, "_py")
+ py_name = name + "_py"
+ if not srcs:
+ # This is a collection of sub-libraries. Build header-only and impl
+ # libraries containing all the sources.
+ proto_gen(
+ name = py_name + "_genproto",
+ deps = [s + "_genproto" for s in py_deps],
protoc = "@protobuf_archive//:protoc",
- use_grpc_plugin = use_grpc_plugin,
- testonly = testonly,
- visibility = visibility,
- default_header = default_header,
+ visibility=["//visibility:public"],
)
-
-def tf_proto_library_py(
- name,
- srcs = [],
- protodeps = [],
- deps = [],
- visibility = [],
- testonly = 0,
- srcs_version = "PY2AND3",
- use_grpc_plugin = False):
- py_deps = tf_deps(protodeps, "_py")
- py_name = name + "_py"
- if not srcs:
- # This is a collection of sub-libraries. Build header-only and impl
- # libraries containing all the sources.
- proto_gen(
- name = py_name + "_genproto",
- deps = [s + "_genproto" for s in py_deps],
- protoc = "@protobuf_archive//:protoc",
- visibility = ["//visibility:public"],
- )
- native.py_library(
- name = py_name,
- deps = py_deps + ["@protobuf_archive//:protobuf_python"],
- testonly = testonly,
- visibility = visibility,
- )
- return
-
- py_proto_library(
+ native.py_library(
name = py_name,
- srcs = srcs,
- srcs_version = srcs_version,
- deps = deps + py_deps + ["@protobuf_archive//:protobuf_python"],
- protoc = "@protobuf_archive//:protoc",
- default_runtime = "@protobuf_archive//:protobuf_python",
- visibility = visibility,
+ deps = py_deps + ["@protobuf_archive//:protobuf_python"],
testonly = testonly,
- use_grpc_plugin = use_grpc_plugin,
+ visibility = visibility,
)
+ return
+
+ py_proto_library(
+ name = py_name,
+ srcs = srcs,
+ srcs_version = srcs_version,
+ deps = deps + py_deps + ["@protobuf_archive//:protobuf_python"],
+ protoc = "@protobuf_archive//:protoc",
+ default_runtime = "@protobuf_archive//:protobuf_python",
+ visibility = visibility,
+ testonly = testonly,
+ use_grpc_plugin = use_grpc_plugin,
+ )
def tf_jspb_proto_library(**kwargs):
- pass
+ pass
def tf_nano_proto_library(**kwargs):
- pass
-
-def tf_proto_library(
- name,
- srcs = [],
- has_services = None,
- protodeps = [],
- visibility = [],
- testonly = 0,
- cc_libs = [],
- cc_api_version = 2,
- cc_grpc_version = None,
- dart_api_version = 2,
- j2objc_api_version = 1,
- java_api_version = 2,
- py_api_version = 2,
- js_api_version = 2,
- js_codegen = "jspb",
- provide_cc_alias = False,
- default_header = False):
- """Make a proto library, possibly depending on other proto libraries."""
- _ignore = (js_api_version, js_codegen, provide_cc_alias)
-
- tf_proto_library_cc(
- name = name,
- srcs = srcs,
- protodeps = protodeps,
- cc_grpc_version = cc_grpc_version,
- cc_libs = cc_libs,
- testonly = testonly,
- visibility = visibility,
- default_header = default_header,
- )
-
- tf_proto_library_py(
- name = name,
- srcs = srcs,
- protodeps = protodeps,
- srcs_version = "PY2AND3",
- testonly = testonly,
- visibility = visibility,
- use_grpc_plugin = has_services,
- )
+ pass
+
+def tf_proto_library(name, srcs = [], has_services = None,
+ protodeps = [],
+ visibility = [], testonly = 0,
+ cc_libs = [],
+ cc_api_version = 2, cc_grpc_version = None,
+ dart_api_version = 2, j2objc_api_version = 1,
+ java_api_version = 2, py_api_version = 2,
+ js_api_version = 2, js_codegen = "jspb",
+ provide_cc_alias = False,
+ default_header = False):
+ """Make a proto library, possibly depending on other proto libraries."""
+ _ignore = (js_api_version, js_codegen, provide_cc_alias)
+
+ tf_proto_library_cc(
+ name = name,
+ srcs = srcs,
+ protodeps = protodeps,
+ cc_grpc_version = cc_grpc_version,
+ cc_libs = cc_libs,
+ testonly = testonly,
+ visibility = visibility,
+ default_header = default_header,
+ )
+
+ tf_proto_library_py(
+ name = name,
+ srcs = srcs,
+ protodeps = protodeps,
+ srcs_version = "PY2AND3",
+ testonly = testonly,
+ visibility = visibility,
+ use_grpc_plugin = has_services,
+ )
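A sketch of the top-level entry point; one call fans out to both the _cc and _py libraries defined above (the proto name is assumed):

    load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")

    tf_proto_library(
        name = "example_protos",     # creates :example_protos_cc and :example_protos_py
        srcs = ["example.proto"],    # hypothetical
        has_services = True,         # enables gRPC codegen for the Python library
    )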
# A list of all files under platform matching the pattern in 'files'. In
# contrast with 'tf_platform_srcs' below, which selectively collects files that
# must be compiled in the 'default' platform, this is a list of all headers
# mentioned in the platform/* files.
def tf_platform_hdrs(files):
- return native.glob(["platform/*/" + f for f in files])
+ return native.glob(["platform/*/" + f for f in files])
def tf_platform_srcs(files):
- base_set = ["platform/default/" + f for f in files]
- windows_set = base_set + ["platform/windows/" + f for f in files]
- posix_set = base_set + ["platform/posix/" + f for f in files]
-
- # Handle cases where we must also bring the posix file in. Usually, the list
-    # of files to build on windows is just everything in the
-    # windows_set. However, in some cases the implementations in 'posix/' are
-    # just what is necessary, and historically we chose to simply use the posix
- # file instead of making a copy in 'windows'.
- for f in files:
- if f == "error.cc":
- windows_set.append("platform/posix/" + f)
-
- return select({
- "//tensorflow:windows": native.glob(windows_set),
- "//tensorflow:windows_msvc": native.glob(windows_set),
- "//conditions:default": native.glob(posix_set),
- })
+ base_set = ["platform/default/" + f for f in files]
+ windows_set = base_set + ["platform/windows/" + f for f in files]
+ posix_set = base_set + ["platform/posix/" + f for f in files]
+
+ # Handle cases where we must also bring the posix file in. Usually, the list
+  # of files to build on windows is just everything in the
+  # windows_set. However, in some cases the implementations in 'posix/' are
+  # just what is necessary, and historically we chose to simply use the posix
+ # file instead of making a copy in 'windows'.
+ for f in files:
+ if f == "error.cc":
+ windows_set.append("platform/posix/" + f)
+
+ return select({
+ "//tensorflow:windows" : native.glob(windows_set),
+ "//tensorflow:windows_msvc" : native.glob(windows_set),
+ "//conditions:default" : native.glob(posix_set),
+ })
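For illustration, a hedged example of how these helpers are typically consumed; the target and file names are assumptions:

    load("//tensorflow/core:platform/default/build_config.bzl", "tf_platform_hdrs", "tf_platform_srcs")

    cc_library(
        name = "platform_port",                  # hypothetical
        srcs = tf_platform_srcs(["port.cc"]),    # windows/ or posix/ variants via select()
        hdrs = tf_platform_hdrs(["port.h"]),
    )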
def tf_additional_lib_hdrs(exclude = []):
- windows_hdrs = native.glob([
+ windows_hdrs = native.glob([
+ "platform/default/*.h",
+ "platform/windows/*.h",
+ "platform/posix/error.h",
+ ], exclude = exclude)
+ return select({
+ "//tensorflow:windows" : windows_hdrs,
+ "//tensorflow:windows_msvc" : windows_hdrs,
+ "//conditions:default" : native.glob([
"platform/default/*.h",
- "platform/windows/*.h",
- "platform/posix/error.h",
- ], exclude = exclude)
- return select({
- "//tensorflow:windows": windows_hdrs,
- "//tensorflow:windows_msvc": windows_hdrs,
- "//conditions:default": native.glob([
- "platform/default/*.h",
- "platform/posix/*.h",
- ], exclude = exclude),
- })
+ "platform/posix/*.h",
+ ], exclude = exclude),
+ })
def tf_additional_lib_srcs(exclude = []):
- windows_srcs = native.glob([
+ windows_srcs = native.glob([
+ "platform/default/*.cc",
+ "platform/windows/*.cc",
+ "platform/posix/error.cc",
+ ], exclude = exclude)
+ return select({
+ "//tensorflow:windows" : windows_srcs,
+ "//tensorflow:windows_msvc" : windows_srcs,
+ "//conditions:default" : native.glob([
"platform/default/*.cc",
- "platform/windows/*.cc",
- "platform/posix/error.cc",
- ], exclude = exclude)
- return select({
- "//tensorflow:windows": windows_srcs,
- "//tensorflow:windows_msvc": windows_srcs,
- "//conditions:default": native.glob([
- "platform/default/*.cc",
- "platform/posix/*.cc",
- ], exclude = exclude),
- })
+ "platform/posix/*.cc",
+ ], exclude = exclude),
+ })
def tf_additional_minimal_lib_srcs():
- return [
- "platform/default/integral_types.h",
- "platform/default/mutex.h",
- ]
+ return [
+ "platform/default/integral_types.h",
+ "platform/default/mutex.h",
+ ]
def tf_additional_proto_hdrs():
- return [
- "platform/default/integral_types.h",
- "platform/default/logging.h",
- "platform/default/protobuf.h",
- ] + if_windows([
- "platform/windows/integral_types.h",
- ])
+ return [
+ "platform/default/integral_types.h",
+ "platform/default/logging.h",
+ "platform/default/protobuf.h"
+ ] + if_windows([
+ "platform/windows/integral_types.h",
+ ])
def tf_additional_proto_srcs():
- return [
- "platform/default/protobuf.cc",
- ]
+ return [
+ "platform/default/protobuf.cc",
+ ]
def tf_additional_human_readable_json_deps():
- return []
+ return []
def tf_additional_all_protos():
- return ["//tensorflow/core:protos_all"]
+ return ["//tensorflow/core:protos_all"]
def tf_protos_all_impl():
- return ["//tensorflow/core:protos_all_cc_impl"]
+ return ["//tensorflow/core:protos_all_cc_impl"]
def tf_protos_all():
- return if_static(
- extra_deps = tf_protos_all_impl(),
- otherwise = ["//tensorflow/core:protos_all_cc"],
- )
+ return if_static(
+ extra_deps=tf_protos_all_impl(),
+ otherwise=["//tensorflow/core:protos_all_cc"])
def tf_protos_grappler_impl():
- return ["//tensorflow/core/grappler/costs:op_performance_data_cc_impl"]
+ return ["//tensorflow/core/grappler/costs:op_performance_data_cc_impl"]
def tf_protos_grappler():
- return if_static(
- extra_deps = tf_protos_grappler_impl(),
- otherwise = ["//tensorflow/core/grappler/costs:op_performance_data_cc"],
- )
+ return if_static(
+ extra_deps=tf_protos_grappler_impl(),
+ otherwise=["//tensorflow/core/grappler/costs:op_performance_data_cc"])
def tf_additional_cupti_wrapper_deps():
- return ["//tensorflow/core/platform/default/gpu:cupti_wrapper"]
+ return ["//tensorflow/core/platform/default/gpu:cupti_wrapper"]
def tf_additional_device_tracer_srcs():
- return ["platform/default/device_tracer.cc"]
+ return ["platform/default/device_tracer.cc"]
def tf_additional_device_tracer_cuda_deps():
- return []
+ return []
def tf_additional_device_tracer_deps():
- return []
+ return []
def tf_additional_libdevice_data():
- return []
+ return []
def tf_additional_libdevice_deps():
- return ["@local_config_cuda//cuda:cuda_headers"]
+ return ["@local_config_cuda//cuda:cuda_headers"]
def tf_additional_libdevice_srcs():
- return ["platform/default/cuda_libdevice_path.cc"]
+ return ["platform/default/cuda_libdevice_path.cc"]
def tf_additional_test_deps():
- return []
+ return []
def tf_additional_test_srcs():
- return [
- "platform/default/test_benchmark.cc",
- ] + select({
- "//tensorflow:windows": [
- "platform/windows/test.cc",
+ return [
+ "platform/default/test_benchmark.cc",
+ ] + select({
+ "//tensorflow:windows" : [
+ "platform/windows/test.cc"
],
- "//conditions:default": [
- "platform/posix/test.cc",
+ "//conditions:default" : [
+ "platform/posix/test.cc",
],
})
def tf_kernel_tests_linkstatic():
- return 0
+ return 0
def tf_additional_lib_defines():
- """Additional defines needed to build TF libraries."""
- return select({
- "//tensorflow:with_jemalloc_linux_x86_64": ["TENSORFLOW_USE_JEMALLOC"],
- "//tensorflow:with_jemalloc_linux_ppc64le": ["TENSORFLOW_USE_JEMALLOC"],
- "//conditions:default": [],
- }) + if_not_mobile(["TENSORFLOW_USE_ABSL"])
+ """Additional defines needed to build TF libraries."""
+ return select({
+ "//tensorflow:with_jemalloc_linux_x86_64": ["TENSORFLOW_USE_JEMALLOC"],
+ "//tensorflow:with_jemalloc_linux_ppc64le":["TENSORFLOW_USE_JEMALLOC"],
+ "//conditions:default": [],
+ }) + if_not_mobile(["TENSORFLOW_USE_ABSL"])
def tf_additional_lib_deps():
- """Additional dependencies needed to build TF libraries."""
- return if_not_mobile(["@com_google_absl//absl/base:base"]) + if_static(
- ["@nsync//:nsync_cpp"],
- ["@nsync//:nsync_headers"],
- ) + select({
- "//tensorflow:with_jemalloc_linux_x86_64_dynamic": ["@jemalloc//:jemalloc_headers"],
- "//tensorflow:with_jemalloc_linux_ppc64le_dynamic": ["@jemalloc//:jemalloc_headers"],
- "//tensorflow:with_jemalloc_linux_x86_64": ["@jemalloc//:jemalloc_impl"],
- "//tensorflow:with_jemalloc_linux_ppc64le": ["@jemalloc//:jemalloc_impl"],
- "//conditions:default": [],
- })
+ """Additional dependencies needed to build TF libraries."""
+ return if_not_mobile(["@com_google_absl//absl/base:base"]) + if_static(
+ ["@nsync//:nsync_cpp"],
+ ["@nsync//:nsync_headers"]
+ ) + select({
+ "//tensorflow:with_jemalloc_linux_x86_64_dynamic": ["@jemalloc//:jemalloc_headers"],
+ "//tensorflow:with_jemalloc_linux_ppc64le_dynamic": ["@jemalloc//:jemalloc_headers"],
+ "//tensorflow:with_jemalloc_linux_x86_64": ["@jemalloc//:jemalloc_impl"],
+ "//tensorflow:with_jemalloc_linux_ppc64le": ["@jemalloc//:jemalloc_impl"],
+ "//conditions:default": [],
+ })
def tf_additional_core_deps():
- return select({
- "//tensorflow:with_gcp_support_android_override": [],
- "//tensorflow:with_gcp_support_ios_override": [],
- "//tensorflow:with_gcp_support": [
- "//tensorflow/core/platform/cloud:gcs_file_system",
- ],
- "//conditions:default": [],
- }) + select({
- "//tensorflow:with_hdfs_support_windows_override": [],
- "//tensorflow:with_hdfs_support_android_override": [],
- "//tensorflow:with_hdfs_support_ios_override": [],
- "//tensorflow:with_hdfs_support": [
- "//tensorflow/core/platform/hadoop:hadoop_file_system",
- ],
- "//conditions:default": [],
- }) + select({
- "//tensorflow:with_s3_support_windows_override": [],
- "//tensorflow:with_s3_support_android_override": [],
- "//tensorflow:with_s3_support_ios_override": [],
- "//tensorflow:with_s3_support": [
- "//tensorflow/core/platform/s3:s3_file_system",
- ],
- "//conditions:default": [],
- })
+ return select({
+ "//tensorflow:with_gcp_support_android_override": [],
+ "//tensorflow:with_gcp_support_ios_override": [],
+ "//tensorflow:with_gcp_support": [
+ "//tensorflow/core/platform/cloud:gcs_file_system",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_hdfs_support_windows_override": [],
+ "//tensorflow:with_hdfs_support_android_override": [],
+ "//tensorflow:with_hdfs_support_ios_override": [],
+ "//tensorflow:with_hdfs_support": [
+ "//tensorflow/core/platform/hadoop:hadoop_file_system",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_s3_support_windows_override": [],
+ "//tensorflow:with_s3_support_android_override": [],
+ "//tensorflow:with_s3_support_ios_override": [],
+ "//tensorflow:with_s3_support": [
+ "//tensorflow/core/platform/s3:s3_file_system",
+ ],
+ "//conditions:default": [],
+ })
# TODO(jart, jhseu): Delete when GCP is default on.
def tf_additional_cloud_op_deps():
- return select({
- "//tensorflow:with_gcp_support_windows_override": [],
- "//tensorflow:with_gcp_support_android_override": [],
- "//tensorflow:with_gcp_support_ios_override": [],
- "//tensorflow:with_gcp_support": [
- "//tensorflow/contrib/cloud:bigquery_reader_ops_op_lib",
- "//tensorflow/contrib/cloud:gcs_config_ops_op_lib",
- ],
- "//conditions:default": [],
- })
+ return select({
+ "//tensorflow:with_gcp_support_windows_override": [],
+ "//tensorflow:with_gcp_support_android_override": [],
+ "//tensorflow:with_gcp_support_ios_override": [],
+ "//tensorflow:with_gcp_support": [
+ "//tensorflow/contrib/cloud:bigquery_reader_ops_op_lib",
+ "//tensorflow/contrib/cloud:gcs_config_ops_op_lib",
+ ],
+ "//conditions:default": [],
+ })
# TODO(jart, jhseu): Delete when GCP is default on.
def tf_additional_cloud_kernel_deps():
- return select({
- "//tensorflow:with_gcp_support_windows_override": [],
- "//tensorflow:with_gcp_support_android_override": [],
- "//tensorflow:with_gcp_support_ios_override": [],
- "//tensorflow:with_gcp_support": [
- "//tensorflow/contrib/cloud/kernels:bigquery_reader_ops",
- "//tensorflow/contrib/cloud/kernels:gcs_config_ops",
- ],
- "//conditions:default": [],
- })
+ return select({
+ "//tensorflow:with_gcp_support_windows_override": [],
+ "//tensorflow:with_gcp_support_android_override": [],
+ "//tensorflow:with_gcp_support_ios_override": [],
+ "//tensorflow:with_gcp_support": [
+ "//tensorflow/contrib/cloud/kernels:bigquery_reader_ops",
+ "//tensorflow/contrib/cloud/kernels:gcs_config_ops",
+ ],
+ "//conditions:default": [],
+ })
def tf_lib_proto_parsing_deps():
- return [
- ":protos_all_cc",
- "//third_party/eigen3",
- "//tensorflow/core/platform/default/build_config:proto_parsing",
- ]
+ return [
+ ":protos_all_cc",
+ "//third_party/eigen3",
+ "//tensorflow/core/platform/default/build_config:proto_parsing",
+ ]
def tf_additional_verbs_lib_defines():
- return select({
- "//tensorflow:with_verbs_support": ["TENSORFLOW_USE_VERBS"],
- "//conditions:default": [],
- })
+ return select({
+ "//tensorflow:with_verbs_support": ["TENSORFLOW_USE_VERBS"],
+ "//conditions:default": [],
+ })
def tf_additional_mpi_lib_defines():
- return select({
- "//tensorflow:with_mpi_support": ["TENSORFLOW_USE_MPI"],
- "//conditions:default": [],
- })
+ return select({
+ "//tensorflow:with_mpi_support": ["TENSORFLOW_USE_MPI"],
+ "//conditions:default": [],
+ })
def tf_additional_gdr_lib_defines():
- return select({
- "//tensorflow:with_gdr_support": ["TENSORFLOW_USE_GDR"],
- "//conditions:default": [],
- })
+ return select({
+ "//tensorflow:with_gdr_support": ["TENSORFLOW_USE_GDR"],
+ "//conditions:default": [],
+ })
-def tf_py_clif_cc(name, visibility = None, **kwargs):
- pass
+def tf_py_clif_cc(name, visibility=None, **kwargs):
+ pass
-def tf_pyclif_proto_library(
- name,
- proto_lib,
- proto_srcfile = "",
- visibility = None,
- **kwargs):
- pass
+def tf_pyclif_proto_library(name, proto_lib, proto_srcfile="", visibility=None,
+ **kwargs):
+ pass
def tf_additional_binary_deps():
- return ["@nsync//:nsync_cpp"] + if_cuda(
- [
- "//tensorflow/stream_executor:cuda_platform",
- "//tensorflow/core/platform/default/build_config:cuda",
- ],
- ) + select({
- "//tensorflow:with_jemalloc_linux_x86_64": ["@jemalloc//:jemalloc_impl"],
- "//tensorflow:with_jemalloc_linux_ppc64le": ["@jemalloc//:jemalloc_impl"],
- "//conditions:default": [],
- }) + [
- # TODO(allenl): Split these out into their own shared objects (they are
- # here because they are shared between contrib/ op shared objects and
- # core).
- "//tensorflow/core/kernels:lookup_util",
- "//tensorflow/core/util/tensor_bundle",
- ] + if_mkl(
- [
- "//third_party/mkl:intel_binary_blob",
- ],
- )
+ return ["@nsync//:nsync_cpp"] + if_cuda(
+ [
+ "//tensorflow/stream_executor:cuda_platform",
+ "//tensorflow/core/platform/default/build_config:cuda",
+ ],
+ ) + select({
+ "//tensorflow:with_jemalloc_linux_x86_64": ["@jemalloc//:jemalloc_impl"],
+ "//tensorflow:with_jemalloc_linux_ppc64le": ["@jemalloc//:jemalloc_impl"],
+ "//conditions:default": [],
+ }) + [
+ # TODO(allenl): Split these out into their own shared objects (they are
+ # here because they are shared between contrib/ op shared objects and
+ # core).
+ "//tensorflow/core/kernels:lookup_util",
+ "//tensorflow/core/util/tensor_bundle",
+ ] + if_mkl(
+ [
+ "//third_party/mkl:intel_binary_blob",
+ ],
+ )
diff --git a/tensorflow/core/platform/default/build_config_root.bzl b/tensorflow/core/platform/default/build_config_root.bzl
index 42387eb844..09029a4b25 100644
--- a/tensorflow/core/platform/default/build_config_root.bzl
+++ b/tensorflow/core/platform/default/build_config_root.bzl
@@ -3,58 +3,58 @@
# be separate to avoid cyclic references.
def tf_cuda_tests_tags():
- return ["requires-gpu"]
+ return ["requires-gpu"]
def tf_sycl_tests_tags():
- return ["requires-gpu"]
+ return ["requires-gpu"]
def tf_additional_plugin_deps():
- return select({
- str(Label("//tensorflow:with_xla_support")): [
- str(Label("//tensorflow/compiler/jit")),
- ],
- "//conditions:default": [],
- })
+ return select({
+ str(Label("//tensorflow:with_xla_support")): [
+ str(Label("//tensorflow/compiler/jit"))
+ ],
+ "//conditions:default": [],
+ })
def tf_additional_xla_deps_py():
- return []
+ return []
def tf_additional_grpc_deps_py():
- return []
+ return []
def tf_additional_license_deps():
- return select({
- str(Label("//tensorflow:with_xla_support")): ["@llvm//:LICENSE.TXT"],
- "//conditions:default": [],
- })
+ return select({
+ str(Label("//tensorflow:with_xla_support")): ["@llvm//:LICENSE.TXT"],
+ "//conditions:default": [],
+ })
def tf_additional_verbs_deps():
- return select({
- str(Label("//tensorflow:with_verbs_support")): [
- str(Label("//tensorflow/contrib/verbs:verbs_server_lib")),
- str(Label("//tensorflow/contrib/verbs:grpc_verbs_client")),
- ],
- "//conditions:default": [],
- })
+ return select({
+ str(Label("//tensorflow:with_verbs_support")): [
+ str(Label("//tensorflow/contrib/verbs:verbs_server_lib")),
+ str(Label("//tensorflow/contrib/verbs:grpc_verbs_client")),
+ ],
+ "//conditions:default": [],
+ })
def tf_additional_mpi_deps():
- return select({
- str(Label("//tensorflow:with_mpi_support")): [
- str(Label("//tensorflow/contrib/mpi:mpi_server_lib")),
- ],
- "//conditions:default": [],
- })
+ return select({
+ str(Label("//tensorflow:with_mpi_support")): [
+ str(Label("//tensorflow/contrib/mpi:mpi_server_lib")),
+ ],
+ "//conditions:default": [],
+ })
def tf_additional_gdr_deps():
- return select({
- str(Label("//tensorflow:with_gdr_support")): [
- str(Label("//tensorflow/contrib/gdr:gdr_server_lib")),
- ],
- "//conditions:default": [],
- })
-
-def if_static(extra_deps, otherwise = []):
- return select({
- str(Label("//tensorflow:framework_shared_object")): otherwise,
- "//conditions:default": extra_deps,
- })
+ return select({
+ str(Label("//tensorflow:with_gdr_support")): [
+ str(Label("//tensorflow/contrib/gdr:gdr_server_lib")),
+ ],
+ "//conditions:default": [],
+ })
+
+def if_static(extra_deps, otherwise=[]):
+ return select({
+ str(Label("//tensorflow:framework_shared_object")): otherwise,
+ "//conditions:default": extra_deps,
+ })
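For orientation, a minimal sketch of how if_static is typically consumed from a BUILD file; the target names here are illustrative, not part of this change:

    # Hypothetical kernel target: extra_deps are linked only in monolithic
    # builds (the default branch); the shared framework .so is used when
    # //tensorflow:framework_shared_object is set.
    cc_library(
        name = "my_kernel",
        srcs = ["my_kernel.cc"],
        deps = ["//tensorflow/core:framework"] + if_static(
            extra_deps = ["//tensorflow/core:lib_internal"],
            otherwise = ["//tensorflow:libtensorflow_framework.so"],
        ),
    )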
diff --git a/tensorflow/core/platform/default/platform.bzl b/tensorflow/core/platform/default/platform.bzl
index 76bfaa896e..20ab441bf4 100644
--- a/tensorflow/core/platform/default/platform.bzl
+++ b/tensorflow/core/platform/default/platform.bzl
@@ -5,52 +5,55 @@ CUDNN_VERSION = ""
PLATFORM = ""
def cuda_sdk_version():
- return CUDA_VERSION
+ return CUDA_VERSION
def cudnn_sdk_version():
- return CUDNN_VERSION
+ return CUDNN_VERSION
def cuda_library_path(name, version = cuda_sdk_version()):
- if PLATFORM == "Darwin":
- if not version:
- return "lib/lib{}.dylib".format(name)
- else:
- return "lib/lib{}.{}.dylib".format(name, version)
- elif not version:
- return "lib64/lib{}.so".format(name)
+ if PLATFORM == "Darwin":
+ if not version:
+ return "lib/lib{}.dylib".format(name)
else:
- return "lib64/lib{}.so.{}".format(name, version)
+ return "lib/lib{}.{}.dylib".format(name, version)
+ else:
+ if not version:
+ return "lib64/lib{}.so".format(name)
+ else:
+ return "lib64/lib{}.so.{}".format(name, version)
def cuda_static_library_path(name):
- if PLATFORM == "Darwin":
- return "lib/lib{}_static.a".format(name)
- else:
- return "lib64/lib{}_static.a".format(name)
+ if PLATFORM == "Darwin":
+ return "lib/lib{}_static.a".format(name)
+ else:
+ return "lib64/lib{}_static.a".format(name)
def cudnn_library_path(version = cudnn_sdk_version()):
- if PLATFORM == "Darwin":
- if not version:
- return "lib/libcudnn.dylib"
- else:
- return "lib/libcudnn.{}.dylib".format(version)
- elif not version:
- return "lib64/libcudnn.so"
+ if PLATFORM == "Darwin":
+ if not version:
+ return "lib/libcudnn.dylib"
+ else:
+ return "lib/libcudnn.{}.dylib".format(version)
+ else:
+ if not version:
+ return "lib64/libcudnn.so"
else:
- return "lib64/libcudnn.so.{}".format(version)
+ return "lib64/libcudnn.so.{}".format(version)
def cupti_library_path(version = cuda_sdk_version()):
- if PLATFORM == "Darwin":
- if not version:
- return "extras/CUPTI/lib/libcupti.dylib"
- else:
- return "extras/CUPTI/lib/libcupti.{}.dylib".format(version)
- elif not version:
- return "extras/CUPTI/lib64/libcupti.so"
+ if PLATFORM == "Darwin":
+ if not version:
+ return "extras/CUPTI/lib/libcupti.dylib"
else:
- return "extras/CUPTI/lib64/libcupti.so.{}".format(version)
+ return "extras/CUPTI/lib/libcupti.{}.dylib".format(version)
+ else:
+ if not version:
+ return "extras/CUPTI/lib64/libcupti.so"
+ else:
+ return "extras/CUPTI/lib64/libcupti.so.{}".format(version)
def readlink_command():
- if PLATFORM == "Darwin":
- return "greadlink"
- else:
- return "readlink"
+ if PLATFORM == "Darwin":
+ return "greadlink"
+ else:
+ return "readlink"
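To make the branches above concrete, a few illustrative evaluations, assuming the stated PLATFORM and version values (these constants are normally filled in by the configure step):

    # Assuming PLATFORM = "Darwin" and CUDA_VERSION = "9.0":
    #   cuda_library_path("cudart")         -> "lib/libcudart.9.0.dylib"
    #   cuda_static_library_path("cudart")  -> "lib/libcudart_static.a"
    #   cudnn_library_path("7")             -> "lib/libcudnn.7.dylib"
    # Assuming a non-Darwin PLATFORM and an empty version string:
    #   cuda_library_path("cudart", "")     -> "lib64/libcudart.so"
    #   cupti_library_path("")              -> "extras/CUPTI/lib64/libcupti.so"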
diff --git a/tensorflow/java/build_defs.bzl b/tensorflow/java/build_defs.bzl
index f423cc4d82..e1916ca4d9 100644
--- a/tensorflow/java/build_defs.bzl
+++ b/tensorflow/java/build_defs.bzl
@@ -18,7 +18,7 @@ XLINT_OPTS = [
"-Xlint:-processing",
"-Xlint:-serial",
"-Xlint:-try",
- "-Xlint:-classfile", # see b/32750402, go/javac-warnings#classfile
+ "-Xlint:-classfile", # see b/32750402, go/javac-warnings#classfile
]
# The bazel errorprone plugin currently only enables default errorChecks
diff --git a/tensorflow/java/src/gen/gen_ops.bzl b/tensorflow/java/src/gen/gen_ops.bzl
index b46721a93d..f4ff34ea03 100644
--- a/tensorflow/java/src/gen/gen_ops.bzl
+++ b/tensorflow/java/src/gen/gen_ops.bzl
@@ -17,48 +17,46 @@ load(
# and then archive those source files into
# ops/gen_sources.srcjar
#
-def tf_java_op_gen_srcjar(
- name,
- gen_tool,
- base_package,
- api_def_srcs = [],
- out_dir = "ops/",
- out_src_dir = "src/main/java/",
- visibility = ["//tensorflow/java:__pkg__"]):
- gen_cmds = ["rm -rf $(@D)"] # Always start from fresh when generating source files
- srcs = api_def_srcs[:]
+def tf_java_op_gen_srcjar(name,
+ gen_tool,
+ base_package,
+ api_def_srcs=[],
+ out_dir="ops/",
+ out_src_dir="src/main/java/",
+ visibility=["//tensorflow/java:__pkg__"]):
- if not api_def_srcs:
- api_def_args_str = ","
- else:
- api_def_args = []
- for api_def_src in api_def_srcs:
- # Add directory of the first ApiDef source to args.
- # We are assuming all ApiDefs in a single api_def_src are in the
- # same directory.
- api_def_args.append(
- "$$(dirname $$(echo $(locations " + api_def_src +
- ") | cut -d\" \" -f1))",
- )
- api_def_args_str = ",".join(api_def_args)
+  gen_cmds = ["rm -rf $(@D)"]  # Always start fresh when generating source files
+ srcs = api_def_srcs[:]
- gen_cmds += ["$(location " + gen_tool + ")" +
- " --output_dir=$(@D)/" + out_src_dir +
- " --base_package=" + base_package +
- " --api_dirs=" + api_def_args_str]
+ if not api_def_srcs:
+ api_def_args_str = ","
+ else:
+ api_def_args = []
+ for api_def_src in api_def_srcs:
+ # Add directory of the first ApiDef source to args.
+ # We are assuming all ApiDefs in a single api_def_src are in the
+ # same directory.
+ api_def_args.append(
+ "$$(dirname $$(echo $(locations " + api_def_src +
+ ") | cut -d\" \" -f1))")
+ api_def_args_str = ",".join(api_def_args)
- # Generate a source archive containing generated code for these ops.
- gen_srcjar = out_dir + name + ".srcjar"
- gen_cmds += ["$(location @local_jdk//:jar) cMf $(location :" + gen_srcjar + ") -C $(@D) src"]
+ gen_cmds += ["$(location " + gen_tool + ")" +
+ " --output_dir=$(@D)/" + out_src_dir +
+ " --base_package=" + base_package +
+ " --api_dirs=" + api_def_args_str]
- native.genrule(
- name = name,
- srcs = srcs,
- outs = [gen_srcjar],
- tools = [
- "@local_jdk//:jar",
- "@local_jdk//:jdk",
- gen_tool,
- ] + tf_binary_additional_srcs(),
- cmd = " && ".join(gen_cmds),
- )
+ # Generate a source archive containing generated code for these ops.
+ gen_srcjar = out_dir + name + ".srcjar"
+ gen_cmds += ["$(location @local_jdk//:jar) cMf $(location :" + gen_srcjar + ") -C $(@D) src"]
+
+ native.genrule(
+ name=name,
+ srcs=srcs,
+ outs=[gen_srcjar],
+ tools=[
+ "@local_jdk//:jar",
+ "@local_jdk//:jdk",
+          gen_tool,
+ ] + tf_binary_additional_srcs(),
+ cmd=" && ".join(gen_cmds))
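A hedged usage sketch for the macro above; the gen_tool label is an assumption, since the actual generator binary is defined elsewhere in the Java package:

    tf_java_op_gen_srcjar(
        name = "java_op_gen_sources",
        gen_tool = "//tensorflow/java/src/gen:gen_tool",  # assumed label
        base_package = "org.tensorflow.op",
    )
    # Produces ops/java_op_gen_sources.srcjar, archiving the generated
    # Java sources placed under src/main/java/.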
diff --git a/tensorflow/python/build_defs.bzl b/tensorflow/python/build_defs.bzl
index 244820f41a..b9056f86e6 100644
--- a/tensorflow/python/build_defs.bzl
+++ b/tensorflow/python/build_defs.bzl
@@ -12,26 +12,22 @@ load("//tensorflow:tensorflow.bzl", "tf_gen_op_wrapper_py")
# consumers of the tf_gen_op_wrapper_py rule would be simplified if we don't
# hard code the ops/ directory.
-def tf_gen_op_wrapper_private_py(
- name,
- out = None,
- deps = [],
- require_shape_functions = True,
- visibility = []):
- if not name.endswith("_gen"):
- fail("name must end in _gen")
- if not visibility:
- visibility = ["//visibility:private"]
- bare_op_name = name[:-4] # Strip off the _gen
- tf_gen_op_wrapper_py(
- name = bare_op_name,
- out = out,
- visibility = visibility,
- deps = deps,
- require_shape_functions = require_shape_functions,
- generated_target_name = name,
- api_def_srcs = [
- "//tensorflow/core/api_def:base_api_def",
- "//tensorflow/core/api_def:python_api_def",
- ],
- )
+def tf_gen_op_wrapper_private_py(name, out=None, deps=[],
+ require_shape_functions=True,
+ visibility=[]):
+ if not name.endswith("_gen"):
+ fail("name must end in _gen")
+ if not visibility:
+ visibility = ["//visibility:private"]
+ bare_op_name = name[:-4] # Strip off the _gen
+ tf_gen_op_wrapper_py(name=bare_op_name,
+ out=out,
+ visibility=visibility,
+ deps=deps,
+ require_shape_functions=require_shape_functions,
+ generated_target_name=name,
+ api_def_srcs = [
+ "//tensorflow/core/api_def:base_api_def",
+ "//tensorflow/core/api_def:python_api_def",
+ ],
+ )
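A minimal sketch of the macro above in use; "array_ops" is illustrative, and the default deps presume a matching //tensorflow/core:array_ops_op_lib target:

    # Creates the private py_library "array_ops" plus the generated target
    # "array_ops_gen"; fails unless the name ends in "_gen".
    tf_gen_op_wrapper_private_py(
        name = "array_ops_gen",
    )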
diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl
index 9819a12958..e4632c4811 100644
--- a/tensorflow/tensorflow.bzl
+++ b/tensorflow/tensorflow.bzl
@@ -4,11 +4,11 @@
# Uses the ":optmode" config_setting to pick the options.
load(
"//tensorflow/core:platform/default/build_config_root.bzl",
- "if_static",
- "tf_additional_grpc_deps_py",
- "tf_additional_xla_deps_py",
"tf_cuda_tests_tags",
"tf_sycl_tests_tags",
+ "tf_additional_grpc_deps_py",
+ "tf_additional_xla_deps_py",
+ "if_static",
)
load(
"@local_config_tensorrt//:build_defs.bzl",
@@ -16,14 +16,15 @@ load(
)
load(
"@local_config_cuda//cuda:build_defs.bzl",
- "cuda_default_copts",
"if_cuda",
+ "cuda_default_copts",
)
load(
"//third_party/mkl:build_defs.bzl",
"if_mkl",
- "if_mkl_lnx_x64",
+ "if_mkl_lnx_x64"
)
+
def register_extension_info(**kwargs):
pass
@@ -31,148 +32,147 @@ def register_extension_info(**kwargs):
# i.e. "common_runtime/direct_session_test.cc" becomes
# "common_runtime_direct_session_test"
def src_to_test_name(src):
- return src.replace("/", "_").split(".")[0]
+ return src.replace("/", "_").split(".")[0]
def full_path(relative_paths):
- return [native.package_name() + "/" + relative for relative in relative_paths]
+ return [native.package_name() + "/" + relative for relative in relative_paths]
def _add_tfcore_prefix(src):
- if src.startswith("//"):
- return src
- return "//tensorflow/core:" + src
+ if src.startswith("//"):
+ return src
+ return "//tensorflow/core:" + src
# List of proto files for android builds
def tf_android_core_proto_sources(core_proto_sources_relative):
- return [
- _add_tfcore_prefix(p)
- for p in core_proto_sources_relative
- ]
+ return [
+ _add_tfcore_prefix(p) for p in core_proto_sources_relative
+ ]
# Returns the list of pb.h and proto.h headers that are generated for
# tf_android_core_proto_sources().
def tf_android_core_proto_headers(core_proto_sources_relative):
- return ([
- _add_tfcore_prefix(p).replace(":", "/").replace(".proto", ".pb.h")
- for p in core_proto_sources_relative
- ] + [
- _add_tfcore_prefix(p).replace(":", "/").replace(".proto", ".proto.h")
- for p in core_proto_sources_relative
- ])
+ return ([
+ _add_tfcore_prefix(p).replace(":", "/").replace(".proto", ".pb.h")
+ for p in core_proto_sources_relative
+ ] + [
+ _add_tfcore_prefix(p).replace(":", "/").replace(".proto", ".proto.h")
+ for p in core_proto_sources_relative
+ ])
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
- return str(Label(dep))
+ return str(Label(dep))
def if_android_x86(a):
- return select({
- clean_dep("//tensorflow:android_x86"): a,
- clean_dep("//tensorflow:android_x86_64"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:android_x86"): a,
+ clean_dep("//tensorflow:android_x86_64"): a,
+ "//conditions:default": [],
+ })
def if_android_arm(a):
- return select({
- clean_dep("//tensorflow:android_arm"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:android_arm"): a,
+ "//conditions:default": [],
+ })
def if_android_arm64(a):
- return select({
- clean_dep("//tensorflow:android_arm64"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:android_arm64"): a,
+ "//conditions:default": [],
+ })
def if_android_mips(a):
- return select({
- clean_dep("//tensorflow:android_mips"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:android_mips"): a,
+ "//conditions:default": [],
+ })
def if_not_android(a):
- return select({
- clean_dep("//tensorflow:android"): [],
- "//conditions:default": a,
- })
+ return select({
+ clean_dep("//tensorflow:android"): [],
+ "//conditions:default": a,
+ })
def if_not_android_mips_and_mips64(a):
- return select({
- clean_dep("//tensorflow:android_mips"): [],
- clean_dep("//tensorflow:android_mips64"): [],
- "//conditions:default": a,
- })
+ return select({
+ clean_dep("//tensorflow:android_mips"): [],
+ clean_dep("//tensorflow:android_mips64"): [],
+ "//conditions:default": a,
+ })
def if_android(a):
- return select({
- clean_dep("//tensorflow:android"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:android"): a,
+ "//conditions:default": [],
+ })
def if_ios(a):
- return select({
- clean_dep("//tensorflow:ios"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:ios"): a,
+ "//conditions:default": [],
+ })
def if_ios_x86_64(a):
- return select({
- clean_dep("//tensorflow:ios_x86_64"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:ios_x86_64"): a,
+ "//conditions:default": [],
+ })
def if_mobile(a):
- return select({
- clean_dep("//tensorflow:android"): a,
- clean_dep("//tensorflow:ios"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:android"): a,
+ clean_dep("//tensorflow:ios"): a,
+ "//conditions:default": [],
+ })
def if_not_mobile(a):
- return select({
- clean_dep("//tensorflow:android"): [],
- clean_dep("//tensorflow:ios"): [],
- "//conditions:default": a,
- })
+ return select({
+ clean_dep("//tensorflow:android"): [],
+ clean_dep("//tensorflow:ios"): [],
+ "//conditions:default": a,
+ })
def if_not_windows(a):
- return select({
- clean_dep("//tensorflow:windows"): [],
- clean_dep("//tensorflow:windows_msvc"): [],
- "//conditions:default": a,
- })
+ return select({
+ clean_dep("//tensorflow:windows"): [],
+ clean_dep("//tensorflow:windows_msvc"): [],
+ "//conditions:default": a,
+ })
def if_windows(a):
- return select({
- clean_dep("//tensorflow:windows"): a,
- clean_dep("//tensorflow:windows_msvc"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:windows"): a,
+ clean_dep("//tensorflow:windows_msvc"): a,
+ "//conditions:default": [],
+ })
def if_not_windows_cuda(a):
- return select({
- clean_dep("//tensorflow:with_cuda_support_windows_override"): [],
- "//conditions:default": a,
- })
+ return select({
+ clean_dep("//tensorflow:with_cuda_support_windows_override"): [],
+ "//conditions:default": a,
+ })
def if_linux_x86_64(a):
- return select({
- clean_dep("//tensorflow:linux_x86_64"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:linux_x86_64"): a,
+ "//conditions:default": [],
+ })
def if_darwin(a):
- return select({
- clean_dep("//tensorflow:darwin"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:darwin"): a,
+ "//conditions:default": [],
+ })
def if_override_eigen_strong_inline(a):
- return select({
- clean_dep("//tensorflow:override_eigen_strong_inline"): a,
- "//conditions:default": [],
- })
+ return select({
+ clean_dep("//tensorflow:override_eigen_strong_inline"): a,
+ "//conditions:default": [],
+ })
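Since many rules below chain these selectors, a small composition sketch (the target name and the -DFULL_RUNTIME flag are made up for illustration):

    cc_library(
        name = "platform_tuned_lib",  # hypothetical
        srcs = ["lib.cc"],
        copts = if_android(["-DTF_LEAN_BINARY"]) +
                if_ios(["-std=c++11"]) +
                if_not_mobile(["-DFULL_RUNTIME"]),
    )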
-def get_win_copts(is_external = False):
+def get_win_copts(is_external=False):
WINDOWS_COPTS = [
"/DPLATFORM_WINDOWS",
"/DEIGEN_HAS_C99_MATH",
@@ -186,150 +186,143 @@ def get_win_copts(is_external = False):
"/DNOGDI",
]
if is_external:
- return WINDOWS_COPTS + ["/UTF_COMPILE_LIBRARY"]
+ return WINDOWS_COPTS + ["/UTF_COMPILE_LIBRARY"]
else:
- return WINDOWS_COPTS + ["/DTF_COMPILE_LIBRARY"]
+ return WINDOWS_COPTS + ["/DTF_COMPILE_LIBRARY"]
# LINT.IfChange
-def tf_copts(android_optimization_level_override = "-O2", is_external = False):
- # For compatibility reasons, android_optimization_level_override
- # is currently only being set for Android.
- # To clear this value, and allow the CROSSTOOL default
- # to be used, pass android_optimization_level_override=None
- android_copts = [
- "-std=c++11",
- "-DTF_LEAN_BINARY",
- "-Wno-narrowing",
- "-fomit-frame-pointer",
- ]
- if android_optimization_level_override:
- android_copts.append(android_optimization_level_override)
- return (
- if_not_windows([
- "-DEIGEN_AVOID_STL_ARRAY",
- "-Iexternal/gemmlowp",
- "-Wno-sign-compare",
- "-fno-exceptions",
- "-ftemplate-depth=900",
- ]) +
- if_cuda(["-DGOOGLE_CUDA=1"]) +
- if_tensorrt(["-DGOOGLE_TENSORRT=1"]) +
- if_mkl(["-DINTEL_MKL=1", "-DEIGEN_USE_VML"]) +
- if_mkl_lnx_x64(["-fopenmp"]) +
- if_android_arm(["-mfpu=neon"]) +
- if_linux_x86_64(["-msse3"]) +
- if_ios_x86_64(["-msse4.1"]) +
- select({
+def tf_copts(android_optimization_level_override="-O2", is_external=False):
+ # For compatibility reasons, android_optimization_level_override
+ # is currently only being set for Android.
+ # To clear this value, and allow the CROSSTOOL default
+ # to be used, pass android_optimization_level_override=None
+ android_copts = [
+ "-std=c++11",
+ "-DTF_LEAN_BINARY",
+ "-Wno-narrowing",
+ "-fomit-frame-pointer",
+ ]
+ if android_optimization_level_override:
+ android_copts.append(android_optimization_level_override)
+ return (
+ if_not_windows([
+ "-DEIGEN_AVOID_STL_ARRAY",
+ "-Iexternal/gemmlowp",
+ "-Wno-sign-compare",
+ "-fno-exceptions",
+ "-ftemplate-depth=900"])
+ + if_cuda(["-DGOOGLE_CUDA=1"])
+ + if_tensorrt(["-DGOOGLE_TENSORRT=1"])
+ + if_mkl(["-DINTEL_MKL=1", "-DEIGEN_USE_VML"])
+ + if_mkl_lnx_x64(["-fopenmp"])
+ + if_android_arm(["-mfpu=neon"])
+ + if_linux_x86_64(["-msse3"])
+ + if_ios_x86_64(["-msse4.1"])
+ + select({
clean_dep("//tensorflow:framework_shared_object"): [],
"//conditions:default": ["-DTENSORFLOW_MONOLITHIC_BUILD"],
- }) +
- select({
+ })
+ + select({
clean_dep("//tensorflow:android"): android_copts,
clean_dep("//tensorflow:darwin"): [],
clean_dep("//tensorflow:windows"): get_win_copts(is_external),
clean_dep("//tensorflow:windows_msvc"): get_win_copts(is_external),
clean_dep("//tensorflow:ios"): ["-std=c++11"],
- "//conditions:default": ["-pthread"],
- })
- )
+ "//conditions:default": ["-pthread"]
+ }))
+
def tfe_xla_copts():
- return select({
- "//tensorflow:with_xla_support": ["-DTENSORFLOW_EAGER_USE_XLA"],
- "//conditions:default": [],
- })
+ return select({
+ "//tensorflow:with_xla_support": ["-DTENSORFLOW_EAGER_USE_XLA"],
+ "//conditions:default": [],
+ })
def tf_opts_nortti_if_android():
- return if_android([
- "-fno-rtti",
- "-DGOOGLE_PROTOBUF_NO_RTTI",
- "-DGOOGLE_PROTOBUF_NO_STATIC_INITIALIZER",
- ])
+ return if_android([
+ "-fno-rtti",
+ "-DGOOGLE_PROTOBUF_NO_RTTI",
+ "-DGOOGLE_PROTOBUF_NO_STATIC_INITIALIZER",
+ ])
# LINT.ThenChange(//tensorflow/contrib/android/cmake/CMakeLists.txt)
def tf_features_nomodules_if_android():
- return if_android(["-use_header_modules"])
+ return if_android(["-use_header_modules"])
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate a library for that file.
-def tf_gen_op_libs(op_lib_names, deps = None, is_external = True):
- # Make library out of each op so it can also be used to generate wrappers
- # for various languages.
- if not deps:
- deps = []
- for n in op_lib_names:
- native.cc_library(
- name = n + "_op_lib",
- copts = tf_copts(is_external = is_external),
- srcs = ["ops/" + n + ".cc"],
- deps = deps + [clean_dep("//tensorflow/core:framework")],
- visibility = ["//visibility:public"],
- alwayslink = 1,
- linkstatic = 1,
- )
+def tf_gen_op_libs(op_lib_names, deps=None, is_external=True):
+ # Make library out of each op so it can also be used to generate wrappers
+ # for various languages.
+ if not deps:
+ deps = []
+ for n in op_lib_names:
+ native.cc_library(
+ name=n + "_op_lib",
+ copts=tf_copts(is_external=is_external),
+ srcs=["ops/" + n + ".cc"],
+ deps=deps + [clean_dep("//tensorflow/core:framework")],
+ visibility=["//visibility:public"],
+ alwayslink=1,
+ linkstatic=1,)
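Sketch of a typical invocation, assuming the conventional ops/ layout:

    # Expects ops/array_ops.cc and ops/math_ops.cc to exist; emits the
    # cc_library targets "array_ops_op_lib" and "math_ops_op_lib".
    tf_gen_op_libs(
        op_lib_names = ["array_ops", "math_ops"],
    )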
def _make_search_paths(prefix, levels_to_root):
- return ",".join(
- [
- "-rpath,%s/%s" % (prefix, "/".join([".."] * search_level))
- for search_level in range(levels_to_root + 1)
- ],
- )
+ return ",".join(
+ ["-rpath,%s/%s" % (prefix, "/".join([".."] * search_level))
+ for search_level in range(levels_to_root + 1)])
def _rpath_linkopts(name):
- # Search parent directories up to the TensorFlow root directory for shared
- # object dependencies, even if this op shared object is deeply nested
- # (e.g. tensorflow/contrib/package:python/ops/_op_lib.so). tensorflow/ is then
- # the root and tensorflow/libtensorflow_framework.so should exist when
- # deployed. Other shared object dependencies (e.g. shared between contrib/
- # ops) are picked up as long as they are in either the same or a parent
- # directory in the tensorflow/ tree.
- levels_to_root = native.package_name().count("/") + name.count("/")
- return select({
- clean_dep("//tensorflow:darwin"): [
- "-Wl,%s" % (_make_search_paths("@loader_path", levels_to_root),),
- ],
- clean_dep("//tensorflow:windows"): [],
- clean_dep("//tensorflow:windows_msvc"): [],
- "//conditions:default": [
- "-Wl,%s" % (_make_search_paths("$$ORIGIN", levels_to_root),),
- ],
- })
+ # Search parent directories up to the TensorFlow root directory for shared
+ # object dependencies, even if this op shared object is deeply nested
+ # (e.g. tensorflow/contrib/package:python/ops/_op_lib.so). tensorflow/ is then
+ # the root and tensorflow/libtensorflow_framework.so should exist when
+ # deployed. Other shared object dependencies (e.g. shared between contrib/
+ # ops) are picked up as long as they are in either the same or a parent
+ # directory in the tensorflow/ tree.
+ levels_to_root = native.package_name().count("/") + name.count("/")
+ return select({
+ clean_dep("//tensorflow:darwin"): [
+ "-Wl,%s" % (_make_search_paths("@loader_path", levels_to_root),),
+ ],
+ clean_dep("//tensorflow:windows"): [],
+ clean_dep("//tensorflow:windows_msvc"): [],
+ "//conditions:default": [
+ "-Wl,%s" % (_make_search_paths("$$ORIGIN", levels_to_root),),
+ ],
+ })
# Bazel-generated shared objects which must be linked into TensorFlow binaries
# to define symbols from //tensorflow/core:framework and //tensorflow/core:lib.
def tf_binary_additional_srcs():
- return if_static(
- extra_deps = [],
- otherwise = [
- clean_dep("//tensorflow:libtensorflow_framework.so"),
- ],
- )
+ return if_static(
+ extra_deps=[],
+ otherwise=[
+ clean_dep("//tensorflow:libtensorflow_framework.so"),
+ ])
def tf_cc_shared_object(
- name,
- srcs = [],
- deps = [],
- linkopts = [],
- framework_so = tf_binary_additional_srcs(),
- **kwargs):
- native.cc_binary(
- name = name,
- srcs = srcs + framework_so,
- deps = deps,
- linkshared = 1,
- linkopts = linkopts + _rpath_linkopts(name) + select({
- clean_dep("//tensorflow:darwin"): [
- "-Wl,-install_name,@rpath/" + name.split("/")[-1],
- ],
- clean_dep("//tensorflow:windows"): [],
- "//conditions:default": [
- "-Wl,-soname," + name.split("/")[-1],
- ],
- }),
- **kwargs
- )
+ name,
+ srcs=[],
+ deps=[],
+ linkopts=[],
+ framework_so=tf_binary_additional_srcs(),
+ **kwargs):
+ native.cc_binary(
+ name=name,
+ srcs=srcs + framework_so,
+ deps=deps,
+      linkshared=1,
+ linkopts=linkopts + _rpath_linkopts(name) + select({
+ clean_dep("//tensorflow:darwin"): [
+ "-Wl,-install_name,@rpath/" + name.split("/")[-1],
+ ],
+ clean_dep("//tensorflow:windows"): [],
+ "//conditions:default": [
+ "-Wl,-soname," + name.split("/")[-1],
+ ],
+ }),
+ **kwargs)
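A usage sketch; both the target name and the dep label are assumptions for illustration:

    tf_cc_shared_object(
        name = "libmy_custom_ops.so",  # hypothetical
        srcs = ["my_custom_ops.cc"],
        deps = ["//tensorflow/core:framework_headers_lib"],  # assumed dep
    )
    # On Darwin this adds -Wl,-install_name,@rpath/libmy_custom_ops.so;
    # on other non-Windows platforms it adds -Wl,-soname,libmy_custom_ops.so.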
register_extension_info(
extension_name = "tf_cc_shared_object",
@@ -340,25 +333,23 @@ register_extension_info(
# (//third_party/tensorflow:libtensorflow_framework.so) when not building
# statically. Also adds linker options (rpaths) so that the framework shared
# object can be found.
-def tf_cc_binary(
- name,
- srcs = [],
- deps = [],
- linkopts = [],
- copts = tf_copts(),
- **kwargs):
- native.cc_binary(
- name = name,
- copts = copts,
- srcs = srcs + tf_binary_additional_srcs(),
- deps = deps + if_mkl(
- [
- "//third_party/mkl:intel_binary_blob",
- ],
- ),
- linkopts = linkopts + _rpath_linkopts(name),
- **kwargs
- )
+def tf_cc_binary(name,
+ srcs=[],
+ deps=[],
+ linkopts=[],
+ copts=tf_copts(),
+ **kwargs):
+ native.cc_binary(
+ name=name,
+ copts=copts,
+ srcs=srcs + tf_binary_additional_srcs(),
+ deps=deps + if_mkl(
+ [
+ "//third_party/mkl:intel_binary_blob",
+ ],
+ ),
+ linkopts=linkopts + _rpath_linkopts(name),
+ **kwargs)
register_extension_info(
extension_name = "tf_cc_binary",
@@ -368,72 +359,64 @@ register_extension_info(
# A simple wrap around native.cc_binary rule.
# When using this rule, you should realize it doesn't link to any tensorflow
# dependencies by default.
-def tf_native_cc_binary(
- name,
- copts = tf_copts(),
- **kwargs):
- native.cc_binary(
- name = name,
- copts = copts,
- **kwargs
- )
+def tf_native_cc_binary(name,
+ copts=tf_copts(),
+ **kwargs):
+ native.cc_binary(
+ name=name,
+ copts=copts,
+ **kwargs)
register_extension_info(
extension_name = "tf_native_cc_binary",
label_regex_for_dep = "{extension_name}.*",
)
-def tf_gen_op_wrapper_cc(
- name,
- out_ops_file,
- pkg = "",
- op_gen = clean_dep("//tensorflow/cc:cc_op_gen_main"),
- deps = None,
- include_internal_ops = 0,
- # ApiDefs will be loaded in the order specified in this list.
- api_def_srcs = []):
- # Construct an op generator binary for these ops.
- tool = out_ops_file + "_gen_cc"
- if deps == None:
- deps = [pkg + ":" + name + "_op_lib"]
- tf_cc_binary(
- name = tool,
- copts = tf_copts(),
- linkopts = if_not_windows(["-lm"]),
- linkstatic = 1, # Faster to link this one-time-use binary dynamically
- deps = [op_gen] + deps,
- )
-
- srcs = api_def_srcs[:]
-
- if not api_def_srcs:
- api_def_args_str = ","
- else:
- api_def_args = []
- for api_def_src in api_def_srcs:
- # Add directory of the first ApiDef source to args.
- # We are assuming all ApiDefs in a single api_def_src are in the
- # same directory.
- api_def_args.append(
- " $$(dirname $$(echo $(locations " + api_def_src +
- ") | cut -d\" \" -f1))",
- )
- api_def_args_str = ",".join(api_def_args)
-
- native.genrule(
- name = name + "_genrule",
- outs = [
- out_ops_file + ".h",
- out_ops_file + ".cc",
- out_ops_file + "_internal.h",
- out_ops_file + "_internal.cc",
- ],
- srcs = srcs,
- tools = [":" + tool] + tf_binary_additional_srcs(),
- cmd = ("$(location :" + tool + ") $(location :" + out_ops_file + ".h) " +
- "$(location :" + out_ops_file + ".cc) " +
- str(include_internal_ops) + " " + api_def_args_str),
- )
+def tf_gen_op_wrapper_cc(name,
+ out_ops_file,
+ pkg="",
+ op_gen=clean_dep("//tensorflow/cc:cc_op_gen_main"),
+ deps=None,
+ include_internal_ops=0,
+ # ApiDefs will be loaded in the order specified in this list.
+ api_def_srcs=[]):
+ # Construct an op generator binary for these ops.
+ tool = out_ops_file + "_gen_cc"
+ if deps == None:
+ deps = [pkg + ":" + name + "_op_lib"]
+ tf_cc_binary(
+ name=tool,
+ copts=tf_copts(),
+ linkopts=if_not_windows(["-lm"]),
+ linkstatic=1, # Faster to link this one-time-use binary dynamically
+ deps=[op_gen] + deps)
+
+ srcs = api_def_srcs[:]
+
+ if not api_def_srcs:
+ api_def_args_str = ","
+ else:
+ api_def_args = []
+ for api_def_src in api_def_srcs:
+ # Add directory of the first ApiDef source to args.
+ # We are assuming all ApiDefs in a single api_def_src are in the
+ # same directory.
+ api_def_args.append(
+ " $$(dirname $$(echo $(locations " + api_def_src +
+ ") | cut -d\" \" -f1))")
+ api_def_args_str = ",".join(api_def_args)
+
+ native.genrule(
+ name=name + "_genrule",
+ outs=[
+ out_ops_file + ".h", out_ops_file + ".cc",
+ out_ops_file + "_internal.h", out_ops_file + "_internal.cc"
+ ],
+ srcs=srcs,
+ tools=[":" + tool] + tf_binary_additional_srcs(),
+ cmd=("$(location :" + tool + ") $(location :" + out_ops_file + ".h) " +
+ "$(location :" + out_ops_file + ".cc) " +
+ str(include_internal_ops) + " " + api_def_args_str))
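A hedged example of the generator above; "math_ops" is illustrative and presumes a math_ops_op_lib target in the same package (the default deps):

    tf_gen_op_wrapper_cc(
        name = "math_ops",
        out_ops_file = "ops/math_ops",
    )
    # Emits ops/math_ops.{h,cc} and ops/math_ops_internal.{h,cc} via the
    # one-time-use "ops/math_ops_gen_cc" tool built from cc_op_gen_main.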
# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate individual C++ .cc and .h
@@ -462,72 +445,68 @@ def tf_gen_op_wrapper_cc(
# "ops/math_ops_internal.h" ],
# deps = [ ... ])
# TODO(joshl): Cleaner approach for hidden ops.
-def tf_gen_op_wrappers_cc(
- name,
- op_lib_names = [],
- other_srcs = [],
- other_hdrs = [],
- pkg = "",
- deps = [
- clean_dep("//tensorflow/cc:ops"),
- clean_dep("//tensorflow/cc:scope"),
- clean_dep("//tensorflow/cc:const_op"),
- ],
- op_gen = clean_dep("//tensorflow/cc:cc_op_gen_main"),
- include_internal_ops = 0,
- visibility = None,
- # ApiDefs will be loaded in the order apecified in this list.
- api_def_srcs = []):
- subsrcs = other_srcs[:]
- subhdrs = other_hdrs[:]
- internalsrcs = []
- internalhdrs = []
- for n in op_lib_names:
- tf_gen_op_wrapper_cc(
- n,
- "ops/" + n,
- pkg = pkg,
- op_gen = op_gen,
- include_internal_ops = include_internal_ops,
- api_def_srcs = api_def_srcs,
- )
- subsrcs += ["ops/" + n + ".cc"]
- subhdrs += ["ops/" + n + ".h"]
- internalsrcs += ["ops/" + n + "_internal.cc"]
- internalhdrs += ["ops/" + n + "_internal.h"]
-
- native.cc_library(
- name = name,
- srcs = subsrcs,
- hdrs = subhdrs,
- deps = deps + if_not_android([
- clean_dep("//tensorflow/core:core_cpu"),
- clean_dep("//tensorflow/core:framework"),
- clean_dep("//tensorflow/core:lib"),
- clean_dep("//tensorflow/core:protos_all_cc"),
- ]) + if_android([
- clean_dep("//tensorflow/core:android_tensorflow_lib"),
- ]),
- copts = tf_copts(),
- alwayslink = 1,
- visibility = visibility,
- )
- native.cc_library(
- name = name + "_internal",
- srcs = internalsrcs,
- hdrs = internalhdrs,
- deps = deps + if_not_android([
- clean_dep("//tensorflow/core:core_cpu"),
- clean_dep("//tensorflow/core:framework"),
- clean_dep("//tensorflow/core:lib"),
- clean_dep("//tensorflow/core:protos_all_cc"),
- ]) + if_android([
- clean_dep("//tensorflow/core:android_tensorflow_lib"),
- ]),
- copts = tf_copts(),
- alwayslink = 1,
- visibility = [clean_dep("//tensorflow:internal")],
- )
+def tf_gen_op_wrappers_cc(name,
+ op_lib_names=[],
+ other_srcs=[],
+ other_hdrs=[],
+ pkg="",
+ deps=[
+ clean_dep("//tensorflow/cc:ops"),
+ clean_dep("//tensorflow/cc:scope"),
+ clean_dep("//tensorflow/cc:const_op"),
+ ],
+ op_gen=clean_dep("//tensorflow/cc:cc_op_gen_main"),
+ include_internal_ops=0,
+ visibility=None,
+                          # ApiDefs will be loaded in the order specified in this list.
+ api_def_srcs=[]):
+ subsrcs = other_srcs[:]
+ subhdrs = other_hdrs[:]
+ internalsrcs = []
+ internalhdrs = []
+ for n in op_lib_names:
+ tf_gen_op_wrapper_cc(
+ n,
+ "ops/" + n,
+ pkg=pkg,
+ op_gen=op_gen,
+ include_internal_ops=include_internal_ops,
+ api_def_srcs=api_def_srcs)
+ subsrcs += ["ops/" + n + ".cc"]
+ subhdrs += ["ops/" + n + ".h"]
+ internalsrcs += ["ops/" + n + "_internal.cc"]
+ internalhdrs += ["ops/" + n + "_internal.h"]
+
+ native.cc_library(
+ name=name,
+ srcs=subsrcs,
+ hdrs=subhdrs,
+ deps=deps + if_not_android([
+ clean_dep("//tensorflow/core:core_cpu"),
+ clean_dep("//tensorflow/core:framework"),
+ clean_dep("//tensorflow/core:lib"),
+ clean_dep("//tensorflow/core:protos_all_cc"),
+ ]) + if_android([
+ clean_dep("//tensorflow/core:android_tensorflow_lib"),
+ ]),
+ copts=tf_copts(),
+ alwayslink=1,
+ visibility=visibility)
+ native.cc_library(
+ name=name + "_internal",
+ srcs=internalsrcs,
+ hdrs=internalhdrs,
+ deps=deps + if_not_android([
+ clean_dep("//tensorflow/core:core_cpu"),
+ clean_dep("//tensorflow/core:framework"),
+ clean_dep("//tensorflow/core:lib"),
+ clean_dep("//tensorflow/core:protos_all_cc"),
+ ]) + if_android([
+ clean_dep("//tensorflow/core:android_tensorflow_lib"),
+ ]),
+ copts=tf_copts(),
+ alwayslink=1,
+ visibility=[clean_dep("//tensorflow:internal")])
# Generates a Python library target wrapping the ops registered in "deps".
#
@@ -556,105 +535,99 @@ def tf_gen_op_wrappers_cc(
# gen_locally: if True, the genrule to generate the Python library will be run
# without sandboxing. This would help when the genrule depends on symlinks
# which may not be supported in the sandbox.
-def tf_gen_op_wrapper_py(
- name,
- out = None,
- hidden = None,
- visibility = None,
- deps = [],
- require_shape_functions = False,
- hidden_file = None,
- generated_target_name = None,
- op_whitelist = [],
- cc_linkopts = [],
- api_def_srcs = [],
- gen_locally = False):
- if (hidden or hidden_file) and op_whitelist:
- fail("Cannot pass specify both hidden and op_whitelist.")
-
- # Construct a cc_binary containing the specified ops.
- tool_name = "gen_" + name + "_py_wrappers_cc"
- if not deps:
- deps = [str(Label("//tensorflow/core:" + name + "_op_lib"))]
- tf_cc_binary(
- name = tool_name,
- linkopts = if_not_windows(["-lm"]) + cc_linkopts,
- copts = tf_copts(),
- linkstatic = 1, # Faster to link this one-time-use binary dynamically
- deps = ([
- clean_dep("//tensorflow/core:framework"),
- clean_dep("//tensorflow/python:python_op_gen_main"),
- ] + deps),
- visibility = [clean_dep("//tensorflow:internal")],
- )
-
- # Invoke the previous cc_binary to generate a python file.
- if not out:
- out = "ops/gen_" + name + ".py"
-
- if hidden:
- op_list_arg = ",".join(hidden)
- op_list_is_whitelist = False
- elif op_whitelist:
- op_list_arg = ",".join(op_whitelist)
- op_list_is_whitelist = True
- else:
- op_list_arg = "''"
- op_list_is_whitelist = False
-
- # Prepare ApiDef directories to pass to the genrule.
- if not api_def_srcs:
- api_def_args_str = ","
- else:
- api_def_args = []
- for api_def_src in api_def_srcs:
- # Add directory of the first ApiDef source to args.
- # We are assuming all ApiDefs in a single api_def_src are in the
- # same directory.
- api_def_args.append(
- "$$(dirname $$(echo $(locations " + api_def_src +
- ") | cut -d\" \" -f1))",
- )
- api_def_args_str = ",".join(api_def_args)
-
- if hidden_file:
- # `hidden_file` is file containing a list of op names to be hidden in the
- # generated module.
- native.genrule(
- name = name + "_pygenrule",
- outs = [out],
- srcs = api_def_srcs + [hidden_file],
- tools = [tool_name] + tf_binary_additional_srcs(),
- local = (1 if gen_locally else 0),
- cmd = ("$(location " + tool_name + ") " + api_def_args_str +
- " @$(location " + hidden_file + ") " +
- ("1" if require_shape_functions else "0") + " > $@"),
- )
- else:
- native.genrule(
- name = name + "_pygenrule",
- outs = [out],
- srcs = api_def_srcs,
- tools = [tool_name] + tf_binary_additional_srcs(),
- local = (1 if gen_locally else 0),
- cmd = ("$(location " + tool_name + ") " + api_def_args_str + " " +
- op_list_arg + " " +
- ("1" if require_shape_functions else "0") + " " +
- ("1" if op_list_is_whitelist else "0") + " > $@"),
- )
-
- # Make a py_library out of the generated python file.
- if not generated_target_name:
- generated_target_name = name
- native.py_library(
- name = generated_target_name,
- srcs = [out],
- srcs_version = "PY2AND3",
- visibility = visibility,
- deps = [
- clean_dep("//tensorflow/python:framework_for_generated_wrappers_v2"),
- ],
- )
+def tf_gen_op_wrapper_py(name,
+ out=None,
+ hidden=None,
+ visibility=None,
+ deps=[],
+ require_shape_functions=False,
+ hidden_file=None,
+ generated_target_name=None,
+ op_whitelist=[],
+ cc_linkopts=[],
+ api_def_srcs=[],
+ gen_locally=False):
+ if (hidden or hidden_file) and op_whitelist:
+  fail("Cannot specify both hidden and op_whitelist.")
+
+ # Construct a cc_binary containing the specified ops.
+ tool_name = "gen_" + name + "_py_wrappers_cc"
+ if not deps:
+ deps = [str(Label("//tensorflow/core:" + name + "_op_lib"))]
+ tf_cc_binary(
+ name=tool_name,
+ linkopts=if_not_windows(["-lm"]) + cc_linkopts,
+ copts=tf_copts(),
+ linkstatic=1, # Faster to link this one-time-use binary dynamically
+ deps=([
+ clean_dep("//tensorflow/core:framework"),
+ clean_dep("//tensorflow/python:python_op_gen_main")
+ ] + deps),
+ visibility=[clean_dep("//tensorflow:internal")],)
+
+ # Invoke the previous cc_binary to generate a python file.
+ if not out:
+ out = "ops/gen_" + name + ".py"
+
+ if hidden:
+ op_list_arg = ",".join(hidden)
+ op_list_is_whitelist = False
+ elif op_whitelist:
+ op_list_arg = ",".join(op_whitelist)
+ op_list_is_whitelist = True
+ else:
+ op_list_arg = "''"
+ op_list_is_whitelist = False
+
+ # Prepare ApiDef directories to pass to the genrule.
+ if not api_def_srcs:
+ api_def_args_str = ","
+ else:
+ api_def_args = []
+ for api_def_src in api_def_srcs:
+ # Add directory of the first ApiDef source to args.
+ # We are assuming all ApiDefs in a single api_def_src are in the
+ # same directory.
+ api_def_args.append(
+ "$$(dirname $$(echo $(locations " + api_def_src +
+ ") | cut -d\" \" -f1))")
+ api_def_args_str = ",".join(api_def_args)
+
+ if hidden_file:
+ # `hidden_file` is file containing a list of op names to be hidden in the
+ # generated module.
+ native.genrule(
+ name=name + "_pygenrule",
+ outs=[out],
+ srcs=api_def_srcs + [hidden_file],
+ tools=[tool_name] + tf_binary_additional_srcs(),
+        local=(1 if gen_locally else 0),
+ cmd=("$(location " + tool_name + ") " + api_def_args_str +
+ " @$(location " + hidden_file + ") " +
+ ("1" if require_shape_functions else "0") + " > $@"))
+ else:
+ native.genrule(
+ name=name + "_pygenrule",
+ outs=[out],
+ srcs=api_def_srcs,
+ tools=[tool_name] + tf_binary_additional_srcs(),
+        local=(1 if gen_locally else 0),
+ cmd=("$(location " + tool_name + ") " + api_def_args_str + " " +
+ op_list_arg + " " +
+ ("1" if require_shape_functions else "0") + " " +
+ ("1" if op_list_is_whitelist else "0") + " > $@"))
+
+ # Make a py_library out of the generated python file.
+ if not generated_target_name:
+ generated_target_name = name
+ native.py_library(
+ name=generated_target_name,
+ srcs=[out],
+ srcs_version="PY2AND3",
+ visibility=visibility,
+ deps=[
+ clean_dep("//tensorflow/python:framework_for_generated_wrappers_v2"),
+ ],)
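A minimal sketch of the Python wrapper generator above; the op-library name and hidden op are assumptions:

    # Presumes //tensorflow/core:math_ops_op_lib exists (the default dep).
    tf_gen_op_wrapper_py(
        name = "math_ops",
        hidden = ["InternalMathOp"],  # hypothetical op to hide
        require_shape_functions = True,
    )
    # Generates ops/gen_math_ops.py and wraps it in a py_library "math_ops".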
# Define a bazel macro that creates cc_test for tensorflow.
#
@@ -665,52 +638,50 @@ def tf_gen_op_wrapper_py(
#
# TODO(opensource): we need to enable this to work around the hidden symbol
# __cudaRegisterFatBinary error. Need more investigations.
-def tf_cc_test(
- name,
- srcs,
- deps,
- linkstatic = 0,
- extra_copts = [],
- suffix = "",
- linkopts = [],
- nocopts = None,
- **kwargs):
- native.cc_test(
- name = "%s%s" % (name, suffix),
- srcs = srcs + tf_binary_additional_srcs(),
- copts = tf_copts() + extra_copts,
- linkopts = select({
- clean_dep("//tensorflow:android"): [
- "-pie",
- ],
- clean_dep("//tensorflow:windows"): [],
- clean_dep("//tensorflow:windows_msvc"): [],
- clean_dep("//tensorflow:darwin"): [
- "-lm",
- ],
- "//conditions:default": [
- "-lpthread",
- "-lm",
- ],
- }) + linkopts + _rpath_linkopts(name),
- deps = deps + if_mkl(
- [
- "//third_party/mkl:intel_binary_blob",
- ],
- ),
- # Nested select() statements seem not to be supported when passed to
- # linkstatic, and we already have a cuda select() passed in to this
- # function.
- linkstatic = linkstatic or select({
- # cc_tests with ".so"s in srcs incorrectly link on Darwin unless
- # linkstatic=1 (https://github.com/bazelbuild/bazel/issues/3450).
- # TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
- clean_dep("//tensorflow:darwin"): 1,
- "//conditions:default": 0,
- }),
- nocopts = nocopts,
- **kwargs
- )
+def tf_cc_test(name,
+ srcs,
+ deps,
+ linkstatic=0,
+ extra_copts=[],
+ suffix="",
+ linkopts=[],
+ nocopts=None,
+ **kwargs):
+ native.cc_test(
+ name="%s%s" % (name, suffix),
+ srcs=srcs + tf_binary_additional_srcs(),
+ copts=tf_copts() + extra_copts,
+ linkopts=select({
+ clean_dep("//tensorflow:android"): [
+ "-pie",
+ ],
+ clean_dep("//tensorflow:windows"): [],
+ clean_dep("//tensorflow:windows_msvc"): [],
+ clean_dep("//tensorflow:darwin"): [
+ "-lm",
+ ],
+ "//conditions:default": [
+ "-lpthread",
+ "-lm"
+ ],
+ }) + linkopts + _rpath_linkopts(name),
+ deps=deps + if_mkl(
+ [
+ "//third_party/mkl:intel_binary_blob",
+ ],
+ ),
+ # Nested select() statements seem not to be supported when passed to
+ # linkstatic, and we already have a cuda select() passed in to this
+ # function.
+ linkstatic=linkstatic or select({
+ # cc_tests with ".so"s in srcs incorrectly link on Darwin unless
+ # linkstatic=1 (https://github.com/bazelbuild/bazel/issues/3450).
+ # TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
+ clean_dep("//tensorflow:darwin"): 1,
+ "//conditions:default": 0,
+ }),
+ nocopts=nocopts,
+ **kwargs)
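A usage sketch with commonly used test deps; the target name is illustrative:

    tf_cc_test(
        name = "my_util_test",  # hypothetical
        srcs = ["my_util_test.cc"],
        deps = [
            "//tensorflow/core:test",
            "//tensorflow/core:test_main",
        ],
    )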
register_extension_info(
extension_name = "tf_cc_test",
@@ -719,114 +690,106 @@ register_extension_info(
# Part of the testing workflow requires a distinguishable name for the build
# rules that involve a GPU, even if otherwise identical to the base rule.
-def tf_cc_test_gpu(
- name,
- srcs,
- deps,
- linkstatic = 0,
- tags = [],
- data = [],
- size = "medium",
- suffix = "",
- args = None):
- tf_cc_test(
- name,
- srcs,
- deps,
- linkstatic = linkstatic,
- tags = tags,
- data = data,
- size = size,
- suffix = suffix,
- args = args,
- )
+def tf_cc_test_gpu(name,
+ srcs,
+ deps,
+ linkstatic=0,
+ tags=[],
+ data=[],
+ size="medium",
+ suffix="",
+ args=None):
+ tf_cc_test(
+ name,
+ srcs,
+ deps,
+ linkstatic=linkstatic,
+ tags=tags,
+ data=data,
+ size=size,
+ suffix=suffix,
+ args=args)
register_extension_info(
extension_name = "tf_cc_test_gpu",
label_regex_for_dep = "{extension_name}",
)
-def tf_cuda_cc_test(
- name,
- srcs = [],
- deps = [],
- tags = [],
- data = [],
- size = "medium",
- extra_copts = [],
- linkstatic = 0,
- args = [],
- linkopts = []):
- tf_cc_test(
- name = name,
- srcs = srcs,
- deps = deps,
- tags = tags + ["manual"],
- data = data,
- size = size,
- extra_copts = extra_copts,
- linkstatic = linkstatic,
- linkopts = linkopts,
- args = args,
- )
- tf_cc_test(
- name = name,
- srcs = srcs,
- suffix = "_gpu",
- deps = deps + if_cuda([
- clean_dep("//tensorflow/core:gpu_runtime"),
- ]),
- linkstatic = select({
- # TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
- clean_dep("//tensorflow:darwin"): 1,
- "@local_config_cuda//cuda:using_nvcc": 1,
- "@local_config_cuda//cuda:using_clang": 1,
- "//conditions:default": 0,
- }),
- tags = tags + tf_cuda_tests_tags(),
- data = data,
- size = size,
- extra_copts = extra_copts,
- linkopts = linkopts,
- args = args,
- )
+def tf_cuda_cc_test(name,
+ srcs=[],
+ deps=[],
+ tags=[],
+ data=[],
+ size="medium",
+ extra_copts=[],
+ linkstatic=0,
+ args=[],
+ linkopts=[]):
+ tf_cc_test(
+ name=name,
+ srcs=srcs,
+ deps=deps,
+ tags=tags + ["manual"],
+ data=data,
+ size=size,
+ extra_copts=extra_copts,
+ linkstatic=linkstatic,
+ linkopts=linkopts,
+ args=args)
+ tf_cc_test(
+ name=name,
+ srcs=srcs,
+ suffix="_gpu",
+ deps=deps + if_cuda([
+ clean_dep("//tensorflow/core:gpu_runtime"),
+ ]),
+ linkstatic=select({
+ # TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
+ clean_dep("//tensorflow:darwin"): 1,
+ "@local_config_cuda//cuda:using_nvcc": 1,
+ "@local_config_cuda//cuda:using_clang": 1,
+ "//conditions:default": 0,
+ }),
+ tags=tags + tf_cuda_tests_tags(),
+ data=data,
+ size=size,
+ extra_copts=extra_copts,
+ linkopts=linkopts,
+ args=args)
register_extension_info(
extension_name = "tf_cuda_cc_test",
label_regex_for_dep = "{extension_name}",
)
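To clarify the double expansion above, a sketch of what a single call produces (names are hypothetical):

    tf_cuda_cc_test(
        name = "gpu_kernel_test",
        srcs = ["gpu_kernel_test.cc"],
        deps = ["//tensorflow/core:test_main"],
    )
    # Expands to two cc_tests: "gpu_kernel_test" (tagged "manual") and
    # "gpu_kernel_test_gpu" (tagged with tf_cuda_tests_tags(), i.e.
    # "requires-gpu", and linked against gpu_runtime under --config=cuda).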
-def tf_cuda_only_cc_test(
- name,
- srcs = [],
- deps = [],
- tags = [],
- data = [],
- size = "medium",
- linkstatic = 0,
- args = [],
- linkopts = []):
- native.cc_test(
- name = "%s%s" % (name, "_gpu"),
- srcs = srcs + tf_binary_additional_srcs(),
- size = size,
- args = args,
- copts = _cuda_copts() + tf_copts(),
- data = data,
- deps = deps + if_cuda([
- clean_dep("//tensorflow/core:cuda"),
- clean_dep("//tensorflow/core:gpu_lib"),
- ]),
- linkopts = if_not_windows(["-lpthread", "-lm"]) + linkopts + _rpath_linkopts(name),
- linkstatic = linkstatic or select({
- # cc_tests with ".so"s in srcs incorrectly link on Darwin
- # unless linkstatic=1.
- # TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
- clean_dep("//tensorflow:darwin"): 1,
- "//conditions:default": 0,
- }),
- tags = tags + tf_cuda_tests_tags(),
- )
+def tf_cuda_only_cc_test(name,
+ srcs=[],
+ deps=[],
+ tags=[],
+ data=[],
+ size="medium",
+ linkstatic=0,
+ args=[],
+ linkopts=[]):
+ native.cc_test(
+ name="%s%s" % (name, "_gpu"),
+ srcs=srcs + tf_binary_additional_srcs(),
+ size=size,
+ args=args,
+      copts=_cuda_copts() + tf_copts(),
+ data=data,
+ deps=deps + if_cuda([
+ clean_dep("//tensorflow/core:cuda"),
+ clean_dep("//tensorflow/core:gpu_lib")]),
+ linkopts=if_not_windows(["-lpthread", "-lm"]) + linkopts + _rpath_linkopts(name),
+ linkstatic=linkstatic or select({
+ # cc_tests with ".so"s in srcs incorrectly link on Darwin
+ # unless linkstatic=1.
+ # TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
+ clean_dep("//tensorflow:darwin"): 1,
+ "//conditions:default": 0,
+ }),
+ tags=tags + tf_cuda_tests_tags())
register_extension_info(
extension_name = "tf_cuda_only_cc_test",
@@ -834,109 +797,101 @@ register_extension_info(
)
# Create a cc_test for each of the tensorflow tests listed in "tests"
-def tf_cc_tests(
- srcs,
- deps,
- name = "",
- linkstatic = 0,
- tags = [],
- size = "medium",
- args = None,
- linkopts = [],
- nocopts = None):
- for src in srcs:
- tf_cc_test(
- name = src_to_test_name(src),
- srcs = [src],
- deps = deps,
- linkstatic = linkstatic,
- tags = tags,
- size = size,
- args = args,
- linkopts = linkopts,
- nocopts = nocopts,
- )
-
-def tf_cc_test_mkl(
- srcs,
- deps,
- name = "",
- linkstatic = 0,
- tags = [],
- size = "medium",
- args = None):
- for src in srcs:
- native.cc_test(
- name = src_to_test_name(src),
- srcs = if_mkl([src]) + tf_binary_additional_srcs(),
- copts = tf_copts(),
- linkopts = select({
- clean_dep("//tensorflow:android"): [
- "-pie",
- ],
- clean_dep("//tensorflow:windows"): [],
- clean_dep("//tensorflow:windows_msvc"): [],
- "//conditions:default": [
- "-lpthread",
- "-lm",
- ],
- }) + _rpath_linkopts(src_to_test_name(src)),
- deps = deps + if_mkl(
- [
- "//third_party/mkl:intel_binary_blob",
- ],
- ),
- linkstatic = linkstatic,
- tags = tags,
- size = size,
- args = args,
- nocopts = "-fno-exceptions",
- )
-
-def tf_cc_tests_gpu(
- srcs,
- deps,
- name = "",
- linkstatic = 0,
- tags = [],
- size = "medium",
- args = None):
- tf_cc_tests(srcs, deps, linkstatic, tags = tags, size = size, args = args)
-
-def tf_cuda_cc_tests(
- srcs,
- deps,
- name = "",
- tags = [],
- size = "medium",
- linkstatic = 0,
- args = None,
- linkopts = []):
- for src in srcs:
- tf_cuda_cc_test(
- name = src_to_test_name(src),
- srcs = [src],
- deps = deps,
- tags = tags,
- size = size,
- linkstatic = linkstatic,
- args = args,
- linkopts = linkopts,
- )
-
-def tf_java_test(
- name,
- srcs = [],
- deps = [],
- *args,
- **kwargs):
- native.java_test(
- name = name,
- srcs = srcs,
- deps = deps + tf_binary_additional_srcs(),
- *args,
- **kwargs
- )
+def tf_cc_tests(srcs,
+ deps,
+ name="",
+ linkstatic=0,
+ tags=[],
+ size="medium",
+ args=None,
+ linkopts=[],
+ nocopts=None):
+ for src in srcs:
+ tf_cc_test(
+ name=src_to_test_name(src),
+ srcs=[src],
+ deps=deps,
+ linkstatic=linkstatic,
+ tags=tags,
+ size=size,
+ args=args,
+ linkopts=linkopts,
+ nocopts=nocopts)
+
+def tf_cc_test_mkl(srcs,
+ deps,
+ name="",
+ linkstatic=0,
+ tags=[],
+ size="medium",
+ args=None):
+ for src in srcs:
+ native.cc_test(
+ name=src_to_test_name(src),
+ srcs=if_mkl([src]) + tf_binary_additional_srcs(),
+ copts=tf_copts(),
+ linkopts=select({
+ clean_dep("//tensorflow:android"): [
+ "-pie",
+ ],
+ clean_dep("//tensorflow:windows"): [],
+ clean_dep("//tensorflow:windows_msvc"): [],
+ "//conditions:default": [
+ "-lpthread",
+ "-lm"
+ ],
+ }) + _rpath_linkopts(src_to_test_name(src)),
+ deps=deps + if_mkl(
+ [
+ "//third_party/mkl:intel_binary_blob",
+ ],
+ ),
+ linkstatic=linkstatic,
+ tags=tags,
+ size=size,
+ args=args,
+ nocopts="-fno-exceptions")
+
+
+def tf_cc_tests_gpu(srcs,
+ deps,
+ name="",
+ linkstatic=0,
+ tags=[],
+ size="medium",
+ args=None):
+  tf_cc_tests(srcs, deps, linkstatic=linkstatic, tags=tags, size=size, args=args)
+
+def tf_cuda_cc_tests(srcs,
+ deps,
+ name="",
+ tags=[],
+ size="medium",
+ linkstatic=0,
+ args=None,
+ linkopts=[]):
+ for src in srcs:
+ tf_cuda_cc_test(
+ name=src_to_test_name(src),
+ srcs=[src],
+ deps=deps,
+ tags=tags,
+ size=size,
+ linkstatic=linkstatic,
+ args=args,
+ linkopts=linkopts)
+
+def tf_java_test(name,
+ srcs=[],
+ deps=[],
+ *args,
+ **kwargs):
+ native.java_test(
+ name=name,
+ srcs=srcs,
+ deps=deps + tf_binary_additional_srcs(),
+ *args,
+ **kwargs)
register_extension_info(
extension_name = "tf_java_test",
@@ -944,214 +899,197 @@ register_extension_info(
)
def _cuda_copts():
- """Gets the appropriate set of copts for (maybe) CUDA compilation.
-
- If we're doing CUDA compilation, returns copts for our particular CUDA
- compiler. If we're not doing CUDA compilation, returns an empty list.
-
- """
- return cuda_default_copts() + select({
- "//conditions:default": [],
- "@local_config_cuda//cuda:using_nvcc": ([
- "-nvcc_options=relaxed-constexpr",
- "-nvcc_options=ftz=true",
- ]),
- "@local_config_cuda//cuda:using_clang": ([
- "-fcuda-flush-denormals-to-zero",
- ]),
- })
+ """Gets the appropriate set of copts for (maybe) CUDA compilation.
+
+ If we're doing CUDA compilation, returns copts for our particular CUDA
+ compiler. If we're not doing CUDA compilation, returns an empty list.
+
+ """
+ return cuda_default_copts() + select({
+ "//conditions:default": [],
+ "@local_config_cuda//cuda:using_nvcc": ([
+ "-nvcc_options=relaxed-constexpr",
+ "-nvcc_options=ftz=true",
+ ]),
+ "@local_config_cuda//cuda:using_clang": ([
+ "-fcuda-flush-denormals-to-zero",
+ ]),
+ })
# Build defs for TensorFlow kernels
# When this target is built using --config=cuda, a cc_library is built
# that passes -DGOOGLE_CUDA=1 and '-x cuda', linking in additional
# libraries needed by GPU kernels.
-def tf_gpu_kernel_library(
- srcs,
- copts = [],
- cuda_copts = [],
- deps = [],
- hdrs = [],
- **kwargs):
- copts = copts + _cuda_copts() + if_cuda(cuda_copts) + tf_copts()
- kwargs["features"] = kwargs.get("features", []) + ["-use_header_modules"]
-
- native.cc_library(
- srcs = srcs,
- hdrs = hdrs,
- copts = copts,
- deps = deps + if_cuda([
- clean_dep("//tensorflow/core:cuda"),
- clean_dep("//tensorflow/core:gpu_lib"),
- ]),
- alwayslink = 1,
- **kwargs
- )
+def tf_gpu_kernel_library(srcs,
+ copts=[],
+ cuda_copts=[],
+ deps=[],
+ hdrs=[],
+ **kwargs):
+ copts = copts + _cuda_copts() + if_cuda(cuda_copts) + tf_copts()
+ kwargs["features"] = kwargs.get("features", []) + ["-use_header_modules"]
+
+ native.cc_library(
+ srcs=srcs,
+ hdrs=hdrs,
+ copts=copts,
+ deps=deps + if_cuda([
+ clean_dep("//tensorflow/core:cuda"),
+ clean_dep("//tensorflow/core:gpu_lib"),
+ ]),
+ alwayslink=1,
+ **kwargs)
register_extension_info(
extension_name = "tf_gpu_kernel_library",
label_regex_for_dep = "{extension_name}",
)
-def tf_cuda_library(deps = None, cuda_deps = None, copts = tf_copts(), **kwargs):
- """Generate a cc_library with a conditional set of CUDA dependencies.
-
- When the library is built with --config=cuda:
-
- - Both deps and cuda_deps are used as dependencies.
- - The cuda runtime is added as a dependency (if necessary).
- - The library additionally passes -DGOOGLE_CUDA=1 to the list of copts.
- - In addition, when the library is also built with TensorRT enabled, it
- additionally passes -DGOOGLE_TENSORRT=1 to the list of copts.
-
- Args:
- - cuda_deps: BUILD dependencies which will be linked if and only if:
- '--config=cuda' is passed to the bazel command line.
- - deps: dependencies which will always be linked.
- - copts: copts always passed to the cc_library.
- - kwargs: Any other argument to cc_library.
- """
- if not deps:
- deps = []
- if not cuda_deps:
- cuda_deps = []
-
- kwargs["features"] = kwargs.get("features", []) + ["-use_header_modules"]
- native.cc_library(
- deps = deps + if_cuda(cuda_deps + [
- clean_dep("//tensorflow/core:cuda"),
- "@local_config_cuda//cuda:cuda_headers",
- ]),
- copts = (copts + if_cuda(["-DGOOGLE_CUDA=1"]) + if_mkl(["-DINTEL_MKL=1"]) +
- if_tensorrt(["-DGOOGLE_TENSORRT=1"])),
- **kwargs
- )
+def tf_cuda_library(deps=None, cuda_deps=None, copts=tf_copts(), **kwargs):
+ """Generate a cc_library with a conditional set of CUDA dependencies.
+
+ When the library is built with --config=cuda:
+
+ - Both deps and cuda_deps are used as dependencies.
+ - The cuda runtime is added as a dependency (if necessary).
+ - The library additionally passes -DGOOGLE_CUDA=1 to the list of copts.
+ - In addition, when the library is also built with TensorRT enabled, it
+ additionally passes -DGOOGLE_TENSORRT=1 to the list of copts.
+
+ Args:
+ - cuda_deps: BUILD dependencies which will be linked if and only if:
+ '--config=cuda' is passed to the bazel command line.
+ - deps: dependencies which will always be linked.
+ - copts: copts always passed to the cc_library.
+ - kwargs: Any other argument to cc_library.
+ """
+ if not deps:
+ deps = []
+ if not cuda_deps:
+ cuda_deps = []
+
+ kwargs["features"] = kwargs.get("features", []) + ["-use_header_modules"]
+ native.cc_library(
+ deps=deps + if_cuda(cuda_deps + [
+ clean_dep("//tensorflow/core:cuda"),
+ "@local_config_cuda//cuda:cuda_headers"
+ ]),
+ copts=(copts + if_cuda(["-DGOOGLE_CUDA=1"]) + if_mkl(["-DINTEL_MKL=1"]) +
+ if_tensorrt(["-DGOOGLE_TENSORRT=1"])),
+ **kwargs)
register_extension_info(
extension_name = "tf_cuda_library",
label_regex_for_dep = "{extension_name}",
)
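A short sketch of the conditional-dependency pattern documented above; the target name and cuda_deps label are hypothetical:

    tf_cuda_library(
        name = "gpu_helper",
        srcs = ["gpu_helper.cc"],
        hdrs = ["gpu_helper.h"],
        deps = ["//tensorflow/core:framework"],
        # Assumed label; linked only under --config=cuda.
        cuda_deps = ["//tensorflow/core:gpu_headers_lib"],
    )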
-def tf_kernel_library(
- name,
- prefix = None,
- srcs = None,
- gpu_srcs = None,
- hdrs = None,
- deps = None,
- alwayslink = 1,
- copts = None,
- is_external = False,
- **kwargs):
- """A rule to build a TensorFlow OpKernel.
-
- May either specify srcs/hdrs or prefix. Similar to tf_cuda_library,
- but with alwayslink=1 by default. If prefix is specified:
- * prefix*.cc (except *.cu.cc) is added to srcs
- * prefix*.h (except *.cu.h) is added to hdrs
- * prefix*.cu.cc and prefix*.h (including *.cu.h) are added to gpu_srcs.
- With the exception that test files are excluded.
- For example, with prefix = "cast_op",
- * srcs = ["cast_op.cc"]
- * hdrs = ["cast_op.h"]
- * gpu_srcs = ["cast_op_gpu.cu.cc", "cast_op.h"]
- * "cast_op_test.cc" is excluded
- With prefix = "cwise_op"
- * srcs = ["cwise_op_abs.cc", ..., "cwise_op_tanh.cc"],
- * hdrs = ["cwise_ops.h", "cwise_ops_common.h"],
- * gpu_srcs = ["cwise_op_gpu_abs.cu.cc", ..., "cwise_op_gpu_tanh.cu.cc",
- "cwise_ops.h", "cwise_ops_common.h",
- "cwise_ops_gpu_common.cu.h"]
- * "cwise_ops_test.cc" is excluded
- """
- if not srcs:
- srcs = []
- if not hdrs:
- hdrs = []
- if not deps:
- deps = []
- if not copts:
- copts = []
- copts = copts + tf_copts(is_external = is_external)
- if prefix:
- if native.glob([prefix + "*.cu.cc"], exclude = ["*test*"]):
- if not gpu_srcs:
- gpu_srcs = []
- gpu_srcs = gpu_srcs + native.glob(
- [prefix + "*.cu.cc", prefix + "*.h"],
- exclude = [prefix + "*test*"],
- )
- srcs = srcs + native.glob(
- [prefix + "*.cc"],
- exclude = [prefix + "*test*", prefix + "*.cu.cc"],
- )
- hdrs = hdrs + native.glob(
- [prefix + "*.h"],
- exclude = [prefix + "*test*", prefix + "*.cu.h"],
- )
-
- cuda_deps = [clean_dep("//tensorflow/core:gpu_lib")]
- if gpu_srcs:
- for gpu_src in gpu_srcs:
- if gpu_src.endswith(".cc") and not gpu_src.endswith(".cu.cc"):
- fail("{} not allowed in gpu_srcs. .cc sources must end with .cu.cc".format(gpu_src))
- tf_gpu_kernel_library(
- name = name + "_gpu",
- srcs = gpu_srcs,
- deps = deps,
- **kwargs
- )
- cuda_deps.extend([":" + name + "_gpu"])
- tf_cuda_library(
- name = name,
- srcs = srcs,
- hdrs = hdrs,
- copts = copts,
- cuda_deps = cuda_deps,
- linkstatic = 1, # Needed since alwayslink is broken in bazel b/27630669
- alwayslink = alwayslink,
- deps = deps,
- **kwargs
- )
+def tf_kernel_library(name,
+ prefix=None,
+ srcs=None,
+ gpu_srcs=None,
+ hdrs=None,
+ deps=None,
+ alwayslink=1,
+ copts=None,
+ is_external=False,
+ **kwargs):
+ """A rule to build a TensorFlow OpKernel.
+
+ May either specify srcs/hdrs or prefix. Similar to tf_cuda_library,
+ but with alwayslink=1 by default. If prefix is specified:
+ * prefix*.cc (except *.cu.cc) is added to srcs
+ * prefix*.h (except *.cu.h) is added to hdrs
+ * prefix*.cu.cc and prefix*.h (including *.cu.h) are added to gpu_srcs.
+ With the exception that test files are excluded.
+ For example, with prefix = "cast_op",
+ * srcs = ["cast_op.cc"]
+ * hdrs = ["cast_op.h"]
+ * gpu_srcs = ["cast_op_gpu.cu.cc", "cast_op.h"]
+ * "cast_op_test.cc" is excluded
+ With prefix = "cwise_op"
+ * srcs = ["cwise_op_abs.cc", ..., "cwise_op_tanh.cc"],
+ * hdrs = ["cwise_ops.h", "cwise_ops_common.h"],
+ * gpu_srcs = ["cwise_op_gpu_abs.cu.cc", ..., "cwise_op_gpu_tanh.cu.cc",
+ "cwise_ops.h", "cwise_ops_common.h",
+ "cwise_ops_gpu_common.cu.h"]
+ * "cwise_ops_test.cc" is excluded
+ """
+ if not srcs:
+ srcs = []
+ if not hdrs:
+ hdrs = []
+ if not deps:
+ deps = []
+ if not copts:
+ copts = []
+ copts = copts + tf_copts(is_external=is_external)
+ if prefix:
+ if native.glob([prefix + "*.cu.cc"], exclude=["*test*"]):
+ if not gpu_srcs:
+ gpu_srcs = []
+ gpu_srcs = gpu_srcs + native.glob(
+ [prefix + "*.cu.cc", prefix + "*.h"], exclude=[prefix + "*test*"])
+ srcs = srcs + native.glob(
+ [prefix + "*.cc"], exclude=[prefix + "*test*", prefix + "*.cu.cc"])
+ hdrs = hdrs + native.glob(
+ [prefix + "*.h"], exclude=[prefix + "*test*", prefix + "*.cu.h"])
+
+ cuda_deps = [clean_dep("//tensorflow/core:gpu_lib")]
+ if gpu_srcs:
+ for gpu_src in gpu_srcs:
+ if gpu_src.endswith(".cc") and not gpu_src.endswith(".cu.cc"):
+ fail("{} not allowed in gpu_srcs. .cc sources must end with .cu.cc".
+ format(gpu_src))
+ tf_gpu_kernel_library(
+ name=name + "_gpu", srcs=gpu_srcs, deps=deps, **kwargs)
+ cuda_deps.extend([":" + name + "_gpu"])
+ tf_cuda_library(
+ name=name,
+ srcs=srcs,
+ hdrs=hdrs,
+ copts=copts,
+ cuda_deps=cuda_deps,
+ linkstatic=1, # Needed since alwayslink is broken in bazel b/27630669
+ alwayslink=alwayslink,
+ deps=deps,
+ **kwargs)
register_extension_info(
extension_name = "tf_kernel_library",
label_regex_for_dep = "{extension_name}(_gpu)?",
)
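# Usage sketch of the prefix expansion described above (file names
# hypothetical): given cast_op.cc, cast_op.h, cast_op_gpu.cu.cc and
# cast_op_test.cc on disk,
#
#     tf_kernel_library(
#         name = "cast_op",
#         prefix = "cast_op",
#         deps = ["//tensorflow/core:framework"],
#     )
#
# globs srcs = ["cast_op.cc"], hdrs = ["cast_op.h"] and
# gpu_srcs = ["cast_op_gpu.cu.cc", "cast_op.h"], excludes the test file, and
# produces both :cast_op and :cast_op_gpu.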
-def tf_mkl_kernel_library(
- name,
- prefix = None,
- srcs = None,
- hdrs = None,
- deps = None,
- alwayslink = 1,
- copts = tf_copts(),
- nocopts = "-fno-exceptions"):
- """A rule to build MKL-based TensorFlow kernel libraries."""
-
- if not bool(srcs):
- srcs = []
- if not bool(hdrs):
- hdrs = []
-
- if prefix:
- srcs = srcs + native.glob(
- [prefix + "*.cc"],
- )
- hdrs = hdrs + native.glob(
- [prefix + "*.h"],
- )
-
- native.cc_library(
- name = name,
- srcs = if_mkl(srcs),
- hdrs = hdrs,
- deps = deps,
- alwayslink = alwayslink,
- copts = copts,
- nocopts = nocopts,
- )
+def tf_mkl_kernel_library(name,
+ prefix=None,
+ srcs=None,
+ hdrs=None,
+ deps=None,
+ alwayslink=1,
+ copts=tf_copts(),
+ nocopts="-fno-exceptions"):
+ """A rule to build MKL-based TensorFlow kernel libraries."""
+
+ if not bool(srcs):
+ srcs = []
+ if not bool(hdrs):
+ hdrs = []
+
+ if prefix:
+ srcs = srcs + native.glob(
+ [prefix + "*.cc"])
+ hdrs = hdrs + native.glob(
+ [prefix + "*.h"])
+
+ native.cc_library(
+ name=name,
+ srcs=if_mkl(srcs),
+ hdrs=hdrs,
+ deps=deps,
+ alwayslink=alwayslink,
+ copts=copts,
+ nocopts=nocopts
+ )
register_extension_info(
extension_name = "tf_mkl_kernel_library",
@@ -1160,42 +1098,35 @@ register_extension_info(
# Bazel rules for building swig files.
def _py_wrap_cc_impl(ctx):
- srcs = ctx.files.srcs
- if len(srcs) != 1:
- fail("Exactly one SWIG source file label must be specified.", "srcs")
- module_name = ctx.attr.module_name
- src = ctx.files.srcs[0]
- inputs = depset([src])
- inputs += ctx.files.swig_includes
- for dep in ctx.attr.deps:
- inputs += dep.cc.transitive_headers
- inputs += ctx.files._swiglib
- inputs += ctx.files.toolchain_deps
- swig_include_dirs = depset(_get_repository_roots(ctx, inputs))
- swig_include_dirs += sorted([f.dirname for f in ctx.files._swiglib])
- args = [
- "-c++",
- "-python",
- "-module",
- module_name,
- "-o",
- ctx.outputs.cc_out.path,
- "-outdir",
- ctx.outputs.py_out.dirname,
- ]
- args += ["-l" + f.path for f in ctx.files.swig_includes]
- args += ["-I" + i for i in swig_include_dirs]
- args += [src.path]
- outputs = [ctx.outputs.cc_out, ctx.outputs.py_out]
- ctx.action(
- executable = ctx.executable._swig,
- arguments = args,
- inputs = list(inputs),
- outputs = outputs,
- mnemonic = "PythonSwig",
- progress_message = "SWIGing " + src.path,
- )
- return struct(files = depset(outputs))
+ srcs = ctx.files.srcs
+ if len(srcs) != 1:
+ fail("Exactly one SWIG source file label must be specified.", "srcs")
+ module_name = ctx.attr.module_name
+ src = ctx.files.srcs[0]
+ inputs = depset([src])
+ inputs += ctx.files.swig_includes
+ for dep in ctx.attr.deps:
+ inputs += dep.cc.transitive_headers
+ inputs += ctx.files._swiglib
+ inputs += ctx.files.toolchain_deps
+ swig_include_dirs = depset(_get_repository_roots(ctx, inputs))
+ swig_include_dirs += sorted([f.dirname for f in ctx.files._swiglib])
+ args = [
+ "-c++", "-python", "-module", module_name, "-o", ctx.outputs.cc_out.path,
+ "-outdir", ctx.outputs.py_out.dirname
+ ]
+ args += ["-l" + f.path for f in ctx.files.swig_includes]
+ args += ["-I" + i for i in swig_include_dirs]
+ args += [src.path]
+ outputs = [ctx.outputs.cc_out, ctx.outputs.py_out]
+ ctx.action(
+ executable=ctx.executable._swig,
+ arguments=args,
+ inputs=list(inputs),
+ outputs=outputs,
+ mnemonic="PythonSwig",
+ progress_message="SWIGing " + src.path)
+ return struct(files=depset(outputs))
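# For reference, the action above amounts to a SWIG invocation of roughly this
# shape (paths abbreviated and hypothetical):
#
#     swig -c++ -python -module pywrap_example \
#          -o bazel-out/.../pywrap_example.cc -outdir bazel-out/.../ \
#          -l<swig_includes>.i -I<repository roots> example.i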
_py_wrap_cc = rule(
attrs = {
@@ -1234,40 +1165,40 @@ _py_wrap_cc = rule(
)
def _get_repository_roots(ctx, files):
- """Returns abnormal root directories under which files reside.
-
- When running a ctx.action, source files within the main repository are all
- relative to the current directory; however, files that are generated or exist
- in remote repositories will have their root directory be a subdirectory,
- e.g. bazel-out/local-fastbuild/genfiles/external/jpeg_archive. This function
- returns the set of these devious directories, ranked and sorted by popularity
- in order to hopefully minimize the number of I/O system calls within the
- compiler, because includes have quadratic complexity.
- """
- result = {}
- for f in files:
- root = f.root.path
- if root:
- if root not in result:
- result[root] = 0
- result[root] -= 1
- work = f.owner.workspace_root
- if work:
- if root:
- root += "/"
- root += work
- if root:
- if root not in result:
- result[root] = 0
- result[root] -= 1
- return [k for v, k in sorted([(v, k) for k, v in result.items()])]
+ """Returns abnormal root directories under which files reside.
+
+ When running a ctx.action, source files within the main repository are all
+ relative to the current directory; however, files that are generated or exist
+ in remote repositories will have their root directory be a subdirectory,
+ e.g. bazel-out/local-fastbuild/genfiles/external/jpeg_archive. This function
+ returns the set of these devious directories, ranked and sorted by popularity
+ in order to hopefully minimize the number of I/O system calls within the
+ compiler, because includes have quadratic complexity.
+ """
+ result = {}
+ for f in files:
+ root = f.root.path
+ if root:
+ if root not in result:
+ result[root] = 0
+ result[root] -= 1
+ work = f.owner.workspace_root
+ if work:
+ if root:
+ root += "/"
+ root += work
+ if root:
+ if root not in result:
+ result[root] = 0
+ result[root] -= 1
+ return [k for v, k in sorted([(v, k) for k, v in result.items()])]
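# Worked sketch of the ranking above: counts are stored negated so that an
# ascending sort yields the most-populated roots first. E.g. three files under
# bazel-out/local-fastbuild/genfiles and one under external/jpeg_archive give
# result = {genfiles_root: -3, jpeg_root: -1}, so the function returns
# [genfiles_root, jpeg_root].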
# Bazel rule for collecting the header files that a target depends on.
def _transitive_hdrs_impl(ctx):
- outputs = depset()
- for dep in ctx.attr.deps:
- outputs += dep.cc.transitive_headers
- return struct(files = outputs)
+ outputs = depset()
+ for dep in ctx.attr.deps:
+ outputs += dep.cc.transitive_headers
+ return struct(files=outputs)
_transitive_hdrs = rule(
attrs = {
@@ -1279,51 +1210,52 @@ _transitive_hdrs = rule(
implementation = _transitive_hdrs_impl,
)
-def transitive_hdrs(name, deps = [], **kwargs):
- _transitive_hdrs(name = name + "_gather", deps = deps)
- native.filegroup(name = name, srcs = [":" + name + "_gather"])
+def transitive_hdrs(name, deps=[], **kwargs):
+ _transitive_hdrs(name=name + "_gather", deps=deps)
+ native.filegroup(name=name, srcs=[":" + name + "_gather"])
# Create a header only library that includes all the headers exported by
# the libraries in deps.
-def cc_header_only_library(name, deps = [], includes = [], **kwargs):
- _transitive_hdrs(name = name + "_gather", deps = deps)
- native.cc_library(
- name = name,
- hdrs = [":" + name + "_gather"],
- includes = includes,
- **kwargs
- )
+def cc_header_only_library(name, deps=[], includes=[], **kwargs):
+ _transitive_hdrs(name=name + "_gather", deps=deps)
+ native.cc_library(name=name,
+ hdrs=[":" + name + "_gather"],
+ includes=includes,
+ **kwargs)
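# Usage sketch (target names hypothetical): a header-only view of a library,
# letting dependents compile against its headers without linking the
# implementation:
#
#     cc_header_only_library(
#         name = "framework_headers",
#         deps = ["//tensorflow/core:framework"],
#     )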
def tf_custom_op_library_additional_deps():
- return [
- clean_dep("//third_party/eigen3"),
- clean_dep("//tensorflow/core:framework_headers_lib"),
- ] + if_windows(["//tensorflow/python:pywrap_tensorflow_import_lib"])
+ return [
+ "@protobuf_archive//:protobuf_headers",
+ clean_dep("//third_party/eigen3"),
+ clean_dep("//tensorflow/core:framework_headers_lib"),
+ ] + if_windows(["//tensorflow/python:pywrap_tensorflow_import_lib"])
# A list of targets containing the implementation of
# tf_custom_op_library_additional_deps. It's used to generate a DEF file for
# exporting symbols from _pywrap_tensorflow.dll on Windows.
def tf_custom_op_library_additional_deps_impl():
- return [
- # for //third_party/eigen3
- clean_dep("//third_party/eigen3"),
- # for //tensorflow/core:framework_headers_lib
- clean_dep("//tensorflow/core:framework"),
- clean_dep("//tensorflow/core:reader_base"),
- ]
+ return [
+ "@protobuf_archive//:protobuf",
+ "@nsync//:nsync_cpp",
+ # for //third_party/eigen3
+ clean_dep("//third_party/eigen3"),
+ # for //tensorflow/core:framework_headers_lib
+ clean_dep("//tensorflow/core:framework"),
+ clean_dep("//tensorflow/core:reader_base"),
+ ]
# Traverse the dependency graph along the "deps" attribute of the
# target and return a struct with one field called 'tf_collected_deps'.
# tf_collected_deps will be the union of the deps of the current target
# and the tf_collected_deps of the dependencies of this target.
def _collect_deps_aspect_impl(target, ctx):
- alldeps = depset()
- if hasattr(ctx.rule.attr, "deps"):
- for dep in ctx.rule.attr.deps:
- alldeps = alldeps | depset([dep.label])
- if hasattr(dep, "tf_collected_deps"):
- alldeps = alldeps | dep.tf_collected_deps
- return struct(tf_collected_deps = alldeps)
+ alldeps = depset()
+ if hasattr(ctx.rule.attr, "deps"):
+ for dep in ctx.rule.attr.deps:
+ alldeps = alldeps | depset([dep.label])
+ if hasattr(dep, "tf_collected_deps"):
+ alldeps = alldeps | dep.tf_collected_deps
+ return struct(tf_collected_deps=alldeps)
collect_deps_aspect = aspect(
attr_aspects = ["deps"],
@@ -1331,26 +1263,24 @@ collect_deps_aspect = aspect(
)
def _dep_label(dep):
- label = dep.label
- return label.package + ":" + label.name
+ label = dep.label
+ return label.package + ":" + label.name
# This rule checks that the transitive dependencies of targets listed
# in the 'deps' attribute don't depend on the targets listed in
# the 'disallowed_deps' attribute.
def _check_deps_impl(ctx):
- disallowed_deps = ctx.attr.disallowed_deps
- for input_dep in ctx.attr.deps:
- if not hasattr(input_dep, "tf_collected_deps"):
- continue
- for dep in input_dep.tf_collected_deps:
- for disallowed_dep in disallowed_deps:
- if dep == disallowed_dep.label:
- fail(
- _dep_label(input_dep) + " cannot depend on " + _dep_label(
- disallowed_dep,
- ),
- )
- return struct()
+ disallowed_deps = ctx.attr.disallowed_deps
+ for input_dep in ctx.attr.deps:
+ if not hasattr(input_dep, "tf_collected_deps"):
+ continue
+ for dep in input_dep.tf_collected_deps:
+ for disallowed_dep in disallowed_deps:
+ if dep == disallowed_dep.label:
+ fail(
+ _dep_label(input_dep) + " cannot depend on " + _dep_label(
+ disallowed_dep))
+ return struct()
check_deps = rule(
_check_deps_impl,
@@ -1369,71 +1299,66 @@ check_deps = rule(
# Helper to build a dynamic library (.so) from the sources containing
# implementations of custom ops and kernels.
-def tf_custom_op_library(name, srcs = [], gpu_srcs = [], deps = [], linkopts = []):
- cuda_deps = [
- clean_dep("//tensorflow/core:stream_executor_headers_lib"),
- "@local_config_cuda//cuda:cuda_headers",
- "@local_config_cuda//cuda:cudart_static",
- ]
- deps = deps + tf_custom_op_library_additional_deps()
- if gpu_srcs:
- basename = name.split(".")[0]
- native.cc_library(
- name = basename + "_gpu",
- srcs = gpu_srcs,
- copts = _cuda_copts() + if_tensorrt(["-DGOOGLE_TENSORRT=1"]),
- features = if_cuda(["-use_header_modules"]),
- deps = deps + if_cuda(cuda_deps),
- )
- cuda_deps.extend([":" + basename + "_gpu"])
-
- check_deps(
- name = name + "_check_deps",
- deps = deps + if_cuda(cuda_deps),
- disallowed_deps = [
- clean_dep("//tensorflow/core:framework"),
- clean_dep("//tensorflow/core:lib"),
- ],
- )
- tf_cc_shared_object(
- name = name,
- srcs = srcs,
- deps = deps + if_cuda(cuda_deps),
- data = [name + "_check_deps"],
- copts = tf_copts(is_external = True),
- features = ["windows_export_all_symbols"],
- linkopts = linkopts + select({
- "//conditions:default": [
- "-lm",
- ],
- clean_dep("//tensorflow:windows"): [],
- clean_dep("//tensorflow:windows_msvc"): [],
- clean_dep("//tensorflow:darwin"): [],
- }),
- )
+def tf_custom_op_library(name, srcs=[], gpu_srcs=[], deps=[], linkopts=[]):
+ cuda_deps = [
+ clean_dep("//tensorflow/core:stream_executor_headers_lib"),
+ "@local_config_cuda//cuda:cuda_headers",
+ "@local_config_cuda//cuda:cudart_static",
+ ]
+ deps = deps + tf_custom_op_library_additional_deps()
+ if gpu_srcs:
+ basename = name.split(".")[0]
+ native.cc_library(
+ name=basename + "_gpu",
+ srcs=gpu_srcs,
+ copts=_cuda_copts() + if_tensorrt(["-DGOOGLE_TENSORRT=1"]),
+ features = if_cuda(["-use_header_modules"]),
+ deps=deps + if_cuda(cuda_deps))
+ cuda_deps.extend([":" + basename + "_gpu"])
+
+ check_deps(
+ name=name + "_check_deps",
+ deps=deps + if_cuda(cuda_deps),
+ disallowed_deps=[
+ clean_dep("//tensorflow/core:framework"),
+ clean_dep("//tensorflow/core:lib")
+ ])
+ tf_cc_shared_object(
+ name=name,
+ srcs=srcs,
+ deps=deps + if_cuda(cuda_deps),
+ data=[name + "_check_deps"],
+ copts=tf_copts(is_external=True),
+ features = ["windows_export_all_symbols"],
+ linkopts=linkopts + select({
+ "//conditions:default": [
+ "-lm",
+ ],
+ clean_dep("//tensorflow:windows"): [],
+ clean_dep("//tensorflow:windows_msvc"): [],
+ clean_dep("//tensorflow:darwin"): [],
+ }),)
register_extension_info(
extension_name = "tf_custom_op_library",
label_regex_for_dep = "{extension_name}",
)
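# Usage sketch for an out-of-tree op (file and target names hypothetical).
# With gpu_srcs present, a separate _gpu cc_library is built and linked in
# under --config=cuda, and the _check_deps target enforces that the .so stays
# decoupled from //tensorflow/core:framework and :lib:
#
#     tf_custom_op_library(
#         name = "zero_out.so",
#         srcs = ["zero_out_op.cc"],
#         gpu_srcs = ["zero_out_kernel.cu.cc"],
#     )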
-def tf_custom_op_py_library(
- name,
- srcs = [],
- dso = [],
- kernels = [],
- srcs_version = "PY2AND3",
- visibility = None,
- deps = []):
- kernels = kernels # unused argument
- native.py_library(
- name = name,
- data = dso,
- srcs = srcs,
- srcs_version = srcs_version,
- visibility = visibility,
- deps = deps,
- )
+def tf_custom_op_py_library(name,
+ srcs=[],
+ dso=[],
+ kernels=[],
+ srcs_version="PY2AND3",
+ visibility=None,
+ deps=[]):
+ kernels = kernels # unused argument
+ native.py_library(
+ name=name,
+ data=dso,
+ srcs=srcs,
+ srcs_version=srcs_version,
+ visibility=visibility,
+ deps=deps,)
register_extension_info(
extension_name = "tf_custom_op_py_library",
@@ -1447,130 +1372,120 @@ register_extension_info(
# This function attempts to append init_module_name to the list of
# exported functions in the version script.
def _append_init_to_versionscript_impl(ctx):
- mod_name = ctx.attr.module_name
- if ctx.attr.is_version_script:
- ctx.actions.expand_template(
- template = ctx.file.template_file,
- output = ctx.outputs.versionscript,
- substitutions = {
- "global:": "global:\n init_%s;\n PyInit_*;" % (mod_name),
- },
- is_executable = False,
- )
- else:
- ctx.actions.expand_template(
- template = ctx.file.template_file,
- output = ctx.outputs.versionscript,
- substitutions = {
- "*tensorflow*": "*tensorflow*\ninit_%s\nPyInit_*\n" % (mod_name),
- },
- is_executable = False,
- )
-
-_append_init_to_versionscript = rule(
- implementation = _append_init_to_versionscript_impl,
- attrs = {
- "module_name": attr.string(mandatory = True),
- "template_file": attr.label(allow_files = True, single_file = True, mandatory = True),
- "is_version_script": attr.bool(
- default = True,
- doc = "whether target is a ld version script or exported symbol list",
- mandatory = False,
- ),
- },
- outputs = {"versionscript": "%{name}.lds"},
-)
-
-def tf_py_wrap_cc(
- name,
- srcs,
- swig_includes = [],
- deps = [],
- copts = [],
- **kwargs):
- module_name = name.split("/")[-1]
-
- # Convert a rule name such as foo/bar/baz to foo/bar/_baz.so
- # and use that as the name for the rule producing the .so file.
- cc_library_name = "/".join(name.split("/")[:-1] + ["_" + module_name + ".so"])
- cc_library_pyd_name = "/".join(
- name.split("/")[:-1] + ["_" + module_name + ".pyd"],
- )
- extra_deps = []
- _py_wrap_cc(
- name = name + "_py_wrap",
- srcs = srcs,
- swig_includes = swig_includes,
- deps = deps + extra_deps,
- toolchain_deps = ["//tools/defaults:crosstool"],
- module_name = module_name,
- py_module_name = name,
- )
- vscriptname = name + "_versionscript"
- _append_init_to_versionscript(
- name = vscriptname,
- module_name = module_name,
- is_version_script = select({
- "@local_config_cuda//cuda:darwin": False,
- "//conditions:default": True,
- }),
- template_file = select({
- "@local_config_cuda//cuda:darwin": clean_dep("//tensorflow:tf_exported_symbols.lds"),
- "//conditions:default": clean_dep("//tensorflow:tf_version_script.lds"),
- }),
+ mod_name = ctx.attr.module_name
+ if ctx.attr.is_version_script:
+ ctx.actions.expand_template(
+ template=ctx.file.template_file,
+ output=ctx.outputs.versionscript,
+ substitutions={
+ "global:":"global:\n init_%s;\n PyInit_*;"%(mod_name),
+ },
+ is_executable=False,
)
- extra_linkopts = select({
- "@local_config_cuda//cuda:darwin": [
- "-Wl,-exported_symbols_list",
- "$(location %s.lds)" % vscriptname,
- ],
- clean_dep("//tensorflow:windows"): [],
- clean_dep("//tensorflow:windows_msvc"): [],
- "//conditions:default": [
- "-Wl,--version-script",
- "$(location %s.lds)" % vscriptname,
- ],
- })
- extra_deps += select({
- "@local_config_cuda//cuda:darwin": [
- "%s.lds" % vscriptname,
- ],
- clean_dep("//tensorflow:windows"): [],
- clean_dep("//tensorflow:windows_msvc"): [],
- "//conditions:default": [
- "%s.lds" % vscriptname,
- ],
- })
-
- tf_cc_shared_object(
- name = cc_library_name,
- srcs = [module_name + ".cc"],
- copts = copts + if_not_windows([
- "-Wno-self-assign",
- "-Wno-sign-compare",
- "-Wno-write-strings",
- ]),
- linkopts = extra_linkopts,
- linkstatic = 1,
- deps = deps + extra_deps,
- **kwargs
- )
- native.genrule(
- name = "gen_" + cc_library_pyd_name,
- srcs = [":" + cc_library_name],
- outs = [cc_library_pyd_name],
- cmd = "cp $< $@",
- )
- native.py_library(
- name = name,
- srcs = [":" + name + ".py"],
- srcs_version = "PY2AND3",
- data = select({
- clean_dep("//tensorflow:windows"): [":" + cc_library_pyd_name],
- "//conditions:default": [":" + cc_library_name],
- }),
+ else:
+ ctx.actions.expand_template(
+ template=ctx.file.template_file,
+ output=ctx.outputs.versionscript,
+ substitutions={
+ "*tensorflow*":"*tensorflow*\ninit_%s\nPyInit_*\n"%(mod_name),
+ },
+ is_executable=False,
)
+
+_append_init_to_versionscript= rule(
+ implementation=_append_init_to_versionscript_impl,
+ attrs={
+ "module_name":attr.string(mandatory=True),
+ "template_file":attr.label(allow_files=True,single_file=True,mandatory=True),
+ "is_version_script":attr.bool(default=True,
+ doc='whether target is a ld version script or exported symbol list',
+ mandatory=False),
+ },
+ outputs={"versionscript":"%{name}.lds"},
+)
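# To make the substitution concrete: for module_name = "pywrap_example"
# (hypothetical) and is_version_script = True, the "global:" line of the ld
# version script template expands to (indentation abbreviated)
#
#     global:
#       init_pywrap_example;
#       PyInit_*;
#
# so both the Python 2 (init_*) and Python 3 (PyInit_*) module entry points
# remain exported.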
+
+def tf_py_wrap_cc(name,
+ srcs,
+ swig_includes=[],
+ deps=[],
+ copts=[],
+ **kwargs):
+ module_name = name.split("/")[-1]
+ # Convert a rule name such as foo/bar/baz to foo/bar/_baz.so
+ # and use that as the name for the rule producing the .so file.
+ cc_library_name = "/".join(name.split("/")[:-1] + ["_" + module_name + ".so"])
+ cc_library_pyd_name = "/".join(
+ name.split("/")[:-1] + ["_" + module_name + ".pyd"])
+ extra_deps = []
+ _py_wrap_cc(
+ name=name + "_py_wrap",
+ srcs=srcs,
+ swig_includes=swig_includes,
+ deps=deps + extra_deps,
+ toolchain_deps=["//tools/defaults:crosstool"],
+ module_name=module_name,
+ py_module_name=name)
+ vscriptname=name+"_versionscript"
+ _append_init_to_versionscript(
+ name=vscriptname,
+ module_name=module_name,
+ is_version_script=select({
+ "@local_config_cuda//cuda:darwin":False,
+ "//conditions:default":True,
+ }),
+ template_file=select({
+ "@local_config_cuda//cuda:darwin":clean_dep("//tensorflow:tf_exported_symbols.lds"),
+ "//conditions:default":clean_dep("//tensorflow:tf_version_script.lds")
+ })
+ )
+ extra_linkopts = select({
+ "@local_config_cuda//cuda:darwin": [
+ "-Wl,-exported_symbols_list",
+ "$(location %s.lds)"%vscriptname,
+ ],
+ clean_dep("//tensorflow:windows"): [],
+ clean_dep("//tensorflow:windows_msvc"): [],
+ "//conditions:default": [
+ "-Wl,--version-script",
+ "$(location %s.lds)"%vscriptname,
+ ]
+ })
+ extra_deps += select({
+ "@local_config_cuda//cuda:darwin": [
+ "%s.lds"%vscriptname,
+ ],
+ clean_dep("//tensorflow:windows"): [],
+ clean_dep("//tensorflow:windows_msvc"): [],
+ "//conditions:default": [
+ "%s.lds"%vscriptname,
+ ]
+ })
+
+ tf_cc_shared_object(
+ name=cc_library_name,
+ srcs=[module_name + ".cc"],
+ copts=copts + if_not_windows([
+ "-Wno-self-assign", "-Wno-sign-compare", "-Wno-write-strings"
+ ]),
+ linkopts=extra_linkopts,
+ linkstatic=1,
+ deps=deps + extra_deps,
+ **kwargs)
+ native.genrule(
+ name="gen_" + cc_library_pyd_name,
+ srcs=[":" + cc_library_name],
+ outs=[cc_library_pyd_name],
+ cmd="cp $< $@",)
+ native.py_library(
+ name=name,
+ srcs=[":" + name + ".py"],
+ srcs_version="PY2AND3",
+ data=select({
+ clean_dep("//tensorflow:windows"): [":" + cc_library_pyd_name],
+ "//conditions:default": [":" + cc_library_name],
+ }))
+
# This macro is for running python tests against system installed pip package
# on Windows.
#
@@ -1587,263 +1502,246 @@ def tf_py_wrap_cc(
# Note that this only works on Windows. See the definition of
# //third_party/tensorflow/tools/pip_package:win_pip_package_marker for specific reasons.
# 2. When --define=no_tensorflow_py_deps=false (by default), it's a normal py_test.
-def py_test(deps = [], data = [], **kwargs):
- native.py_test(
- malloc = "//tcmalloc:tcmalloc_or_debug",
- deps = select({
- "//conditions:default": deps,
- clean_dep("//tensorflow:no_tensorflow_py_deps"): [],
- }),
- data = data + select({
- "//conditions:default": [],
- clean_dep("//tensorflow:no_tensorflow_py_deps"): ["//tensorflow/tools/pip_package:win_pip_package_marker"],
- }),
- **kwargs
- )
+def py_test(deps=[], data=[], **kwargs):
+ native.py_test(
+ # TODO(jlebar): Ideally we'd use tcmalloc here.,
+ deps=select({
+ "//conditions:default": deps,
+ clean_dep("//tensorflow:no_tensorflow_py_deps"): [],
+ }),
+ data = data + select({
+ "//conditions:default": [],
+ clean_dep("//tensorflow:no_tensorflow_py_deps"):
+ ["//tensorflow/tools/pip_package:win_pip_package_marker"],
+ }),
+ **kwargs)
register_extension_info(
extension_name = "py_test",
label_regex_for_dep = "{extension_name}",
)
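# Sketch of the effect (test label hypothetical):
#
#     bazel test --define=no_tensorflow_py_deps=true //tensorflow/python:some_test
#
# drops the TensorFlow py deps and adds the pip package marker to data, so the
# test imports TensorFlow from the system-installed pip package instead.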
-def tf_py_test(
- name,
- srcs,
- size = "medium",
- data = [],
- main = None,
- args = [],
- tags = [],
- shard_count = 1,
- additional_deps = [],
- flaky = 0,
- xla_enabled = False,
- grpc_enabled = False):
- if xla_enabled:
- additional_deps = additional_deps + tf_additional_xla_deps_py()
- if grpc_enabled:
- additional_deps = additional_deps + tf_additional_grpc_deps_py()
- py_test(
- name = name,
- size = size,
- srcs = srcs,
- main = main,
- args = args,
- tags = tags,
- visibility = [clean_dep("//tensorflow:internal")],
- shard_count = shard_count,
- data = data,
- deps = [
+def tf_py_test(name,
+ srcs,
+ size="medium",
+ data=[],
+ main=None,
+ args=[],
+ tags=[],
+ shard_count=1,
+ additional_deps=[],
+ flaky=0,
+ xla_enabled=False,
+ grpc_enabled=False):
+ if xla_enabled:
+ additional_deps = additional_deps + tf_additional_xla_deps_py()
+ if grpc_enabled:
+ additional_deps = additional_deps + tf_additional_grpc_deps_py()
+ py_test(
+ name=name,
+ size=size,
+ srcs=srcs,
+ main=main,
+ args=args,
+ tags=tags,
+ visibility=[clean_dep("//tensorflow:internal")],
+ shard_count=shard_count,
+ data=data,
+ deps=[
clean_dep("//tensorflow/python:extra_py_tests_deps"),
clean_dep("//tensorflow/python:gradient_checker"),
- ] + additional_deps,
- flaky = flaky,
- srcs_version = "PY2AND3",
- )
+ ] + additional_deps,
+ flaky=flaky,
+ srcs_version="PY2AND3")
register_extension_info(
extension_name = "tf_py_test",
label_regex_map = {"additional_deps": "deps:{extension_name}"},
)
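# Usage sketch (names hypothetical):
#
#     tf_py_test(
#         name = "sparse_ops_test",
#         size = "small",
#         srcs = ["sparse_ops_test.py"],
#         additional_deps = ["//tensorflow/python:sparse_ops"],
#         grpc_enabled = True,
#     )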
-def cuda_py_test(
- name,
- srcs,
- size = "medium",
- data = [],
- main = None,
- args = [],
- shard_count = 1,
- additional_deps = [],
- tags = [],
- flaky = 0,
- xla_enabled = False,
- grpc_enabled = False):
- test_tags = tags + tf_cuda_tests_tags()
- tf_py_test(
- name = name,
- size = size,
- srcs = srcs,
- data = data,
- main = main,
- args = args,
- tags = test_tags,
- shard_count = shard_count,
- additional_deps = additional_deps,
- flaky = flaky,
- xla_enabled = xla_enabled,
- grpc_enabled = grpc_enabled,
- )
+def cuda_py_test(name,
+ srcs,
+ size="medium",
+ data=[],
+ main=None,
+ args=[],
+ shard_count=1,
+ additional_deps=[],
+ tags=[],
+ flaky=0,
+ xla_enabled=False,
+ grpc_enabled=False):
+ test_tags = tags + tf_cuda_tests_tags()
+ tf_py_test(
+ name=name,
+ size=size,
+ srcs=srcs,
+ data=data,
+ main=main,
+ args=args,
+ tags=test_tags,
+ shard_count=shard_count,
+ additional_deps=additional_deps,
+ flaky=flaky,
+ xla_enabled=xla_enabled,
+ grpc_enabled=grpc_enabled)
register_extension_info(
extension_name = "cuda_py_test",
label_regex_map = {"additional_deps": "additional_deps:{extension_name}"},
)
-def sycl_py_test(
- name,
- srcs,
- size = "medium",
- data = [],
- main = None,
- args = [],
- shard_count = 1,
- additional_deps = [],
- tags = [],
- flaky = 0,
- xla_enabled = False,
- grpc_enabled = False):
- test_tags = tags + tf_sycl_tests_tags()
- tf_py_test(
- name = name,
- size = size,
- srcs = srcs,
- data = data,
- main = main,
- args = args,
- tags = test_tags,
- shard_count = shard_count,
- additional_deps = additional_deps,
- flaky = flaky,
- xla_enabled = xla_enabled,
- grpc_enabled = grpc_enabled,
- )
+def sycl_py_test(name,
+ srcs,
+ size="medium",
+ data=[],
+ main=None,
+ args=[],
+ shard_count=1,
+ additional_deps=[],
+ tags=[],
+ flaky=0,
+ xla_enabled=False,
+ grpc_enabled=False):
+ test_tags = tags + tf_sycl_tests_tags()
+ tf_py_test(
+ name=name,
+ size=size,
+ srcs=srcs,
+ data=data,
+ main=main,
+ args=args,
+ tags=test_tags,
+ shard_count=shard_count,
+ additional_deps=additional_deps,
+ flaky=flaky,
+ xla_enabled=xla_enabled,
+ grpc_enabled=grpc_enabled)
register_extension_info(
extension_name = "sycl_py_test",
label_regex_map = {"additional_deps": "additional_deps:{extension_name}"},
)
-def py_tests(
- name,
- srcs,
- size = "medium",
- additional_deps = [],
- data = [],
- tags = [],
- shard_count = 1,
- prefix = "",
- xla_enabled = False,
- grpc_enabled = False):
- for src in srcs:
- test_name = src.split("/")[-1].split(".")[0]
- if prefix:
- test_name = "%s_%s" % (prefix, test_name)
- tf_py_test(
- name = test_name,
- size = size,
- srcs = [src],
- main = src,
- tags = tags,
- shard_count = shard_count,
- data = data,
- additional_deps = additional_deps,
- xla_enabled = xla_enabled,
- grpc_enabled = grpc_enabled,
- )
-
-def cuda_py_tests(
- name,
- srcs,
- size = "medium",
- additional_deps = [],
- data = [],
- shard_count = 1,
- tags = [],
- prefix = "",
- xla_enabled = False,
- grpc_enabled = False):
- test_tags = tags + tf_cuda_tests_tags()
- py_tests(
- name = name,
- size = size,
- srcs = srcs,
- additional_deps = additional_deps,
- data = data,
- tags = test_tags,
- shard_count = shard_count,
- prefix = prefix,
- xla_enabled = xla_enabled,
- grpc_enabled = grpc_enabled,
- )
+def py_tests(name,
+ srcs,
+ size="medium",
+ additional_deps=[],
+ data=[],
+ tags=[],
+ shard_count=1,
+ prefix="",
+ xla_enabled=False,
+ grpc_enabled=False):
+ for src in srcs:
+ test_name = src.split("/")[-1].split(".")[0]
+ if prefix:
+ test_name = "%s_%s" % (prefix, test_name)
+ tf_py_test(
+ name=test_name,
+ size=size,
+ srcs=[src],
+ main=src,
+ tags=tags,
+ shard_count=shard_count,
+ data=data,
+ additional_deps=additional_deps,
+ xla_enabled=xla_enabled,
+ grpc_enabled=grpc_enabled)
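# Sketch: py_tests(name = "math_tests", srcs = ["add_test.py", "mul_test.py"],
# prefix = "cpu", ...) generates two targets, cpu_add_test and cpu_mul_test,
# each running a single source file as its main (names hypothetical).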
+
+def cuda_py_tests(name,
+ srcs,
+ size="medium",
+ additional_deps=[],
+ data=[],
+ shard_count=1,
+ tags=[],
+ prefix="",
+ xla_enabled=False,
+ grpc_enabled=False):
+ test_tags = tags + tf_cuda_tests_tags()
+ py_tests(
+ name=name,
+ size=size,
+ srcs=srcs,
+ additional_deps=additional_deps,
+ data=data,
+ tags=test_tags,
+ shard_count=shard_count,
+ prefix=prefix,
+ xla_enabled=xla_enabled,
+ grpc_enabled=grpc_enabled)
# Creates a genrule named <name> for running tools/proto_text's generator to
# make the proto_text functions for the protos passed in <srcs>.
#
# Return a struct with fields (hdrs, srcs) containing the names of the
# generated files.
-def tf_generate_proto_text_sources(name, srcs_relative_dir, srcs, protodeps = [], deps = [], visibility = None):
- out_hdrs = (
- [
- p.replace(".proto", ".pb_text.h")
- for p in srcs
- ] + [p.replace(".proto", ".pb_text-impl.h") for p in srcs]
- )
- out_srcs = [p.replace(".proto", ".pb_text.cc") for p in srcs]
- native.genrule(
- name = name + "_srcs",
- srcs = srcs + protodeps + [clean_dep("//tensorflow/tools/proto_text:placeholder.txt")],
- outs = out_hdrs + out_srcs,
- visibility = visibility,
- cmd =
- "$(location //tensorflow/tools/proto_text:gen_proto_text_functions) " +
- "$(@D) " + srcs_relative_dir + " $(SRCS)",
- tools = [
- clean_dep("//tensorflow/tools/proto_text:gen_proto_text_functions"),
- ],
- )
-
- native.filegroup(
- name = name + "_hdrs",
- srcs = out_hdrs,
- visibility = visibility,
- )
-
- native.cc_library(
- name = name,
- srcs = out_srcs,
- hdrs = out_hdrs,
- visibility = visibility,
- deps = deps,
- )
+def tf_generate_proto_text_sources(name, srcs_relative_dir, srcs, protodeps=[], deps=[], visibility=None):
+ out_hdrs = (
+ [p.replace(".proto", ".pb_text.h")
+ for p in srcs] + [p.replace(".proto", ".pb_text-impl.h") for p in srcs])
+ out_srcs = [p.replace(".proto", ".pb_text.cc") for p in srcs]
+ native.genrule(
+ name=name + "_srcs",
+ srcs=srcs + protodeps + [clean_dep("//tensorflow/tools/proto_text:placeholder.txt")],
+ outs=out_hdrs + out_srcs,
+ visibility=visibility,
+ cmd=
+ "$(location //tensorflow/tools/proto_text:gen_proto_text_functions) "
+ + "$(@D) " + srcs_relative_dir + " $(SRCS)",
+ tools=[
+ clean_dep("//tensorflow/tools/proto_text:gen_proto_text_functions")
+ ],)
+
+ native.filegroup(
+ name=name + "_hdrs",
+ srcs=out_hdrs,
+ visibility=visibility,
+ )
+
+ native.cc_library(
+ name=name,
+ srcs=out_srcs,
+ hdrs=out_hdrs,
+ visibility=visibility,
+ deps = deps,
+ )
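# Usage sketch (proto name hypothetical): for srcs = ["foo.proto"], the genrule
# emits foo.pb_text.h, foo.pb_text-impl.h and foo.pb_text.cc, and the
# cc_library above packages them:
#
#     tf_generate_proto_text_sources(
#         name = "foo_proto_text",
#         srcs_relative_dir = "tensorflow/core/",
#         srcs = ["foo.proto"],
#         deps = ["//tensorflow/core:lib_internal"],
#     )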
def tf_genrule_cmd_append_to_srcs(to_append):
- return ("cat $(SRCS) > $(@) && " + "echo >> $(@) && " + "echo " + to_append +
- " >> $(@)")
+ return ("cat $(SRCS) > $(@) && " + "echo >> $(@) && " + "echo " + to_append +
+ " >> $(@)")
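# E.g. tf_genrule_cmd_append_to_srcs("foo") returns the genrule cmd string
#
#     cat $(SRCS) > $(@) && echo >> $(@) && echo foo >> $(@)
#
# i.e. concatenate the sources into the output, then append "foo" as a final
# line.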
def tf_version_info_genrule():
- native.genrule(
- name = "version_info_gen",
- srcs = [
- clean_dep("@local_config_git//:gen/spec.json"),
- clean_dep("@local_config_git//:gen/head"),
- clean_dep("@local_config_git//:gen/branch_ref"),
- ],
- outs = ["util/version_info.cc"],
- cmd =
- "$(location //tensorflow/tools/git:gen_git_source.py) --generate $(SRCS) \"$@\" --git_tag_override=$${GIT_TAG_OVERRIDE:-}",
- local = 1,
- tools = [clean_dep("//tensorflow/tools/git:gen_git_source.py")],
- )
+ native.genrule(
+ name="version_info_gen",
+ srcs=[
+ clean_dep("@local_config_git//:gen/spec.json"),
+ clean_dep("@local_config_git//:gen/head"),
+ clean_dep("@local_config_git//:gen/branch_ref"),
+ ],
+ outs=["util/version_info.cc"],
+ cmd=
+ "$(location //tensorflow/tools/git:gen_git_source.py) --generate $(SRCS) \"$@\" --git_tag_override=$${GIT_TAG_OVERRIDE:-}",
+ local=1,
+ tools=[clean_dep("//tensorflow/tools/git:gen_git_source.py")],)
def tf_py_build_info_genrule():
- native.genrule(
- name = "py_build_info_gen",
- outs = ["platform/build_info.py"],
- cmd =
- "$(location //tensorflow/tools/build_info:gen_build_info.py) --raw_generate \"$@\" --build_config " + if_cuda("cuda", "cpu"),
- local = 1,
- tools = [clean_dep("//tensorflow/tools/build_info:gen_build_info.py")],
- )
-
-def cc_library_with_android_deps(
- deps,
- android_deps = [],
- common_deps = [],
- copts = tf_copts(),
- **kwargs):
- deps = if_not_android(deps) + if_android(android_deps) + common_deps
- native.cc_library(deps = deps, copts = copts, **kwargs)
+ native.genrule(
+ name="py_build_info_gen",
+ outs=["platform/build_info.py"],
+ cmd=
+ "$(location //tensorflow/tools/build_info:gen_build_info.py) --raw_generate \"$@\" --build_config " + if_cuda("cuda", "cpu"),
+ local=1,
+ tools=[clean_dep("//tensorflow/tools/build_info:gen_build_info.py")],)
+
+def cc_library_with_android_deps(deps,
+ android_deps=[],
+ common_deps=[],
+ copts=tf_copts(),
+ **kwargs):
+ deps = if_not_android(deps) + if_android(android_deps) + common_deps
+ native.cc_library(deps=deps, copts=copts, **kwargs)
register_extension_info(
extension_name = "cc_library_with_android_deps",
diff --git a/tensorflow/tools/api/generator/api_gen.bzl b/tensorflow/tools/api/generator/api_gen.bzl
index e5ec86c0a0..d746b5d3e4 100644
--- a/tensorflow/tools/api/generator/api_gen.bzl
+++ b/tensorflow/tools/api/generator/api_gen.bzl
@@ -134,7 +134,7 @@ def gen_api_init_files(
package_dep = "//tensorflow/python:no_contrib"):
root_init_template_flag = ""
if root_init_template:
- root_init_template_flag = "--root_init_template=$(location " + root_init_template + ")"
+ root_init_template_flag = "--root_init_template=$(location " + root_init_template + ")"
api_gen_binary_target = "create_" + package + "_api"
native.py_binary(
@@ -154,9 +154,8 @@ def gen_api_init_files(
outs = output_files,
cmd = (
"$(location :" + api_gen_binary_target + ") " +
- root_init_template_flag + " --apidir=$(@D) --apiname=" + api_name + " --package=" + package + " $(OUTS)"
- ),
+ root_init_template_flag + " --apidir=$(@D) --apiname=" + api_name + " --package=" + package + " $(OUTS)"),
srcs = srcs,
- tools = [":" + api_gen_binary_target],
+ tools = [":" + api_gen_binary_target ],
visibility = ["//tensorflow:__pkg__"],
)
diff --git a/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl b/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl
index d5a1cbde15..f8f63e276c 100644
--- a/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl
+++ b/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl
@@ -24,27 +24,27 @@ load("@bazel_tools//tools/cpp:windows_cc_configure.bzl", "find_msvc_tool")
load("@bazel_tools//tools/cpp:lib_cc_configure.bzl", "auto_configure_fail")
def _def_file_filter_configure_impl(repository_ctx):
- if repository_ctx.os.name.lower().find("windows") == -1:
- repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
- repository_ctx.file("def_file_filter.py", "")
- return
- vc_path = find_vc_path(repository_ctx)
- if vc_path == "visual-studio-not-found":
- auto_configure_fail("Visual C++ build tools not found on your machine")
-
- undname = find_msvc_tool(repository_ctx, vc_path, "undname.exe")
- if undname == None:
- auto_configure_fail("Couldn't find undname.exe under %s, please check your VC installation and set BAZEL_VC environment variable correctly." % vc_path)
- undname_bin_path = undname.replace("\\", "\\\\")
-
- repository_ctx.template(
- "def_file_filter.py",
- Label("//tensorflow/tools/def_file_filter:def_file_filter.py.tpl"),
- {
- "%{undname_bin_path}": undname_bin_path,
- },
- )
+ if repository_ctx.os.name.lower().find("windows") == -1:
repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
+ repository_ctx.file("def_file_filter.py", "")
+ return
+ vc_path = find_vc_path(repository_ctx)
+ if vc_path == "visual-studio-not-found":
+ auto_configure_fail("Visual C++ build tools not found on your machine")
+
+ undname = find_msvc_tool(repository_ctx, vc_path, "undname.exe")
+ if undname == None:
+ auto_configure_fail("Couldn't find undname.exe under %s, please check your VC installation and set BAZEL_VC environment variable correctly." % vc_path)
+ undname_bin_path = undname.replace("\\", "\\\\")
+
+ repository_ctx.template(
+ "def_file_filter.py",
+ Label("//tensorflow/tools/def_file_filter:def_file_filter.py.tpl"),
+ {
+ "%{undname_bin_path}": undname_bin_path,
+ })
+ repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
+
def_file_filter_configure = repository_rule(
implementation = _def_file_filter_configure_impl,
@@ -55,6 +55,6 @@ def_file_filter_configure = repository_rule(
"VS100COMNTOOLS",
"VS110COMNTOOLS",
"VS120COMNTOOLS",
- "VS140COMNTOOLS",
+ "VS140COMNTOOLS"
],
)
diff --git a/tensorflow/tools/test/performance.bzl b/tensorflow/tools/test/performance.bzl
index 9786111034..3486871080 100644
--- a/tensorflow/tools/test/performance.bzl
+++ b/tensorflow/tools/test/performance.bzl
@@ -4,66 +4,60 @@ load("//tensorflow:tensorflow.bzl", "tf_py_test")
# Create a benchmark test target of a TensorFlow C++ test (tf_cc_*_test)
def tf_cc_logged_benchmark(
- name = None,
- target = None,
- benchmarks = "..",
- tags = [],
- test_log_output_prefix = "",
- benchmark_type = "cpp_microbenchmark"):
- if not name:
- fail("Must provide a name")
- if not target:
- fail("Must provide a target")
- if (not ":" in target or
- not target.startswith("//") or
- target.endswith(":all") or
- target.endswith(".")):
- fail(" ".join((
- "Target must be a single well-defined test, e.g.,",
- "//path/to:test. Received: %s" % target,
- )))
+ name=None,
+ target=None,
+ benchmarks="..",
+ tags=[],
+ test_log_output_prefix="",
+ benchmark_type="cpp_microbenchmark"):
+ if not name:
+ fail("Must provide a name")
+ if not target:
+ fail("Must provide a target")
+ if (not ":" in target
+ or not target.startswith("//")
+ or target.endswith(":all")
+ or target.endswith(".")):
+ fail(" ".join(("Target must be a single well-defined test, e.g.,",
+ "//path/to:test. Received: %s" % target)))
- all_tags = (
- depset(tags) + depset(
- ["benchmark-test", "local", "manual", "regression-test"],
- )
- ).to_list()
+ all_tags = (
+ depset(tags) + depset(
+ ["benchmark-test", "local", "manual", "regression-test"])).to_list()
- tf_py_test(
- name = name,
- tags = all_tags,
- size = "large",
- srcs = ["//tensorflow/tools/test:run_and_gather_logs"],
- args = [
- "--name=//%s:%s" % (native.package_name(), name),
- "--test_name=" + target,
- "--test_args=--benchmarks=%s" % benchmarks,
- "--benchmark_type=%s" % benchmark_type,
- ],
- data = [
- target,
- ],
- main = "run_and_gather_logs.py",
- additional_deps = [
- "//tensorflow/tools/test:run_and_gather_logs",
- ],
- )
+ tf_py_test(
+ name = name,
+ tags = all_tags,
+ size = "large",
+ srcs = ["//tensorflow/tools/test:run_and_gather_logs"],
+ args = [
+ "--name=//%s:%s" % (native.package_name(), name),
+ "--test_name=" + target,
+ "--test_args=--benchmarks=%s" % benchmarks,
+ "--benchmark_type=%s" % benchmark_type,
+ ],
+ data = [
+ target,
+ ],
+ main = "run_and_gather_logs.py",
+ additional_deps = [
+ "//tensorflow/tools/test:run_and_gather_logs"
+ ])
# Create a benchmark test target of a TensorFlow python test (*py_tests)
def tf_py_logged_benchmark(
- name = None,
- target = None,
- benchmarks = "..",
- tags = [],
- test_log_output_prefix = ""):
- # For now generating a py benchmark is the same as generating a C++
- # benchmark target. In the future this may change, so we have
- # two macros just in case
- tf_cc_logged_benchmark(
- name = name,
- target = target,
- benchmarks = benchmarks,
- tags = tags,
- test_log_output_prefix = test_log_output_prefix,
- benchmark_type = "python_benchmark",
- )
+ name=None,
+ target=None,
+ benchmarks="..",
+ tags=[],
+ test_log_output_prefix=""):
+ # For now generating a py benchmark is the same as generating a C++
+ # benchmark target. In the future this may change, so we have
+ # two macros just in case
+ tf_cc_logged_benchmark(
+ name=name,
+ target=target,
+ benchmarks=benchmarks,
+ tags=tags,
+ test_log_output_prefix=test_log_output_prefix,
+ benchmark_type="python_benchmark")
diff --git a/tensorflow/version_check.bzl b/tensorflow/version_check.bzl
index 3b61827139..79e721dab4 100644
--- a/tensorflow/version_check.bzl
+++ b/tensorflow/version_check.bzl
@@ -1,50 +1,48 @@
""" Helpers to check minimum version of bazel."""
def _extract_version_number(bazel_version):
- """Extracts the semantic version number from a version string
+ """Extracts the semantic version number from a version string
- Args:
- bazel_version: the version string that begins with the semantic version
- e.g. "1.2.3rc1 abc1234" where "abc1234" is a commit hash.
+ Args:
+ bazel_version: the version string that begins with the semantic version
+ e.g. "1.2.3rc1 abc1234" where "abc1234" is a commit hash.
- Returns:
- The semantic version string, like "1.2.3".
- """
- for i in range(len(bazel_version)):
- c = bazel_version[i]
- if not (c.isdigit() or c == "."):
- return bazel_version[:i]
- return bazel_version
+ Returns:
+ The semantic version string, like "1.2.3".
+ """
+ for i in range(len(bazel_version)):
+ c = bazel_version[i]
+ if not (c.isdigit() or c == "."):
+ return bazel_version[:i]
+ return bazel_version
# Parse the bazel version string from `native.bazel_version`.
# e.g.
# "0.10.0rc1 abc123d" => (0, 10, 0)
# "0.3.0" => (0, 3, 0)
def _parse_bazel_version(bazel_version):
- """Parses a version string into a 3-tuple of ints
+ """Parses a version string into a 3-tuple of ints
- int tuples can be compared directly using binary operators (<, >).
+ int tuples can be compared directly using binary operators (<, >).
- Args:
- bazel_version: the Bazel version string
+ Args:
+ bazel_version: the Bazel version string
- Returns:
- An int 3-tuple of a (major, minor, patch) version.
- """
+ Returns:
+ An int 3-tuple of a (major, minor, patch) version.
+ """
- version = _extract_version_number(bazel_version)
- return tuple([int(n) for n in version.split(".")])
+ version = _extract_version_number(bazel_version)
+ return tuple([int(n) for n in version.split(".")])
def check_bazel_version_at_least(minimum_bazel_version):
- if "bazel_version" not in dir(native):
- fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" % minimum_bazel_version)
- elif not native.bazel_version:
- print("\nCurrent Bazel is not a release version, cannot check for compatibility.")
- print("Make sure that you are running at least Bazel %s.\n" % minimum_bazel_version)
- return
-
- if _parse_bazel_version(native.bazel_version) < _parse_bazel_version(minimum_bazel_version):
- fail("\nCurrent Bazel version is {}, expected at least {}\n".format(
- native.bazel_version,
- minimum_bazel_version,
- ))
+ if "bazel_version" not in dir(native):
+ fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" % minimum_bazel_version)
+ elif not native.bazel_version:
+ print("\nCurrent Bazel is not a release version, cannot check for compatibility.")
+ print("Make sure that you are running at least Bazel %s.\n" % minimum_bazel_version)
+ return
+
+ if _parse_bazel_version(native.bazel_version) < _parse_bazel_version(minimum_bazel_version):
+ fail("\nCurrent Bazel version is {}, expected at least {}\n".format(
+ native.bazel_version, minimum_bazel_version))
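# Typical usage sketch from a WORKSPACE file (the version value is
# illustrative):
#
#     load("//tensorflow:version_check.bzl", "check_bazel_version_at_least")
#     check_bazel_version_at_least("0.10.0")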
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index e337bc684c..7318b1e2c3 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -14,876 +14,875 @@ load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("@io_bazel_rules_closure//closure/private:java_import_external.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
-load(
- "//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
- "def_file_filter_configure",
-)
+load("//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
+ "def_file_filter_configure")
+
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
- return str(Label(dep))
+ return str(Label(dep))
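# Concrete sketch of why this matters (repository name hypothetical): when
# TensorFlow is vendored as @org_tensorflow inside another workspace,
# clean_dep("//third_party/eigen3") canonicalizes to a label inside
# @org_tensorflow rather than the outer workspace; in a standalone checkout it
# stays in the main repository.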
# If TensorFlow is linked as a submodule.
# path_prefix is no longer used.
# tf_repo_name is still under consideration.
-def tf_workspace(path_prefix = "", tf_repo_name = ""):
- # Note that we check the minimum bazel version in WORKSPACE.
- clang6_configure(name = "local_config_clang6")
- cc_download_clang_toolchain(name = "local_config_download_clang")
- cuda_configure(name = "local_config_cuda")
- tensorrt_configure(name = "local_config_tensorrt")
- nccl_configure(name = "local_config_nccl")
- git_configure(name = "local_config_git")
- sycl_configure(name = "local_config_sycl")
- python_configure(name = "local_config_python")
-
- # For windows bazel build
- # TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
- def_file_filter_configure(name = "local_config_def_file_filter")
-
- # Point //external/local_config_arm_compiler to //external/arm_compiler
- arm_compiler_configure(
- name = "local_config_arm_compiler",
- remote_config_repo = "../arm_compiler",
- build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"),
- )
-
- mkl_repository(
- name = "mkl_linux",
- urls = [
- "https://mirror.bazel.build/github.com/intel/mkl-dnn/releases/download/v0.14/mklml_lnx_2018.0.3.20180406.tgz",
- "https://github.com/intel/mkl-dnn/releases/download/v0.14/mklml_lnx_2018.0.3.20180406.tgz",
- ],
- sha256 = "d2305244fdc9b87db7426ed4496e87a4b3977ad3374d73b8000e8b7a5b7aa725",
- strip_prefix = "mklml_lnx_2018.0.3.20180406",
- build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
- )
- mkl_repository(
- name = "mkl_windows",
- urls = [
- "https://mirror.bazel.build/github.com/intel/mkl-dnn/releases/download/v0.14/mklml_win_2018.0.3.20180406.zip",
- "https://github.com/intel/mkl-dnn/releases/download/v0.14/mklml_win_2018.0.3.20180406.zip",
- ],
- sha256 = "a584a5bf1c8d2ad70b90d12b52652030e9a338217719064fdb84b7ad0d693694",
- strip_prefix = "mklml_win_2018.0.3.20180406",
- build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
- )
- mkl_repository(
- name = "mkl_darwin",
- urls = [
- "https://mirror.bazel.build/github.com/intel/mkl-dnn/releases/download/v0.14/mklml_mac_2018.0.3.20180406.tgz",
- "https://github.com/intel/mkl-dnn/releases/download/v0.14/mklml_mac_2018.0.3.20180406.tgz",
- ],
- sha256 = "094e3dfd61c816136dc8d12a45cc611ce26c5f4828176a3644cd0b0efa15a25b",
- strip_prefix = "mklml_mac_2018.0.3.20180406",
- build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
- )
-
- if path_prefix:
- print("path_prefix was specified to tf_workspace but is no longer used " +
- "and will be removed in the future.")
-
- tf_http_archive(
- name = "mkl_dnn",
- urls = [
- "https://mirror.bazel.build/github.com/intel/mkl-dnn/archive/v0.14.tar.gz",
- "https://github.com/intel/mkl-dnn/archive/v0.14.tar.gz",
- ],
- sha256 = "efebc53882856afec86457a2da644693f5d59c68772d41d640d6b60a8efc4eb0",
- strip_prefix = "mkl-dnn-0.14",
- build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
- )
-
- tf_http_archive(
- name = "com_google_absl",
- urls = [
- "https://mirror.bazel.build/github.com/abseil/abseil-cpp/archive/9613678332c976568272c8f4a78631a29159271d.tar.gz",
- "https://github.com/abseil/abseil-cpp/archive/9613678332c976568272c8f4a78631a29159271d.tar.gz",
- ],
- sha256 = "1273a1434ced93bc3e703a48c5dced058c95e995c8c009e9bdcb24a69e2180e9",
- strip_prefix = "abseil-cpp-9613678332c976568272c8f4a78631a29159271d",
- build_file = clean_dep("//third_party:com_google_absl.BUILD"),
- )
-
- tf_http_archive(
- name = "eigen_archive",
- urls = [
- "https://mirror.bazel.build/bitbucket.org/eigen/eigen/get/e5e305a158a0.tar.gz",
- "https://bitbucket.org/eigen/eigen/get/e5e305a158a0.tar.gz",
- ],
- sha256 = "8bbe676d69e7f59070c83a949454b8b6344034e0ebbf686b337528e5dc04c7de",
- strip_prefix = "eigen-eigen-e5e305a158a0",
- build_file = clean_dep("//third_party:eigen.BUILD"),
- )
-
- tf_http_archive(
- name = "arm_compiler",
- sha256 = "970285762565c7890c6c087d262b0a18286e7d0384f13a37786d8521773bc969",
- strip_prefix = "tools-0e906ebc527eab1cdbf7adabff5b474da9562e9f/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf",
- urls = [
- "https://mirror.bazel.build/github.com/raspberrypi/tools/archive/0e906ebc527eab1cdbf7adabff5b474da9562e9f.tar.gz",
- # Please uncomment me, when the next upgrade happens. Then
- # remove the whitelist entry in third_party/repo.bzl.
- # "https://github.com/raspberrypi/tools/archive/0e906ebc527eab1cdbf7adabff5b474da9562e9f.tar.gz",
- ],
- build_file = clean_dep("//:arm_compiler.BUILD"),
- )
-
- tf_http_archive(
- name = "libxsmm_archive",
- urls = [
- "https://mirror.bazel.build/github.com/hfp/libxsmm/archive/1.9.tar.gz",
- "https://github.com/hfp/libxsmm/archive/1.9.tar.gz",
- ],
- sha256 = "cd8532021352b4a0290d209f7f9bfd7c2411e08286a893af3577a43457287bfa",
- strip_prefix = "libxsmm-1.9",
- build_file = clean_dep("//third_party:libxsmm.BUILD"),
- )
-
- tf_http_archive(
- name = "ortools_archive",
- urls = [
- "https://mirror.bazel.build/github.com/google/or-tools/archive/253f7955c6a1fd805408fba2e42ac6d45b312d15.tar.gz",
- # Please uncomment me, when the next upgrade happens. Then
- # remove the whitelist entry in third_party/repo.bzl.
- # "https://github.com/google/or-tools/archive/253f7955c6a1fd805408fba2e42ac6d45b312d15.tar.gz",
- ],
- sha256 = "932075525642b04ac6f1b50589f1df5cd72ec2f448b721fd32234cf183f0e755",
- strip_prefix = "or-tools-253f7955c6a1fd805408fba2e42ac6d45b312d15/src",
- build_file = clean_dep("//third_party:ortools.BUILD"),
- )
-
- tf_http_archive(
- name = "com_googlesource_code_re2",
- urls = [
- "https://mirror.bazel.build/github.com/google/re2/archive/2018-04-01.tar.gz",
- "https://github.com/google/re2/archive/2018-04-01.tar.gz",
- ],
- sha256 = "2f945446b71336e7f5a2bcace1abcf0b23fbba368266c6a1be33de3de3b3c912",
- strip_prefix = "re2-2018-04-01",
- )
-
- tf_http_archive(
- name = "com_github_googlecloudplatform_google_cloud_cpp",
- urls = [
- "https://mirror.bazel.build/github.com/GoogleCloudPlatform/google-cloud-cpp/archive/53f822805e77ea7715f5b52c592a162c515c7219.tar.gz",
- "https://github.com/GoogleCloudPlatform/google-cloud-cpp/archive/53f822805e77ea7715f5b52c592a162c515c7219.tar.gz",
- ],
- sha256 = "06853bfca77ef4aec09db5ab48c548f68ef2e18f17404cbce61f8d9b820f951b",
- strip_prefix = "google-cloud-cpp-53f822805e77ea7715f5b52c592a162c515c7219",
- )
-
- tf_http_archive(
- name = "com_github_googleapis_googleapis",
- urls = [
- "https://mirror.bazel.build/github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
- "https://github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
- ],
- sha256 = "824870d87a176f26bcef663e92051f532fac756d1a06b404055dc078425f4378",
- strip_prefix = "googleapis-f81082ea1e2f85c43649bee26e0d9871d4b41cdb",
- build_file = clean_dep("//third_party:googleapis.BUILD"),
- )
-
- tf_http_archive(
- name = "gemmlowp",
- urls = [
- "https://mirror.bazel.build/github.com/google/gemmlowp/archive/38ebac7b059e84692f53e5938f97a9943c120d98.zip",
- "https://github.com/google/gemmlowp/archive/38ebac7b059e84692f53e5938f97a9943c120d98.zip",
- ],
- sha256 = "b87faa7294dfcc5d678f22a59d2c01ca94ea1e2a3b488c38a95a67889ed0a658",
- strip_prefix = "gemmlowp-38ebac7b059e84692f53e5938f97a9943c120d98",
- )
-
- tf_http_archive(
- name = "farmhash_archive",
- urls = [
- "https://mirror.bazel.build/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
- "https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
- ],
- sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0",
- strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
- build_file = clean_dep("//third_party:farmhash.BUILD"),
- )
-
- tf_http_archive(
- name = "highwayhash",
- urls = [
- "http://mirror.bazel.build/github.com/google/highwayhash/archive/fd3d9af80465e4383162e4a7c5e2f406e82dd968.tar.gz",
- "https://github.com/google/highwayhash/archive/fd3d9af80465e4383162e4a7c5e2f406e82dd968.tar.gz",
- ],
- sha256 = "9c3e0e87d581feeb0c18d814d98f170ff23e62967a2bd6855847f0b2fe598a37",
- strip_prefix = "highwayhash-fd3d9af80465e4383162e4a7c5e2f406e82dd968",
- build_file = clean_dep("//third_party:highwayhash.BUILD"),
- )
-
- tf_http_archive(
- name = "nasm",
- urls = [
- "https://mirror.bazel.build/www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2",
- "http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.12.02.tar.bz2/d15843c3fb7db39af80571ee27ec6fad/nasm-2.12.02.tar.bz2",
- "http://www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2",
- ],
- sha256 = "00b0891c678c065446ca59bcee64719d0096d54d6886e6e472aeee2e170ae324",
- strip_prefix = "nasm-2.12.02",
- build_file = clean_dep("//third_party:nasm.BUILD"),
- )
-
- tf_http_archive(
- name = "jpeg",
- urls = [
- "https://mirror.bazel.build/github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.3.tar.gz",
- "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.3.tar.gz",
- ],
- sha256 = "1a17020f859cb12711175a67eab5c71fc1904e04b587046218e36106e07eabde",
- strip_prefix = "libjpeg-turbo-1.5.3",
- build_file = clean_dep("//third_party/jpeg:jpeg.BUILD"),
- )
-
- tf_http_archive(
- name = "png_archive",
- urls = [
- "https://mirror.bazel.build/github.com/glennrp/libpng/archive/v1.6.34.tar.gz",
- "https://github.com/glennrp/libpng/archive/v1.6.34.tar.gz",
- ],
- sha256 = "e45ce5f68b1d80e2cb9a2b601605b374bdf51e1798ef1c2c2bd62131dfcf9eef",
- strip_prefix = "libpng-1.6.34",
- build_file = clean_dep("//third_party:png.BUILD"),
- patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
- )
-
- tf_http_archive(
- name = "org_sqlite",
- urls = [
- "https://mirror.bazel.build/www.sqlite.org/2018/sqlite-amalgamation-3230100.zip",
- "https://www.sqlite.org/2018/sqlite-amalgamation-3230100.zip",
- ],
- sha256 = "4239a1f69e5721d07d9a374eb84d594225229e54be4ee628da2995f4315d8dfc",
- strip_prefix = "sqlite-amalgamation-3230100",
- build_file = clean_dep("//third_party:sqlite.BUILD"),
- )
-
- tf_http_archive(
- name = "gif_archive",
- urls = [
- "https://mirror.bazel.build/ufpr.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
- "http://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
- ],
- sha256 = "34a7377ba834397db019e8eb122e551a49c98f49df75ec3fcc92b9a794a4f6d1",
- strip_prefix = "giflib-5.1.4",
- build_file = clean_dep("//third_party:gif.BUILD"),
- )
-
- tf_http_archive(
- name = "six_archive",
- urls = [
- "https://mirror.bazel.build/pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
- "https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
- ],
- sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
- strip_prefix = "six-1.10.0",
- build_file = clean_dep("//third_party:six.BUILD"),
- )
-
- tf_http_archive(
- name = "astor_archive",
- urls = [
- "https://mirror.bazel.build/pypi.python.org/packages/d8/be/c4276b3199ec3feee2a88bc64810fbea8f26d961e0a4cd9c68387a9f35de/astor-0.6.2.tar.gz",
- "https://pypi.python.org/packages/d8/be/c4276b3199ec3feee2a88bc64810fbea8f26d961e0a4cd9c68387a9f35de/astor-0.6.2.tar.gz",
- ],
- sha256 = "ff6d2e2962d834acb125cc4dcc80c54a8c17c253f4cc9d9c43b5102a560bb75d",
- strip_prefix = "astor-0.6.2",
- build_file = clean_dep("//third_party:astor.BUILD"),
- )
-
- tf_http_archive(
- name = "gast_archive",
- urls = [
- "https://mirror.bazel.build/pypi.python.org/packages/5c/78/ff794fcae2ce8aa6323e789d1f8b3b7765f601e7702726f430e814822b96/gast-0.2.0.tar.gz",
- "https://pypi.python.org/packages/5c/78/ff794fcae2ce8aa6323e789d1f8b3b7765f601e7702726f430e814822b96/gast-0.2.0.tar.gz",
- ],
- sha256 = "7068908321ecd2774f145193c4b34a11305bd104b4551b09273dfd1d6a374930",
- strip_prefix = "gast-0.2.0",
- build_file = clean_dep("//third_party:gast.BUILD"),
- )
-
- tf_http_archive(
- name = "termcolor_archive",
- urls = [
- "https://mirror.bazel.build/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
- "https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
- ],
- sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
- strip_prefix = "termcolor-1.1.0",
- build_file = clean_dep("//third_party:termcolor.BUILD"),
- )
-
- tf_http_archive(
- name = "absl_py",
- urls = [
- "https://mirror.bazel.build/github.com/abseil/abseil-py/archive/pypi-v0.2.2.tar.gz",
- "https://github.com/abseil/abseil-py/archive/pypi-v0.2.2.tar.gz",
- ],
- sha256 = "95160f778a62c7a60ddeadc7bf2d83f85a23a27359814aca12cf949e896fa82c",
- strip_prefix = "abseil-py-pypi-v0.2.2",
- )
-
- tf_http_archive(
- name = "org_python_pypi_backports_weakref",
- urls = [
- "https://mirror.bazel.build/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
- "https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
- ],
- sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
- strip_prefix = "backports.weakref-1.0rc1/src",
- build_file = clean_dep("//third_party:backports_weakref.BUILD"),
- )
-
- filegroup_external(
- name = "org_python_license",
- licenses = ["notice"], # Python 2.0
- sha256_urls = {
- "b5556e921715ddb9242c076cae3963f483aa47266c5e37ea4c187f77cc79501c": [
- "https://mirror.bazel.build/docs.python.org/2.7/_sources/license.txt",
- "https://docs.python.org/2.7/_sources/license.txt",
- ],
- },
- )
-
- tf_http_archive(
- name = "protobuf_archive",
- urls = [
- "https://mirror.bazel.build/github.com/google/protobuf/archive/v3.6.0.tar.gz",
- "https://github.com/google/protobuf/archive/v3.6.0.tar.gz",
- ],
- sha256 = "50a5753995b3142627ac55cfd496cebc418a2e575ca0236e29033c67bd5665f4",
- strip_prefix = "protobuf-3.6.0",
- )
-
- # We need to import the protobuf library under the names com_google_protobuf
- # and com_google_protobuf_cc to enable proto_library support in bazel.
- # Unfortunately there is no way to alias http_archives at the moment.
- tf_http_archive(
- name = "com_google_protobuf",
- urls = [
- "https://mirror.bazel.build/github.com/google/protobuf/archive/v3.6.0.tar.gz",
- "https://github.com/google/protobuf/archive/v3.6.0.tar.gz",
- ],
- sha256 = "50a5753995b3142627ac55cfd496cebc418a2e575ca0236e29033c67bd5665f4",
- strip_prefix = "protobuf-3.6.0",
- )
-
- tf_http_archive(
- name = "com_google_protobuf_cc",
- urls = [
- "https://mirror.bazel.build/github.com/google/protobuf/archive/v3.6.0.tar.gz",
- "https://github.com/google/protobuf/archive/v3.6.0.tar.gz",
- ],
- sha256 = "50a5753995b3142627ac55cfd496cebc418a2e575ca0236e29033c67bd5665f4",
- strip_prefix = "protobuf-3.6.0",
- )
-
- tf_http_archive(
- name = "nsync",
- urls = [
- "https://mirror.bazel.build/github.com/google/nsync/archive/1.20.0.tar.gz",
- "https://github.com/google/nsync/archive/1.20.0.tar.gz",
- ],
- sha256 = "0c1b03962b2f8450f21e74a5a46116bf2d6009a807c57eb4207e974a8c4bb7dd",
- strip_prefix = "nsync-1.20.0",
- )
-
- tf_http_archive(
- name = "com_google_googletest",
- urls = [
- "https://mirror.bazel.build/github.com/google/googletest/archive/9816b96a6ddc0430671693df90192bbee57108b6.zip",
- "https://github.com/google/googletest/archive/9816b96a6ddc0430671693df90192bbee57108b6.zip",
- ],
- sha256 = "9cbca84c4256bed17df2c8f4d00c912c19d247c11c9ba6647cd6dd5b5c996b8d",
- strip_prefix = "googletest-9816b96a6ddc0430671693df90192bbee57108b6",
- )
-
- tf_http_archive(
- name = "com_github_gflags_gflags",
- urls = [
- "https://mirror.bazel.build/github.com/gflags/gflags/archive/f8a0efe03aa69b3336d8e228b37d4ccb17324b88.tar.gz",
- "https://github.com/gflags/gflags/archive/f8a0efe03aa69b3336d8e228b37d4ccb17324b88.tar.gz",
- ],
- sha256 = "4d222fab8f1ede4709cdff417d15a1336f862d7334a81abf76d09c15ecf9acd1",
- strip_prefix = "gflags-f8a0efe03aa69b3336d8e228b37d4ccb17324b88",
- )
-
- tf_http_archive(
- name = "pcre",
- sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5",
- urls = [
- "https://mirror.bazel.build/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
- "http://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
- ],
- strip_prefix = "pcre-8.42",
- build_file = clean_dep("//third_party:pcre.BUILD"),
- )
-
- tf_http_archive(
- name = "swig",
- sha256 = "58a475dbbd4a4d7075e5fe86d4e54c9edde39847cdb96a3053d87cb64a23a453",
- urls = [
- "https://mirror.bazel.build/ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
- "http://ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
- "http://pilotfiber.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
- ],
- strip_prefix = "swig-3.0.8",
- build_file = clean_dep("//third_party:swig.BUILD"),
- )
-
- tf_http_archive(
- name = "curl",
- sha256 = "e9c37986337743f37fd14fe8737f246e97aec94b39d1b71e8a5973f72a9fc4f5",
- urls = [
- "https://mirror.bazel.build/curl.haxx.se/download/curl-7.60.0.tar.gz",
- "https://curl.haxx.se/download/curl-7.60.0.tar.gz",
- ],
- strip_prefix = "curl-7.60.0",
- build_file = clean_dep("//third_party:curl.BUILD"),
- )
-
- tf_http_archive(
- name = "grpc",
- urls = [
- "https://mirror.bazel.build/github.com/grpc/grpc/archive/d184fa229d75d336aedea0041bd59cb93e7e267f.tar.gz",
- "https://github.com/grpc/grpc/archive/d184fa229d75d336aedea0041bd59cb93e7e267f.tar.gz",
- ],
- sha256 = "895b31310e718a61f7335759a778c068a6edde1c089883598a0830cbb7075673",
- strip_prefix = "grpc-d184fa229d75d336aedea0041bd59cb93e7e267f",
- )
-
- tf_http_archive(
- name = "linenoise",
- sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
- urls = [
- "https://mirror.bazel.build/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
- "https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
- ],
- strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
- build_file = clean_dep("//third_party:linenoise.BUILD"),
- )
-
- # TODO(phawkins): currently, this rule uses an unofficial LLVM mirror.
- # Switch to an official source of snapshots if/when possible.
- tf_http_archive(
- name = "llvm",
- urls = [
- "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93.tar.gz",
- "https://github.com/llvm-mirror/llvm/archive/d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93.tar.gz",
- ],
- sha256 = "280fdc888e2eb88a3a8cc4e7d3034fffc87f98e3e686be31f8c719c6e5b67d2d",
- strip_prefix = "llvm-d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93",
- build_file = clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"),
- )
-
- tf_http_archive(
- name = "lmdb",
- urls = [
- "https://mirror.bazel.build/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
- "https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
- ],
- sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
- strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
- build_file = clean_dep("//third_party:lmdb.BUILD"),
- )
-
- tf_http_archive(
- name = "jsoncpp_git",
- urls = [
- "https://mirror.bazel.build/github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
- "https://github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
- ],
- sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6",
- strip_prefix = "jsoncpp-1.8.4",
- build_file = clean_dep("//third_party:jsoncpp.BUILD"),
- )
-
- tf_http_archive(
- name = "boringssl",
- urls = [
- "https://mirror.bazel.build/github.com/google/boringssl/archive/a0fb951d2a26a8ee746b52f3ba81ab011a0af778.tar.gz",
- "https://github.com/google/boringssl/archive/a0fb951d2a26a8ee746b52f3ba81ab011a0af778.tar.gz",
- ],
- sha256 = "524ba98a56300149696481b4cb9ddebd0c7b7ac9b9f6edee81da2d2d7e5d2bb3",
- strip_prefix = "boringssl-a0fb951d2a26a8ee746b52f3ba81ab011a0af778",
- )
-
- tf_http_archive(
- name = "zlib_archive",
- urls = [
- "https://mirror.bazel.build/zlib.net/zlib-1.2.11.tar.gz",
- "https://zlib.net/zlib-1.2.11.tar.gz",
- ],
- sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
- strip_prefix = "zlib-1.2.11",
- build_file = clean_dep("//third_party:zlib.BUILD"),
- )
-
- tf_http_archive(
- name = "fft2d",
- urls = [
- "https://mirror.bazel.build/www.kurims.kyoto-u.ac.jp/~ooura/fft.tgz",
- "http://www.kurims.kyoto-u.ac.jp/~ooura/fft.tgz",
- ],
- sha256 = "52bb637c70b971958ec79c9c8752b1df5ff0218a4db4510e60826e0cb79b5296",
- build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
- )
-
- tf_http_archive(
- name = "snappy",
- urls = [
- "https://mirror.bazel.build/github.com/google/snappy/archive/1.1.7.tar.gz",
- "https://github.com/google/snappy/archive/1.1.7.tar.gz",
- ],
- sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
- strip_prefix = "snappy-1.1.7",
- build_file = clean_dep("//third_party:snappy.BUILD"),
- )
-
- tf_http_archive(
- name = "nccl_archive",
- urls = [
- "https://mirror.bazel.build/github.com/nvidia/nccl/archive/03d856977ecbaac87e598c0c4bafca96761b9ac7.tar.gz",
- "https://github.com/nvidia/nccl/archive/03d856977ecbaac87e598c0c4bafca96761b9ac7.tar.gz",
- ],
- sha256 = "2ca86fb6179ecbff789cc67c836139c1bbc0324ed8c04643405a30bf26325176",
- strip_prefix = "nccl-03d856977ecbaac87e598c0c4bafca96761b9ac7",
- build_file = clean_dep("//third_party:nccl/nccl_archive.BUILD"),
- )
-
- tf_http_archive(
- name = "kafka",
- urls = [
- "https://mirror.bazel.build/github.com/edenhill/librdkafka/archive/v0.11.4.tar.gz",
- "https://github.com/edenhill/librdkafka/archive/v0.11.4.tar.gz",
- ],
- sha256 = "9d8f1eb7b0e29e9ab1168347c939cb7ae5dff00a39cef99e7ef033fd8f92737c",
- strip_prefix = "librdkafka-0.11.4",
- build_file = clean_dep("//third_party:kafka/BUILD"),
- patch_file = clean_dep("//third_party/kafka:config.patch"),
- )
-
- tf_http_archive(
- name = "aws",
- urls = [
- "https://mirror.bazel.build/github.com/aws/aws-sdk-cpp/archive/1.3.15.tar.gz",
- "https://github.com/aws/aws-sdk-cpp/archive/1.3.15.tar.gz",
- ],
- sha256 = "b888d8ce5fc10254c3dd6c9020c7764dd53cf39cf011249d0b4deda895de1b7c",
- strip_prefix = "aws-sdk-cpp-1.3.15",
- build_file = clean_dep("//third_party:aws.BUILD"),
- )
-
- java_import_external(
- name = "junit",
- jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
- jar_urls = [
- "https://mirror.bazel.build/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
- "http://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
- "http://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
- ],
- licenses = ["reciprocal"], # Common Public License Version 1.0
- testonly_ = True,
- deps = ["@org_hamcrest_core"],
- )
-
- java_import_external(
- name = "org_hamcrest_core",
- jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
- jar_urls = [
- "https://mirror.bazel.build/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
- "http://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
- "http://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
- ],
- licenses = ["notice"], # New BSD License
- testonly_ = True,
- )
-
- tf_http_archive(
- name = "jemalloc",
- urls = [
- "https://mirror.bazel.build/github.com/jemalloc/jemalloc/archive/4.4.0.tar.gz",
- "https://github.com/jemalloc/jemalloc/archive/4.4.0.tar.gz",
- ],
- sha256 = "3c8f25c02e806c3ce0ab5fb7da1817f89fc9732709024e2a81b6b82f7cc792a8",
- strip_prefix = "jemalloc-4.4.0",
- build_file = clean_dep("//third_party:jemalloc.BUILD"),
- )
-
- java_import_external(
- name = "com_google_testing_compile",
- jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
- jar_urls = [
- "http://mirror.bazel.build/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
- "http://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
- ],
- licenses = ["notice"], # New BSD License
- testonly_ = True,
- deps = ["@com_google_guava", "@com_google_truth"],
- )
-
- java_import_external(
- name = "com_google_truth",
- jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
- jar_urls = [
- "http://mirror.bazel.build/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
- "http://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
- ],
- licenses = ["notice"], # Apache 2.0
- testonly_ = True,
- deps = ["@com_google_guava"],
- )
-
- java_import_external(
- name = "org_checkerframework_qual",
- jar_sha256 = "a17501717ef7c8dda4dba73ded50c0d7cde440fd721acfeacbf19786ceac1ed6",
- jar_urls = [
- "http://mirror.bazel.build/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.4.0/checker-qual-2.4.0.jar",
- "http://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.4.0/checker-qual-2.4.0.jar",
- ],
- licenses = ["notice"], # Apache 2.0
- )
-
- java_import_external(
- name = "com_squareup_javapoet",
- jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
- jar_urls = [
- "http://mirror.bazel.build/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
- "http://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
- ],
- licenses = ["notice"], # Apache 2.0
- )
-
- tf_http_archive(
- name = "com_google_pprof",
- urls = [
- "https://mirror.bazel.build/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
- "https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
- ],
- sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
- strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
- build_file = clean_dep("//third_party:pprof.BUILD"),
- )
-
- tf_http_archive(
- name = "cub_archive",
- urls = [
- "https://mirror.bazel.build/github.com/NVlabs/cub/archive/1.8.0.zip",
- "https://github.com/NVlabs/cub/archive/1.8.0.zip",
- ],
- sha256 = "6bfa06ab52a650ae7ee6963143a0bbc667d6504822cbd9670369b598f18c58c3",
- strip_prefix = "cub-1.8.0",
- build_file = clean_dep("//third_party:cub.BUILD"),
- )
-
- tf_http_archive(
- name = "cython",
- sha256 = "6dcd30b5ceb887b2b965ee7ceb82ea3acb5f0642fe2206c7636b45acea4798e5",
- urls = [
- "https://mirror.bazel.build/github.com/cython/cython/archive/3732784c45cfb040a5b0936951d196f83a12ea17.tar.gz",
- "https://github.com/cython/cython/archive/3732784c45cfb040a5b0936951d196f83a12ea17.tar.gz",
- ],
- strip_prefix = "cython-3732784c45cfb040a5b0936951d196f83a12ea17",
- build_file = clean_dep("//third_party:cython.BUILD"),
- delete = ["BUILD.bazel"],
- )
-
- tf_http_archive(
- name = "bazel_toolchains",
- urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/44200e0c026d86c53470d107b3697a3e46469c43.tar.gz",
- "https://github.com/bazelbuild/bazel-toolchains/archive/44200e0c026d86c53470d107b3697a3e46469c43.tar.gz",
- ],
- strip_prefix = "bazel-toolchains-44200e0c026d86c53470d107b3697a3e46469c43",
- sha256 = "699b55a6916c687f4b7dc092dbbf5f64672cde0dc965f79717735ec4e5416556",
- )
-
- tf_http_archive(
- name = "arm_neon_2_x86_sse",
- sha256 = "c8d90aa4357f8079d427e87a6f4c493da1fa4140aee926c05902d7ec1533d9a5",
- strip_prefix = "ARM_NEON_2_x86_SSE-0f77d9d182265259b135dad949230ecbf1a2633d",
- urls = [
- "https://mirror.bazel.build/github.com/intel/ARM_NEON_2_x86_SSE/archive/0f77d9d182265259b135dad949230ecbf1a2633d.tar.gz",
- "https://github.com/intel/ARM_NEON_2_x86_SSE/archive/0f77d9d182265259b135dad949230ecbf1a2633d.tar.gz",
- ],
- build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
- )
-
- tf_http_archive(
- name = "flatbuffers",
- strip_prefix = "flatbuffers-1.9.0",
- sha256 = "5ca5491e4260cacae30f1a5786d109230db3f3a6e5a0eb45d0d0608293d247e3",
- urls = [
- "https://mirror.bazel.build/github.com/google/flatbuffers/archive/v1.9.0.tar.gz",
- "https://github.com/google/flatbuffers/archive/v1.9.0.tar.gz",
- ],
- build_file = clean_dep("//third_party/flatbuffers:flatbuffers.BUILD"),
- )
-
- native.new_http_archive(
- name = "double_conversion",
- urls = [
- "https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
- ],
- sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
- strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
- build_file = clean_dep("//third_party:double_conversion.BUILD"),
- )
-
- tf_http_archive(
- name = "tflite_mobilenet",
- sha256 = "23f814d1c076bdf03715dfb6cab3713aa4fbdf040fd5448c43196bd2e97a4c1b",
- urls = [
- "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip",
- "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip",
- ],
- build_file = clean_dep("//third_party:tflite_mobilenet.BUILD"),
- )
-
- tf_http_archive(
- name = "tflite_mobilenet_ssd",
- sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
- urls = [
- "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
- "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
- ],
- build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
- )
- tf_http_archive(
- name = "tflite_mobilenet_ssd_quant",
- sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
- urls = [
- "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
- "https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
- ],
- build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
- )
-
- tf_http_archive(
- name = "tflite_conv_actions_frozen",
- sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
- urls = [
- "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
- "https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
- ],
- build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
- )
-
- tf_http_archive(
- name = "tflite_smartreply",
- sha256 = "8980151b85a87a9c1a3bb1ed4748119e4a85abd3cb5744d83da4d4bd0fbeef7c",
- urls = [
- "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip",
- "https://storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip",
- ],
- build_file = clean_dep("//third_party:tflite_smartreply.BUILD"),
- )
-
- tf_http_archive(
- name = "tflite_ovic_testdata",
- sha256 = "a9a705d8d519220178e2e65d383fdb21da37fdb31d1e909b0a1acdac46479e9c",
- urls = [
- "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/data/ovic.zip",
- "https://storage.googleapis.com/download.tensorflow.org/data/ovic.zip",
- ],
- build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
- strip_prefix = "ovic",
- )
-
- tf_http_archive(
- name = "build_bazel_rules_android",
- sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
- urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
- "https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
- ],
- strip_prefix = "rules_android-0.1.1",
- )
-
- ##############################################################################
- # BIND DEFINITIONS
- #
- # Please do not add bind() definitions unless we have no other choice.
- # If that ends up being the case, please leave a comment explaining
- # why we can't depend on the canonical build target.
-
- # gRPC wants a cares dependency but its contents is not actually
- # important since we have set GRPC_ARES=0 in tools/bazel.rc
- native.bind(
- name = "cares",
- actual = "@grpc//third_party/nanopb:nanopb",
- )
-
- # Needed by Protobuf
- native.bind(
- name = "grpc_cpp_plugin",
- actual = "@grpc//:grpc_cpp_plugin",
- )
- native.bind(
- name = "grpc_python_plugin",
- actual = "@grpc//:grpc_python_plugin",
- )
-
- native.bind(
- name = "grpc_lib",
- actual = "@grpc//:grpc++",
- )
-
- native.bind(
- name = "grpc_lib_unsecure",
- actual = "@grpc//:grpc++_unsecure",
- )
-
- # Needed by gRPC
- native.bind(
- name = "libssl",
- actual = "@boringssl//:ssl",
- )
-
- # Needed by gRPC
- native.bind(
- name = "nanopb",
- actual = "@grpc//third_party/nanopb:nanopb",
- )
-
- # Needed by gRPC
- native.bind(
- name = "protobuf",
- actual = "@protobuf_archive//:protobuf",
- )
-
- # gRPC expects //external:protobuf_clib and //external:protobuf_compiler
- # to point to Protobuf's compiler library.
- native.bind(
- name = "protobuf_clib",
- actual = "@protobuf_archive//:protoc_lib",
- )
-
- # Needed by gRPC
- native.bind(
- name = "protobuf_headers",
- actual = "@protobuf_archive//:protobuf_headers",
- )
-
- # Needed by Protobuf
- native.bind(
- name = "python_headers",
- actual = clean_dep("//third_party/python_runtime:headers"),
- )
-
- # Needed by Protobuf
- native.bind(
- name = "six",
- actual = "@six_archive//:six",
- )
-
- # Needed by gRPC
- native.bind(
- name = "zlib",
- actual = "@zlib_archive//:zlib",
- )
+def tf_workspace(path_prefix="", tf_repo_name=""):
+ # Note that we check the minimum bazel version in WORKSPACE.
+ clang6_configure(name="local_config_clang6")
+ cc_download_clang_toolchain(name="local_config_download_clang")
+ cuda_configure(name="local_config_cuda")
+ tensorrt_configure(name="local_config_tensorrt")
+ nccl_configure(name="local_config_nccl")
+ git_configure(name="local_config_git")
+ sycl_configure(name="local_config_sycl")
+ python_configure(name="local_config_python")
+
+  # For Windows Bazel builds.
+ # TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
+ def_file_filter_configure(name = "local_config_def_file_filter")
+
+ # Point //external/local_config_arm_compiler to //external/arm_compiler
+ arm_compiler_configure(
+ name="local_config_arm_compiler",
+ remote_config_repo="../arm_compiler",
+ build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"))
+
+  mkl_repository(
+      name = "mkl_linux",
+      urls = [
+          "https://mirror.bazel.build/github.com/intel/mkl-dnn/releases/download/v0.14/mklml_lnx_2018.0.3.20180406.tgz",
+          "https://github.com/intel/mkl-dnn/releases/download/v0.14/mklml_lnx_2018.0.3.20180406.tgz",
+      ],
+      sha256 = "d2305244fdc9b87db7426ed4496e87a4b3977ad3374d73b8000e8b7a5b7aa725",
+      strip_prefix = "mklml_lnx_2018.0.3.20180406",
+      build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
+  )
+  mkl_repository(
+      name = "mkl_windows",
+      urls = [
+          "https://mirror.bazel.build/github.com/intel/mkl-dnn/releases/download/v0.14/mklml_win_2018.0.3.20180406.zip",
+          "https://github.com/intel/mkl-dnn/releases/download/v0.14/mklml_win_2018.0.3.20180406.zip",
+      ],
+      sha256 = "a584a5bf1c8d2ad70b90d12b52652030e9a338217719064fdb84b7ad0d693694",
+      strip_prefix = "mklml_win_2018.0.3.20180406",
+      build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
+  )
+  mkl_repository(
+      name = "mkl_darwin",
+      urls = [
+          "https://mirror.bazel.build/github.com/intel/mkl-dnn/releases/download/v0.14/mklml_mac_2018.0.3.20180406.tgz",
+          "https://github.com/intel/mkl-dnn/releases/download/v0.14/mklml_mac_2018.0.3.20180406.tgz",
+      ],
+      sha256 = "094e3dfd61c816136dc8d12a45cc611ce26c5f4828176a3644cd0b0efa15a25b",
+      strip_prefix = "mklml_mac_2018.0.3.20180406",
+      build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
+  )
+
+ if path_prefix:
+ print("path_prefix was specified to tf_workspace but is no longer used " +
+ "and will be removed in the future.")
+
+ tf_http_archive(
+ name = "mkl_dnn",
+ urls = [
+ "https://mirror.bazel.build/github.com/intel/mkl-dnn/archive/v0.14.tar.gz",
+ "https://github.com/intel/mkl-dnn/archive/v0.14.tar.gz",
+ ],
+ sha256 = "efebc53882856afec86457a2da644693f5d59c68772d41d640d6b60a8efc4eb0",
+ strip_prefix = "mkl-dnn-0.14",
+ build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "com_google_absl",
+ urls = [
+ "https://mirror.bazel.build/github.com/abseil/abseil-cpp/archive/9613678332c976568272c8f4a78631a29159271d.tar.gz",
+ "https://github.com/abseil/abseil-cpp/archive/9613678332c976568272c8f4a78631a29159271d.tar.gz",
+ ],
+ sha256 = "1273a1434ced93bc3e703a48c5dced058c95e995c8c009e9bdcb24a69e2180e9",
+ strip_prefix = "abseil-cpp-9613678332c976568272c8f4a78631a29159271d",
+ build_file = clean_dep("//third_party:com_google_absl.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "eigen_archive",
+ urls = [
+ "https://mirror.bazel.build/bitbucket.org/eigen/eigen/get/e5e305a158a0.tar.gz",
+ "https://bitbucket.org/eigen/eigen/get/e5e305a158a0.tar.gz",
+ ],
+ sha256 = "8bbe676d69e7f59070c83a949454b8b6344034e0ebbf686b337528e5dc04c7de",
+ strip_prefix = "eigen-eigen-e5e305a158a0",
+ build_file = clean_dep("//third_party:eigen.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "arm_compiler",
+ sha256 = "970285762565c7890c6c087d262b0a18286e7d0384f13a37786d8521773bc969",
+ strip_prefix = "tools-0e906ebc527eab1cdbf7adabff5b474da9562e9f/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf",
+ urls = [
+ "https://mirror.bazel.build/github.com/raspberrypi/tools/archive/0e906ebc527eab1cdbf7adabff5b474da9562e9f.tar.gz",
+          # Please uncomment me when the next upgrade happens. Then
+ # remove the whitelist entry in third_party/repo.bzl.
+ # "https://github.com/raspberrypi/tools/archive/0e906ebc527eab1cdbf7adabff5b474da9562e9f.tar.gz",
+ ],
+ build_file = clean_dep("//:arm_compiler.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "libxsmm_archive",
+ urls = [
+ "https://mirror.bazel.build/github.com/hfp/libxsmm/archive/1.9.tar.gz",
+ "https://github.com/hfp/libxsmm/archive/1.9.tar.gz",
+ ],
+ sha256 = "cd8532021352b4a0290d209f7f9bfd7c2411e08286a893af3577a43457287bfa",
+ strip_prefix = "libxsmm-1.9",
+ build_file = clean_dep("//third_party:libxsmm.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "ortools_archive",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/or-tools/archive/253f7955c6a1fd805408fba2e42ac6d45b312d15.tar.gz",
+          # Please uncomment me when the next upgrade happens. Then
+ # remove the whitelist entry in third_party/repo.bzl.
+ # "https://github.com/google/or-tools/archive/253f7955c6a1fd805408fba2e42ac6d45b312d15.tar.gz",
+ ],
+ sha256 = "932075525642b04ac6f1b50589f1df5cd72ec2f448b721fd32234cf183f0e755",
+ strip_prefix = "or-tools-253f7955c6a1fd805408fba2e42ac6d45b312d15/src",
+ build_file = clean_dep("//third_party:ortools.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "com_googlesource_code_re2",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/re2/archive/2018-04-01.tar.gz",
+ "https://github.com/google/re2/archive/2018-04-01.tar.gz",
+ ],
+ sha256 = "2f945446b71336e7f5a2bcace1abcf0b23fbba368266c6a1be33de3de3b3c912",
+ strip_prefix = "re2-2018-04-01",
+ )
+
+ tf_http_archive(
+ name = "com_github_googlecloudplatform_google_cloud_cpp",
+ urls = [
+ "https://mirror.bazel.build/github.com/GoogleCloudPlatform/google-cloud-cpp/archive/53f822805e77ea7715f5b52c592a162c515c7219.tar.gz",
+ "https://github.com/GoogleCloudPlatform/google-cloud-cpp/archive/53f822805e77ea7715f5b52c592a162c515c7219.tar.gz",
+ ],
+ sha256 = "06853bfca77ef4aec09db5ab48c548f68ef2e18f17404cbce61f8d9b820f951b",
+ strip_prefix = "google-cloud-cpp-53f822805e77ea7715f5b52c592a162c515c7219",
+ )
+
+ tf_http_archive(
+ name = "com_github_googleapis_googleapis",
+ urls = [
+ "https://mirror.bazel.build/github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
+ "https://github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
+ ],
+ sha256 = "824870d87a176f26bcef663e92051f532fac756d1a06b404055dc078425f4378",
+      strip_prefix = "googleapis-f81082ea1e2f85c43649bee26e0d9871d4b41cdb",
+ build_file = clean_dep("//third_party:googleapis.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "gemmlowp",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/gemmlowp/archive/38ebac7b059e84692f53e5938f97a9943c120d98.zip",
+ "https://github.com/google/gemmlowp/archive/38ebac7b059e84692f53e5938f97a9943c120d98.zip",
+ ],
+ sha256 = "b87faa7294dfcc5d678f22a59d2c01ca94ea1e2a3b488c38a95a67889ed0a658",
+ strip_prefix = "gemmlowp-38ebac7b059e84692f53e5938f97a9943c120d98",
+ )
+
+ tf_http_archive(
+ name = "farmhash_archive",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
+ "https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
+ ],
+ sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0",
+ strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
+ build_file = clean_dep("//third_party:farmhash.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "highwayhash",
+ urls = [
+ "http://mirror.bazel.build/github.com/google/highwayhash/archive/fd3d9af80465e4383162e4a7c5e2f406e82dd968.tar.gz",
+ "https://github.com/google/highwayhash/archive/fd3d9af80465e4383162e4a7c5e2f406e82dd968.tar.gz",
+ ],
+ sha256 = "9c3e0e87d581feeb0c18d814d98f170ff23e62967a2bd6855847f0b2fe598a37",
+ strip_prefix = "highwayhash-fd3d9af80465e4383162e4a7c5e2f406e82dd968",
+ build_file = clean_dep("//third_party:highwayhash.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "nasm",
+ urls = [
+ "https://mirror.bazel.build/www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2",
+ "http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.12.02.tar.bz2/d15843c3fb7db39af80571ee27ec6fad/nasm-2.12.02.tar.bz2",
+ "http://www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2",
+ ],
+ sha256 = "00b0891c678c065446ca59bcee64719d0096d54d6886e6e472aeee2e170ae324",
+ strip_prefix = "nasm-2.12.02",
+ build_file = clean_dep("//third_party:nasm.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "jpeg",
+ urls = [
+ "https://mirror.bazel.build/github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.3.tar.gz",
+ "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.3.tar.gz",
+ ],
+ sha256 = "1a17020f859cb12711175a67eab5c71fc1904e04b587046218e36106e07eabde",
+ strip_prefix = "libjpeg-turbo-1.5.3",
+ build_file = clean_dep("//third_party/jpeg:jpeg.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "png_archive",
+ urls = [
+ "https://mirror.bazel.build/github.com/glennrp/libpng/archive/v1.6.34.tar.gz",
+ "https://github.com/glennrp/libpng/archive/v1.6.34.tar.gz",
+ ],
+ sha256 = "e45ce5f68b1d80e2cb9a2b601605b374bdf51e1798ef1c2c2bd62131dfcf9eef",
+ strip_prefix = "libpng-1.6.34",
+ build_file = clean_dep("//third_party:png.BUILD"),
+ patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
+ )
+
+ tf_http_archive(
+ name = "org_sqlite",
+ urls = [
+ "https://mirror.bazel.build/www.sqlite.org/2018/sqlite-amalgamation-3230100.zip",
+ "https://www.sqlite.org/2018/sqlite-amalgamation-3230100.zip",
+ ],
+ sha256 = "4239a1f69e5721d07d9a374eb84d594225229e54be4ee628da2995f4315d8dfc",
+ strip_prefix = "sqlite-amalgamation-3230100",
+ build_file = clean_dep("//third_party:sqlite.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "gif_archive",
+ urls = [
+ "https://mirror.bazel.build/ufpr.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
+ "http://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
+ ],
+ sha256 = "34a7377ba834397db019e8eb122e551a49c98f49df75ec3fcc92b9a794a4f6d1",
+ strip_prefix = "giflib-5.1.4",
+ build_file = clean_dep("//third_party:gif.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "six_archive",
+ urls = [
+ "https://mirror.bazel.build/pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
+ "https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
+ ],
+ sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
+ strip_prefix = "six-1.10.0",
+ build_file = clean_dep("//third_party:six.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "astor_archive",
+ urls = [
+ "https://mirror.bazel.build/pypi.python.org/packages/d8/be/c4276b3199ec3feee2a88bc64810fbea8f26d961e0a4cd9c68387a9f35de/astor-0.6.2.tar.gz",
+ "https://pypi.python.org/packages/d8/be/c4276b3199ec3feee2a88bc64810fbea8f26d961e0a4cd9c68387a9f35de/astor-0.6.2.tar.gz",
+ ],
+ sha256 = "ff6d2e2962d834acb125cc4dcc80c54a8c17c253f4cc9d9c43b5102a560bb75d",
+ strip_prefix = "astor-0.6.2",
+ build_file = clean_dep("//third_party:astor.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "gast_archive",
+ urls = [
+ "https://mirror.bazel.build/pypi.python.org/packages/5c/78/ff794fcae2ce8aa6323e789d1f8b3b7765f601e7702726f430e814822b96/gast-0.2.0.tar.gz",
+ "https://pypi.python.org/packages/5c/78/ff794fcae2ce8aa6323e789d1f8b3b7765f601e7702726f430e814822b96/gast-0.2.0.tar.gz",
+ ],
+ sha256 = "7068908321ecd2774f145193c4b34a11305bd104b4551b09273dfd1d6a374930",
+ strip_prefix = "gast-0.2.0",
+ build_file = clean_dep("//third_party:gast.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "termcolor_archive",
+ urls = [
+ "https://mirror.bazel.build/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
+ "https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
+ ],
+ sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
+ strip_prefix = "termcolor-1.1.0",
+ build_file = clean_dep("//third_party:termcolor.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "absl_py",
+ urls = [
+ "https://mirror.bazel.build/github.com/abseil/abseil-py/archive/pypi-v0.2.2.tar.gz",
+ "https://github.com/abseil/abseil-py/archive/pypi-v0.2.2.tar.gz",
+ ],
+ sha256 = "95160f778a62c7a60ddeadc7bf2d83f85a23a27359814aca12cf949e896fa82c",
+ strip_prefix = "abseil-py-pypi-v0.2.2",
+ )
+
+ tf_http_archive(
+ name = "org_python_pypi_backports_weakref",
+ urls = [
+ "https://mirror.bazel.build/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
+ "https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
+ ],
+ sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
+ strip_prefix = "backports.weakref-1.0rc1/src",
+ build_file = clean_dep("//third_party:backports_weakref.BUILD"),
+ )
+
+ filegroup_external(
+ name = "org_python_license",
+ licenses = ["notice"], # Python 2.0
+ sha256_urls = {
+ "b5556e921715ddb9242c076cae3963f483aa47266c5e37ea4c187f77cc79501c": [
+ "https://mirror.bazel.build/docs.python.org/2.7/_sources/license.txt",
+ "https://docs.python.org/2.7/_sources/license.txt",
+ ],
+ },
+ )
+
+ tf_http_archive(
+ name = "protobuf_archive",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/protobuf/archive/v3.6.0.tar.gz",
+ "https://github.com/google/protobuf/archive/v3.6.0.tar.gz",
+ ],
+ sha256 = "50a5753995b3142627ac55cfd496cebc418a2e575ca0236e29033c67bd5665f4",
+ strip_prefix = "protobuf-3.6.0",
+ )
+
+ # We need to import the protobuf library under the names com_google_protobuf
+ # and com_google_protobuf_cc to enable proto_library support in bazel.
+ # Unfortunately there is no way to alias http_archives at the moment.
+ tf_http_archive(
+ name = "com_google_protobuf",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/protobuf/archive/v3.6.0.tar.gz",
+ "https://github.com/google/protobuf/archive/v3.6.0.tar.gz",
+ ],
+ sha256 = "50a5753995b3142627ac55cfd496cebc418a2e575ca0236e29033c67bd5665f4",
+ strip_prefix = "protobuf-3.6.0",
+ )
+
+ tf_http_archive(
+ name = "com_google_protobuf_cc",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/protobuf/archive/v3.6.0.tar.gz",
+ "https://github.com/google/protobuf/archive/v3.6.0.tar.gz",
+ ],
+ sha256 = "50a5753995b3142627ac55cfd496cebc418a2e575ca0236e29033c67bd5665f4",
+ strip_prefix = "protobuf-3.6.0",
+ )
+
+ tf_http_archive(
+ name = "nsync",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/nsync/archive/1.20.0.tar.gz",
+ "https://github.com/google/nsync/archive/1.20.0.tar.gz",
+ ],
+ sha256 = "0c1b03962b2f8450f21e74a5a46116bf2d6009a807c57eb4207e974a8c4bb7dd",
+ strip_prefix = "nsync-1.20.0",
+ )
+
+ tf_http_archive(
+ name = "com_google_googletest",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/googletest/archive/9816b96a6ddc0430671693df90192bbee57108b6.zip",
+ "https://github.com/google/googletest/archive/9816b96a6ddc0430671693df90192bbee57108b6.zip",
+ ],
+ sha256 = "9cbca84c4256bed17df2c8f4d00c912c19d247c11c9ba6647cd6dd5b5c996b8d",
+ strip_prefix = "googletest-9816b96a6ddc0430671693df90192bbee57108b6",
+ )
+
+ tf_http_archive(
+ name = "com_github_gflags_gflags",
+ urls = [
+ "https://mirror.bazel.build/github.com/gflags/gflags/archive/f8a0efe03aa69b3336d8e228b37d4ccb17324b88.tar.gz",
+ "https://github.com/gflags/gflags/archive/f8a0efe03aa69b3336d8e228b37d4ccb17324b88.tar.gz",
+ ],
+ sha256 = "4d222fab8f1ede4709cdff417d15a1336f862d7334a81abf76d09c15ecf9acd1",
+ strip_prefix = "gflags-f8a0efe03aa69b3336d8e228b37d4ccb17324b88",
+ )
+
+ tf_http_archive(
+ name = "pcre",
+ sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5",
+ urls = [
+ "https://mirror.bazel.build/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
+ "http://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
+ ],
+ strip_prefix = "pcre-8.42",
+ build_file = clean_dep("//third_party:pcre.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "swig",
+ sha256 = "58a475dbbd4a4d7075e5fe86d4e54c9edde39847cdb96a3053d87cb64a23a453",
+ urls = [
+ "https://mirror.bazel.build/ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
+ "http://ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
+ "http://pilotfiber.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
+ ],
+ strip_prefix = "swig-3.0.8",
+ build_file = clean_dep("//third_party:swig.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "curl",
+ sha256 = "e9c37986337743f37fd14fe8737f246e97aec94b39d1b71e8a5973f72a9fc4f5",
+ urls = [
+ "https://mirror.bazel.build/curl.haxx.se/download/curl-7.60.0.tar.gz",
+ "https://curl.haxx.se/download/curl-7.60.0.tar.gz",
+ ],
+ strip_prefix = "curl-7.60.0",
+ build_file = clean_dep("//third_party:curl.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "grpc",
+ urls = [
+ "https://mirror.bazel.build/github.com/grpc/grpc/archive/d184fa229d75d336aedea0041bd59cb93e7e267f.tar.gz",
+ "https://github.com/grpc/grpc/archive/d184fa229d75d336aedea0041bd59cb93e7e267f.tar.gz",
+ ],
+ sha256 = "895b31310e718a61f7335759a778c068a6edde1c089883598a0830cbb7075673",
+ strip_prefix = "grpc-d184fa229d75d336aedea0041bd59cb93e7e267f",
+ )
+
+ tf_http_archive(
+ name = "linenoise",
+ sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
+ urls = [
+ "https://mirror.bazel.build/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
+ "https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
+ ],
+ strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
+ build_file = clean_dep("//third_party:linenoise.BUILD"),
+ )
+
+ # TODO(phawkins): currently, this rule uses an unofficial LLVM mirror.
+ # Switch to an official source of snapshots if/when possible.
+ tf_http_archive(
+ name = "llvm",
+ urls = [
+ "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93.tar.gz",
+ "https://github.com/llvm-mirror/llvm/archive/d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93.tar.gz",
+ ],
+ sha256 = "280fdc888e2eb88a3a8cc4e7d3034fffc87f98e3e686be31f8c719c6e5b67d2d",
+ strip_prefix = "llvm-d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93",
+ build_file = clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "lmdb",
+ urls = [
+ "https://mirror.bazel.build/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
+ "https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
+ ],
+ sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
+ strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
+ build_file = clean_dep("//third_party:lmdb.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "jsoncpp_git",
+ urls = [
+ "https://mirror.bazel.build/github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
+ "https://github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
+ ],
+ sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6",
+ strip_prefix = "jsoncpp-1.8.4",
+ build_file = clean_dep("//third_party:jsoncpp.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "boringssl",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/boringssl/archive/a0fb951d2a26a8ee746b52f3ba81ab011a0af778.tar.gz",
+ "https://github.com/google/boringssl/archive/a0fb951d2a26a8ee746b52f3ba81ab011a0af778.tar.gz",
+ ],
+ sha256 = "524ba98a56300149696481b4cb9ddebd0c7b7ac9b9f6edee81da2d2d7e5d2bb3",
+ strip_prefix = "boringssl-a0fb951d2a26a8ee746b52f3ba81ab011a0af778",
+ )
+
+ tf_http_archive(
+ name = "zlib_archive",
+ urls = [
+ "https://mirror.bazel.build/zlib.net/zlib-1.2.11.tar.gz",
+ "https://zlib.net/zlib-1.2.11.tar.gz",
+ ],
+ sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
+ strip_prefix = "zlib-1.2.11",
+ build_file = clean_dep("//third_party:zlib.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "fft2d",
+ urls = [
+ "https://mirror.bazel.build/www.kurims.kyoto-u.ac.jp/~ooura/fft.tgz",
+ "http://www.kurims.kyoto-u.ac.jp/~ooura/fft.tgz",
+ ],
+ sha256 = "52bb637c70b971958ec79c9c8752b1df5ff0218a4db4510e60826e0cb79b5296",
+ build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "snappy",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/snappy/archive/1.1.7.tar.gz",
+ "https://github.com/google/snappy/archive/1.1.7.tar.gz",
+ ],
+ sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
+ strip_prefix = "snappy-1.1.7",
+ build_file = clean_dep("//third_party:snappy.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "nccl_archive",
+ urls = [
+ "https://mirror.bazel.build/github.com/nvidia/nccl/archive/03d856977ecbaac87e598c0c4bafca96761b9ac7.tar.gz",
+ "https://github.com/nvidia/nccl/archive/03d856977ecbaac87e598c0c4bafca96761b9ac7.tar.gz",
+ ],
+ sha256 = "2ca86fb6179ecbff789cc67c836139c1bbc0324ed8c04643405a30bf26325176",
+ strip_prefix = "nccl-03d856977ecbaac87e598c0c4bafca96761b9ac7",
+ build_file = clean_dep("//third_party:nccl/nccl_archive.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "kafka",
+ urls = [
+ "https://mirror.bazel.build/github.com/edenhill/librdkafka/archive/v0.11.4.tar.gz",
+ "https://github.com/edenhill/librdkafka/archive/v0.11.4.tar.gz",
+ ],
+ sha256 = "9d8f1eb7b0e29e9ab1168347c939cb7ae5dff00a39cef99e7ef033fd8f92737c",
+ strip_prefix = "librdkafka-0.11.4",
+ build_file = clean_dep("//third_party:kafka/BUILD"),
+ patch_file = clean_dep("//third_party/kafka:config.patch"),
+ )
+
+ tf_http_archive(
+ name = "aws",
+ urls = [
+ "https://mirror.bazel.build/github.com/aws/aws-sdk-cpp/archive/1.3.15.tar.gz",
+ "https://github.com/aws/aws-sdk-cpp/archive/1.3.15.tar.gz",
+ ],
+ sha256 = "b888d8ce5fc10254c3dd6c9020c7764dd53cf39cf011249d0b4deda895de1b7c",
+ strip_prefix = "aws-sdk-cpp-1.3.15",
+ build_file = clean_dep("//third_party:aws.BUILD"),
+ )
+
+ java_import_external(
+ name = "junit",
+ jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
+ jar_urls = [
+ "https://mirror.bazel.build/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
+ "http://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
+ "http://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
+ ],
+ licenses = ["reciprocal"], # Common Public License Version 1.0
+ testonly_ = True,
+ deps = ["@org_hamcrest_core"],
+ )
+
+ java_import_external(
+ name = "org_hamcrest_core",
+ jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
+ jar_urls = [
+ "https://mirror.bazel.build/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
+ "http://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
+ "http://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
+ ],
+ licenses = ["notice"], # New BSD License
+ testonly_ = True,
+ )
+
+ tf_http_archive(
+ name = "jemalloc",
+ urls = [
+ "https://mirror.bazel.build/github.com/jemalloc/jemalloc/archive/4.4.0.tar.gz",
+ "https://github.com/jemalloc/jemalloc/archive/4.4.0.tar.gz",
+ ],
+ sha256 = "3c8f25c02e806c3ce0ab5fb7da1817f89fc9732709024e2a81b6b82f7cc792a8",
+ strip_prefix = "jemalloc-4.4.0",
+ build_file = clean_dep("//third_party:jemalloc.BUILD"),
+ )
+
+ java_import_external(
+ name = "com_google_testing_compile",
+ jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
+ jar_urls = [
+ "http://mirror.bazel.build/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
+ "http://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
+ ],
+ licenses = ["notice"], # New BSD License
+ testonly_ = True,
+ deps = ["@com_google_guava", "@com_google_truth"],
+ )
+
+ java_import_external(
+ name = "com_google_truth",
+ jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
+ jar_urls = [
+ "http://mirror.bazel.build/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
+ "http://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
+ ],
+ licenses = ["notice"], # Apache 2.0
+ testonly_ = True,
+ deps = ["@com_google_guava"],
+ )
+
+ java_import_external(
+ name = "org_checkerframework_qual",
+ jar_sha256 = "a17501717ef7c8dda4dba73ded50c0d7cde440fd721acfeacbf19786ceac1ed6",
+ jar_urls = [
+ "http://mirror.bazel.build/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.4.0/checker-qual-2.4.0.jar",
+ "http://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.4.0/checker-qual-2.4.0.jar",
+ ],
+ licenses = ["notice"], # Apache 2.0
+ )
+
+ java_import_external(
+ name = "com_squareup_javapoet",
+ jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
+ jar_urls = [
+ "http://mirror.bazel.build/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
+ "http://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
+ ],
+ licenses = ["notice"], # Apache 2.0
+ )
+
+ tf_http_archive(
+ name = "com_google_pprof",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
+ "https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
+ ],
+ sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
+ strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
+ build_file = clean_dep("//third_party:pprof.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "cub_archive",
+ urls = [
+ "https://mirror.bazel.build/github.com/NVlabs/cub/archive/1.8.0.zip",
+ "https://github.com/NVlabs/cub/archive/1.8.0.zip",
+ ],
+ sha256 = "6bfa06ab52a650ae7ee6963143a0bbc667d6504822cbd9670369b598f18c58c3",
+ strip_prefix = "cub-1.8.0",
+ build_file = clean_dep("//third_party:cub.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "cython",
+ sha256 = "6dcd30b5ceb887b2b965ee7ceb82ea3acb5f0642fe2206c7636b45acea4798e5",
+ urls = [
+ "https://mirror.bazel.build/github.com/cython/cython/archive/3732784c45cfb040a5b0936951d196f83a12ea17.tar.gz",
+ "https://github.com/cython/cython/archive/3732784c45cfb040a5b0936951d196f83a12ea17.tar.gz",
+ ],
+ strip_prefix = "cython-3732784c45cfb040a5b0936951d196f83a12ea17",
+ build_file = clean_dep("//third_party:cython.BUILD"),
+ delete = ["BUILD.bazel"],
+ )
+
+ tf_http_archive(
+ name = "bazel_toolchains",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/44200e0c026d86c53470d107b3697a3e46469c43.tar.gz",
+ "https://github.com/bazelbuild/bazel-toolchains/archive/44200e0c026d86c53470d107b3697a3e46469c43.tar.gz",
+ ],
+ strip_prefix = "bazel-toolchains-44200e0c026d86c53470d107b3697a3e46469c43",
+ sha256 = "699b55a6916c687f4b7dc092dbbf5f64672cde0dc965f79717735ec4e5416556",
+ )
+
+ tf_http_archive(
+ name = "arm_neon_2_x86_sse",
+ sha256 = "c8d90aa4357f8079d427e87a6f4c493da1fa4140aee926c05902d7ec1533d9a5",
+ strip_prefix = "ARM_NEON_2_x86_SSE-0f77d9d182265259b135dad949230ecbf1a2633d",
+ urls = [
+ "https://mirror.bazel.build/github.com/intel/ARM_NEON_2_x86_SSE/archive/0f77d9d182265259b135dad949230ecbf1a2633d.tar.gz",
+ "https://github.com/intel/ARM_NEON_2_x86_SSE/archive/0f77d9d182265259b135dad949230ecbf1a2633d.tar.gz",
+ ],
+ build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "flatbuffers",
+ strip_prefix = "flatbuffers-1.9.0",
+ sha256 = "5ca5491e4260cacae30f1a5786d109230db3f3a6e5a0eb45d0d0608293d247e3",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/flatbuffers/archive/v1.9.0.tar.gz",
+ "https://github.com/google/flatbuffers/archive/v1.9.0.tar.gz",
+ ],
+ build_file = clean_dep("//third_party/flatbuffers:flatbuffers.BUILD"),
+ )
+
+ native.new_http_archive(
+ name = "double_conversion",
+ urls = [
+ "https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
+ ],
+ sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
+ strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
+      build_file = clean_dep("//third_party:double_conversion.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "tflite_mobilenet",
+ sha256 = "23f814d1c076bdf03715dfb6cab3713aa4fbdf040fd5448c43196bd2e97a4c1b",
+ urls = [
+ "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip",
+ "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip",
+ ],
+ build_file = clean_dep("//third_party:tflite_mobilenet.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "tflite_mobilenet_ssd",
+ sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
+ urls = [
+ "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
+ "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
+ ],
+ build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
+ )
+
+  tf_http_archive(
+ name = "tflite_mobilenet_ssd_quant",
+ sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
+ urls = ["https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
+ "https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
+ ],
+ build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
+ )
+
+ tf_http_archive(
+ name = "tflite_conv_actions_frozen",
+ sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
+ urls = [
+ "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
+ "https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
+ ],
+ build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
+ )
+
+ tf_http_archive(
+ name = "tflite_smartreply",
+ sha256 = "8980151b85a87a9c1a3bb1ed4748119e4a85abd3cb5744d83da4d4bd0fbeef7c",
+ urls = [
+ "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip",
+ "https://storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip"
+ ],
+ build_file = clean_dep("//third_party:tflite_smartreply.BUILD"),
+ )
+
+ tf_http_archive(
+ name = "tflite_ovic_testdata",
+ sha256 = "a9a705d8d519220178e2e65d383fdb21da37fdb31d1e909b0a1acdac46479e9c",
+ urls = [
+ "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/data/ovic.zip",
+ "https://storage.googleapis.com/download.tensorflow.org/data/ovic.zip",
+ ],
+ build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
+ strip_prefix = "ovic",
+ )
+
+ tf_http_archive(
+ name = "build_bazel_rules_android",
+ sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
+ "https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
+ ],
+ strip_prefix = "rules_android-0.1.1",
+ )
+
+ ##############################################################################
+ # BIND DEFINITIONS
+ #
+ # Please do not add bind() definitions unless we have no other choice.
+ # If that ends up being the case, please leave a comment explaining
+ # why we can't depend on the canonical build target.
+
+  # gRPC wants a cares dependency but its contents are not actually
+  # important, since we have set GRPC_ARES=0 in tools/bazel.rc.
+ native.bind(
+ name = "cares",
+ actual = "@grpc//third_party/nanopb:nanopb",
+ )
+
+ # Needed by Protobuf
+ native.bind(
+ name = "grpc_cpp_plugin",
+ actual = "@grpc//:grpc_cpp_plugin",
+ )
+ native.bind(
+ name = "grpc_python_plugin",
+ actual = "@grpc//:grpc_python_plugin",
+ )
+
+ native.bind(
+ name = "grpc_lib",
+ actual = "@grpc//:grpc++",
+ )
+
+ native.bind(
+ name = "grpc_lib_unsecure",
+ actual = "@grpc//:grpc++_unsecure",
+ )
+
+ # Needed by gRPC
+ native.bind(
+ name = "libssl",
+ actual = "@boringssl//:ssl",
+ )
+
+ # Needed by gRPC
+ native.bind(
+ name = "nanopb",
+ actual = "@grpc//third_party/nanopb:nanopb",
+ )
+
+ # Needed by gRPC
+ native.bind(
+ name = "protobuf",
+ actual = "@protobuf_archive//:protobuf",
+ )
+
+ # gRPC expects //external:protobuf_clib and //external:protobuf_compiler
+ # to point to Protobuf's compiler library.
+ native.bind(
+ name = "protobuf_clib",
+ actual = "@protobuf_archive//:protoc_lib",
+ )
+
+ # Needed by gRPC
+ native.bind(
+ name = "protobuf_headers",
+ actual = "@protobuf_archive//:protobuf_headers",
+ )
+
+ # Needed by Protobuf
+ native.bind(
+ name = "python_headers",
+ actual = clean_dep("//third_party/python_runtime:headers"),
+ )
+
+ # Needed by Protobuf
+ native.bind(
+ name = "six",
+ actual = "@six_archive//:six",
+ )
+
+ # Needed by gRPC
+ native.bind(
+ name = "zlib",
+ actual = "@zlib_archive//:zlib",
+ )
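
Every tf_http_archive call above follows the same mirror-first pattern: the mirror.bazel.build URL comes before the upstream one, with strip_prefix, an optional build_file overlay, and an optional patch_file threaded through. The wrapper itself lives in third_party/repo.bzl; the following is only a minimal sketch of the idea, assuming it forwards more or less directly to Bazel's repository_ctx APIs, not the actual implementation:

def _tf_http_archive_impl(ctx):
  # Try the mirror first, then upstream; both must hash to the same sha256.
  ctx.download_and_extract(
      ctx.attr.urls,
      sha256 = ctx.attr.sha256,
      stripPrefix = ctx.attr.strip_prefix,
  )
  # Overlay a local BUILD file when the upstream archive does not ship one.
  if ctx.attr.build_file:
    ctx.template("BUILD", ctx.attr.build_file, {})
  # patch_file handling is omitted here; the real rule applies it separately.

tf_http_archive = repository_rule(
    implementation = _tf_http_archive_impl,
    attrs = {
        "urls": attr.string_list(mandatory = True, allow_empty = False),
        "sha256": attr.string(mandatory = True),
        "strip_prefix": attr.string(),
        "build_file": attr.label(),
        "patch_file": attr.label(),
    },
)

The bind() section is different in kind: it creates //external:NAME aliases (for example //external:zlib for @zlib_archive//:zlib) so that the BUILD files shipped inside gRPC and Protobuf, which hard-code //external labels, resolve within this workspace.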
diff --git a/third_party/android/android_configure.bzl b/third_party/android/android_configure.bzl
index 646ed732a1..da09bdf39e 100644
--- a/third_party/android/android_configure.bzl
+++ b/third_party/android/android_configure.bzl
@@ -36,39 +36,33 @@ _ANDROID_NDK_REPO_TEMPLATE = """
"""
def _android_autoconf_impl(repository_ctx):
- """Implementation of the android_autoconf repository rule."""
- sdk_home = repository_ctx.os.environ.get(_ANDROID_SDK_HOME)
- sdk_api_level = repository_ctx.os.environ.get(_ANDROID_SDK_API_VERSION)
- build_tools_version = repository_ctx.os.environ.get(
- _ANDROID_BUILD_TOOLS_VERSION,
- )
- ndk_home = repository_ctx.os.environ.get(_ANDROID_NDK_HOME)
- ndk_api_level = repository_ctx.os.environ.get(_ANDROID_NDK_API_VERSION)
+ """Implementation of the android_autoconf repository rule."""
+ sdk_home = repository_ctx.os.environ.get(_ANDROID_SDK_HOME)
+ sdk_api_level = repository_ctx.os.environ.get(_ANDROID_SDK_API_VERSION)
+ build_tools_version = repository_ctx.os.environ.get(
+ _ANDROID_BUILD_TOOLS_VERSION)
+ ndk_home = repository_ctx.os.environ.get(_ANDROID_NDK_HOME)
+ ndk_api_level = repository_ctx.os.environ.get(_ANDROID_NDK_API_VERSION)
- sdk_rule = "pass"
- if all([sdk_home, sdk_api_level, build_tools_version]):
- sdk_rule = _ANDROID_SDK_REPO_TEMPLATE % (
- sdk_home,
- sdk_api_level,
- build_tools_version,
- )
+ sdk_rule = "pass"
+ if all([sdk_home, sdk_api_level, build_tools_version]):
+ sdk_rule = _ANDROID_SDK_REPO_TEMPLATE % (
+ sdk_home, sdk_api_level, build_tools_version)
- ndk_rule = "pass"
- if all([ndk_home, ndk_api_level]):
- ndk_rule = _ANDROID_NDK_REPO_TEMPLATE % (ndk_home, ndk_api_level)
+ ndk_rule = "pass"
+ if all([ndk_home, ndk_api_level]):
+ ndk_rule = _ANDROID_NDK_REPO_TEMPLATE % (ndk_home, ndk_api_level)
- repository_ctx.template(
- "BUILD",
- Label("//third_party/android:android_configure.BUILD.tpl"),
- )
- repository_ctx.template(
- "android.bzl",
- Label("//third_party/android:android.bzl.tpl"),
- substitutions = {
- "MAYBE_ANDROID_SDK_REPOSITORY": sdk_rule,
- "MAYBE_ANDROID_NDK_REPOSITORY": ndk_rule,
- },
- )
+ repository_ctx.template(
+ "BUILD",
+ Label("//third_party/android:android_configure.BUILD.tpl"))
+ repository_ctx.template(
+ "android.bzl",
+ Label("//third_party/android:android.bzl.tpl"),
+ substitutions={
+ "MAYBE_ANDROID_SDK_REPOSITORY": sdk_rule,
+ "MAYBE_ANDROID_NDK_REPOSITORY": ndk_rule,
+ })
android_configure = repository_rule(
implementation = _android_autoconf_impl,
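
A hedged sketch of how android_configure is invoked from a WORKSPACE; the repository name is an assumption, not taken from this diff:

    # Hypothetical WORKSPACE wiring; the rule reads the SDK/NDK environment
    # variables referenced above at repository-fetch time and expands
    # android.bzl.tpl accordingly.
    load("//third_party/android:android_configure.bzl", "android_configure")
    android_configure(name = "local_config_android")  # name is illustrative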
diff --git a/third_party/clang_toolchain/cc_configure_clang.bzl b/third_party/clang_toolchain/cc_configure_clang.bzl
index 0778c43c53..1181110ea9 100644
--- a/third_party/clang_toolchain/cc_configure_clang.bzl
+++ b/third_party/clang_toolchain/cc_configure_clang.bzl
@@ -7,16 +7,16 @@ _TF_DOWNLOAD_CLANG = "TF_DOWNLOAD_CLANG"
_TF_NEED_CUDA = "TF_NEED_CUDA"
def _cc_clang_autoconf(repo_ctx):
- if repo_ctx.os.environ.get(_TF_DOWNLOAD_CLANG) != "1":
- return
- if repo_ctx.os.environ.get(_TF_NEED_CUDA) == "1":
- # Clang is handled separately for CUDA configs.
- # See cuda_configure.bzl for more details.
- return
+ if repo_ctx.os.environ.get(_TF_DOWNLOAD_CLANG) != "1":
+ return
+ if repo_ctx.os.environ.get(_TF_NEED_CUDA) == "1":
+ # Clang is handled separately for CUDA configs.
+ # See cuda_configure.bzl for more details.
+ return
- download_clang(repo_ctx, out_folder = "extra_tools")
- overriden_tools = {"gcc": "extra_tools/bin/clang"}
- cc_autoconf_impl(repo_ctx, overriden_tools)
+ download_clang(repo_ctx, out_folder='extra_tools')
+ overriden_tools = {'gcc': 'extra_tools/bin/clang'}
+ cc_autoconf_impl(repo_ctx, overriden_tools)
cc_download_clang_toolchain = repository_rule(
environ = [
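
A minimal usage sketch for the downloadable clang toolchain; the workspace name is illustrative:

    # Hypothetical WORKSPACE usage; _cc_clang_autoconf is a no-op unless
    # TF_DOWNLOAD_CLANG=1 is set, and defers to cuda_configure.bzl when
    # TF_NEED_CUDA=1.
    load("//third_party/clang_toolchain:cc_configure_clang.bzl",
         "cc_download_clang_toolchain")
    cc_download_clang_toolchain(name = "local_config_download_clang")  # illustrative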
diff --git a/third_party/clang_toolchain/download_clang.bzl b/third_party/clang_toolchain/download_clang.bzl
index 6a4c029243..a014a806a6 100644
--- a/third_party/clang_toolchain/download_clang.bzl
+++ b/third_party/clang_toolchain/download_clang.bzl
@@ -1,60 +1,54 @@
""" Helpers to download a recent clang release."""
def _get_platform_folder(os_name):
- os_name = os_name.lower()
- if os_name.startswith("windows"):
- return "Win"
- if os_name.startswith("mac os"):
- return "Mac"
- if not os_name.startswith("linux"):
- fail("Unknown platform")
- return "Linux_x64"
-
-def _download_chromium_clang(
- repo_ctx,
- platform_folder,
- package_version,
- sha256,
- out_folder):
- cds_url = "https://commondatastorage.googleapis.com/chromium-browser-clang"
- cds_file = "clang-%s.tgz" % package_version
- cds_full_url = "{0}/{1}/{2}".format(cds_url, platform_folder, cds_file)
- repo_ctx.download_and_extract(cds_full_url, output = out_folder, sha256 = sha256)
+ os_name = os_name.lower()
+ if os_name.startswith('windows'):
+ return 'Win'
+ if os_name.startswith('mac os'):
+ return 'Mac'
+ if not os_name.startswith('linux'):
+ fail('Unknown platform')
+ return 'Linux_x64'
+
+def _download_chromium_clang(repo_ctx, platform_folder, package_version, sha256,
+ out_folder):
+ cds_url = 'https://commondatastorage.googleapis.com/chromium-browser-clang'
+ cds_file = 'clang-%s.tgz' % package_version
+ cds_full_url = '{0}/{1}/{2}'.format(cds_url, platform_folder, cds_file)
+ repo_ctx.download_and_extract(cds_full_url, output=out_folder, sha256=sha256)
def download_clang(repo_ctx, out_folder):
- """ Download a fresh clang release and put it into out_folder.
-
- Clang itself will be located in 'out_folder/bin/clang'.
- We currently download one of the latest releases of clang by the
- Chromium project (see
- https://chromium.googlesource.com/chromium/src/+/master/docs/clang.md).
-
- Args:
- repo_ctx: An instance of repository_context object.
- out_folder: A folder to extract the compiler into.
- """
- # TODO(ibiryukov): we currently download and extract some extra tools in the
- # clang release (e.g., sanitizers). We should probably remove the ones
- # we don't need and document the ones we want provide in addition to clang.
-
- # Latest CLANG_REVISION and CLANG_SUB_REVISION of the Chromiums's release
- # can be found in https://chromium.googlesource.com/chromium/src/tools/clang/+/master/scripts/update.py
- CLANG_REVISION = "335091"
- CLANG_SUB_REVISION = 1
-
- package_version = "%s-%s" % (CLANG_REVISION, CLANG_SUB_REVISION)
-
- checksums = {
- "Linux_x64": "17002b75293fccfdd175eacdc9ee47d97b58d7e98fef343384fbbef1b68ce99f",
- "Mac": "9351e46d28315daaa06a1eb55bd0370ed4aaeb693a2a3e82e48d2737d7723468",
- "Win": "e78a1e469224d6f6751b4df4374bf58893ac03900ec924e4c8264888ba4aeb1e",
- }
-
- platform_folder = _get_platform_folder(repo_ctx.os.name)
- _download_chromium_clang(
- repo_ctx,
- platform_folder,
- package_version,
- checksums[platform_folder],
- out_folder,
- )
+ """ Download a fresh clang release and put it into out_folder.
+
+ Clang itself will be located in 'out_folder/bin/clang'.
+ We currently download one of the latest releases of clang by the
+ Chromium project (see
+ https://chromium.googlesource.com/chromium/src/+/master/docs/clang.md).
+
+ Args:
+ repo_ctx: An instance of repository_context object.
+ out_folder: A folder to extract the compiler into.
+ """
+ # TODO(ibiryukov): we currently download and extract some extra tools in the
+ # clang release (e.g., sanitizers). We should probably remove the ones
+ # we don't need and document the ones we want to provide in addition to clang.
+
+ # Latest CLANG_REVISION and CLANG_SUB_REVISION of Chromium's release
+ # can be found in https://chromium.googlesource.com/chromium/src/tools/clang/+/master/scripts/update.py
+ CLANG_REVISION = '335091'
+ CLANG_SUB_REVISION = 1
+
+ package_version = '%s-%s' % (CLANG_REVISION, CLANG_SUB_REVISION)
+
+ checksums = {
+ 'Linux_x64':
+ '17002b75293fccfdd175eacdc9ee47d97b58d7e98fef343384fbbef1b68ce99f',
+ 'Mac':
+ '9351e46d28315daaa06a1eb55bd0370ed4aaeb693a2a3e82e48d2737d7723468',
+ 'Win':
+ 'e78a1e469224d6f6751b4df4374bf58893ac03900ec924e4c8264888ba4aeb1e',
+ }
+
+ platform_folder = _get_platform_folder(repo_ctx.os.name)
+ _download_chromium_clang(repo_ctx, platform_folder, package_version,
+ checksums[platform_folder], out_folder)
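
To make the URL construction concrete, the values pinned in this hunk yield the following; a worked example derived from the code above, not an addition to it:

    # For CLANG_REVISION "335091" and CLANG_SUB_REVISION 1 on Linux:
    package_version = "%s-%s" % ("335091", 1)    # -> "335091-1"
    cds_file = "clang-%s.tgz" % package_version  # -> "clang-335091-1.tgz"
    # Full URL fetched by _download_chromium_clang:
    #   https://commondatastorage.googleapis.com/chromium-browser-clang/Linux_x64/clang-335091-1.tgz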
diff --git a/third_party/common.bzl b/third_party/common.bzl
index 8134bf3d25..db981a5e31 100644
--- a/third_party/common.bzl
+++ b/third_party/common.bzl
@@ -21,11 +21,11 @@
# substitutions: A dictionary mapping strings to their substitutions
def template_rule_impl(ctx):
- ctx.template_action(
- template = ctx.file.src,
- output = ctx.outputs.out,
- substitutions = ctx.attr.substitutions,
- )
+ ctx.template_action(
+ template = ctx.file.src,
+ output = ctx.outputs.out,
+ substitutions = ctx.attr.substitutions,
+ )
template_rule = rule(
attrs = {
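
A hedged usage sketch for template_rule; the attribute names follow the implementation above, while the file names and substitution key are made up:

    load("//third_party:common.bzl", "template_rule")
    # Expands placeholders in the src template via ctx.template_action.
    template_rule(
        name = "version_h",                      # illustrative
        src = "version.h.in",
        out = "version.h",
        substitutions = {"@VERSION@": "1.9.0"},  # placeholder/value are made up
    )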
diff --git a/third_party/flatbuffers/build_defs.bzl b/third_party/flatbuffers/build_defs.bzl
index fb24cc73df..ae8d7feebe 100644
--- a/third_party/flatbuffers/build_defs.bzl
+++ b/third_party/flatbuffers/build_defs.bzl
@@ -8,49 +8,66 @@ DEFAULT_FLATC_ARGS = [
"--gen-object-api",
]
-def flatbuffer_library_public(
- name,
- srcs,
- outs,
- language_flag,
- out_prefix = "",
- includes = [],
- include_paths = [],
- flatc_args = DEFAULT_FLATC_ARGS,
- reflection_name = "",
- reflection_visiblity = None,
- output_to_bindir = False):
- """Generates code files for reading/writing the given flatbuffers in the requested language using the public compiler.
-
- Args:
- name: Rule name.
- srcs: Source .fbs files. Sent in order to the compiler.
- outs: Output files from flatc.
- language_flag: Target language flag. One of [-c, -j, -js].
- out_prefix: Prepend this path to the front of all generated files except on
- single source targets. Usually is a directory name.
- includes: Optional, list of filegroups of schemas that the srcs depend on.
- include_paths: Optional, list of paths the includes files can be found in.
- flatc_args: Optional, list of additional arguments to pass to flatc.
- reflection_name: Optional, if set this will generate the flatbuffer
- reflection binaries for the schemas.
- reflection_visiblity: The visibility of the generated reflection Fileset.
- output_to_bindir: Passed to genrule for output to bin directory.
- Outs:
- filegroup(name): all generated source files.
- Fileset([reflection_name]): (Optional) all generated reflection binaries.
- """
- include_paths_cmd = ["-I %s" % (s) for s in include_paths]
+def flatbuffer_library_public(name,
+ srcs,
+ outs,
+ language_flag,
+ out_prefix="",
+ includes=[],
+ include_paths=[],
+ flatc_args=DEFAULT_FLATC_ARGS,
+ reflection_name="",
+ reflection_visiblity=None,
+ output_to_bindir=False):
+ '''Generates code files for reading/writing the given flatbuffers in the requested language using the public compiler.
- # '$(@D)' when given a single source target will give the appropriate
- # directory. Appending 'out_prefix' is only necessary when given a build
- # target with multiple sources.
- output_directory = (
- ("-o $(@D)/%s" % (out_prefix)) if len(srcs) > 1 else ("-o $(@D)")
- )
- genrule_cmd = " ".join([
+ Args:
+ name: Rule name.
+ srcs: Source .fbs files. Sent in order to the compiler.
+ outs: Output files from flatc.
+ language_flag: Target language flag. One of [-c, -j, -js].
+ out_prefix: Prepend this path to the front of all generated files except on
+ single source targets. Usually a directory name.
+ includes: Optional, list of filegroups of schemas that the srcs depend on.
+ include_paths: Optional, list of paths the included files can be found in.
+ flatc_args: Optional, list of additional arguments to pass to flatc.
+ reflection_name: Optional, if set this will generate the flatbuffer
+ reflection binaries for the schemas.
+ reflection_visiblity: The visibility of the generated reflection Fileset.
+ output_to_bindir: Passed to genrule for output to bin directory.
+ Outs:
+ filegroup(name): all generated source files.
+ Fileset([reflection_name]): (Optional) all generated reflection binaries.
+ '''
+ include_paths_cmd = ["-I %s" % (s) for s in include_paths]
+ # '$(@D)' when given a single source target will give the appropriate
+ # directory. Appending 'out_prefix' is only necessary when given a build
+ # target with multiple sources.
+ output_directory = (
+ ("-o $(@D)/%s" % (out_prefix)) if len(srcs) > 1 else ("-o $(@D)"))
+ genrule_cmd = " ".join([
+ "for f in $(SRCS); do",
+ "$(location %s)" % (flatc_path),
+ " ".join(flatc_args),
+ " ".join(include_paths_cmd),
+ language_flag,
+ output_directory,
+ "$$f;",
+ "done",
+ ])
+ native.genrule(
+ name=name,
+ srcs=srcs,
+ outs=outs,
+ output_to_bindir=output_to_bindir,
+ tools=includes + [flatc_path,],
+ cmd=genrule_cmd,
+ message="Generating flatbuffer files for %s:" % (name),)
+ if reflection_name:
+ reflection_genrule_cmd = " ".join([
"for f in $(SRCS); do",
"$(location %s)" % (flatc_path),
+ "-b --schema",
" ".join(flatc_args),
" ".join(include_paths_cmd),
language_flag,
@@ -58,156 +75,122 @@ def flatbuffer_library_public(
"$$f;",
"done",
])
+ reflection_outs = [
+ (out_prefix + "%s.bfbs") % (s.replace(".fbs", "").split("/")[-1]) for s in srcs
+ ]
native.genrule(
- name = name,
- srcs = srcs,
- outs = outs,
- output_to_bindir = output_to_bindir,
- tools = includes + [flatc_path],
- cmd = genrule_cmd,
- message = "Generating flatbuffer files for %s:" % (name),
+ name= "%s_srcs" % reflection_name,
+ srcs=srcs,
+ outs=reflection_outs,
+ output_to_bindir=output_to_bindir,
+ tools=includes + [flatc_path,],
+ cmd=reflection_genrule_cmd,
+ message="Generating flatbuffer reflection binary for %s:" % (name),)
+ native.Fileset(
+ name=reflection_name,
+ out="%s_out" % reflection_name,
+ entries=[
+ native.FilesetEntry(files=reflection_outs),
+ ],
+ visibility=reflection_visiblity
)
- if reflection_name:
- reflection_genrule_cmd = " ".join([
- "for f in $(SRCS); do",
- "$(location %s)" % (flatc_path),
- "-b --schema",
- " ".join(flatc_args),
- " ".join(include_paths_cmd),
- language_flag,
- output_directory,
- "$$f;",
- "done",
- ])
- reflection_outs = [
- (out_prefix + "%s.bfbs") % (s.replace(".fbs", "").split("/")[-1])
- for s in srcs
- ]
- native.genrule(
- name = "%s_srcs" % reflection_name,
- srcs = srcs,
- outs = reflection_outs,
- output_to_bindir = output_to_bindir,
- tools = includes + [flatc_path],
- cmd = reflection_genrule_cmd,
- message = "Generating flatbuffer reflection binary for %s:" % (name),
- )
- native.Fileset(
- name = reflection_name,
- out = "%s_out" % reflection_name,
- entries = [
- native.FilesetEntry(files = reflection_outs),
- ],
- visibility = reflection_visiblity,
- )
-def flatbuffer_cc_library(
- name,
- srcs,
- srcs_filegroup_name = "",
- out_prefix = "",
- includes = [],
- include_paths = [],
- flatc_args = DEFAULT_FLATC_ARGS,
- visibility = None,
- srcs_filegroup_visibility = None,
- gen_reflections = False):
- '''A cc_library with the generated reader/writers for the given flatbuffer definitions.
- Args:
- name: Rule name.
- srcs: Source .fbs files. Sent in order to the compiler.
- srcs_filegroup_name: Name of the output filegroup that holds srcs. Pass this
- filegroup into the `includes` parameter of any other
- flatbuffer_cc_library that depends on this one's schemas.
- out_prefix: Prepend this path to the front of all generated files. Usually
- is a directory name.
- includes: Optional, list of filegroups of schemas that the srcs depend on.
- ** SEE REMARKS BELOW **
- include_paths: Optional, list of paths the includes files can be found in.
- flatc_args: Optional list of additional arguments to pass to flatc
- (e.g. --gen-mutable).
- visibility: The visibility of the generated cc_library. By default, use the
- default visibility of the project.
- srcs_filegroup_visibility: The visibility of the generated srcs filegroup.
- By default, use the value of the visibility parameter above.
- gen_reflections: Optional, if true this will generate the flatbuffer
- reflection binaries for the schemas.
- Outs:
- filegroup([name]_srcs): all generated .h files.
- filegroup(srcs_filegroup_name if specified, or [name]_includes if not):
- Other flatbuffer_cc_library's can pass this in for their `includes`
- parameter, if they depend on the schemas in this library.
- Fileset([name]_reflection): (Optional) all generated reflection binaries.
- cc_library([name]): library with sources and flatbuffers deps.
+def flatbuffer_cc_library(name, srcs, srcs_filegroup_name="",
+ out_prefix="", includes=[], include_paths=[],
+ flatc_args=DEFAULT_FLATC_ARGS,
+ visibility=None, srcs_filegroup_visibility=None,
+ gen_reflections=False):
+ '''A cc_library with the generated reader/writers for the given flatbuffer definitions.
- Remarks:
- ** Because the genrule used to call flatc does not have any trivial way of
- computing the output list of files transitively generated by includes and
- --gen-includes (the default) being defined for flatc, the --gen-includes
- flag will not work as expected. The way around this is to add a dependency
- to the flatbuffer_cc_library defined alongside the flatc included Fileset.
- For example you might define:
+ Args:
+ name: Rule name.
+ srcs: Source .fbs files. Sent in order to the compiler.
+ srcs_filegroup_name: Name of the output filegroup that holds srcs. Pass this
+ filegroup into the `includes` parameter of any other
+ flatbuffer_cc_library that depends on this one's schemas.
+ out_prefix: Prepend this path to the front of all generated files. Usually
+ a directory name.
+ includes: Optional, list of filegroups of schemas that the srcs depend on.
+ ** SEE REMARKS BELOW **
+ include_paths: Optional, list of paths the included files can be found in.
+ flatc_args: Optional list of additional arguments to pass to flatc
+ (e.g. --gen-mutable).
+ visibility: The visibility of the generated cc_library. By default, use the
+ default visibility of the project.
+ srcs_filegroup_visibility: The visibility of the generated srcs filegroup.
+ By default, use the value of the visibility parameter above.
+ gen_reflections: Optional, if true this will generate the flatbuffer
+ reflection binaries for the schemas.
+ Outs:
+ filegroup([name]_srcs): all generated .h files.
+ filegroup(srcs_filegroup_name if specified, or [name]_includes if not):
+ Other flatbuffer_cc_library's can pass this in for their `includes`
+ parameter, if they depend on the schemas in this library.
+ Fileset([name]_reflection): (Optional) all generated reflection binaries.
+ cc_library([name]): library with sources and flatbuffers deps.
- flatbuffer_cc_library(
- name = "my_fbs",
- srcs = [ "schemas/foo.fbs" ],
- includes = [ "//third_party/bazz:bazz_fbs_includes" ],
- )
+ Remarks:
+ ** Because the genrule used to call flatc does not have any trivial way of
+ computing the output list of files transitively generated by includes and
+ --gen-includes (the default) being defined for flatc, the --gen-includes
+ flag will not work as expected. The way around this is to add a dependency
+ to the flatbuffer_cc_library defined alongside the flatc included Fileset.
+ For example you might define:
- In which foo.fbs includes a few files from the Fileset defined at
- //third_party/bazz:bazz_fbs_includes. When compiling the library that
- includes foo_generated.h, and therefore has my_fbs as a dependency, it
- will fail to find any of the bazz *_generated.h files unless you also
- add bazz's flatbuffer_cc_library to your own dependency list, e.g.:
+ flatbuffer_cc_library(
+ name = "my_fbs",
+ srcs = [ "schemas/foo.fbs" ],
+ includes = [ "//third_party/bazz:bazz_fbs_includes" ],
+ )
- cc_library(
- name = "my_lib",
- deps = [
- ":my_fbs",
- "//third_party/bazz:bazz_fbs"
- ],
- )
+ In which foo.fbs includes a few files from the Fileset defined at
+ //third_party/bazz:bazz_fbs_includes. When compiling the library that
+ includes foo_generated.h, and therefore has my_fbs as a dependency, it
+ will fail to find any of the bazz *_generated.h files unless you also
+ add bazz's flatbuffer_cc_library to your own dependency list, e.g.:
- Happy dependent Flatbuffering!
- '''
- output_headers = [
- (out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1])
- for s in srcs
- ]
- reflection_name = "%s_reflection" % name if gen_reflections else ""
+ cc_library(
+ name = "my_lib",
+ deps = [
+ ":my_fbs",
+ "//third_party/bazz:bazz_fbs"
+ ],
+ )
- flatbuffer_library_public(
- name = "%s_srcs" % (name),
- srcs = srcs,
- outs = output_headers,
- language_flag = "-c",
- out_prefix = out_prefix,
- includes = includes,
- include_paths = include_paths,
- flatc_args = flatc_args,
- reflection_name = reflection_name,
- reflection_visiblity = visibility,
- )
- native.cc_library(
- name = name,
- hdrs = output_headers,
- srcs = output_headers,
- features = [
- "-parse_headers",
- ],
- deps = [
- "@flatbuffers//:runtime_cc",
- ],
- includes = ["."],
- linkstatic = 1,
- visibility = visibility,
- )
+ Happy dependent Flatbuffering!
+ '''
+ output_headers = [
+ (out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1]) for s in srcs
+ ]
+ reflection_name = "%s_reflection" % name if gen_reflections else ""
- # A filegroup for the `srcs`. That is, all the schema files for this
- # Flatbuffer set.
- native.filegroup(
- name = srcs_filegroup_name if srcs_filegroup_name else "%s_includes" % (name),
- srcs = srcs,
- visibility = srcs_filegroup_visibility if srcs_filegroup_visibility != None else visibility,
- )
+ flatbuffer_library_public(name="%s_srcs" % (name),
+ srcs=srcs,
+ outs=output_headers,
+ language_flag="-c",
+ out_prefix=out_prefix,
+ includes=includes,
+ include_paths=include_paths,
+ flatc_args=flatc_args,
+ reflection_name=reflection_name,
+ reflection_visiblity=visibility,)
+ native.cc_library(name=name,
+ hdrs=output_headers,
+ srcs=output_headers,
+ features=[
+ "-parse_headers",
+ ],
+ deps=[
+ "@flatbuffers//:runtime_cc",
+ ],
+ includes=["."],
+ linkstatic=1,
+ visibility=visibility)
+
+ # A filegroup for the `srcs`. That is, all the schema files for this
+ # Flatbuffer set.
+ native.filegroup(
+ name = srcs_filegroup_name if srcs_filegroup_name else "%s_includes" % (name),
+ srcs = srcs,
+ visibility=srcs_filegroup_visibility if srcs_filegroup_visibility != None else visibility)
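
A minimal sketch of the targets this macro fans out to, per the Outs documented above; the schema name is illustrative:

    # flatbuffer_cc_library(name = "foo_fbs", srcs = ["foo.fbs"]) creates:
    #   :foo_fbs_srcs      -- the generated foo_generated.h
    #   :foo_fbs_includes  -- filegroup of the schemas (default srcs_filegroup_name)
    #   :foo_fbs           -- cc_library over the generated header
    flatbuffer_cc_library(
        name = "foo_fbs",        # illustrative
        srcs = ["foo.fbs"],
        gen_reflections = True,  # additionally creates :foo_fbs_reflection
    )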
diff --git a/third_party/llvm/llvm.bzl b/third_party/llvm/llvm.bzl
index b65bfe3544..2e809e5f14 100644
--- a/third_party/llvm/llvm.bzl
+++ b/third_party/llvm/llvm.bzl
@@ -8,114 +8,102 @@ correctly understood by the build system.
"""
def gentbl(name, tblgen, td_file, td_srcs, tbl_outs, library = True, **kwargs):
- """gentbl() generates tabular code from a table definition file.
-
- Args:
- name: The name of the build rule for use in dependencies.
- tblgen: The binary used to produce the output.
- td_file: The primary table definitions file.
- td_srcs: A list of table definition files included transitively.
- tbl_outs: A list of tuples (opts, out), where each opts is a string of
- options passed to tblgen, and the out is the corresponding output file
- produced.
- library: Whether to bundle the generated files into a library.
- **kwargs: Keyword arguments to pass to subsidiary cc_library() rule.
- """
- if td_file not in td_srcs:
- td_srcs += [td_file]
- includes = []
- for (opts, out) in tbl_outs:
- outdir = out[:out.rindex("/")]
- if outdir not in includes:
- includes.append(outdir)
- rule_suffix = "_".join(opts.replace("-", "_").replace("=", "_").split(" "))
- native.genrule(
- name = "%s_%s_genrule" % (name, rule_suffix),
- srcs = td_srcs,
- outs = [out],
- tools = [tblgen],
- message = "Generating code from table: %s" % td_file,
- cmd = (("$(location %s) " + "-I external/llvm/include " +
- "-I external/llvm/tools/clang/include " +
- "-I $$(dirname $(location %s)) " + "%s $(location %s) -o $@") % (
- tblgen,
- td_file,
- opts,
- td_file,
- )),
- )
-
- # For now, all generated files can be assumed to comprise public interfaces.
- # If this is not true, you should specify library = False
- # and list the generated '.inc' files in "srcs".
- if library:
- native.cc_library(
- name = name,
- textual_hdrs = [f for (_, f) in tbl_outs],
- includes = includes,
- **kwargs
- )
+ """gentbl() generates tabular code from a table definition file.
+
+ Args:
+ name: The name of the build rule for use in dependencies.
+ tblgen: The binary used to produce the output.
+ td_file: The primary table definitions file.
+ td_srcs: A list of table definition files included transitively.
+ tbl_outs: A list of tuples (opts, out), where each opts is a string of
+ options passed to tblgen, and the out is the corresponding output file
+ produced.
+ library: Whether to bundle the generated files into a library.
+ **kwargs: Keyword arguments to pass to subsidiary cc_library() rule.
+ """
+ if td_file not in td_srcs:
+ td_srcs += [td_file]
+ includes = []
+ for (opts, out) in tbl_outs:
+ outdir = out[:out.rindex("/")]
+ if outdir not in includes:
+ includes.append(outdir)
+ rule_suffix = "_".join(opts.replace("-", "_").replace("=", "_").split(" "))
+ native.genrule(
+ name="%s_%s_genrule" % (name, rule_suffix),
+ srcs=td_srcs,
+ outs=[out],
+ tools=[tblgen],
+ message="Generating code from table: %s" % td_file,
+ cmd=(("$(location %s) " + "-I external/llvm/include " +
+ "-I external/llvm/tools/clang/include " +
+ "-I $$(dirname $(location %s)) " + "%s $(location %s) -o $@") % (
+ tblgen, td_file, opts, td_file)))
+ # For now, all generated files can be assumed to comprise public interfaces.
+ # If this is not true, you should specify library = False
+ # and list the generated '.inc' files in "srcs".
+ if library:
+ native.cc_library(name=name, textual_hdrs=[f for (_, f) in tbl_outs],
+ includes=includes, **kwargs)
def llvm_target_cmake_vars(native_arch, target_triple):
- return {
- "LLVM_HOST_TRIPLE": target_triple,
- "LLVM_DEFAULT_TARGET_TRIPLE": target_triple,
- "LLVM_NATIVE_ARCH": native_arch,
- }
+ return {
+ "LLVM_HOST_TRIPLE": target_triple,
+ "LLVM_DEFAULT_TARGET_TRIPLE": target_triple,
+ "LLVM_NATIVE_ARCH": native_arch,
+ }
def _quote(s):
- """Quotes the given string for use in a shell command.
-
- This function double-quotes the given string (in case it contains spaces or
- other special characters) and escapes any special characters (dollar signs,
- double-quotes, and backslashes) that may be present.
-
- Args:
- s: The string to quote.
- Returns:
- An escaped and quoted version of the string that can be passed to a shell
- command.
- """
- return ('"' +
- s.replace("\\", "\\\\").replace("$", "\\$").replace('"', '\\"') +
- '"')
+ """Quotes the given string for use in a shell command.
+
+ This function double-quotes the given string (in case it contains spaces or
+ other special characters) and escapes any special characters (dollar signs,
+ double-quotes, and backslashes) that may be present.
+
+ Args:
+ s: The string to quote.
+ Returns:
+ An escaped and quoted version of the string that can be passed to a shell
+ command.
+ """
+ return ('"' +
+ s.replace("\\", "\\\\").replace("$", "\\$").replace('"', '\\"') +
+ '"')
def cmake_var_string(cmake_vars):
- """Converts a dictionary to an input suitable for expand_cmake_vars.
+ """Converts a dictionary to an input suitable for expand_cmake_vars.
- Ideally we would jist stringify in the expand_cmake_vars() rule, but select()
- interacts badly with genrules.
+ Ideally we would just stringify in the expand_cmake_vars() rule, but select()
+ interacts badly with genrules.
- TODO(phawkins): replace the genrule() with native rule and delete this rule.
+ TODO(phawkins): replace the genrule() with a native rule and delete this rule.
- Args:
- cmake_vars: a dictionary with string keys and values that are convertable to
- strings.
- """
- return " ".join([
- _quote("{}={}".format(k, str(v)))
- for (k, v) in cmake_vars.items()
- ])
+ Args:
+ cmake_vars: a dictionary with string keys and values that are convertible to
+ strings.
+ """
+ return " ".join([_quote("{}={}".format(k, str(v)))
+ for (k, v) in cmake_vars.items()])
def expand_cmake_vars(name, src, dst, cmake_vars):
- """Expands #cmakedefine, #cmakedefine01, and CMake variables in a text file.
-
- Args:
- name: the name of the rule
- src: the input of the rule
- dst: the output of the rule
- cmake_vars: a string containing the CMake variables, as generated by
- cmake_var_string.
- """
- expand_cmake_vars_tool = Label("@org_tensorflow//third_party/llvm:expand_cmake_vars")
- native.genrule(
- name = name,
- srcs = [src],
- tools = [expand_cmake_vars_tool],
- outs = [dst],
- cmd = ("$(location {}) ".format(expand_cmake_vars_tool) + cmake_vars +
- "< $< > $@"),
- )
+ """Expands #cmakedefine, #cmakedefine01, and CMake variables in a text file.
+
+ Args:
+ name: the name of the rule
+ src: the input of the rule
+ dst: the output of the rule
+ cmake_vars: a string containing the CMake variables, as generated by
+ cmake_var_string.
+ """
+ expand_cmake_vars_tool = Label("@org_tensorflow//third_party/llvm:expand_cmake_vars")
+ native.genrule(
+ name = name,
+ srcs = [src],
+ tools = [expand_cmake_vars_tool],
+ outs = [dst],
+ cmd = ("$(location {}) ".format(expand_cmake_vars_tool) + cmake_vars +
+ "< $< > $@")
+ )
# TODO(phawkins): the set of CMake variables was hardcoded for expediency.
# However, we should really detect many of these via configure-time tests.
@@ -225,18 +213,17 @@ darwin_cmake_vars = {
llvm_all_cmake_vars = select({
"@org_tensorflow//tensorflow:darwin": cmake_var_string(
cmake_vars + llvm_target_cmake_vars("X86", "x86_64-apple-darwin") +
- darwin_cmake_vars,
- ),
+ darwin_cmake_vars),
"@org_tensorflow//tensorflow:linux_ppc64le": cmake_var_string(
cmake_vars +
llvm_target_cmake_vars("PowerPC", "powerpc64le-unknown-linux_gnu") +
linux_cmake_vars,
),
"//conditions:default": cmake_var_string(
- cmake_vars +
- llvm_target_cmake_vars("X86", "x86_64-unknown-linux_gnu") +
- linux_cmake_vars,
- ),
+ cmake_vars +
+ llvm_target_cmake_vars("X86", "x86_64-unknown-linux_gnu") +
+ linux_cmake_vars),
+
})
LLVM_LINKOPTS = ["-ldl", "-lm", "-lpthread"]
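
For reference, a hedged gentbl() invocation; the labels and option string are illustrative, but the shape follows the docstring above:

    # Each (opts, out) pair becomes one genrule named
    # "<name>_<sanitized-opts>_genrule"; with library = True (the default) the
    # generated .inc files are bundled into cc_library(name = "ops_gen").
    gentbl(
        name = "ops_gen",                             # illustrative
        tblgen = "@llvm//:llvm-tblgen",               # assumed label
        td_file = "lib/Target/X86/X86.td",            # illustrative
        td_srcs = ["include/llvm/Target/Target.td"],  # illustrative
        tbl_outs = [("-gen-register-info",
                     "lib/Target/X86/X86GenRegisterInfo.inc")],
    )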
diff --git a/third_party/mkl/build_defs.bzl b/third_party/mkl/build_defs.bzl
index 6571f38fe6..53e02769da 100644
--- a/third_party/mkl/build_defs.bzl
+++ b/third_party/mkl/build_defs.bzl
@@ -8,8 +8,10 @@ mkl_repository depends on the following environment variables:
* `TF_MKL_ROOT`: The root folder where a copy of libmkl is located.
"""
+
_TF_MKL_ROOT = "TF_MKL_ROOT"
+
def if_mkl(if_true, if_false = []):
"""Shorthand for select()'ing on whether we're building with MKL.
@@ -19,7 +21,7 @@ def if_mkl(if_true, if_false = []):
"""
return select({
str(Label("//third_party/mkl:using_mkl")): if_true,
- "//conditions:default": if_false,
+ "//conditions:default": if_false
})
def if_mkl_lnx_x64(if_true, if_false = []):
@@ -31,34 +33,37 @@ def if_mkl_lnx_x64(if_true, if_false = []):
"""
return select({
str(Label("//third_party/mkl:using_mkl_lnx_x64")): if_true,
- "//conditions:default": if_false,
+ "//conditions:default": if_false
})
+
def _enable_local_mkl(repository_ctx):
- return _TF_MKL_ROOT in repository_ctx.os.environ
+ return _TF_MKL_ROOT in repository_ctx.os.environ
+
def _mkl_autoconf_impl(repository_ctx):
- """Implementation of the local_mkl_autoconf repository rule."""
-
- if _enable_local_mkl(repository_ctx):
- # Symlink lib and include local folders.
- mkl_root = repository_ctx.os.environ[_TF_MKL_ROOT]
- mkl_lib_path = "%s/lib" % mkl_root
- repository_ctx.symlink(mkl_lib_path, "lib")
- mkl_include_path = "%s/include" % mkl_root
- repository_ctx.symlink(mkl_include_path, "include")
- mkl_license_path = "%s/license.txt" % mkl_root
- repository_ctx.symlink(mkl_license_path, "license.txt")
- else:
- # setup remote mkl repository.
- repository_ctx.download_and_extract(
- repository_ctx.attr.urls,
- sha256 = repository_ctx.attr.sha256,
- stripPrefix = repository_ctx.attr.strip_prefix,
- )
-
- # Also setup BUILD file.
- repository_ctx.symlink(repository_ctx.attr.build_file, "BUILD")
+ """Implementation of the local_mkl_autoconf repository rule."""
+
+ if _enable_local_mkl(repository_ctx):
+ # Symlink lib and include local folders.
+ mkl_root = repository_ctx.os.environ[_TF_MKL_ROOT]
+ mkl_lib_path = "%s/lib" % mkl_root
+ repository_ctx.symlink(mkl_lib_path, "lib")
+ mkl_include_path = "%s/include" % mkl_root
+ repository_ctx.symlink(mkl_include_path, "include")
+ mkl_license_path = "%s/license.txt" % mkl_root
+ repository_ctx.symlink(mkl_license_path, "license.txt")
+ else:
+ # setup remote mkl repository.
+ repository_ctx.download_and_extract(
+ repository_ctx.attr.urls,
+ sha256=repository_ctx.attr.sha256,
+ stripPrefix=repository_ctx.attr.strip_prefix,
+ )
+
+ # Also setup BUILD file.
+ repository_ctx.symlink(repository_ctx.attr.build_file, "BUILD")
+
mkl_repository = repository_rule(
implementation = _mkl_autoconf_impl,
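
A small sketch of how if_mkl() is typically consumed in a BUILD file; the target and flag are illustrative:

    # select() keyed on //third_party/mkl:using_mkl, as defined above.
    cc_library(
        name = "matmul_op",                 # illustrative
        srcs = ["matmul_op.cc"],
        copts = if_mkl(["-DINTEL_MKL=1"]),  # define shown for illustration only
    )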
diff --git a/third_party/mpi/mpi.bzl b/third_party/mpi/mpi.bzl
index 3a483351d1..38ce91c4d0 100644
--- a/third_party/mpi/mpi.bzl
+++ b/third_party/mpi/mpi.bzl
@@ -2,16 +2,16 @@
#based on the configuration options return one or the other
def mpi_hdr():
- MPI_LIB_IS_OPENMPI = True
- hdrs = []
+ MPI_LIB_IS_OPENMPI=True
+ hdrs = []
if MPI_LIB_IS_OPENMPI:
- hdrs = ["mpi.h", "mpi_portable_platform.h"] #When using OpenMPI
+ hdrs = ["mpi.h", "mpi_portable_platform.h"] #When using OpenMPI
else:
- hdrs = ["mpi.h", "mpio.h", "mpicxx.h"] #When using MVAPICH
+ hdrs = ["mpi.h", "mpio.h", "mpicxx.h"] #When using MVAPICH
return hdrs
def if_mpi(if_true, if_false = []):
return select({
"//tensorflow:with_mpi_support": if_true,
- "//conditions:default": if_false,
+ "//conditions:default": if_false
})
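
And the analogous sketch for mpi_hdr()/if_mpi(); the names below are made up:

    # Sources are added only when //tensorflow:with_mpi_support is set.
    cc_library(
        name = "mpi_collectives",       # illustrative
        srcs = if_mpi(["mpi_ops.cc"]),  # file name is made up
        hdrs = mpi_hdr(),
    )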
diff --git a/third_party/repo.bzl b/third_party/repo.bzl
index d020248ac9..9cee1fcc4b 100644
--- a/third_party/repo.bzl
+++ b/third_party/repo.bzl
@@ -19,98 +19,90 @@ _SINGLE_URL_WHITELIST = depset([
])
def _is_windows(ctx):
- return ctx.os.name.lower().find("windows") != -1
+ return ctx.os.name.lower().find("windows") != -1
def _wrap_bash_cmd(ctx, cmd):
- if _is_windows(ctx):
- bazel_sh = _get_env_var(ctx, "BAZEL_SH")
- if not bazel_sh:
- fail("BAZEL_SH environment variable is not set")
- cmd = [bazel_sh, "-l", "-c", " ".join(cmd)]
- return cmd
+ if _is_windows(ctx):
+ bazel_sh = _get_env_var(ctx, "BAZEL_SH")
+ if not bazel_sh:
+ fail("BAZEL_SH environment variable is not set")
+ cmd = [bazel_sh, "-l", "-c", " ".join(cmd)]
+ return cmd
def _get_env_var(ctx, name):
- if name in ctx.os.environ:
- return ctx.os.environ[name]
- else:
- return None
+ if name in ctx.os.environ:
+ return ctx.os.environ[name]
+ else:
+ return None
# Executes specified command with arguments and calls 'fail' if it exited with
# non-zero code
def _execute_and_check_ret_code(repo_ctx, cmd_and_args):
- result = repo_ctx.execute(cmd_and_args, timeout = 10)
- if result.return_code != 0:
- fail(("Non-zero return code({1}) when executing '{0}':\n" + "Stdout: {2}\n" +
- "Stderr: {3}").format(
- " ".join(cmd_and_args),
- result.return_code,
- result.stdout,
- result.stderr,
- ))
+ result = repo_ctx.execute(cmd_and_args, timeout=10)
+ if result.return_code != 0:
+ fail(("Non-zero return code({1}) when executing '{0}':\n" + "Stdout: {2}\n"
+ + "Stderr: {3}").format(" ".join(cmd_and_args), result.return_code,
+ result.stdout, result.stderr))
def _repos_are_siblings():
- return Label("@foo//bar").workspace_root.startswith("../")
+ return Label("@foo//bar").workspace_root.startswith("../")
# Apply a patch_file to the repository root directory
# Runs 'patch -p1'
def _apply_patch(ctx, patch_file):
- # Don't check patch on Windows, because patch is only available under bash.
- if not _is_windows(ctx) and not ctx.which("patch"):
- fail("patch command is not found, please install it")
- cmd = _wrap_bash_cmd(
- ctx,
- ["patch", "-p1", "-d", ctx.path("."), "-i", ctx.path(patch_file)],
- )
- _execute_and_check_ret_code(ctx, cmd)
+ # Don't check patch on Windows, because patch is only available under bash.
+ if not _is_windows(ctx) and not ctx.which("patch"):
+ fail("patch command is not found, please install it")
+ cmd = _wrap_bash_cmd(
+ ctx, ["patch", "-p1", "-d", ctx.path("."), "-i", ctx.path(patch_file)])
+ _execute_and_check_ret_code(ctx, cmd)
def _apply_delete(ctx, paths):
- for path in paths:
- if path.startswith("/"):
- fail("refusing to rm -rf path starting with '/': " + path)
- if ".." in path:
- fail("refusing to rm -rf path containing '..': " + path)
- cmd = _wrap_bash_cmd(ctx, ["rm", "-rf"] + [ctx.path(path) for path in paths])
- _execute_and_check_ret_code(ctx, cmd)
+ for path in paths:
+ if path.startswith("/"):
+ fail("refusing to rm -rf path starting with '/': " + path)
+ if ".." in path:
+ fail("refusing to rm -rf path containing '..': " + path)
+ cmd = _wrap_bash_cmd(ctx, ["rm", "-rf"] + [ctx.path(path) for path in paths])
+ _execute_and_check_ret_code(ctx, cmd)
def _tf_http_archive(ctx):
- if ("mirror.bazel.build" not in ctx.attr.urls[0] and
- (len(ctx.attr.urls) < 2 and
- ctx.attr.name not in _SINGLE_URL_WHITELIST)):
- fail("tf_http_archive(urls) must have redundant URLs. The " +
- "mirror.bazel.build URL must be present and it must come first. " +
- "Even if you don't have permission to mirror the file, please " +
- "put the correctly formatted mirror URL there anyway, because " +
- "someone will come along shortly thereafter and mirror the file.")
- ctx.download_and_extract(
- ctx.attr.urls,
- "",
- ctx.attr.sha256,
- ctx.attr.type,
- ctx.attr.strip_prefix,
- )
- if ctx.attr.delete:
- _apply_delete(ctx, ctx.attr.delete)
- if ctx.attr.patch_file != None:
- _apply_patch(ctx, ctx.attr.patch_file)
- if ctx.attr.build_file != None:
- # Use BUILD.bazel to avoid conflict with third party projects with
- # BUILD or build (directory) underneath.
- ctx.template("BUILD.bazel", ctx.attr.build_file, {
- "%prefix%": ".." if _repos_are_siblings() else "external",
- }, False)
+ if ("mirror.bazel.build" not in ctx.attr.urls[0] and
+ (len(ctx.attr.urls) < 2 and
+ ctx.attr.name not in _SINGLE_URL_WHITELIST)):
+ fail("tf_http_archive(urls) must have redundant URLs. The " +
+ "mirror.bazel.build URL must be present and it must come first. " +
+ "Even if you don't have permission to mirror the file, please " +
+ "put the correctly formatted mirror URL there anyway, because " +
+ "someone will come along shortly thereafter and mirror the file.")
+ ctx.download_and_extract(
+ ctx.attr.urls,
+ "",
+ ctx.attr.sha256,
+ ctx.attr.type,
+ ctx.attr.strip_prefix)
+ if ctx.attr.delete:
+ _apply_delete(ctx, ctx.attr.delete)
+ if ctx.attr.patch_file != None:
+ _apply_patch(ctx, ctx.attr.patch_file)
+ if ctx.attr.build_file != None:
+ # Use BUILD.bazel to avoid conflict with third party projects with
+ # BUILD or build (directory) underneath.
+ ctx.template("BUILD.bazel", ctx.attr.build_file, {
+ "%prefix%": ".." if _repos_are_siblings() else "external",
+ }, False)
tf_http_archive = repository_rule(
- implementation = _tf_http_archive,
- attrs = {
- "sha256": attr.string(mandatory = True),
- "urls": attr.string_list(mandatory = True, allow_empty = False),
+ implementation=_tf_http_archive,
+ attrs={
+ "sha256": attr.string(mandatory=True),
+ "urls": attr.string_list(mandatory=True, allow_empty=False),
"strip_prefix": attr.string(),
"type": attr.string(),
"delete": attr.string_list(),
"patch_file": attr.label(),
"build_file": attr.label(),
- },
-)
+ })
"""Downloads and creates Bazel repos for dependencies.
This is a swappable replacement for both http_archive() and
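
A hedged example of the URL convention tf_http_archive enforces; all archive values below are placeholders:

    tf_http_archive(
        name = "com_example_dep",  # illustrative
        urls = [
            # The mirror.bazel.build URL must come first...
            "https://mirror.bazel.build/example.com/dep-1.0.tar.gz",
            # ...followed by at least one upstream fallback.
            "https://example.com/dep-1.0.tar.gz",
        ],
        sha256 = "0000000000000000000000000000000000000000000000000000000000000000",  # placeholder
        strip_prefix = "dep-1.0",
    )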
diff --git a/third_party/sycl/sycl_configure.bzl b/third_party/sycl/sycl_configure.bzl
index deba6c4116..5b9d0eb383 100644
--- a/third_party/sycl/sycl_configure.bzl
+++ b/third_party/sycl/sycl_configure.bzl
@@ -11,124 +11,122 @@
"""
_HOST_CXX_COMPILER = "HOST_CXX_COMPILER"
-_HOST_C_COMPILER = "HOST_C_COMPILER"
+_HOST_C_COMPILER= "HOST_C_COMPILER"
_COMPUTECPP_TOOLKIT_PATH = "COMPUTECPP_TOOLKIT_PATH"
_TRISYCL_INCLUDE_DIR = "TRISYCL_INCLUDE_DIR"
_PYTHON_LIB_PATH = "PYTHON_LIB_PATH"
def _enable_sycl(repository_ctx):
- if "TF_NEED_OPENCL_SYCL" in repository_ctx.os.environ:
- enable_sycl = repository_ctx.os.environ["TF_NEED_OPENCL_SYCL"].strip()
- return enable_sycl == "1"
- return False
+ if "TF_NEED_OPENCL_SYCL" in repository_ctx.os.environ:
+ enable_sycl = repository_ctx.os.environ["TF_NEED_OPENCL_SYCL"].strip()
+ return enable_sycl == "1"
+ return False
def _enable_compute_cpp(repository_ctx):
- return _COMPUTECPP_TOOLKIT_PATH in repository_ctx.os.environ
+ return _COMPUTECPP_TOOLKIT_PATH in repository_ctx.os.environ
def auto_configure_fail(msg):
- """Output failure message when auto configuration fails."""
- red = "\033[0;31m"
- no_color = "\033[0m"
- fail("\n%sAuto-Configuration Error:%s %s\n" % (red, no_color, msg))
-
+ """Output failure message when auto configuration fails."""
+ red = "\033[0;31m"
+ no_color = "\033[0m"
+ fail("\n%sAuto-Configuration Error:%s %s\n" % (red, no_color, msg))
# END cc_configure common functions (see TODO above).
def find_c(repository_ctx):
- """Find host C compiler."""
- c_name = "gcc"
- if _HOST_C_COMPILER in repository_ctx.os.environ:
- c_name = repository_ctx.os.environ[_HOST_C_COMPILER].strip()
- if c_name.startswith("/"):
- return c_name
- c = repository_ctx.which(c_name)
- if c == None:
- fail("Cannot find C compiler, please correct your path.")
- return c
+ """Find host C compiler."""
+ c_name = "gcc"
+ if _HOST_C_COMPILER in repository_ctx.os.environ:
+ c_name = repository_ctx.os.environ[_HOST_C_COMPILER].strip()
+ if c_name.startswith("/"):
+ return c_name
+ c = repository_ctx.which(c_name)
+ if c == None:
+ fail("Cannot find C compiler, please correct your path.")
+ return c
def find_cc(repository_ctx):
- """Find host C++ compiler."""
- cc_name = "g++"
- if _HOST_CXX_COMPILER in repository_ctx.os.environ:
- cc_name = repository_ctx.os.environ[_HOST_CXX_COMPILER].strip()
- if cc_name.startswith("/"):
- return cc_name
- cc = repository_ctx.which(cc_name)
- if cc == None:
- fail("Cannot find C++ compiler, please correct your path.")
- return cc
+ """Find host C++ compiler."""
+ cc_name = "g++"
+ if _HOST_CXX_COMPILER in repository_ctx.os.environ:
+ cc_name = repository_ctx.os.environ[_HOST_CXX_COMPILER].strip()
+ if cc_name.startswith("/"):
+ return cc_name
+ cc = repository_ctx.which(cc_name)
+ if cc == None:
+ fail("Cannot find C++ compiler, please correct your path.")
+ return cc
def find_computecpp_root(repository_ctx):
- """Find ComputeCpp compiler."""
- sycl_name = ""
- if _COMPUTECPP_TOOLKIT_PATH in repository_ctx.os.environ:
- sycl_name = repository_ctx.os.environ[_COMPUTECPP_TOOLKIT_PATH].strip()
- if sycl_name.startswith("/"):
- return sycl_name
- fail("Cannot find SYCL compiler, please correct your path")
+ """Find ComputeCpp compiler."""
+ sycl_name = ""
+ if _COMPUTECPP_TOOLKIT_PATH in repository_ctx.os.environ:
+ sycl_name = repository_ctx.os.environ[_COMPUTECPP_TOOLKIT_PATH].strip()
+ if sycl_name.startswith("/"):
+ return sycl_name
+ fail("Cannot find SYCL compiler, please correct your path")
def find_trisycl_include_dir(repository_ctx):
- """Find triSYCL include directory. """
- if _TRISYCL_INCLUDE_DIR in repository_ctx.os.environ:
- sycl_name = repository_ctx.os.environ[_TRISYCL_INCLUDE_DIR].strip()
- if sycl_name.startswith("/"):
- return sycl_name
- fail("Cannot find triSYCL include directory, please correct your path")
+ """Find triSYCL include directory. """
+ if _TRISYCL_INCLUDE_DIR in repository_ctx.os.environ:
+ sycl_name = repository_ctx.os.environ[_TRISYCL_INCLUDE_DIR].strip()
+ if sycl_name.startswith("/"):
+ return sycl_name
+ fail( "Cannot find triSYCL include directory, please correct your path")
def find_python_lib(repository_ctx):
- """Returns python path."""
- if _PYTHON_LIB_PATH in repository_ctx.os.environ:
- return repository_ctx.os.environ[_PYTHON_LIB_PATH].strip()
- fail("Environment variable PYTHON_LIB_PATH was not specified re-run ./configure")
+ """Returns python path."""
+ if _PYTHON_LIB_PATH in repository_ctx.os.environ:
+ return repository_ctx.os.environ[_PYTHON_LIB_PATH].strip()
+ fail("Environment variable PYTHON_LIB_PATH was not specified re-run ./configure")
+
def _check_lib(repository_ctx, toolkit_path, lib):
- """Checks if lib exists under sycl_toolkit_path or fail if it doesn't.
+ """Checks if lib exists under sycl_toolkit_path or fail if it doesn't.
- Args:
- repository_ctx: The repository context.
- toolkit_path: The toolkit directory containing the libraries.
- ib: The library to look for under toolkit_path.
- """
- lib_path = toolkit_path + "/" + lib
- if not repository_ctx.path(lib_path).exists:
- auto_configure_fail("Cannot find %s" % lib_path)
+ Args:
+ repository_ctx: The repository context.
+ toolkit_path: The toolkit directory containing the libraries.
+ lib: The library to look for under toolkit_path.
+ """
+ lib_path = toolkit_path + "/" + lib
+ if not repository_ctx.path(lib_path).exists:
+ auto_configure_fail("Cannot find %s" % lib_path)
def _check_dir(repository_ctx, directory):
- """Checks whether the directory exists and fail if it does not.
+ """Checks whether the directory exists and fail if it does not.
- Args:
- repository_ctx: The repository context.
- directory: The directory to check the existence of.
- """
- if not repository_ctx.path(directory).exists:
- auto_configure_fail("Cannot find dir: %s" % directory)
+ Args:
+ repository_ctx: The repository context.
+ directory: The directory to check the existence of.
+ """
+ if not repository_ctx.path(directory).exists:
+ auto_configure_fail("Cannot find dir: %s" % directory)
def _symlink_dir(repository_ctx, src_dir, dest_dir):
- """Symlinks all the files in a directory.
-
- Args:
- repository_ctx: The repository context.
- src_dir: The source directory.
- dest_dir: The destination directory to create the symlinks in.
- """
- files = repository_ctx.path(src_dir).readdir()
- for src_file in files:
- repository_ctx.symlink(src_file, dest_dir + "/" + src_file.basename)
-
-def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
- if not out:
- out = tpl.replace(":", "/")
- repository_ctx.template(
- out,
- Label("//third_party/sycl/%s.tpl" % tpl),
- substitutions,
- )
+ """Symlinks all the files in a directory.
+
+ Args:
+ repository_ctx: The repository context.
+ src_dir: The source directory.
+ dest_dir: The destination directory to create the symlinks in.
+ """
+ files = repository_ctx.path(src_dir).readdir()
+ for src_file in files:
+ repository_ctx.symlink(src_file, dest_dir + "/" + src_file.basename)
+
+def _tpl(repository_ctx, tpl, substitutions={}, out=None):
+ if not out:
+ out = tpl.replace(":", "/")
+ repository_ctx.template(
+ out,
+ Label("//third_party/sycl/%s.tpl" % tpl),
+ substitutions)
def _file(repository_ctx, label):
- repository_ctx.template(
- label.replace(":", "/"),
- Label("//third_party/sycl/%s" % label),
- {},
- )
+ repository_ctx.template(
+ label.replace(":", "/"),
+ Label("//third_party/sycl/%s" % label),
+ {})
_DUMMY_CROSSTOOL_BZL_FILE = """
def error_sycl_disabled():
@@ -149,6 +147,7 @@ def error_sycl_disabled():
)
"""
+
_DUMMY_CROSSTOOL_BUILD_FILE = """
load("//crosstool:error_sycl_disabled.bzl", "error_sycl_disabled")
@@ -156,97 +155,87 @@ error_sycl_disabled()
"""
def _create_dummy_repository(repository_ctx):
- # Set up BUILD file for sycl/.
+ # Set up BUILD file for sycl/.
+ _tpl(repository_ctx, "sycl:build_defs.bzl")
+ _tpl(repository_ctx, "sycl:BUILD")
+ _file(repository_ctx, "sycl:LICENSE.text")
+ _tpl(repository_ctx, "sycl:platform.bzl")
+
+ # Create dummy files for the SYCL toolkit since they are still required by
+ # tensorflow/sycl/platform/default/build_config:sycl.
+ repository_ctx.file("sycl/include/sycl.hpp", "")
+ repository_ctx.file("sycl/lib/libComputeCpp.so", "")
+
+ # If sycl_configure is not configured to build with SYCL support, and the user
+ # attempts to build with --config=sycl, add a dummy build rule to intercept
+ # this and fail with an actionable error message.
+ repository_ctx.file("crosstool/error_sycl_disabled.bzl",
+ _DUMMY_CROSSTOOL_BZL_FILE)
+ repository_ctx.file("crosstool/BUILD", _DUMMY_CROSSTOOL_BUILD_FILE)
+
+
+def _sycl_autoconf_imp(repository_ctx):
+ """Implementation of the sycl_autoconf rule."""
+ if not _enable_sycl(repository_ctx):
+ _create_dummy_repository(repository_ctx)
+ else:
+ # copy template files
_tpl(repository_ctx, "sycl:build_defs.bzl")
_tpl(repository_ctx, "sycl:BUILD")
- _file(repository_ctx, "sycl:LICENSE.text")
_tpl(repository_ctx, "sycl:platform.bzl")
+ _tpl(repository_ctx, "crosstool:BUILD")
+ _file(repository_ctx, "sycl:LICENSE.text")
- # Create dummy files for the SYCL toolkit since they are still required by
- # tensorflow/sycl/platform/default/build_config:sycl.
- repository_ctx.file("sycl/include/sycl.hpp", "")
- repository_ctx.file("sycl/lib/libComputeCpp.so", "")
+ if _enable_compute_cpp(repository_ctx):
+ _tpl(repository_ctx, "crosstool:computecpp",
+ {
+ "%{host_cxx_compiler}" : find_cc(repository_ctx),
+ "%{host_c_compiler}" : find_c(repository_ctx)
+ })
+
+ computecpp_root = find_computecpp_root(repository_ctx);
+ _check_dir(repository_ctx, computecpp_root)
+
+ _tpl(repository_ctx, "crosstool:CROSSTOOL",
+ {
+ "%{sycl_include_dir}" : computecpp_root,
+ "%{sycl_impl}" : "computecpp",
+ "%{c++_std}" : "-std=c++11",
+ "%{python_lib_path}" : find_python_lib(repository_ctx),
+ })
+
+ # symlink libraries
+ _check_lib(repository_ctx, computecpp_root+"/lib", "libComputeCpp.so" )
+ _symlink_dir(repository_ctx, computecpp_root + "/lib", "sycl/lib")
+ _symlink_dir(repository_ctx, computecpp_root + "/include", "sycl/include")
+ _symlink_dir(repository_ctx, computecpp_root + "/bin", "sycl/bin")
+ else:
- # If sycl_configure is not configured to build with SYCL support, and the user
- # attempts to build with --config=sycl, add a dummy build rule to intercept
- # this and fail with an actionable error message.
- repository_ctx.file(
- "crosstool/error_sycl_disabled.bzl",
- _DUMMY_CROSSTOOL_BZL_FILE,
- )
- repository_ctx.file("crosstool/BUILD", _DUMMY_CROSSTOOL_BUILD_FILE)
+ trisycl_include_dir = find_trisycl_include_dir(repository_ctx);
+ _check_dir(repository_ctx, trisycl_include_dir)
+
+ _tpl(repository_ctx, "crosstool:trisycl",
+ {
+ "%{host_cxx_compiler}" : find_cc(repository_ctx),
+ "%{host_c_compiler}" : find_c(repository_ctx),
+ "%{trisycl_include_dir}" : trisycl_include_dir
+ })
+
+
+ _tpl(repository_ctx, "crosstool:CROSSTOOL",
+ {
+ "%{sycl_include_dir}" : trisycl_include_dir,
+ "%{sycl_impl}" : "trisycl",
+ "%{c++_std}" : "-std=c++1y",
+ "%{python_lib_path}" : find_python_lib(repository_ctx),
+ })
+
+ _symlink_dir(repository_ctx, trisycl_include_dir, "sycl/include")
-def _sycl_autoconf_imp(repository_ctx):
- """Implementation of the sycl_autoconf rule."""
- if not _enable_sycl(repository_ctx):
- _create_dummy_repository(repository_ctx)
- else:
- # copy template files
- _tpl(repository_ctx, "sycl:build_defs.bzl")
- _tpl(repository_ctx, "sycl:BUILD")
- _tpl(repository_ctx, "sycl:platform.bzl")
- _tpl(repository_ctx, "crosstool:BUILD")
- _file(repository_ctx, "sycl:LICENSE.text")
-
- if _enable_compute_cpp(repository_ctx):
- _tpl(
- repository_ctx,
- "crosstool:computecpp",
- {
- "%{host_cxx_compiler}": find_cc(repository_ctx),
- "%{host_c_compiler}": find_c(repository_ctx),
- },
- )
-
- computecpp_root = find_computecpp_root(repository_ctx)
- _check_dir(repository_ctx, computecpp_root)
-
- _tpl(
- repository_ctx,
- "crosstool:CROSSTOOL",
- {
- "%{sycl_include_dir}": computecpp_root,
- "%{sycl_impl}": "computecpp",
- "%{c++_std}": "-std=c++11",
- "%{python_lib_path}": find_python_lib(repository_ctx),
- },
- )
-
- # symlink libraries
- _check_lib(repository_ctx, computecpp_root + "/lib", "libComputeCpp.so")
- _symlink_dir(repository_ctx, computecpp_root + "/lib", "sycl/lib")
- _symlink_dir(repository_ctx, computecpp_root + "/include", "sycl/include")
- _symlink_dir(repository_ctx, computecpp_root + "/bin", "sycl/bin")
- else:
- trisycl_include_dir = find_trisycl_include_dir(repository_ctx)
- _check_dir(repository_ctx, trisycl_include_dir)
-
- _tpl(
- repository_ctx,
- "crosstool:trisycl",
- {
- "%{host_cxx_compiler}": find_cc(repository_ctx),
- "%{host_c_compiler}": find_c(repository_ctx),
- "%{trisycl_include_dir}": trisycl_include_dir,
- },
- )
-
- _tpl(
- repository_ctx,
- "crosstool:CROSSTOOL",
- {
- "%{sycl_include_dir}": trisycl_include_dir,
- "%{sycl_impl}": "trisycl",
- "%{c++_std}": "-std=c++1y",
- "%{python_lib_path}": find_python_lib(repository_ctx),
- },
- )
-
- _symlink_dir(repository_ctx, trisycl_include_dir, "sycl/include")
sycl_configure = repository_rule(
- implementation = _sycl_autoconf_imp,
- local = True,
+ implementation = _sycl_autoconf_imp,
+ local = True,
)
"""Detects and configures the SYCL toolchain.
diff --git a/third_party/toolchains/clang6/repo.bzl b/third_party/toolchains/clang6/repo.bzl
index e4b6422c96..b81f44506f 100644
--- a/third_party/toolchains/clang6/repo.bzl
+++ b/third_party/toolchains/clang6/repo.bzl
@@ -1,37 +1,30 @@
"""Repository rule for Debian 8 Jessie Clang-6.0 portable Linux builds."""
def _clang6_configure(ctx):
- # TODO(jart): It'd probably be better to use Bazel's struct.to_proto()
- # method to generate a gigantic CROSSTOOL file that allows
- # Clang to support everything.
- ctx.symlink(
- ctx.os.environ.get(
- "TF_LLVM_PATH",
- "/usr/lib/llvm-6.0",
- ),
- "clang6/llvm",
- )
- ctx.symlink(
- ctx.os.environ.get("STRIP", "/usr/bin/strip"),
- "clang6/sbin/strip",
- )
- ctx.symlink(
- ctx.os.environ.get("OBJDUMP", "/usr/bin/objdump"),
- "clang6/sbin/objdump",
- )
- ctx.symlink(ctx.attr._build, "clang6/BUILD")
- ctx.template("clang6/CROSSTOOL", ctx.attr._crosstool, {
- "%package(@local_config_clang6//clang6)%": str(ctx.path("clang6")),
- })
+ # TODO(jart): It'd probably be better to use Bazel's struct.to_proto()
+ # method to generate a gigantic CROSSTOOL file that allows
+ # Clang to support everything.
+ ctx.symlink(
+ ctx.os.environ.get('TF_LLVM_PATH',
+ '/usr/lib/llvm-6.0'),
+ 'clang6/llvm')
+ ctx.symlink(
+ ctx.os.environ.get('STRIP', '/usr/bin/strip'),
+ 'clang6/sbin/strip')
+ ctx.symlink(
+ ctx.os.environ.get('OBJDUMP', '/usr/bin/objdump'),
+ 'clang6/sbin/objdump')
+ ctx.symlink(ctx.attr._build, 'clang6/BUILD')
+ ctx.template('clang6/CROSSTOOL', ctx.attr._crosstool, {
+ '%package(@local_config_clang6//clang6)%': str(ctx.path('clang6')),
+ })
clang6_configure = repository_rule(
implementation = _clang6_configure,
attrs = {
- "_build": attr.label(
- default = str(Label("//third_party/toolchains/clang6:clang.BUILD")),
- ),
- "_crosstool": attr.label(
- default = str(Label("//third_party/toolchains/clang6:CROSSTOOL.tpl")),
- ),
+ '_build': attr.label(
+ default=str(Label('//third_party/toolchains/clang6:clang.BUILD'))),
+ '_crosstool': attr.label(
+ default=str(Label('//third_party/toolchains/clang6:CROSSTOOL.tpl'))),
},
)
diff --git a/third_party/toolchains/cpus/arm/arm_compiler_configure.bzl b/third_party/toolchains/cpus/arm/arm_compiler_configure.bzl
index d675e95f70..ab6eac115c 100644
--- a/third_party/toolchains/cpus/arm/arm_compiler_configure.bzl
+++ b/third_party/toolchains/cpus/arm/arm_compiler_configure.bzl
@@ -1,38 +1,38 @@
# -*- Python -*-
"""Repository rule for arm compiler autoconfiguration."""
-def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
- if not out:
- out = tpl
- repository_ctx.template(
- out,
- Label("//third_party/toolchains/cpus/arm:%s.tpl" % tpl),
- substitutions,
- )
+def _tpl(repository_ctx, tpl, substitutions={}, out=None):
+ if not out:
+ out = tpl
+ repository_ctx.template(
+ out,
+ Label("//third_party/toolchains/cpus/arm:%s.tpl" % tpl),
+ substitutions)
+
def _arm_compiler_configure_impl(repository_ctx):
- # We need to find a cross-compilation include directory for Python, so look
- # for an environment variable. Be warned, this crosstool template is only
- # regenerated on the first run of Bazel, so if you change the variable after
- # it may not be reflected in later builds. Doing a shutdown and clean of Bazel
- # doesn't fix this, you'll need to delete the generated file at something like:
- # external/local_config_arm_compiler/CROSSTOOL in your Bazel install.
- if "CROSSTOOL_PYTHON_INCLUDE_PATH" in repository_ctx.os.environ:
- python_include_path = repository_ctx.os.environ["CROSSTOOL_PYTHON_INCLUDE_PATH"]
- else:
- python_include_path = "/usr/include/python2.7"
- _tpl(repository_ctx, "CROSSTOOL", {
- "%{ARM_COMPILER_PATH}%": str(repository_ctx.path(
- repository_ctx.attr.remote_config_repo,
- )),
- "%{PYTHON_INCLUDE_PATH}%": python_include_path,
- })
- repository_ctx.symlink(repository_ctx.attr.build_file, "BUILD")
+ # We need to find a cross-compilation include directory for Python, so look
+ # for an environment variable. Be warned, this crosstool template is only
+ # regenerated on the first run of Bazel, so if you change the variable afterwards
+ # it may not be reflected in later builds. Doing a shutdown and clean of Bazel
+ # doesn't fix this; you'll need to delete the generated file at something like:
+ # external/local_config_arm_compiler/CROSSTOOL in your Bazel install.
+ if "CROSSTOOL_PYTHON_INCLUDE_PATH" in repository_ctx.os.environ:
+ python_include_path = repository_ctx.os.environ["CROSSTOOL_PYTHON_INCLUDE_PATH"]
+ else:
+ python_include_path = "/usr/include/python2.7"
+ _tpl(repository_ctx, "CROSSTOOL", {
+ "%{ARM_COMPILER_PATH}%": str(repository_ctx.path(
+ repository_ctx.attr.remote_config_repo)),
+ "%{PYTHON_INCLUDE_PATH}%": python_include_path,
+ })
+ repository_ctx.symlink(repository_ctx.attr.build_file, "BUILD")
+
arm_compiler_configure = repository_rule(
implementation = _arm_compiler_configure_impl,
attrs = {
- "remote_config_repo": attr.string(mandatory = False, default = ""),
+ "remote_config_repo": attr.string(mandatory = False, default =""),
"build_file": attr.label(),
},
)
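
A hedged invocation sketch; both labels below are illustrative:

    # remote_config_repo points at the checked-in cross-compiler; the rule
    # expands CROSSTOOL.tpl with that path plus the Python include directory
    # resolved above.
    arm_compiler_configure(
        name = "local_config_arm_compiler",      # illustrative
        remote_config_repo = "../arm_compiler",  # illustrative
        build_file = "//:arm_compiler.BUILD",    # illustrative
    )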
diff --git a/third_party/toolchains/gpus/cuda/build_defs.bzl b/third_party/toolchains/gpus/cuda/build_defs.bzl
index 7295ecb3b4..badaf43019 100644
--- a/third_party/toolchains/gpus/cuda/build_defs.bzl
+++ b/third_party/toolchains/gpus/cuda/build_defs.bzl
@@ -12,13 +12,15 @@ def if_cuda(if_true, if_false = []):
return select({
"@local_config_cuda//cuda:using_nvcc": if_true,
"@local_config_cuda//cuda:using_clang": if_true,
- "//conditions:default": if_false,
+ "//conditions:default": if_false
})
+
def cuda_default_copts():
"""Default options for all CUDA compilations."""
return if_cuda(["-x", "cuda", "-DGOOGLE_CUDA=1"] + ["--cuda-gpu-arch=sm_30"])
+
def cuda_is_configured():
"""Returns true if CUDA was enabled during the configure process."""
return True
@@ -30,5 +32,6 @@ def if_cuda_is_configured(x):
--config=cuda. Used to allow non-CUDA code to depend on CUDA libraries.
"""
if cuda_is_configured():
- return x
+ return x
return []
+
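
Finally, a sketch of how these CUDA helpers combine in a BUILD file; the target is made up:

    # cuda_default_copts() contributes the clang CUDA flags only under the
    # CUDA build configs; if_cuda_is_configured() gates the dependency list.
    cc_library(
        name = "gpu_kernel",      # illustrative
        srcs = ["kernel.cu.cc"],  # illustrative
        copts = cuda_default_copts(),
        deps = if_cuda_is_configured([
            "@local_config_cuda//cuda:cuda_headers",  # assumed label
        ]),
    )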