author    Sanjoy Das <sanjoy@google.com>  2017-10-23 13:51:45 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2017-10-23 14:05:13 -0700
commit    a0ee701f73cc80a56b41c2452006e166e0b835e6 (patch)
tree      2cb1cab35534624e5aec7d92e8cbfdac6da86e6b
parent    3f96f6f956e0e0a6b960e664db4f1e1f2d9b9967 (diff)
Remove contrib/xla_tf_graph/
It isn't needed anymore.

PiperOrigin-RevId: 173165310
-rw-r--r--  tensorflow/BUILD                                            |   1
-rw-r--r--  tensorflow/compiler/xla/BUILD                               |   1
-rw-r--r--  tensorflow/contrib/xla_tf_graph/BUILD                       |  67
-rw-r--r--  tensorflow/contrib/xla_tf_graph/README.md                   |   8
-rw-r--r--  tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.cc        | 247
-rw-r--r--  tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.h         |  72
-rw-r--r--  tensorflow/contrib/xla_tf_graph/xla_tf_graph_util_test.cc   | 134

7 files changed, 0 insertions, 530 deletions
diff --git a/tensorflow/BUILD b/tensorflow/BUILD
index d4396bacbf..673e433a8a 100644
--- a/tensorflow/BUILD
+++ b/tensorflow/BUILD
@@ -448,7 +448,6 @@ filegroup(
"//tensorflow/contrib/training:all_files",
"//tensorflow/contrib/util:all_files",
"//tensorflow/contrib/verbs:all_files",
- "//tensorflow/contrib/xla_tf_graph:all_files",
"//tensorflow/core:all_files",
"//tensorflow/core/debug:all_files",
"//tensorflow/core/distributed_runtime:all_files",
diff --git a/tensorflow/compiler/xla/BUILD b/tensorflow/compiler/xla/BUILD
index e51bbffcd0..0129c51a09 100644
--- a/tensorflow/compiler/xla/BUILD
+++ b/tensorflow/compiler/xla/BUILD
@@ -7,7 +7,6 @@ package_group(
packages = [
"//tensorflow/compiler/...",
"//tensorflow/contrib/tpu/...",
- "//tensorflow/contrib/xla_tf_graph/...",
],
)
diff --git a/tensorflow/contrib/xla_tf_graph/BUILD b/tensorflow/contrib/xla_tf_graph/BUILD
deleted file mode 100644
index 4a3a2de9b5..0000000000
--- a/tensorflow/contrib/xla_tf_graph/BUILD
+++ /dev/null
@@ -1,67 +0,0 @@
-# Description:
-# contains parts of TensorFlow that are experimental or unstable and which are not supported.
-
-package(
- default_visibility = ["//visibility:public"],
-)
-
-licenses(["notice"]) # Apache 2.0
-
-exports_files(["LICENSE"])
-
-load("//tensorflow:tensorflow.bzl", "tf_cc_test")
-
-filegroup(
- name = "all_files",
- srcs = glob(
- ["**/*"],
- exclude = [
- "**/METADATA",
- "**/OWNERS",
- ],
- ),
-)
-
-cc_library(
- name = "xla_tf_graph_util",
- srcs = [
- "xla_tf_graph_util.cc",
- ],
- hdrs = [
- "xla_tf_graph_util.h",
- ],
- deps = [
- "//tensorflow/compiler/tf2xla:xla_compiler",
- "//tensorflow/compiler/xla:status_macros",
- "//tensorflow/compiler/xla/client",
- "//tensorflow/compiler/xla/client:client_library",
- "//tensorflow/core:core_cpu",
- "//tensorflow/core:framework",
- "//tensorflow/core:lib",
- ],
-)
-
-tf_cc_test(
- name = "xla_tf_graph_util_test",
- srcs = ["xla_tf_graph_util_test.cc"],
- linkstatic = 1,
- tags = ["nomac"], # b/63908145
- deps = [
- ":xla_tf_graph_util",
- "//tensorflow/cc:cc_ops",
- "//tensorflow/cc:function_ops",
- "//tensorflow/cc:scope",
- "//tensorflow/compiler/jit:xla_cpu_jit",
- "//tensorflow/compiler/tf2xla:xla_compiler",
- "//tensorflow/compiler/xla:shape_util",
- "//tensorflow/compiler/xla/client:client_library",
- "//tensorflow/compiler/xla/service:hlo_module_config",
- "//tensorflow/core:core_cpu_internal",
- "//tensorflow/core:framework_internal",
- "//tensorflow/core:ops",
- "//tensorflow/core:tensorflow",
- "//tensorflow/core:test",
- "//tensorflow/core:test_main",
- "//tensorflow/core/kernels:cwise_op",
- ],
-)
diff --git a/tensorflow/contrib/xla_tf_graph/README.md b/tensorflow/contrib/xla_tf_graph/README.md
deleted file mode 100644
index a374189e81..0000000000
--- a/tensorflow/contrib/xla_tf_graph/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Xla Tf Graph
-
-## Description
-
-This module contains utilities to treat an XLA representation as a TF graph, in order to support mobile SoC experiments and to leverage TF tools.
-
-Maintainers:
-- Satoshi Kataoka (satok@google.com, github.com/satok16)
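
For context, the README above describes a module whose two public entry points (declared in xla_tf_graph_util.h further down in this commit) convert a TF graph to an xla::SessionModule and then to a map of XlaNode. The following is a minimal usage sketch adapted from the deleted xla_tf_graph_util_test.cc; the Example function and the tf namespace alias are illustrative only, the snippet assumes the removed contrib target and the XLA CPU JIT are still linked in, and error handling is elided.

// Illustrative sketch only: lowers a tiny TF graph (D = A + B) to an XLA
// SessionModule and then to a map of XlaNode, using the removed utilities.
#include <memory>
#include <unordered_map>
#include <vector>

#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.h"

namespace tf = tensorflow;

void Example() {
  // Build a tiny TF graph: D = A + B (the same graph as the deleted test).
  tf::Scope scope = tf::Scope::NewRootScope().ExitOnError();
  auto a = tf::ops::_Arg(scope.WithOpName("A"), tf::DT_INT32, 0);
  auto b = tf::ops::_Arg(scope.WithOpName("B"), tf::DT_INT32, 1);
  auto c = tf::ops::Add(scope.WithOpName("C"), a, b);
  auto d = tf::ops::_Retval(scope.WithOpName("D"), c, 0);
  std::unique_ptr<tf::Graph> graph(new tf::Graph(tf::OpRegistry::Global()));
  TF_CHECK_OK(scope.ToGraph(graph.get()));

  // Describe the two graph parameters.
  std::vector<tf::XlaCompiler::Argument> args(2);
  args[0].kind = tf::XlaCompiler::Argument::kParameter;
  args[0].type = tf::DT_INT32;
  args[0].shape = xla::ShapeUtil::MakeShape(xla::S32, {2, 2});
  args[1].kind = tf::XlaCompiler::Argument::kParameter;
  args[1].type = tf::DT_INT32;
  args[1].shape = xla::ShapeUtil::MakeShape(xla::S32, {2});

  // Lower the TF graph to an xla::SessionModule ...
  std::unique_ptr<xla::SessionModule> session_module =
      tf::xla_tf_graph::ConvertTfGraphToXlaSessionModule(args,
                                                         std::move(graph))
          .ConsumeValueOrDie();

  // ... and re-express it as XlaNodes keyed by computation handle id.
  std::unordered_map<tf::int64, tf::xla_tf_graph::XlaNode> xla_nodes =
      tf::xla_tf_graph::ConvertXlaSessionModuleToXlaNodes(*session_module)
          .ConsumeValueOrDie();
}
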
diff --git a/tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.cc b/tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.cc
deleted file mode 100644
index 302aa6457a..0000000000
--- a/tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.cc
+++ /dev/null
@@ -1,247 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.h"
-
-#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/client/client_library.h"
-#include "tensorflow/core/platform/protobuf.h"
-
-namespace tensorflow {
-namespace xla_tf_graph {
-
-namespace {
-
-constexpr const char* const GRAPH_NAME = "xla_tf_graph";
-constexpr const char* const NODE_NAME_PREFIX = "xla";
-
-Status ConvertPrimitiveTypeToDataType(const xla::PrimitiveType p_type,
- DataType* d_type) {
- switch (p_type) {
- case xla::PRED:
- *d_type = DT_BOOL;
- return Status::OK();
- case xla::S8:
- *d_type = DT_INT8;
- return Status::OK();
- case xla::S16:
- *d_type = DT_INT16;
- return Status::OK();
- case xla::S32:
- *d_type = DT_INT32;
- return Status::OK();
- case xla::S64:
- *d_type = DT_INT64;
- return Status::OK();
- case xla::U8:
- *d_type = DT_UINT8;
- return Status::OK();
- case xla::U16:
- *d_type = DT_UINT16;
- return Status::OK();
- case xla::F16:
- *d_type = DT_HALF;
- return Status::OK();
- case xla::F32:
- *d_type = DT_FLOAT;
- return Status::OK();
- case xla::F64:
- *d_type = DT_DOUBLE;
- return Status::OK();
- default:
- return errors::InvalidArgument(
- "Unsupported PrimitiveType in ConvertPrimitiveTypeToDataType ",
- xla::PrimitiveType_Name(p_type));
- }
-}
-
-Status ConvertXlaShapeToTensorShapeType(const xla::Shape& xla_shape,
- std::vector<TensorShape>* tensor_shapes,
- std::vector<DataType>* data_types) {
- switch (xla_shape.element_type()) {
- case xla::TUPLE: {
- for (const xla::Shape& element_shape : xla_shape.tuple_shapes()) {
- if (element_shape.element_type() == xla::TUPLE) {
- return errors::InvalidArgument("Nested tuple is not allowed.");
- }
- TF_RETURN_IF_ERROR(ConvertXlaShapeToTensorShapeType(
- element_shape, tensor_shapes, data_types));
- }
- return Status::OK();
- }
- case xla::PRED:
- case xla::S8:
- case xla::S16:
- case xla::S32:
- case xla::S64:
- case xla::U8:
- case xla::U16:
- case xla::U32:
- case xla::U64:
- case xla::F16:
- case xla::F32:
- case xla::F64: {
- TensorShape shape;
- DataType type;
- TF_RETURN_IF_ERROR(
- ConvertPrimitiveTypeToDataType(xla_shape.element_type(), &type));
- for (const int64& dim : xla_shape.dimensions()) {
- shape.AddDim(dim);
- }
- tensor_shapes->emplace_back(shape);
- data_types->emplace_back(type);
- return Status::OK();
- }
- default:
- return errors::InvalidArgument(
- "Unsupported PrimitiveType in ConvertXlaShapeToTensorShapeType ",
- xla::PrimitiveType_Name(xla_shape.element_type()));
- }
-}
-
-string BuildXlaNodeName(const xla::OperationRequest& operation_request,
- const string& xla_op_type, const string& suffix) {
- const string name = strings::StrCat(
- NODE_NAME_PREFIX, "/", operation_request.output_handle().handle(), "/",
- xla_op_type);
- if (suffix.empty()) {
- return name;
- } else {
- return strings::StrCat(name, "/", suffix);
- }
-}
-
-string BuildXlaNodeName(const xla::OperationRequest& operation_request,
- const string& xla_op_type) {
- return BuildXlaNodeName(operation_request, xla_op_type, "");
-}
-
-string BuildXlaNodeOp(const protobuf::Message& msg, const string& suffix) {
- return strings::StrCat(msg.GetDescriptor()->name(), "/", suffix);
-}
-
-string BuildXlaNodeOp(const protobuf::Message& msg) {
- return BuildXlaNodeOp(msg, "");
-}
-
-Status ConvertOpRequestToXlaNode(const xla::OperationRequest& operation_request,
- XlaNode* xla_node) {
- const xla::OpRequest& op_request = operation_request.request();
- switch (op_request.op_case()) {
- case xla::OpRequest::kBinaryOpRequest: {
- const xla::BinaryOpRequest& op = op_request.binary_op_request();
- xla_node->op_type =
- BuildXlaNodeOp(op, xla::BinaryOperation_Name(op.binop()));
- xla_node->name = BuildXlaNodeName(operation_request, xla_node->op_type);
- xla_node->input_ids.emplace_back(std::make_tuple(op.lhs().handle(), 0));
- xla_node->input_ids.emplace_back(std::make_tuple(op.rhs().handle(), 0));
- for (const int64& dim : op.broadcast_dimensions()) {
- xla_node->broadcast_dimensions.emplace_back(dim);
- }
- break;
- }
- case xla::OpRequest::kParameterRequest: {
- const xla::ParameterRequest& op = op_request.parameter_request();
- xla_node->op_type = BuildXlaNodeOp(op, "");
- xla_node->name =
- BuildXlaNodeName(operation_request, xla_node->op_type, op.name());
- break;
- }
- case xla::OpRequest::kVariadicOpRequest: {
- const xla::VariadicOpRequest& op = op_request.variadic_op_request();
- xla_node->op_type =
- BuildXlaNodeOp(op, xla::VariadicOperation_Name(op.varop()));
- xla_node->name = BuildXlaNodeName(operation_request, xla_node->op_type);
- for (const xla::ComputationDataHandle& handle : op.operands()) {
- xla_node->input_ids.emplace_back(std::make_tuple(handle.handle(), 0));
- }
- break;
- }
- case xla::OpRequest::kGetTupleElementRequest: {
- const xla::GetTupleElementRequest& op =
- op_request.get_tuple_element_request();
- xla_node->op_type = BuildXlaNodeOp(op);
- xla_node->name = BuildXlaNodeName(operation_request, xla_node->op_type);
- xla_node->input_ids.emplace_back(
- std::make_tuple(op.operand().handle(), op.index()));
- break;
- }
- default:
- // TODO(satok): Implement all possible cases.
- LOG(FATAL) << "Op request: " << op_request.op_case()
- << " is not supported yet.";
- break;
- }
-
- CHECK(!xla_node->name.empty());
- CHECK(!xla_node->op_type.empty());
-
- TF_RETURN_IF_ERROR(ConvertXlaShapeToTensorShapeType(
- operation_request.output_shape(), &xla_node->output_shapes,
- &xla_node->output_data_types));
- return Status::OK();
-}
-
-void SetupXlaCpuClient(std::unique_ptr<FunctionLibraryDefinition>* flib_def,
- std::unique_ptr<XlaCompiler>* compiler) {
- xla::Client* client = xla::ClientLibrary::LocalClientOrDie();
- XlaOpRegistry::RegisterCompilationKernels();
-
- FunctionDefLibrary flib;
- flib_def->reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib));
-
- // Setup compiler options
- XlaCompiler::Options options;
- DeviceType device_type(DEVICE_CPU_XLA_JIT);
- options.device_type = &device_type;
- options.flib_def = flib_def->get();
- options.client = client;
- compiler->reset(new XlaCompiler(options));
-}
-
-} // namespace
-
-xla::StatusOr<std::unique_ptr<xla::SessionModule>>
-ConvertTfGraphToXlaSessionModule(const std::vector<XlaCompiler::Argument>& args,
- std::unique_ptr<Graph> graph) {
- CHECK(graph);
-
- std::unique_ptr<FunctionLibraryDefinition> flib_def;
- std::unique_ptr<XlaCompiler> compiler;
-
- SetupXlaCpuClient(&flib_def, &compiler);
-
- // Compile graph and build computation
- XlaCompiler::CompilationResult result;
- TF_CHECK_OK(compiler->CompileGraph(XlaCompiler::CompileOptions(), GRAPH_NAME,
- std::move(graph), args, &result));
-
- return result.computation->Snapshot();
-}
-
-xla::StatusOr<std::unordered_map<int64, XlaNode>>
-ConvertXlaSessionModuleToXlaNodes(const xla::SessionModule& session_module) {
- std::unordered_map<int64, XlaNode> xla_nodes;
- for (const auto& operation_request : session_module.entry().requests()) {
- XlaNode xla_node;
- TF_RETURN_IF_ERROR(
- ConvertOpRequestToXlaNode(operation_request.second, &xla_node));
- xla_nodes.emplace(operation_request.first, xla_node);
- }
- return std::move(xla_nodes);
-}
-
-} // namespace xla_tf_graph
-} // namespace tensorflow
diff --git a/tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.h b/tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.h
deleted file mode 100644
index e635290851..0000000000
--- a/tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_CONTRIB_XLA_TF_GRAPH_XLA_TF_GRAPH_UTIL_H_
-#define TENSORFLOW_CONTRIB_XLA_TF_GRAPH_XLA_TF_GRAPH_UTIL_H_
-
-#include <unordered_map>
-
-#include "tensorflow/compiler/tf2xla/xla_compiler.h"
-#include "tensorflow/compiler/xla/client/client.h"
-#include "tensorflow/compiler/xla/status_macros.h"
-#include "tensorflow/core/framework/function.h"
-#include "tensorflow/core/graph/graph.h"
-#include "tensorflow/core/platform/macros.h"
-
-namespace tensorflow {
-namespace xla_tf_graph {
-
-// A set of utilities to handle xla computation requests.
-// These utilities help developers leverage existing tools to work with
-// xla computations, and also provide a way to support TensorFlow ops by
-// implementing xla computations so that they can do experiments on their
-// specialized environments.
-
-// A structure to represent typed attributes of TensorFlow graph node.
-// This structure contains op specific attributes as members so that
-// we can treat them explicitly.
-struct XlaNode {
- // Unique node name
- string name;
- // Op type of xla computation
- string op_type;
- // List of pair of unique id and port of input node.
- // We store this value instead
- // of node name in order not to wait for all XlaNodes to be constructed.
- std::vector<std::tuple<int64, int>> input_ids;
- // Output shapes
- std::vector<TensorShape> output_shapes;
- // Output data types
- std::vector<DataType> output_data_types;
-
- //---------------------------
- // Op specific attributes
- // #xla::OpRequest::kBinaryOpRequest
- std::vector<int64> broadcast_dimensions;
-};
-
-// Convert a tf graph to a xla session module
-xla::StatusOr<std::unique_ptr<xla::SessionModule>>
-ConvertTfGraphToXlaSessionModule(const std::vector<XlaCompiler::Argument>& args,
- std::unique_ptr<Graph> graph);
-
-// Convert a xla session module to a map to XlaNode from unique id
-xla::StatusOr<std::unordered_map<int64, XlaNode>>
-ConvertXlaSessionModuleToXlaNodes(const xla::SessionModule& session_module);
-
-} // namespace xla_tf_graph
-} // namespace tensorflow
-
-#endif // TENSORFLOW_CONTRIB_XLA_TF_GRAPH_XLA_TF_GRAPH_UTIL_H_
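
To make the XlaNode structure and the map returned by ConvertXlaSessionModuleToXlaNodes concrete, here is a small illustrative walker. PrintXlaNodes is a hypothetical helper, not part of the removed code; it only reads fields declared in the header above.

#include <iostream>
#include <unordered_map>

#include "tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.h"
#include "tensorflow/core/framework/types.h"

// Prints the typed attributes carried by each XlaNode: its unique name, the
// xla op type, the (producer handle, output port) pairs it consumes, and the
// dtype/shape of each output.
void PrintXlaNodes(
    const std::unordered_map<tensorflow::int64,
                             tensorflow::xla_tf_graph::XlaNode>& nodes) {
  for (const auto& entry : nodes) {
    const tensorflow::xla_tf_graph::XlaNode& node = entry.second;
    std::cout << "handle " << entry.first << ": " << node.name << " ("
              << node.op_type << ")\n";
    for (const auto& input : node.input_ids) {
      std::cout << "  input: handle " << std::get<0>(input) << ", port "
                << std::get<1>(input) << "\n";
    }
    for (size_t i = 0; i < node.output_shapes.size(); ++i) {
      std::cout << "  output " << i << ": "
                << tensorflow::DataTypeString(node.output_data_types[i]) << " "
                << node.output_shapes[i].DebugString() << "\n";
    }
  }
}
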
diff --git a/tensorflow/contrib/xla_tf_graph/xla_tf_graph_util_test.cc b/tensorflow/contrib/xla_tf_graph/xla_tf_graph_util_test.cc
deleted file mode 100644
index 144269303e..0000000000
--- a/tensorflow/contrib/xla_tf_graph/xla_tf_graph_util_test.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/contrib/xla_tf_graph/xla_tf_graph_util.h"
-#include "tensorflow/cc/framework/scope.h"
-#include "tensorflow/cc/ops/function_ops.h"
-#include "tensorflow/cc/ops/standard_ops.h"
-#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
-#include "tensorflow/compiler/xla/client/client_library.h"
-#include "tensorflow/compiler/xla/service/hlo_module_config.h"
-#include "tensorflow/compiler/xla/shape_util.h"
-#include "tensorflow/core/platform/test.h"
-
-namespace tensorflow {
-namespace xla_tf_graph {
-
-static std::unique_ptr<Graph> BuildAddGraph() {
- Scope scope = Scope::NewRootScope().ExitOnError();
- auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
- auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
- // See tf2xla/kernels/binary_ops.cc
- auto c = ops::Add(scope.WithOpName("C"), a, b);
- auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
- std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
- TF_CHECK_OK(scope.ToGraph(graph.get()));
- return graph;
-}
-
-static std::vector<XlaCompiler::Argument> BuildAddGraphArguments() {
- // Builds a description of the arguments.
- std::vector<XlaCompiler::Argument> args(2);
- args[0].kind = XlaCompiler::Argument::kParameter;
- args[0].type = DT_INT32;
- // Difference of dimension will add extra broadcast_dimensions.
- // broadcast_dimension generates an additional HloInstruction
- // in user_computation.cc
- args[0].shape = xla::ShapeUtil::MakeShape(xla::S32, {2, 2});
- args[1].kind = XlaCompiler::Argument::kParameter;
- args[1].type = DT_INT32;
- args[1].shape = xla::ShapeUtil::MakeShape(xla::S32, {2});
- return args;
-}
-
-// CAVEAT: Debug purpose only.
-// This function dumps a protobuf string format of HloModule.
-static void DumpHloGraphForDebug(const std::vector<XlaCompiler::Argument>& args,
- std::unique_ptr<Graph> graph) {
- std::unique_ptr<FunctionLibraryDefinition> flib_def;
- std::unique_ptr<FunctionLibraryRuntime> flr;
- std::unique_ptr<XlaCompiler> compiler;
-
- xla::Client* client = xla::ClientLibrary::LocalClientOrDie();
- XlaOpRegistry::RegisterCompilationKernels();
-
- FunctionDefLibrary flib;
- flib_def.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib));
-
- // Compiles the graph.
- XlaCompiler::Options options;
- DeviceType device_type("XLA_CPU_JIT");
- options.device_type = &device_type;
- options.client = client;
- options.flib_def = flib_def.get();
- compiler.reset(new XlaCompiler(options));
-
- // Compile graph
- XlaCompiler::CompilationResult result;
- TF_CHECK_OK(compiler->CompileGraph(XlaCompiler::CompileOptions(), "dump",
- std::move(graph), args, &result));
-
- // Convert to hlo
- xla::Computation& computation = *result.computation;
-
- xla::Service* service(
- static_cast<xla::Service*>(xla::ClientLibrary::GetXlaService(
- static_cast<xla::LocalClient*>(client)->platform())));
- const xla::ComputationTracker& computation_tracker =
- service->computation_tracker();
-
- auto user_computation_status =
- computation_tracker.Resolve(computation.handle());
- TF_CHECK_OK(user_computation_status.status());
- auto user_computation = user_computation_status.ConsumeValueOrDie();
- xla::VersionedComputationHandle versioned_handle =
- user_computation->GetVersionedHandle();
- std::unique_ptr<xla::HloModule> hlo_module =
- std::move(computation_tracker
- .BuildHloModule(versioned_handle, xla::HloModuleConfig())
- .ValueOrDie());
- VLOG(1) << "--- DUMP HLO ---";
- VLOG(1) << hlo_module->ToString();
-}
-
-TEST(XlaTfGraphUtil, ConvertTfGraphToSessionModule) {
- // Builds a description of the arguments.
- std::vector<XlaCompiler::Argument> args = BuildAddGraphArguments();
- std::unique_ptr<Graph> graph = BuildAddGraph();
-
- TF_ASSERT_OK_AND_ASSIGN(
- std::unique_ptr<xla::SessionModule> session_module,
- ConvertTfGraphToXlaSessionModule(args, std::move(graph)));
-
- ASSERT_EQ(4, session_module->entry().requests_size());
-
- VLOG(1) << "--- DUMP ---";
- VLOG(1) << session_module->DebugString();
- DumpHloGraphForDebug(args, BuildAddGraph());
-}
-
-TEST(XlaTfGraphUtil, ConvertXlaSessionModuleToXlaNodes) {
- std::vector<XlaCompiler::Argument> args = BuildAddGraphArguments();
- std::unique_ptr<Graph> graph = BuildAddGraph();
- TF_ASSERT_OK_AND_ASSIGN(
- std::unique_ptr<xla::SessionModule> session_module,
- ConvertTfGraphToXlaSessionModule(args, std::move(graph)));
- TF_ASSERT_OK_AND_ASSIGN(auto xla_nodes,
- ConvertXlaSessionModuleToXlaNodes(*session_module));
- EXPECT_EQ(session_module->entry().requests_size(), xla_nodes.size());
-}
-
-} // namespace xla_tf_graph
-} // namespace tensorflow