diff options
author | 2017-06-21 14:43:08 -0700 | |
---|---|---|
committer | 2017-06-21 14:47:13 -0700 | |
commit | f787d718967b3586561287a1506aec03e614d8dd (patch) | |
tree | 0af93296f8fbca60142222c36c5f63828a7c5a5f | |
parent | cda959d92c279f2ec687275d59d8314569df2f65 (diff) |
[XLA] Remove xla_cpu_*_eigen flags from CPU backends.
These flags are currently de facto unused; parallelism should be controlled
through the cpu_parallel backend. For configuring Eigen, if needed, the options
should be piped more directly to the code.
PiperOrigin-RevId: 159746509
-rw-r--r-- | tensorflow/compiler/aot/BUILD | 1 | ||||
-rw-r--r-- | tensorflow/compiler/aot/tfcompile_main.cc | 2 | ||||
-rw-r--r-- | tensorflow/compiler/tests/BUILD | 1 | ||||
-rw-r--r-- | tensorflow/compiler/xla/legacy_flags/BUILD | 12 | ||||
-rw-r--r-- | tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.cc | 71 | ||||
-rw-r--r-- | tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h | 51 | ||||
-rw-r--r-- | tensorflow/compiler/xla/service/cpu/BUILD | 5 | ||||
-rw-r--r-- | tensorflow/compiler/xla/service/cpu/compiler_functor.cc | 5 | ||||
-rw-r--r-- | tensorflow/compiler/xla/service/cpu/conv_canonicalization.cc | 6 | ||||
-rw-r--r-- | tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc | 11 | ||||
-rw-r--r-- | tensorflow/compiler/xla/service/cpu/ir_emission_utils.cc | 11 | ||||
-rw-r--r-- | tensorflow/compiler/xla/service/cpu/ir_emitter.cc | 7 | ||||
-rw-r--r-- | tensorflow/compiler/xla/tests/BUILD | 42 | ||||
-rw-r--r-- | tensorflow/compiler/xla/tests/dot_operation_test.cc | 2 |
14 files changed, 4 insertions, 223 deletions
diff --git a/tensorflow/compiler/aot/BUILD b/tensorflow/compiler/aot/BUILD index 31637358c3..3e1670dce3 100644 --- a/tensorflow/compiler/aot/BUILD +++ b/tensorflow/compiler/aot/BUILD @@ -128,7 +128,6 @@ cc_library( ":tfcompile_proto", "//tensorflow/compiler/xla/legacy_flags:buffer_assignment_flags", "//tensorflow/compiler/xla/legacy_flags:compiler_functor_flags", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", "//tensorflow/compiler/xla/legacy_flags:debug_options_flags", "//tensorflow/compiler/xla/legacy_flags:hlo_graph_dumper_flags", "//tensorflow/compiler/xla/legacy_flags:service_flags", diff --git a/tensorflow/compiler/aot/tfcompile_main.cc b/tensorflow/compiler/aot/tfcompile_main.cc index e03d28cd96..26a1b203ac 100644 --- a/tensorflow/compiler/aot/tfcompile_main.cc +++ b/tensorflow/compiler/aot/tfcompile_main.cc @@ -25,7 +25,6 @@ limitations under the License. #include "tensorflow/compiler/aot/tfcompile_util.h" #include "tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.h" #include "tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.h" -#include "tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h" #include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h" #include "tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.h" #include "tensorflow/compiler/xla/legacy_flags/service_flags.h" @@ -135,7 +134,6 @@ int main(int argc, char** argv) { AppendMainFlags(&flag_list, &flags); xla::legacy_flags::AppendBufferAssignmentFlags(&flag_list); xla::legacy_flags::AppendCompilerFunctorFlags(&flag_list); - xla::legacy_flags::AppendCpuRuntimeFlags(&flag_list); xla::legacy_flags::AppendHloGraphDumperFlags(&flag_list); xla::legacy_flags::AppendDebugOptionsFlags(&flag_list); xla::legacy_flags::AppendServiceFlags(&flag_list); diff --git a/tensorflow/compiler/tests/BUILD b/tensorflow/compiler/tests/BUILD index 044857d422..45642c9aa2 100644 --- a/tensorflow/compiler/tests/BUILD +++ b/tensorflow/compiler/tests/BUILD @@ -535,7 
+535,6 @@ tf_library( cpp_class = "LSTMLayerInference", graph = "lstm_layer_inference.pbtxt", tags = ["manual"], - tfcompile_flags = "--xla_cpu_multi_thread_eigen=false", ) # ----------------------------------------------------------------------------- diff --git a/tensorflow/compiler/xla/legacy_flags/BUILD b/tensorflow/compiler/xla/legacy_flags/BUILD index fafd5f591b..ac049c9b7a 100644 --- a/tensorflow/compiler/xla/legacy_flags/BUILD +++ b/tensorflow/compiler/xla/legacy_flags/BUILD @@ -80,18 +80,6 @@ cc_library( ) cc_library( - name = "cpu_runtime_flags", - srcs = ["cpu_runtime_flags.cc"], - hdrs = ["cpu_runtime_flags.h"], - deps = - [ - ":parse_flags_from_env", - "//tensorflow/core:framework_internal", - "//tensorflow/core:lib", - ], -) - -cc_library( name = "compiler_functor_flags", srcs = ["compiler_functor_flags.cc"], hdrs = ["compiler_functor_flags.h"], diff --git a/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.cc b/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.cc deleted file mode 100644 index d7817c5d54..0000000000 --- a/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.cc +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Legacy flags for XLA's cpu_runtime module. - -#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex. 
-#include <vector> - -#include "tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h" -#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h" -#include "tensorflow/core/platform/types.h" -#include "tensorflow/core/util/command_line_flags.h" - -namespace xla { -namespace legacy_flags { - -// Pointers to the parsed value of the flags and flag descriptors, initialized -// via flags_init. -static CpuRuntimeFlags* flags; -static std::vector<tensorflow::Flag>* flag_list; -static std::once_flag flags_init; - -// Allocate *flags. Called via call_once(&flags_init,...). -static void AllocateFlags() { - flags = new CpuRuntimeFlags; - flags->xla_cpu_use_eigen = true; - flags->xla_cpu_multi_thread_eigen = true; - flag_list = new std::vector<tensorflow::Flag>({ - tensorflow::Flag( - "xla_cpu_use_eigen", &flags->xla_cpu_use_eigen, - "Use Eigen for matrix multiply on the CPU platform. This " - "is a useful hack for performance comparisons against " - "XLA's implementation."), - tensorflow::Flag( - "xla_cpu_multi_thread_eigen", &flags->xla_cpu_multi_thread_eigen, - "When generating calls to Eigen for matmul and conv, should " - "single or multi-threaded eigen be used? " - "Only used when --xla_cpu_use_eigen is true."), - }); - ParseFlagsFromEnv(*flag_list); -} - -// Append to *append_to flag definitions associated with XLA's cpu_runtime -// module. -void AppendCpuRuntimeFlags(std::vector<tensorflow::Flag>* append_to) { - std::call_once(flags_init, &AllocateFlags); - append_to->insert(append_to->end(), flag_list->begin(), flag_list->end()); -} - -// Return a pointer to the CpuRuntimeFlags struct; -// repeated calls return the same pointer. -// This should be called only after Flags::Parse() has returned. 
-CpuRuntimeFlags* GetCpuRuntimeFlags() { - std::call_once(flags_init, &AllocateFlags); - return flags; -} - -} // namespace legacy_flags -} // namespace xla diff --git a/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h b/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h deleted file mode 100644 index e3ff30da36..0000000000 --- a/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CPU_RUNTIME_FLAGS_H_ -#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CPU_RUNTIME_FLAGS_H_ - -// Legacy flags for the XLA's cpu_runtime module. - -#include <vector> - -#include "tensorflow/core/platform/types.h" -#include "tensorflow/core/util/command_line_flags.h" - -namespace xla { -namespace legacy_flags { - -// Append to *flag_list flag definitions associated with XLA's cpu_runtime -// module. -void AppendCpuRuntimeFlags(std::vector<tensorflow::Flag>* flag_list); - -// The values of flags associated with XLA's cpu_runtime module. -typedef struct { - // Use Eigen for matrix multiply on the CPU platform. This is a useful hack - // for performance comparisons against XLA's implementation. - bool xla_cpu_use_eigen; - // When generating calls to Eigen for matmul and conv, should single or - // multi-threaded eigen be used? 
Only used when --xla_cpu_use_eigen is true. - bool xla_cpu_multi_thread_eigen; -} CpuRuntimeFlags; - -// Return a pointer to the CpuRuntimeFlags struct; -// repeated calls return the same pointer. -// This should be called only after Flags::Parse() has returned. -CpuRuntimeFlags* GetCpuRuntimeFlags(); - -} // namespace legacy_flags -} // namespace xla - -#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CPU_RUNTIME_FLAGS_H_ diff --git a/tensorflow/compiler/xla/service/cpu/BUILD b/tensorflow/compiler/xla/service/cpu/BUILD index 68cd545695..93b9281874 100644 --- a/tensorflow/compiler/xla/service/cpu/BUILD +++ b/tensorflow/compiler/xla/service/cpu/BUILD @@ -197,7 +197,6 @@ cc_library( "//tensorflow/compiler/xla:util", "//tensorflow/compiler/xla:window_util", "//tensorflow/compiler/xla:xla_data_proto", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", "//tensorflow/compiler/xla/service:buffer_assignment", "//tensorflow/compiler/xla/service:elemental_ir_emitter", "//tensorflow/compiler/xla/service:hlo", @@ -228,7 +227,6 @@ cc_library( "//tensorflow/compiler/xla:types", "//tensorflow/compiler/xla:util", "//tensorflow/compiler/xla:xla_data_proto", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", "//tensorflow/compiler/xla/service:hlo", "//tensorflow/compiler/xla/service/llvm_ir:ir_array", "//tensorflow/compiler/xla/service/llvm_ir:llvm_loop", @@ -290,7 +288,6 @@ cc_library( "//tensorflow/compiler/xla:types", "//tensorflow/compiler/xla:util", "//tensorflow/compiler/xla/legacy_flags:compiler_functor_flags", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", "//tensorflow/compiler/xla/service/llvm_ir:llvm_util", "//tensorflow/core:lib", "@llvm//:analysis", @@ -485,7 +482,6 @@ cc_library( ":cpu_runtime", "//tensorflow/compiler/xla:shape_util", "//tensorflow/compiler/xla:window_util", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", "//tensorflow/compiler/xla/service:hlo", ], ) @@ -512,7 +508,6 @@ cc_library( 
"//tensorflow/compiler/xla:shape_util", "//tensorflow/compiler/xla:util", "//tensorflow/compiler/xla:xla_data_proto", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", "//tensorflow/compiler/xla/service:hlo", "//tensorflow/compiler/xla/service:hlo_pass", "//tensorflow/core:lib", diff --git a/tensorflow/compiler/xla/service/cpu/compiler_functor.cc b/tensorflow/compiler/xla/service/cpu/compiler_functor.cc index 8ebf9ab110..93895c3482 100644 --- a/tensorflow/compiler/xla/service/cpu/compiler_functor.cc +++ b/tensorflow/compiler/xla/service/cpu/compiler_functor.cc @@ -36,7 +36,6 @@ limitations under the License. #include "external/llvm/include/llvm/Transforms/IPO/AlwaysInliner.h" #include "external/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h" #include "tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.h" -#include "tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h" #include "tensorflow/compiler/xla/ptr_util.h" #include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h" #include "tensorflow/compiler/xla/service/cpu/cpu_runtime_avx.h" @@ -159,9 +158,7 @@ std::vector<llvm::VecDesc> VectorFunctionsForTargetLibraryInfoImpl( // Our vectorized library calls are currently implement by calling into Eigen. // As such, only emit calls to these routines if --xla_cpu_use_eigen is // enabled. 
- legacy_flags::CpuRuntimeFlags* flags = legacy_flags::GetCpuRuntimeFlags(); - if (flags->xla_cpu_use_eigen && - (arch == llvm::Triple::x86 || llvm::Triple::x86_64)) { + if (arch == llvm::Triple::x86 || llvm::Triple::x86_64) { llvm::SmallVector<llvm::StringRef, 32> features; feature_string.split(features, ',', -1, /*KeepEmpty=*/false); if (std::find(features.begin(), features.end(), "+sse4.1") != diff --git a/tensorflow/compiler/xla/service/cpu/conv_canonicalization.cc b/tensorflow/compiler/xla/service/cpu/conv_canonicalization.cc index cdf43587b6..069979c661 100644 --- a/tensorflow/compiler/xla/service/cpu/conv_canonicalization.cc +++ b/tensorflow/compiler/xla/service/cpu/conv_canonicalization.cc @@ -15,7 +15,6 @@ limitations under the License. #include "tensorflow/compiler/xla/service/cpu/conv_canonicalization.h" -#include "tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h" #include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h" #include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h" #include "tensorflow/compiler/xla/service/hlo_computation.h" @@ -30,11 +29,6 @@ namespace xla { namespace cpu { StatusOr<bool> ConvCanonicalization::Run(HloModule* module) { - legacy_flags::CpuRuntimeFlags* flags = legacy_flags::GetCpuRuntimeFlags(); - if (!flags->xla_cpu_use_eigen) { - return false; - } - bool changed = false; for (HloInstruction* hlo : module->entry_computation()->MakeInstructionPostOrder()) { diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc index 420f9cebc5..a8d2565b64 100644 --- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc +++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc @@ -22,7 +22,6 @@ limitations under the License. 
#include "external/llvm/include/llvm/IR/Instructions.h" #include "external/llvm/include/llvm/IR/Module.h" #include "external/llvm/include/llvm/IR/Value.h" -#include "tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h" #include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h" #include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h" #include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h" @@ -233,22 +232,16 @@ tensorflow::Status DotOpEmitter::EmitCallToRuntime() { // The two transpose_... parameters are actually booleans, but we use int32 // to avoid target-dependent calling convention details. - legacy_flags::CpuRuntimeFlags* flags = legacy_flags::GetCpuRuntimeFlags(); - bool multi_threaded = flags->xla_cpu_multi_thread_eigen; PrimitiveType type = target_array_.GetShape().element_type(); llvm::Type* float_type; const char* fn_name; switch (type) { case F32: - fn_name = multi_threaded - ? runtime::kEigenMatmulF32SymbolName - : runtime::kEigenSingleThreadedMatmulF32SymbolName; + fn_name = runtime::kEigenMatmulF32SymbolName; float_type = ir_builder_->getFloatTy(); break; case F64: - fn_name = multi_threaded - ? runtime::kEigenMatmulF64SymbolName - : runtime::kEigenSingleThreadedMatmulF64SymbolName; + fn_name = runtime::kEigenMatmulF64SymbolName; float_type = ir_builder_->getDoubleTy(); break; default: diff --git a/tensorflow/compiler/xla/service/cpu/ir_emission_utils.cc b/tensorflow/compiler/xla/service/cpu/ir_emission_utils.cc index 2d855d0eb1..859329e2c1 100644 --- a/tensorflow/compiler/xla/service/cpu/ir_emission_utils.cc +++ b/tensorflow/compiler/xla/service/cpu/ir_emission_utils.cc @@ -16,7 +16,6 @@ limitations under the License. 
#include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h" #include "tensorflow/compiler/xla/layout_util.h" -#include "tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h" #include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h" #include "tensorflow/compiler/xla/shape_util.h" #include "tensorflow/compiler/xla/window_util.h" @@ -26,11 +25,6 @@ namespace cpu { bool PotentiallyImplementedAsEigenConvolution( const HloInstruction& convolution) { - legacy_flags::CpuRuntimeFlags* flags = legacy_flags::GetCpuRuntimeFlags(); - if (!flags->xla_cpu_use_eigen) { - return false; - } - // The following conditions are necessary (but not sufficient) for // implementing `convolution` with Eigen convolution: // - the input and kernel have a non-zero number of elements. @@ -82,11 +76,6 @@ bool AreValidGemmShapes(const Shape& lhs_shape, const Shape& rhs_shape, } // namespace bool PotentiallyImplementedAsEigenDot(const HloInstruction& hlo) { - legacy_flags::CpuRuntimeFlags* flags = legacy_flags::GetCpuRuntimeFlags(); - if (!flags->xla_cpu_use_eigen) { - return false; - } - // For certain types of Dot, we can call Eigen if (hlo.opcode() == HloOpcode::kDot) { const Shape& lhs_shape = hlo.operand(0)->shape(); diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc index fee5fd8830..695c8069c4 100644 --- a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc +++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc @@ -33,7 +33,6 @@ limitations under the License. 
#include "external/llvm/include/llvm/IR/Intrinsics.h" #include "external/llvm/include/llvm/IR/LLVMContext.h" #include "tensorflow/compiler/xla/layout_util.h" -#include "tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h" #include "tensorflow/compiler/xla/map_util.h" #include "tensorflow/compiler/xla/service/buffer_assignment.h" #include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h" @@ -862,11 +861,7 @@ Status IrEmitter::HandleConvolution(HloInstruction* convolution, int64_type, int64_type, int64_type, int64_type, int64_type, int64_type, int64_type, int64_type}, /*isVarArg=*/false); - legacy_flags::CpuRuntimeFlags* flags = legacy_flags::GetCpuRuntimeFlags(); - const char* fn_name = - (flags->xla_cpu_multi_thread_eigen - ? runtime::kEigenConvF32SymbolName - : runtime::kEigenSingleThreadedConvF32SymbolName); + const char* fn_name = runtime::kEigenConvF32SymbolName; llvm::Function* conv_func = llvm::cast<llvm::Function>( module_->getOrInsertFunction(fn_name, conv_type)); conv_func->setCallingConv(llvm::CallingConv::C); diff --git a/tensorflow/compiler/xla/tests/BUILD b/tensorflow/compiler/xla/tests/BUILD index a11ac0bec6..184b6b684e 100644 --- a/tensorflow/compiler/xla/tests/BUILD +++ b/tensorflow/compiler/xla/tests/BUILD @@ -496,7 +496,6 @@ xla_test( "//tensorflow/compiler/xla:shape_util", "//tensorflow/compiler/xla/client:computation_builder", "//tensorflow/compiler/xla/client:local_client", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", "//tensorflow/compiler/xla/legacy_flags:debug_options_flags", "//tensorflow/compiler/xla/legacy_flags:layout_util_flags", "//tensorflow/compiler/xla/tests:client_library_test_base", @@ -513,43 +512,6 @@ xla_test( xla_test( name = "dot_operation_runtime_test", srcs = ["dot_operation_test.cc"], - backend_args = { - "cpu": ["--xla_cpu_use_eigen"], - "cpu_parallel": ["--xla_cpu_use_eigen"], - }, - deps = [ - "//tensorflow/compiler/xla:array2d", - "//tensorflow/compiler/xla:array3d", - 
"//tensorflow/compiler/xla:reference_util", - "//tensorflow/compiler/xla:shape_util", - "//tensorflow/compiler/xla/client:computation_builder", - "//tensorflow/compiler/xla/client:local_client", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", - "//tensorflow/compiler/xla/legacy_flags:debug_options_flags", - "//tensorflow/compiler/xla/legacy_flags:layout_util_flags", - "//tensorflow/compiler/xla/tests:client_library_test_base", - "//tensorflow/compiler/xla/tests:literal_test_util", - "//tensorflow/compiler/xla/tests:test_utils", - "//tensorflow/core:framework_internal", - "//tensorflow/core:lib", - "//tensorflow/core:test", - ], -) - -# Repeat dot_operation_runtime_test with single-threded eigen. -xla_test( - name = "dot_operation_single_threaded_runtime_test", - srcs = ["dot_operation_test.cc"], - backend_args = { - "cpu": [ - "--xla_cpu_use_eigen", - "--xla_cpu_multi_thread_eigen=false", - ], - "cpu_parallel": [ - "--xla_cpu_use_eigen", - "--xla_cpu_multi_thread_eigen=false", - ], - }, deps = [ "//tensorflow/compiler/xla:array2d", "//tensorflow/compiler/xla:array3d", @@ -557,7 +519,6 @@ xla_test( "//tensorflow/compiler/xla:shape_util", "//tensorflow/compiler/xla/client:computation_builder", "//tensorflow/compiler/xla/client:local_client", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", "//tensorflow/compiler/xla/legacy_flags:debug_options_flags", "//tensorflow/compiler/xla/legacy_flags:layout_util_flags", "//tensorflow/compiler/xla/tests:client_library_test_base", @@ -574,11 +535,9 @@ xla_test( srcs = ["dot_operation_test.cc"], backend_args = { "cpu": [ - "--xla_cpu_use_eigen", "--xla_default_layout=major2minor", ], "cpu_parallel": [ - "--xla_cpu_use_eigen", "--xla_default_layout=major2minor", ], }, @@ -589,7 +548,6 @@ xla_test( "//tensorflow/compiler/xla:shape_util", "//tensorflow/compiler/xla/client:computation_builder", "//tensorflow/compiler/xla/client:local_client", - "//tensorflow/compiler/xla/legacy_flags:cpu_runtime_flags", 
"//tensorflow/compiler/xla/legacy_flags:debug_options_flags", "//tensorflow/compiler/xla/legacy_flags:layout_util_flags", "//tensorflow/compiler/xla/tests:client_library_test_base", diff --git a/tensorflow/compiler/xla/tests/dot_operation_test.cc b/tensorflow/compiler/xla/tests/dot_operation_test.cc index b06b5c5f47..7abef6a27b 100644 --- a/tensorflow/compiler/xla/tests/dot_operation_test.cc +++ b/tensorflow/compiler/xla/tests/dot_operation_test.cc @@ -20,7 +20,6 @@ limitations under the License. #include "tensorflow/compiler/xla/array3d.h" #include "tensorflow/compiler/xla/client/computation_builder.h" #include "tensorflow/compiler/xla/client/local_client.h" -#include "tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h" #include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h" #include "tensorflow/compiler/xla/legacy_flags/layout_util_flags.h" #include "tensorflow/compiler/xla/primitive_util.h" @@ -461,7 +460,6 @@ int main(int argc, char** argv) { std::vector<tensorflow::Flag> flag_list; xla::legacy_flags::AppendLayoutUtilFlags(&flag_list); xla::legacy_flags::AppendDebugOptionsFlags(&flag_list); - xla::legacy_flags::AppendCpuRuntimeFlags(&flag_list); xla::string usage = tensorflow::Flags::Usage(argv[0], flag_list); const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list); if (!parse_result) { |