diff options
author | 2017-06-16 12:37:05 -0700 | |
---|---|---|
committer | 2017-06-16 12:40:16 -0700 | |
commit | 12efd3d0bbea953e52aee12eb5a3d5d2269ec16a (patch) | |
tree | a129c2898e04d437d716f7a95ed545bc5db07e5b /tensorflow/compiler/xla/legacy_flags | |
parent | 1f7990e6d762a0444b1c0a94c7e70e7cf5c9d0a8 (diff) |
[XLA] Remove gpu/gpu_backend-specific flags
Move useful flags into debug_options, and leave some less used flags out - they
can be propagated through debug_options if required (for now there's too much
duplication between them and what's already inside)
PiperOrigin-RevId: 159261661
Diffstat (limited to 'tensorflow/compiler/xla/legacy_flags')
6 files changed, 17 insertions, 288 deletions
diff --git a/tensorflow/compiler/xla/legacy_flags/BUILD b/tensorflow/compiler/xla/legacy_flags/BUILD index 09101708f2..abaccb81e5 100644 --- a/tensorflow/compiler/xla/legacy_flags/BUILD +++ b/tensorflow/compiler/xla/legacy_flags/BUILD @@ -115,30 +115,6 @@ cc_library( ) cc_library( - name = "gpu_compiler_flags", - srcs = ["gpu_compiler_flags.cc"], - hdrs = ["gpu_compiler_flags.h"], - deps = [ - ":parse_flags_from_env", - "//tensorflow/compiler/xla:types", - "//tensorflow/core:framework_internal", - "//tensorflow/core:lib", - ], -) - -cc_library( - name = "gpu_backend_lib_flags", - srcs = ["gpu_backend_lib_flags.cc"], - hdrs = ["gpu_backend_lib_flags.h"], - deps = [ - ":parse_flags_from_env", - "//tensorflow/compiler/xla:types", - "//tensorflow/core:framework_internal", - "//tensorflow/core:lib", - ], -) - -cc_library( name = "stream_assignment_flags", srcs = ["stream_assignment_flags.cc"], hdrs = ["stream_assignment_flags.h"], diff --git a/tensorflow/compiler/xla/legacy_flags/debug_options_flags.cc b/tensorflow/compiler/xla/legacy_flags/debug_options_flags.cc index 52b048ec15..5f029a5f53 100644 --- a/tensorflow/compiler/xla/legacy_flags/debug_options_flags.cc +++ b/tensorflow/compiler/xla/legacy_flags/debug_options_flags.cc @@ -30,6 +30,10 @@ struct DebugOptionsFlags { int32 xla_backend_optimization_level; bool xla_embed_ir_in_executable; string xla_dump_debug_json_to; + + string xla_gpu_cuda_data_dir; + bool xla_gpu_ftz; + string xla_backend_extra_options; }; @@ -46,9 +50,11 @@ void AllocateFlags() { flag_values->xla_generate_hlo_graph = ""; flag_values->xla_disable_hlo_passes = ""; flag_values->xla_enable_fast_math = true; - flag_values->xla_backend_optimization_level = 2; + flag_values->xla_backend_optimization_level = 3; flag_values->xla_embed_ir_in_executable = false; flag_values->xla_dump_debug_json_to = ""; + flag_values->xla_gpu_cuda_data_dir = "./cuda_sdk_lib"; + flag_values->xla_gpu_ftz = false; flag_values->xla_backend_extra_options = ""; flag_objects = 
new std::vector<tensorflow::Flag>( @@ -72,6 +78,14 @@ void AllocateFlags() { tensorflow::Flag("xla_embed_ir_in_executable", &flag_values->xla_embed_ir_in_executable, "Embed the compiler IR as a string in the executable."), + tensorflow::Flag("xla_gpu_cuda_data_dir", + &flag_values->xla_gpu_cuda_data_dir, + "If non-empty, specifies a local directory containing " + "ptxas and nvvm libdevice files; otherwise we use " + "those from runfile directories."), + tensorflow::Flag("xla_gpu_ftz", &flag_values->xla_gpu_ftz, + "If true, flush-to-zero semantics are enabled in the " + "code generated for GPUs."), tensorflow::Flag( "xla_dump_debug_json_to", &flag_values->xla_dump_debug_json_to, "Dump compilation artifacts as JSON into this directory."), @@ -110,6 +124,8 @@ xla::DebugOptions GetDebugOptionsFromFlags() { options.set_xla_embed_ir_in_executable( flag_values->xla_embed_ir_in_executable); options.set_xla_dump_debug_json_to(flag_values->xla_dump_debug_json_to); + options.set_xla_gpu_cuda_data_dir(flag_values->xla_gpu_cuda_data_dir); + options.set_xla_gpu_ftz(flag_values->xla_gpu_ftz); std::vector<string> extra_options_parts = tensorflow::str_util::Split(flag_values->xla_backend_extra_options, ','); diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.cc b/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.cc deleted file mode 100644 index f8f6ea26b1..0000000000 --- a/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.cc +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Legacy flags for XLA's gpu_backend_lib module. - -#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex. -#include <vector> - -#include "tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.h" -#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h" -#include "tensorflow/core/platform/types.h" -#include "tensorflow/core/util/command_line_flags.h" - -namespace xla { -namespace legacy_flags { - -// Pointers to the parsed value of the flags and flag descriptors, initialized -// via flags_init. -static GpuBackendLibFlags* flags; -static std::vector<tensorflow::Flag>* flag_list; -static std::once_flag flags_init; - -// Allocate *flags. Called via call_once(&flags_init,...). -static void AllocateFlags() { - flags = new GpuBackendLibFlags; - flags->dump_temp_products_to = ""; - flags->ftz = false; - flags->fma = true; - flags->verbose_ptx_asm = false; - flags->kernel = ""; - flags->llvm_dump_passes = false; - flags->llvm_cl_opts = ""; - flags->dump_ir_before_passes = false; - flags->opt_level = 3; - flag_list = new std::vector<tensorflow::Flag>({ - tensorflow::Flag("dump_temp_products_to", &flags->dump_temp_products_to, - "dump temporary compilation products to this directory. 
" - "If empty, no dump is produced"), - tensorflow::Flag("ftz", &flags->ftz, "flush to zero semantics"), - tensorflow::Flag("fma", &flags->fma, "use FMA synthesis"), - tensorflow::Flag("verbose_ptx_asm", &flags->verbose_ptx_asm, - "emit PTX assembly with extra comments"), - tensorflow::Flag("kernel", &flags->kernel, - "only emit the IR and PTX for this kernel"), - tensorflow::Flag("llvm_dump_passes", &flags->llvm_dump_passes, - "dump the passes LLVM runs to stderr"), - tensorflow::Flag( - "llvm_cl_opts", &flags->llvm_cl_opts, - "comma-separated list of command line options to pass to " - "LLVM. For example, --llvm_cl_opts=--print-before=loop-unroll"), - tensorflow::Flag("dump_ir_before_passes", &flags->dump_ir_before_passes, - "dump the IR before each optimization pass in " - "sequentially-named files."), - tensorflow::Flag("opt_level", &flags->opt_level, - "optimization level (default to 3)"), - }); - ParseFlagsFromEnv(*flag_list); -} - -// Append to *append_to flag definitions associated with XLA's gpu_backend_lib -// module. -void AppendGpuBackendLibFlags(std::vector<tensorflow::Flag>* append_to) { - std::call_once(flags_init, &AllocateFlags); - append_to->insert(append_to->end(), flag_list->begin(), flag_list->end()); -} - -// Return a pointer to the GpuBackendLibFlags struct; -// repeated calls return the same pointer. -// This should be called only after Flags::Parse() has returned. -GpuBackendLibFlags* GetGpuBackendLibFlags() { - std::call_once(flags_init, &AllocateFlags); - return flags; -} - -} // namespace legacy_flags -} // namespace xla diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.h b/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.h deleted file mode 100644 index 31cb50e9da..0000000000 --- a/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.h +++ /dev/null @@ -1,55 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_BACKEND_LIB_FLAGS_H_ -#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_BACKEND_LIB_FLAGS_H_ - -// Legacy flags for XLA's gpu_backend_lib module. - -#include <vector> - -#include "tensorflow/compiler/xla/types.h" -#include "tensorflow/core/platform/types.h" -#include "tensorflow/core/util/command_line_flags.h" - -namespace xla { -namespace legacy_flags { - -// Append to *flag_list flag definitions associated with XLA's gpu_backend_lib -// module. -void AppendGpuBackendLibFlags(std::vector<tensorflow::Flag>* flag_list); - -// The values of flags associated with XLA's gpu_backend_lib module. -typedef struct { - string dump_temp_products_to; // temporary compilation products dir - bool ftz; // flush to zero semantics - bool fma; // use FMA synthesis - bool verbose_ptx_asm; // emit PTX assembly with extra comments - string kernel; // only emit the IR and PTX for this kernel - bool llvm_dump_passes; // dump the passes LLVM runs to stderr - string llvm_cl_opts; // comma-separated list of LLVM options - bool dump_ir_before_passes; // dump IR before each pass - int32 opt_level; // optimization level -} GpuBackendLibFlags; - -// Return a pointer to the GpuBackendLibFlags struct; -// repeated calls return the same pointer. -// This should be called only after Flags::Parse() has returned. 
-GpuBackendLibFlags* GetGpuBackendLibFlags(); - -} // namespace legacy_flags -} // namespace xla - -#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_BACKEND_LIB_FLAGS_H_ diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc deleted file mode 100644 index a8af794494..0000000000 --- a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Legacy flags for XLA's gpu_compiler module. - -#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex. -#include <vector> - -#include "tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h" -#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h" -#include "tensorflow/compiler/xla/types.h" -#include "tensorflow/core/platform/types.h" -#include "tensorflow/core/util/command_line_flags.h" - -namespace xla { -namespace legacy_flags { - -// Pointers to the parsed value of the flags and flag descriptors, initialized -// via flags_init. -static GpuCompilerFlags* flags; -static std::vector<tensorflow::Flag>* flag_list; -static std::once_flag flags_init; - -// Allocate *flags. Called via call_once(&flags_init,...). 
-static void AllocateFlags() { - flags = new GpuCompilerFlags; - flags->xla_cuda_data_dir = "./cuda_sdk_lib"; - flag_list = new std::vector<tensorflow::Flag>({ - tensorflow::Flag( - "xla_cuda_data_dir", &flags->xla_cuda_data_dir, - "If non-empty, specifies a local directory containing ptxas and " - "nvvm libdevice files. Otherwise, by default, we use those from " - "runfile directories."), - tensorflow::Flag("xla_ptxas_path", &flags->xla_ptxas_path, - "The path to ptxas. Required to log stats of the ptx."), - }); - ParseFlagsFromEnv(*flag_list); -} - -// Append to *append_to flag definitions associated with XLA's gpu_compiler -// module. -void AppendGpuCompilerFlags(std::vector<tensorflow::Flag>* append_to) { - std::call_once(flags_init, &AllocateFlags); - append_to->insert(append_to->end(), flag_list->begin(), flag_list->end()); -} - -// Return a pointer to the GpuCompilerFlags struct; -// repeated calls return the same pointer. -// This should be called only after Flags::Parse() has returned. -GpuCompilerFlags* GetGpuCompilerFlags() { - std::call_once(flags_init, &AllocateFlags); - return flags; -} - -} // namespace legacy_flags -} // namespace xla diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h deleted file mode 100644 index 0b0f186b25..0000000000 --- a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_COMPILER_FLAGS_H_ -#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_COMPILER_FLAGS_H_ - -// Legacy flags for XLA's gpu_compiler module. - -#include <vector> - -#include "tensorflow/compiler/xla/types.h" -#include "tensorflow/core/platform/types.h" -#include "tensorflow/core/util/command_line_flags.h" - -namespace xla { -namespace legacy_flags { - -// Append to *flag_list flag definitions associated with XLA's gpu_compiler -// module. -void AppendGpuCompilerFlags(std::vector<tensorflow::Flag>* flag_list); - -// The values of flags associated with XLA's gpu_compiler module. -typedef struct { - string xla_cuda_data_dir; // If non-empty, specifies a local directory - // containing ptxas and nvvm libdevice files. - // Otherwise, by default, we use those from runfile - // directories. - string xla_ptxas_path; // The path to ptxas. Required to log stats of - // the ptx. -} GpuCompilerFlags; - -// Return a pointer to the GpuCompilerFlags struct; -// repeated calls return the same pointer. -// This should be called only after Flags::Parse() has returned. -GpuCompilerFlags* GetGpuCompilerFlags(); - -} // namespace legacy_flags -} // namespace xla - -#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_COMPILER_FLAGS_H_ |