diff options
author | 2017-05-25 18:19:02 -0700 | |
---|---|---|
committer | 2017-05-25 18:22:34 -0700 | |
commit | 3e767e9db0e0a00a509354ec18462841ea4d40f2 (patch) | |
tree | b4eaf4f8aadab0a4ffec95e4ce67fe3548353b65 /tensorflow/compiler/xla/legacy_flags | |
parent | ca0d25c2d7f7dea6f35e3dea20c8a755fd58c637 (diff) |
Add debug protos that serialize HLO graph information.
Also add flags to dump this data in JSON format for each backend.
This is useful for upcoming debugging tools.
PiperOrigin-RevId: 157178357
Diffstat (limited to 'tensorflow/compiler/xla/legacy_flags')
4 files changed, 10 insertions, 0 deletions
diff --git a/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.cc b/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.cc index f8ae25552d..1800ed7e7e 100644 --- a/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.cc +++ b/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.cc @@ -40,6 +40,7 @@ static void AllocateFlags() { flags->xla_cpu_llvm_cl_opts = ""; flags->xla_cpu_embed_ir = false; flags->xla_cpu_parallel = false; + flags->xla_cpu_dump_debug_json_to = ""; flag_list = new std::vector<tensorflow::Flag>({ tensorflow::Flag( "xla_cpu_llvm_opt_level", &flags->xla_cpu_llvm_opt_level, @@ -53,6 +54,9 @@ static void AllocateFlags() { "Embed the LLVM IR module string in the resultant CpuExecutable."), tensorflow::Flag("xla_cpu_parallel", &flags->xla_cpu_parallel, "Use the multi-threaded CPU backend."), + tensorflow::Flag("xla_cpu_dump_debug_json_to", + &flags->xla_cpu_dump_debug_json_to, + "Dump debug JSON to this directory."), }); ParseFlagsFromEnv(*flag_list); } diff --git a/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.h b/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.h index 16a7b68711..01f3876ba4 100644 --- a/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.h +++ b/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.h @@ -41,6 +41,7 @@ typedef struct { bool xla_cpu_embed_ir; // Embed the LLVM IR module string in the resultant // CpuExecutable bool xla_cpu_parallel; // Use the multi-threaded CPU backend. + string xla_cpu_dump_debug_json_to; // Dump debug JSON to this directory. 
} CpuCompilerFlags; // Return a pointer to the CpuCompilerFlags struct; diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc index 7d3ad60aea..131e3ce70a 100644 --- a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc +++ b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc @@ -38,6 +38,7 @@ static void AllocateFlags() { flags = new GpuCompilerFlags; flags->xla_gpu_embed_ir = false; flags->xla_cuda_data_dir = "./cuda_sdk_lib"; + flags->xla_gpu_dump_debug_json_to = ""; flag_list = new std::vector<tensorflow::Flag>({ tensorflow::Flag( "xla_gpu_embed_ir", &flags->xla_gpu_embed_ir, @@ -49,6 +50,9 @@ static void AllocateFlags() { "runfile directories."), tensorflow::Flag("xla_ptxas_path", &flags->xla_ptxas_path, "The path to ptxas. Required to log stats of the ptx."), + tensorflow::Flag("xla_gpu_dump_debug_json_to", + &flags->xla_gpu_dump_debug_json_to, + "Dump debug JSON to this directory."), }); ParseFlagsFromEnv(*flag_list); } diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h index 04ddedab73..0cf39e0ab3 100644 --- a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h +++ b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h @@ -41,6 +41,7 @@ typedef struct { // directories. string xla_ptxas_path; // The path to ptxas. Required to log stats of // the ptx. + string xla_gpu_dump_debug_json_to; // Dump debug JSON to this directory. } GpuCompilerFlags; // Return a pointer to the GpuCompilerFlags struct; |