aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/compiler/xla/legacy_flags
diff options
context:
space:
mode:
authorGravatar Peter Hawkins <phawkins@google.com>2017-01-09 12:04:37 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2017-01-09 12:26:35 -0800
commit1e67c90e2caceeff82d09793d1ef5fa0300d219b (patch)
tree6567ea8b0fa01fcfcd608b7e4c636865d33c7032 /tensorflow/compiler/xla/legacy_flags
parent7ad7e4dfae4344d6b955b5eb61dc4b6bb792f1b3 (diff)
Initial open-source release of XLA: Accelerated Linear Algebra.
XLA is a compiler-based linear algebra execution engine that targets CPUs, GPUs and custom accelerators. XLA is still experimental; we are releasing it early to get the community involved. Change: 143990941
Diffstat (limited to 'tensorflow/compiler/xla/legacy_flags')
-rw-r--r--tensorflow/compiler/xla/legacy_flags/BUILD267
-rw-r--r--tensorflow/compiler/xla/legacy_flags/alias_analysis_flags.cc62
-rw-r--r--tensorflow/compiler/xla/legacy_flags/alias_analysis_flags.h46
-rw-r--r--tensorflow/compiler/xla/legacy_flags/backend_flags.cc63
-rw-r--r--tensorflow/compiler/xla/legacy_flags/backend_flags.h46
-rw-r--r--tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.cc63
-rw-r--r--tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.h46
-rw-r--r--tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.cc61
-rw-r--r--tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.h47
-rw-r--r--tensorflow/compiler/xla/legacy_flags/convolution_thunk_flags.cc63
-rw-r--r--tensorflow/compiler/xla/legacy_flags/convolution_thunk_flags.h47
-rw-r--r--tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.cc76
-rw-r--r--tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.h54
-rw-r--r--tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.cc71
-rw-r--r--tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h51
-rw-r--r--tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.cc91
-rw-r--r--tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.h56
-rw-r--r--tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc73
-rw-r--r--tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h54
-rw-r--r--tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.cc63
-rw-r--r--tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.h47
-rw-r--r--tensorflow/compiler/xla/legacy_flags/hlo_pass_pipeline_flags.cc62
-rw-r--r--tensorflow/compiler/xla/legacy_flags/hlo_pass_pipeline_flags.h48
-rw-r--r--tensorflow/compiler/xla/legacy_flags/hlo_test_base_flags.cc63
-rw-r--r--tensorflow/compiler/xla/legacy_flags/hlo_test_base_flags.h47
-rw-r--r--tensorflow/compiler/xla/legacy_flags/layout_util_flags.cc107
-rw-r--r--tensorflow/compiler/xla/legacy_flags/layout_util_flags.h62
-rw-r--r--tensorflow/compiler/xla/legacy_flags/llvm_backend_flags.cc67
-rw-r--r--tensorflow/compiler/xla/legacy_flags/llvm_backend_flags.h58
-rw-r--r--tensorflow/compiler/xla/legacy_flags/llvm_util_flags.cc63
-rw-r--r--tensorflow/compiler/xla/legacy_flags/llvm_util_flags.h46
-rw-r--r--tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.cc206
-rw-r--r--tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h66
-rw-r--r--tensorflow/compiler/xla/legacy_flags/parse_flags_from_env_test.cc190
-rw-r--r--tensorflow/compiler/xla/legacy_flags/service_flags.cc100
-rw-r--r--tensorflow/compiler/xla/legacy_flags/service_flags.h69
-rw-r--r--tensorflow/compiler/xla/legacy_flags/stream_assignment_flags.cc63
-rw-r--r--tensorflow/compiler/xla/legacy_flags/stream_assignment_flags.h47
-rw-r--r--tensorflow/compiler/xla/legacy_flags/util_flags.cc62
-rw-r--r--tensorflow/compiler/xla/legacy_flags/util_flags.h45
40 files changed, 2918 insertions, 0 deletions
diff --git a/tensorflow/compiler/xla/legacy_flags/BUILD b/tensorflow/compiler/xla/legacy_flags/BUILD
new file mode 100644
index 0000000000..c98232cdf6
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/BUILD
@@ -0,0 +1,267 @@
+# Legacy command line flags for the XLA libraries.
+
+# Please do not add more flags to this package.
+
+# The XLA libraries were written in an environment that allowed command-line
+# flags to be scattered freely throughout the libraries. This model, while
+# initially convenient, leads to a proliferation of unused command-line flags in
+# tests and binaries, and serious problems in servers, where one might wish
+# parameters to be different in independent RPC calls to the same routine.
+#
+# Please don't add more flags. If you're a library author, pass options and
+# parameters explicitly through the library's interface.
+
+package(default_visibility = ["//tensorflow:internal"])
+
+licenses(["notice"]) # Apache 2.0
+
+cc_library(
+ name = "parse_flags_from_env",
+ srcs = ["parse_flags_from_env.cc"],
+ hdrs = ["parse_flags_from_env.h"],
+ deps =
+ [
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_test(
+ name = "parse_flags_from_env_test",
+ srcs = ["parse_flags_from_env_test.cc"],
+ deps =
+ [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:test",
+ ],
+)
+
+cc_library(
+ name = "layout_util_flags",
+ srcs = ["layout_util_flags.cc"],
+ hdrs = ["layout_util_flags.h"],
+ deps =
+ [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "util_flags",
+ srcs = ["util_flags.cc"],
+ hdrs = ["util_flags.h"],
+ deps =
+ [
+ ":parse_flags_from_env",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "cpu_compiler_flags",
+ srcs = ["cpu_compiler_flags.cc"],
+ hdrs = ["cpu_compiler_flags.h"],
+ deps =
+ [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "cpu_runtime_flags",
+ srcs = ["cpu_runtime_flags.cc"],
+ hdrs = ["cpu_runtime_flags.h"],
+ deps =
+ [
+ ":parse_flags_from_env",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "llvm_backend_flags",
+ srcs = ["llvm_backend_flags.cc"],
+ hdrs = ["llvm_backend_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ "@llvm//:core",
+ ],
+)
+
+cc_library(
+ name = "compiler_functor_flags",
+ srcs = ["compiler_functor_flags.cc"],
+ hdrs = ["compiler_functor_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "convolution_thunk_flags",
+ srcs = ["convolution_thunk_flags.cc"],
+ hdrs = ["convolution_thunk_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "gpu_compiler_flags",
+ srcs = ["gpu_compiler_flags.cc"],
+ hdrs = ["gpu_compiler_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "gpu_backend_lib_flags",
+ srcs = ["gpu_backend_lib_flags.cc"],
+ hdrs = ["gpu_backend_lib_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "stream_assignment_flags",
+ srcs = ["stream_assignment_flags.cc"],
+ hdrs = ["stream_assignment_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "hlo_graph_dumper_flags",
+ srcs = ["hlo_graph_dumper_flags.cc"],
+ hdrs = ["hlo_graph_dumper_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "hlo_pass_pipeline_flags",
+ srcs = ["hlo_pass_pipeline_flags.cc"],
+ hdrs = ["hlo_pass_pipeline_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "alias_analysis_flags",
+ srcs = ["alias_analysis_flags.cc"],
+ hdrs = ["alias_analysis_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "llvm_util_flags",
+ srcs = ["llvm_util_flags.cc"],
+ hdrs = ["llvm_util_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "service_flags",
+ srcs = ["service_flags.cc"],
+ hdrs = ["service_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "buffer_assignment_flags",
+ srcs = ["buffer_assignment_flags.cc"],
+ hdrs = ["buffer_assignment_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "hlo_test_base_flags",
+ srcs = ["hlo_test_base_flags.cc"],
+ hdrs = ["hlo_test_base_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
+ name = "backend_flags",
+ srcs = ["backend_flags.cc"],
+ hdrs = ["backend_flags.h"],
+ deps = [
+ ":parse_flags_from_env",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ ],
+)
+
+# -----------------------------------------------------------------------------
+
+filegroup(
+ name = "all_files",
+ srcs = glob(
+ ["**/*"],
+ exclude = [
+ "**/METADATA",
+ "**/OWNERS",
+ ],
+ ),
+ visibility = ["//tensorflow:__subpackages__"],
+)
diff --git a/tensorflow/compiler/xla/legacy_flags/alias_analysis_flags.cc b/tensorflow/compiler/xla/legacy_flags/alias_analysis_flags.cc
new file mode 100644
index 0000000000..474753c10a
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/alias_analysis_flags.cc
@@ -0,0 +1,62 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's alias_analysis module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/alias_analysis_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static AliasAnalysisFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new AliasAnalysisFlags;
+ flags->xla_emit_alias_scope = true;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag("xla_emit_alias_scope", &flags->xla_emit_alias_scope,
+ "Use buffer analysis to refine alias-analysis."),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's alias_analysis
+// module.
+void AppendAliasAnalysisFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the AliasAnalysisFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+AliasAnalysisFlags* GetAliasAnalysisFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/alias_analysis_flags.h b/tensorflow/compiler/xla/legacy_flags/alias_analysis_flags.h
new file mode 100644
index 0000000000..369f8cd7ca
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/alias_analysis_flags.h
@@ -0,0 +1,46 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_ALIAS_ANALYSIS_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_ALIAS_ANALYSIS_FLAGS_H_
+
+// Legacy flags for XLA's alias_analysis module.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's alias_analysis
+// module.
+void AppendAliasAnalysisFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's alias_analysis module.
+typedef struct {
+ bool xla_emit_alias_scope; // Use buffer analysis to refine alias-analysis.
+} AliasAnalysisFlags;
+
+// Return a pointer to the AliasAnalysisFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+AliasAnalysisFlags* GetAliasAnalysisFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_ALIAS_ANALYSIS_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/backend_flags.cc b/tensorflow/compiler/xla/legacy_flags/backend_flags.cc
new file mode 100644
index 0000000000..7c007f4435
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/backend_flags.cc
@@ -0,0 +1,63 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's backend module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/backend_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static BackendFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new BackendFlags;
+ // TODO(b/32648682): Decide if this should continue to be a flag longer term.
+ flags->xla_replicas = 1;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag(
+ "xla_replicas", &flags->xla_replicas,
+ "The number of replicas to use. 1 means no replication."),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's backend module.
+void AppendBackendFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the BackendFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+BackendFlags* GetBackendFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/backend_flags.h b/tensorflow/compiler/xla/legacy_flags/backend_flags.h
new file mode 100644
index 0000000000..061238b7e6
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/backend_flags.h
@@ -0,0 +1,46 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_BACKEND_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_BACKEND_FLAGS_H_
+
+// Legacy flags for XLA's backend module.
+
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's backend module.
+void AppendBackendFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's backend module.
+typedef struct {
+ int64 xla_replicas; // The number of replicas to use. 1 means no
+ // replication.
+} BackendFlags;
+
+// Return a pointer to the BackendFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+BackendFlags* GetBackendFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_BACKEND_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.cc b/tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.cc
new file mode 100644
index 0000000000..71873f73af
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.cc
@@ -0,0 +1,63 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's buffer_assignment module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static BufferAssignmentFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new BufferAssignmentFlags;
+ flags->xla_enable_buffer_reuse = true;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag("xla_enable_buffer_reuse",
+ &flags->xla_enable_buffer_reuse,
+ "Enable reuse of buffers."),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's buffer_assignment
+// module.
+void AppendBufferAssignmentFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the BufferAssignmentFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+BufferAssignmentFlags* GetBufferAssignmentFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.h b/tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.h
new file mode 100644
index 0000000000..5f098c2663
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/buffer_assignment_flags.h
@@ -0,0 +1,46 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_BUFFER_ASSIGNMENT_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_BUFFER_ASSIGNMENT_FLAGS_H_
+
+// Legacy flags for XLA's buffer_assignment module.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's buffer_assignment
+// module.
+void AppendBufferAssignmentFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's buffer_assignment module.
+typedef struct {
+ bool xla_enable_buffer_reuse; // Enable reuse of buffers.
+} BufferAssignmentFlags;
+
+// Return a pointer to the BufferAssignmentFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+BufferAssignmentFlags* GetBufferAssignmentFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_BUFFER_ASSIGNMENT_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.cc b/tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.cc
new file mode 100644
index 0000000000..617a9b712e
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.cc
@@ -0,0 +1,61 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's compiler_functor module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static CompilerFunctorFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new CompilerFunctorFlags;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag("xla_debug_cpu_dump_ir", &flags->xla_debug_cpu_dump_ir,
+ "Dump IR, before optimizations to a path"),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's compiler_functor
+// module.
+void AppendCompilerFunctorFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the CompilerFunctorFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+CompilerFunctorFlags* GetCompilerFunctorFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.h b/tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.h
new file mode 100644
index 0000000000..28b505ec5e
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/compiler_functor_flags.h
@@ -0,0 +1,47 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_COMPILER_FUNCTOR_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_COMPILER_FUNCTOR_FLAGS_H_
+
+// Legacy flags for the XLA's compiler_functor module.
+
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's compiler_functor
+// module.
+void AppendCompilerFunctorFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's compiler_functor module.
+typedef struct {
+ string xla_debug_cpu_dump_ir; // Dump IR, before optimizations to a path
+} CompilerFunctorFlags;
+
+// Return a pointer to the CompilerFunctorFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+CompilerFunctorFlags* GetCompilerFunctorFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_COMPILER_FUNCTOR_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/convolution_thunk_flags.cc b/tensorflow/compiler/xla/legacy_flags/convolution_thunk_flags.cc
new file mode 100644
index 0000000000..fe5d19147f
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/convolution_thunk_flags.cc
@@ -0,0 +1,63 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's convolution_thunk module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/convolution_thunk_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static ConvolutionThunkFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new ConvolutionThunkFlags;
+ flags->xla_gpu_autotune_convolution_algorithm = true;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag("xla_gpu_autotune_convolution_algorithm",
+ &flags->xla_gpu_autotune_convolution_algorithm,
+ "Auto-tune the algorithm used by convolution"),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's convolution_thunk
+// module.
+void AppendConvolutionThunkFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the ConvolutionThunkFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+ConvolutionThunkFlags* GetConvolutionThunkFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/convolution_thunk_flags.h b/tensorflow/compiler/xla/legacy_flags/convolution_thunk_flags.h
new file mode 100644
index 0000000000..53d6806a71
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/convolution_thunk_flags.h
@@ -0,0 +1,47 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CONVOLUTION_THUNK_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CONVOLUTION_THUNK_FLAGS_H_
+
+// Legacy flags for XLA's convolution_thunk module.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's convolution_thunk
+// module.
+void AppendConvolutionThunkFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's convolution_thunk module.
+typedef struct {
+ // Auto-tune the algorithm used by convolution
+ bool xla_gpu_autotune_convolution_algorithm;
+} ConvolutionThunkFlags;
+
+// Return a pointer to the ConvolutionThunkFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+ConvolutionThunkFlags* GetConvolutionThunkFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CONVOLUTION_THUNK_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.cc b/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.cc
new file mode 100644
index 0000000000..f8ae25552d
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.cc
@@ -0,0 +1,76 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's cpu_compiler module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static CpuCompilerFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new CpuCompilerFlags;
+ flags->xla_cpu_llvm_opt_level = 2;
+ flags->xla_cpu_llvm_cl_opts = "";
+ flags->xla_cpu_embed_ir = false;
+ flags->xla_cpu_parallel = false;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag(
+ "xla_cpu_llvm_opt_level", &flags->xla_cpu_llvm_opt_level,
+ "The LLVM optimization level for the CPU XLA backend. "
+ "Valid range is from 0 to 3 where 0 means no optimizations."),
+ tensorflow::Flag(
+ "xla_cpu_llvm_cl_opts", &flags->xla_cpu_llvm_cl_opts,
+ "Comma-separated list of command line options to pass to LLVM."),
+ tensorflow::Flag(
+ "xla_cpu_embed_ir", &flags->xla_cpu_embed_ir,
+ "Embed the LLVM IR module string in the resultant CpuExecutable."),
+ tensorflow::Flag("xla_cpu_parallel", &flags->xla_cpu_parallel,
+ "Use the multi-threaded CPU backend."),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's cpu_compiler
+// module.
+void AppendCpuCompilerFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the CpuCompilerFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+CpuCompilerFlags* GetCpuCompilerFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.h b/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.h
new file mode 100644
index 0000000000..16a7b68711
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/cpu_compiler_flags.h
@@ -0,0 +1,54 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CPU_COMPILER_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CPU_COMPILER_FLAGS_H_
+
+// Legacy flags for the XLA's cpu_compiler module.
+
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's cpu_compiler
+// module.
+void AppendCpuCompilerFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's cpu_compiler module.
+typedef struct {
+ // The LLVM optimization level for the CPU XLA backend.
+ // Valid range is from 0 to 3 where 0 means no optimizations.
+ int32 xla_cpu_llvm_opt_level;
+ string xla_cpu_llvm_cl_opts; // Comma-separated list of command line options
+ // to pass to LLVM.
+ bool xla_cpu_embed_ir; // Embed the LLVM IR module string in the resultant
+ // CpuExecutable
+ bool xla_cpu_parallel; // Use the multi-threaded CPU backend.
+} CpuCompilerFlags;
+
+// Return a pointer to the CpuCompilerFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+CpuCompilerFlags* GetCpuCompilerFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CPU_COMPILER_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.cc b/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.cc
new file mode 100644
index 0000000000..d7817c5d54
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.cc
@@ -0,0 +1,71 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's cpu_runtime module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static CpuRuntimeFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new CpuRuntimeFlags;
+ flags->xla_cpu_use_eigen = true;
+ flags->xla_cpu_multi_thread_eigen = true;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag(
+ "xla_cpu_use_eigen", &flags->xla_cpu_use_eigen,
+ "Use Eigen for matrix multiply on the CPU platform. This "
+ "is a useful hack for performance comparisons against "
+ "XLA's implementation."),
+ tensorflow::Flag(
+ "xla_cpu_multi_thread_eigen", &flags->xla_cpu_multi_thread_eigen,
+ "When generating calls to Eigen for matmul and conv, should "
+ "single or multi-threaded eigen be used? "
+ "Only used when --xla_cpu_use_eigen is true."),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's cpu_runtime
+// module.
+void AppendCpuRuntimeFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the CpuRuntimeFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+CpuRuntimeFlags* GetCpuRuntimeFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h b/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h
new file mode 100644
index 0000000000..e3ff30da36
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/cpu_runtime_flags.h
@@ -0,0 +1,51 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CPU_RUNTIME_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CPU_RUNTIME_FLAGS_H_
+
+// Legacy flags for the XLA's cpu_runtime module.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's cpu_runtime
+// module.
+void AppendCpuRuntimeFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's cpu_runtime module.
+typedef struct {
+ // Use Eigen for matrix multiply on the CPU platform. This is a useful hack
+ // for performance comparisons against XLA's implementation.
+ bool xla_cpu_use_eigen;
+ // When generating calls to Eigen for matmul and conv, should single or
+ // multi-threaded eigen be used? Only used when --xla_cpu_use_eigen is true.
+ bool xla_cpu_multi_thread_eigen;
+} CpuRuntimeFlags;
+
+// Return a pointer to the CpuRuntimeFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+CpuRuntimeFlags* GetCpuRuntimeFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_CPU_RUNTIME_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.cc b/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.cc
new file mode 100644
index 0000000000..c355b1ed9b
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.cc
@@ -0,0 +1,91 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's gpu_backend_lib module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static GpuBackendLibFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new GpuBackendLibFlags;
+ flags->dump_temp_products_to = "";
+ flags->ftz = false;
+ flags->fma = true;
+ flags->gpu_architecture = "compute_35";
+ flags->verbose_ptx_asm = false;
+ flags->kernel = "";
+ flags->llvm_dump_passes = false;
+ flags->llvm_cl_opts = "";
+ flags->dump_ir_before_passes = false;
+ flags->opt_level = 3;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag("dump_temp_products_to", &flags->dump_temp_products_to,
+ "dump temporary compilation products to this directory. "
+ "If empty, no dump is produced"),
+ tensorflow::Flag("ftz", &flags->ftz, "flush to zero semantics"),
+ tensorflow::Flag("fma", &flags->fma, "use FMA synthesis"),
+ tensorflow::Flag("gpu_architecture", &flags->gpu_architecture,
+ "GPU architecture"),
+ tensorflow::Flag("verbose_ptx_asm", &flags->verbose_ptx_asm,
+ "emit PTX assembly with extra comments"),
+ tensorflow::Flag("kernel", &flags->kernel,
+ "only emit the IR and PTX for this kernel"),
+ tensorflow::Flag("llvm_dump_passes", &flags->llvm_dump_passes,
+ "dump the passes LLVM runs to stderr"),
+ tensorflow::Flag(
+ "llvm_cl_opts", &flags->llvm_cl_opts,
+ "comma-separated list of command line options to pass to "
+ "LLVM. For example, --llvm_cl_opts=--print-before=loop-unroll"),
+ tensorflow::Flag("dump_ir_before_passes", &flags->dump_ir_before_passes,
+ "dump the IR before each optimization pass in "
+ "sequentially-named files."),
+ tensorflow::Flag("opt_level", &flags->opt_level,
+ "optimization level (default to 3)"),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's gpu_backend_lib
+// module.
+void AppendGpuBackendLibFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the GpuBackendLibFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+GpuBackendLibFlags* GetGpuBackendLibFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.h b/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.h
new file mode 100644
index 0000000000..fbb8863454
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/gpu_backend_lib_flags.h
@@ -0,0 +1,56 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_BACKEND_LIB_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_BACKEND_LIB_FLAGS_H_
+
+// Legacy flags for XLA's gpu_backend_lib module.
+
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's gpu_backend_lib
+// module.
+void AppendGpuBackendLibFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's gpu_backend_lib module.
+typedef struct {
+ string dump_temp_products_to; // temporary compilation products dir
+ bool ftz; // flush to zero semantics
+ bool fma; // use FMA synthesis
+ string gpu_architecture; // GPU architecture
+ bool verbose_ptx_asm; // emit PTX assembly with extra comments
+ string kernel; // only emit the IR and PTX for this kernel
+ bool llvm_dump_passes; // dump the passes LLVM runs to stderr
+ string llvm_cl_opts; // comma-separated list of LLVM options
+ bool dump_ir_before_passes; // dump IR before each pass
+ int32 opt_level; // optimization level
+} GpuBackendLibFlags;
+
+// Return a pointer to the GpuBackendLibFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+GpuBackendLibFlags* GetGpuBackendLibFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_BACKEND_LIB_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc
new file mode 100644
index 0000000000..e79d363509
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.cc
@@ -0,0 +1,73 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's gpu_compiler module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static GpuCompilerFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new GpuCompilerFlags;
+ flags->xla_gpu_embed_ir = false;
+ flags->xla_cuda_data_dir = "./cuda_sdk_lib";
+ flags->xla_ptxas_path = "/usr/local/cuda/bin/ptxas";
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag(
+ "xla_gpu_embed_ir", &flags->xla_gpu_embed_ir,
+ "Embed the LLVM IR module string in the resultant GpuExecutable."),
+ tensorflow::Flag(
+ "xla_cuda_data_dir", &flags->xla_cuda_data_dir,
+ "If non-empty, specifies a local directory containing ptxas and "
+ "nvvm libdevice files. Otherwise, by default, we use those from "
+ "runfile directories."),
+ tensorflow::Flag("xla_ptxas_path", &flags->xla_ptxas_path,
+ "The path to ptxas. Required to log stats of the ptx."),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's gpu_compiler
+// module.
+void AppendGpuCompilerFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the GpuCompilerFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+GpuCompilerFlags* GetGpuCompilerFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h
new file mode 100644
index 0000000000..04ddedab73
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/gpu_compiler_flags.h
@@ -0,0 +1,54 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_COMPILER_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_COMPILER_FLAGS_H_
+
+// Legacy flags for XLA's gpu_compiler module.
+
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's gpu_compiler
+// module.
+void AppendGpuCompilerFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's gpu_compiler module.
+typedef struct {
+ bool xla_gpu_embed_ir; // Embed the LLVM IR module string in the resultant
+ // GpuExecutable.
+ string xla_cuda_data_dir; // If non-empty, specifies a local directory
+ // containing ptxas and nvvm libdevice files.
+ // Otherwise, by default, we use those from runfile
+ // directories.
+ string xla_ptxas_path; // The path to ptxas. Required to log stats of
+ // the ptx.
+} GpuCompilerFlags;
+
+// Return a pointer to the GpuCompilerFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+GpuCompilerFlags* GetGpuCompilerFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_GPU_COMPILER_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.cc b/tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.cc
new file mode 100644
index 0000000000..8822f6f610
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.cc
@@ -0,0 +1,63 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's hlo_graph_dumper module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static HloGraphDumperFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new HloGraphDumperFlags;
+ flags->xla_hlo_dump_graph_path = "/tmp/";
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag("xla_hlo_dump_graph_path",
+ &flags->xla_hlo_dump_graph_path,
+ "Path to write dumped HLO graphs to"),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's hlo_graph_dumper
+// module.
+void AppendHloGraphDumperFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the HloGraphDumperFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+HloGraphDumperFlags* GetHloGraphDumperFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.h b/tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.h
new file mode 100644
index 0000000000..b6dfced87c
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/hlo_graph_dumper_flags.h
@@ -0,0 +1,47 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_GRAPH_DUMPER_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_GRAPH_DUMPER_FLAGS_H_
+
+// Legacy flags for XLA's hlo_graph_dumper module.
+
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's hlo_graph_dumper
+// module.
+void AppendHloGraphDumperFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's hlo_graph_dumper module.
+typedef struct {
+ string xla_hlo_dump_graph_path; // Path to write dumped HLO graphs to
+} HloGraphDumperFlags;
+
+// Return a pointer to the HloGraphDumperFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+HloGraphDumperFlags* GetHloGraphDumperFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_GRAPH_DUMPER_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/hlo_pass_pipeline_flags.cc b/tensorflow/compiler/xla/legacy_flags/hlo_pass_pipeline_flags.cc
new file mode 100644
index 0000000000..edc04d51a7
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/hlo_pass_pipeline_flags.cc
@@ -0,0 +1,62 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's hlo_pass_pipeline module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/hlo_pass_pipeline_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static HloPassPipelineFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new HloPassPipelineFlags;
+ flags->xla_disable_hlo_passes = "";
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag("xla_disable_hlo_passes", &flags->xla_disable_hlo_passes,
+ "Comma-separated list of HLO passes to disable."),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's hlo_pass_pipeline
+// module.
+void AppendHloPassPipelineFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the HloPassPipelineFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+HloPassPipelineFlags* GetHloPassPipelineFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/hlo_pass_pipeline_flags.h b/tensorflow/compiler/xla/legacy_flags/hlo_pass_pipeline_flags.h
new file mode 100644
index 0000000000..520759bbf0
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/hlo_pass_pipeline_flags.h
@@ -0,0 +1,48 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_PASS_PIPELINE_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_PASS_PIPELINE_FLAGS_H_
+
+// Legacy flags for XLA's hlo_pass_pipeline module.
+
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's hlo_pass_pipeline
+// module.
+void AppendHloPassPipelineFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's hlo_pass_pipeline module.
+// Initial values are seeded from the TF_XLA_FLAGS environment variable and
+// may be overridden by command-line flag parsing.
+typedef struct {
+  // Comma-separated list of HLO passes to disable.
+  string xla_disable_hlo_passes;
+} HloPassPipelineFlags;
+
+// Return a pointer to the HloPassPipelineFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+HloPassPipelineFlags* GetHloPassPipelineFlags();
+
+}  // namespace legacy_flags
+}  // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_PASS_PIPELINE_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/hlo_test_base_flags.cc b/tensorflow/compiler/xla/legacy_flags/hlo_test_base_flags.cc
new file mode 100644
index 0000000000..c7893c1385
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/hlo_test_base_flags.cc
@@ -0,0 +1,63 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's hlo_test_base module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/hlo_test_base_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static HloTestBaseFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+// The allocations are intentionally never freed: the flag values must remain
+// addressable for the life of the process.
+static void AllocateFlags() {
+  flags = new HloTestBaseFlags;
+  flags->xla_hlo_test_generate_hlo_graph = false;
+  flag_list = new std::vector<tensorflow::Flag>({
+      tensorflow::Flag("xla_hlo_test_generate_hlo_graph",
+                       &flags->xla_hlo_test_generate_hlo_graph,
+                       "Generate graph output of HLO instructions"),
+  });
+  // Seed the flag values from the TF_XLA_FLAGS environment variable.
+  ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's hlo_test_base
+// module.  Thread-safe: initialization happens at most once via flags_init.
+void AppendHloTestBaseFlags(std::vector<tensorflow::Flag>* append_to) {
+  std::call_once(flags_init, &AllocateFlags);
+  append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the HloTestBaseFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+HloTestBaseFlags* GetHloTestBaseFlags() {
+  std::call_once(flags_init, &AllocateFlags);
+  return flags;
+}
+
+}  // namespace legacy_flags
+}  // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/hlo_test_base_flags.h b/tensorflow/compiler/xla/legacy_flags/hlo_test_base_flags.h
new file mode 100644
index 0000000000..23b808cecb
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/hlo_test_base_flags.h
@@ -0,0 +1,47 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_TEST_BASE_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_TEST_BASE_FLAGS_H_
+
+// Legacy flags for XLA's hlo_test_base module.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's hlo_test_base
+// module.
+void AppendHloTestBaseFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's hlo_test_base module.
+typedef struct {
+  // Generate graph output of HLO instructions. Defaults to false.
+  bool xla_hlo_test_generate_hlo_graph;
+} HloTestBaseFlags;
+
+// Return a pointer to the HloTestBaseFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+HloTestBaseFlags* GetHloTestBaseFlags();
+
+}  // namespace legacy_flags
+}  // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_HLO_TEST_BASE_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/layout_util_flags.cc b/tensorflow/compiler/xla/legacy_flags/layout_util_flags.cc
new file mode 100644
index 0000000000..4242b501d4
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/layout_util_flags.cc
@@ -0,0 +1,107 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's layout_util module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/layout_util_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/lib/strings/numbers.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the string value of the xla_default_layout flag and the flag
+// descriptor, initialized via raw_flags_init.
+static string* raw_flag;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag raw_flags_init;
+
+// Allocate *raw_flag. Called via call_once(&raw_flags_init,...).
+// The flag machinery can only bind to a string; the raw value is parsed into
+// a structured DefaultLayout later, in AllocateFlags().
+static void AllocateRawFlag() {
+  raw_flag = new string;
+  flag_list = new std::vector<tensorflow::Flag>({
+      tensorflow::Flag(
+          "xla_default_layout", raw_flag,
+          "Default layout for Shapes in XLA. Valid values are: "
+          "'minor2major', 'major2minor', 'random', 'random:<seed>'. "
+          "For debugging purposes. If no seed (or 0) is given, a seed from "
+          "random_device is used."),
+  });
+  ParseFlagsFromEnv(*flag_list);
+}
+
+// Parse text into *layout. Returns true on success. Accepted forms are
+// "minor2major", "major2minor", "random" and "random:<seed>". On failure
+// *layout may have been partially updated.
+static bool ParseDefaultLayout(const string& text, DefaultLayout* layout) {
+  bool result = true;
+  std::vector<string> field = tensorflow::str_util::Split(text, ':');
+  if (field.size() > 0) {
+    if (field[0] == "random") {
+      layout->dimension_order = DefaultLayout::DimensionOrder::kRandom;
+      if (field.size() > 1) {
+        uint64 seed = 0;
+        result = tensorflow::strings::safe_strtou64(field[1], &seed);
+        layout->seed = seed;
+      }
+    } else if (field[0] == "minor2major") {
+      layout->dimension_order = DefaultLayout::DimensionOrder::kMinorToMajor;
+    } else if (field[0] == "major2minor") {
+      layout->dimension_order = DefaultLayout::DimensionOrder::kMajorToMinor;
+    } else {
+      result = false;
+    }
+  }
+  return result;
+}
+
+// Pointer to the parsed value of the flags, initialized via flags_init.
+// Remains nullptr if the flag's value could not be parsed.
+static LayoutUtilFlags* flags;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+  std::call_once(raw_flags_init, &AllocateRawFlag);
+  flags = new LayoutUtilFlags;
+  flags->xla_default_layout.dimension_order =
+      DefaultLayout::DimensionOrder::kMajorToMinor;
+  flags->xla_default_layout.seed = 0;
+  if (!ParseDefaultLayout(*raw_flag, &flags->xla_default_layout)) {
+    // Free the partially-initialized struct before signalling failure with
+    // nullptr; assigning nullptr directly would leak the allocation.
+    delete flags;
+    flags = nullptr;
+  }
+}
+
+// Append to *append_to the flag definitions associated with XLA's layout_util
+// module.
+void AppendLayoutUtilFlags(std::vector<tensorflow::Flag>* append_to) {
+  std::call_once(raw_flags_init, &AllocateRawFlag);
+  append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the LayoutUtilFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned. Returns
+// nullptr if the value of --xla_default_layout was syntactically invalid.
+LayoutUtilFlags* GetLayoutUtilFlags() {
+  std::call_once(flags_init, &AllocateFlags);
+  return flags;
+}
+
+}  // namespace legacy_flags
+}  // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/layout_util_flags.h b/tensorflow/compiler/xla/legacy_flags/layout_util_flags.h
new file mode 100644
index 0000000000..177f428b73
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/layout_util_flags.h
@@ -0,0 +1,62 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_LAYOUT_UTIL_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_LAYOUT_UTIL_FLAGS_H_
+
+// Legacy flags for the XLA's layout_util module.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// The default layout for all newly created shapes. Specified by the flag
+// --xla_default_layout.
+struct DefaultLayout {
+  enum class DimensionOrder {
+    kRandom,        // dimensions in a random order, seeded by `seed`
+    kMinorToMajor,  // dimensions ordered minor-to-major
+    kMajorToMinor,  // dimensions ordered major-to-minor (the default)
+  };
+
+  DimensionOrder dimension_order;
+  // Seed used when dimension_order is kRandom; 0 means a seed derived from
+  // random_device is used instead.
+  size_t seed;
+};
+
+// Append to *flag_list the flag definitions associated with XLA's layout_util
+// module.
+void AppendLayoutUtilFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's layout_util module.
+typedef struct {
+  // Default layout for Shapes in XLA. Valid values are: 'minor2major',
+  // 'major2minor', 'random', 'random:<seed>'. For debugging purposes. If no
+  // seed (or 0) is given, a seed from random_device is used.
+  DefaultLayout xla_default_layout;
+} LayoutUtilFlags;
+
+// Return a pointer to the LayoutUtilFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+LayoutUtilFlags* GetLayoutUtilFlags();
+
+}  // namespace legacy_flags
+}  // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_LAYOUT_UTIL_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/llvm_backend_flags.cc b/tensorflow/compiler/xla/legacy_flags/llvm_backend_flags.cc
new file mode 100644
index 0000000000..c8a71b284f
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/llvm_backend_flags.cc
@@ -0,0 +1,67 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags associated with XLA's use of LLVM for code generation.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/llvm_backend_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static LlvmBackendFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+// Both flags default to true.
+static void AllocateFlags() {
+  flags = new LlvmBackendFlags;
+  flags->xla_fast_math = true;
+  flags->xla_precision_losing_optimizations = true;
+  flag_list = new std::vector<tensorflow::Flag>({
+      tensorflow::Flag(
+          "xla_precision_losing_optimizations",
+          &flags->xla_precision_losing_optimizations,
+          "Allows llvm to make transformations that reduce the precision of "
+          "floating-point computations. This is equivalent to clang's "
+          "-funsafe-math-optimizations flag."),
+      tensorflow::Flag(
+          "xla_fast_math", &flags->xla_fast_math,
+          "Allows llvm to make all manner of unsafe floating-point "
+          "optimizations, including assuming that NaN and Inf don't appear. "
+          "This is equivalent to clang's -ffast-math flag."),
+  });
+  // Seed the flag values from the TF_XLA_FLAGS environment variable.
+  ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's use of LLVM for
+// code generation.
+void AppendLlvmBackendFlags(std::vector<tensorflow::Flag>* append_to) {
+  std::call_once(flags_init, &AllocateFlags);
+  append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the LlvmBackendFlags struct; repeated calls return the
+// same pointer. This should be called only after Flags::Parse() has returned.
+LlvmBackendFlags* GetLlvmBackendFlags() {
+  std::call_once(flags_init, &AllocateFlags);
+  return flags;
+}
+
+}  // namespace legacy_flags
+}  // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/llvm_backend_flags.h b/tensorflow/compiler/xla/legacy_flags/llvm_backend_flags.h
new file mode 100644
index 0000000000..e8c0489285
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/llvm_backend_flags.h
@@ -0,0 +1,58 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_LLVM_BACKEND_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_LLVM_BACKEND_FLAGS_H_
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's use of LLVM for
+// code generation.
+void AppendLlvmBackendFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's use of LLVM for code generation.
+typedef struct {
+  // Allows llvm to make transformations that reduce the precision of
+  // floating-point computations, but it *does not* allow it to disregard signed
+  // zero or assume that NaN and Inf never appear.
+  //
+  // Controls the "UnsafeFPMath" LLVM target option and
+  // llvm::FastMathFlags::allowReciprocal. This is equivalent to clang's
+  // -funsafe-math-optimizations flag.
+  bool xla_precision_losing_optimizations;
+
+  // Unleashes the full power of LLVM's unsafe floating-point optimizations.
+  // Everything is fair game, including disregarding signed zero and assuming
+  // that NaN and Inf never appear.
+  //
+  // This implies xla_precision_losing_optimizations, and is equivalent to
+  // clang's -ffast-math flag.
+  bool xla_fast_math;
+} LlvmBackendFlags;
+
+// Return a pointer to the LlvmBackendFlags struct. Repeated calls return the
+// same pointer. This should be called only after Flags::Parse() has returned.
+// Both flags default to true.
+LlvmBackendFlags* GetLlvmBackendFlags();
+
+}  // namespace legacy_flags
+}  // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_LLVM_BACKEND_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/llvm_util_flags.cc b/tensorflow/compiler/xla/legacy_flags/llvm_util_flags.cc
new file mode 100644
index 0000000000..3c53729a67
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/llvm_util_flags.cc
@@ -0,0 +1,63 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's llvm_util module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/llvm_util_flags.h"
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static LlvmUtilFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+// The allocations are intentionally never freed: the flag values must remain
+// addressable for the life of the process.
+static void AllocateFlags() {
+  flags = new LlvmUtilFlags;
+  flags->xla_emit_tbaa = true;
+  flag_list = new std::vector<tensorflow::Flag>({
+      tensorflow::Flag("xla_emit_tbaa", &flags->xla_emit_tbaa,
+                       "Perform type-based alias analysis optimizations for "
+                       "LLVM-based backends."),
+  });
+  // Seed the flag values from the TF_XLA_FLAGS environment variable.
+  ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's llvm_util
+// module.  Thread-safe: initialization happens at most once via flags_init.
+void AppendLlvmUtilFlags(std::vector<tensorflow::Flag>* append_to) {
+  std::call_once(flags_init, &AllocateFlags);
+  append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the LlvmUtilFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+LlvmUtilFlags* GetLlvmUtilFlags() {
+  std::call_once(flags_init, &AllocateFlags);
+  return flags;
+}
+
+}  // namespace legacy_flags
+}  // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/llvm_util_flags.h b/tensorflow/compiler/xla/legacy_flags/llvm_util_flags.h
new file mode 100644
index 0000000000..98da26b4b8
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/llvm_util_flags.h
@@ -0,0 +1,46 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_LLVM_UTIL_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_LLVM_UTIL_FLAGS_H_
+
+// Legacy flags for XLA's llvm_util module.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's llvm_util module.
+void AppendLlvmUtilFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's llvm_util module.
+typedef struct {
+  // Perform type-based alias analysis optimizations for LLVM-based backends.
+  // Defaults to true.
+  bool xla_emit_tbaa;
+} LlvmUtilFlags;
+
+// Return a pointer to the LlvmUtilFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+LlvmUtilFlags* GetLlvmUtilFlags();
+
+}  // namespace legacy_flags
+}  // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_LLVM_UTIL_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.cc b/tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.cc
new file mode 100644
index 0000000000..2a4e49b05a
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.cc
@@ -0,0 +1,206 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// This module exports ParseFlagsFromEnv(), which allows other modules to parse
+// flags from an environment variable, or a file named by the environment
+// variable.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+static const char kEnvVar[] = "TF_XLA_FLAGS";  // environment variable queried
+static const char kWS[] = " \t\r\n";           // whitespace
+
+// The following struct represents an argv[]-style array, parsed
+// from data gleaned from the environment.
+//
+// As usual, an anonymous namespace is advisable to avoid
+// constructor/destructor collisions with other "private" types
+// in the same named namespace.
+namespace {
+struct EnvArgv {
+  EnvArgv() : initialized(false), argc(0) {}
+  bool initialized;         // whether the other fields have been set.
+  int argc;                 // elements used in argv[]
+  std::vector<char*> argv;  // flag arguments parsed from environment string.
+  std::vector<char*> argv_save;  // saved values from argv[] to avoid leaks
+};
+}  // anonymous namespace
+
+// Append the string s0[0, .., s0len-1] concatenated with s1[0, .., s1len-1] as
+// a newly allocated nul-terminated string to the array *a. If s0==nullptr, a
+// nullptr is appended without increasing a->argc.
+static void AppendToEnvArgv(const char* s0, size_t s0len, const char* s1,
+                            size_t s1len, EnvArgv* a) {
+  if (s0 == nullptr) {
+    a->argv.push_back(nullptr);
+    a->argv_save.push_back(nullptr);
+  } else {
+    string s = string(s0, s0len) + string(s1, s1len);
+    char* str = strdup(s.c_str());
+    // The pointer is recorded twice: argv[] may be permuted by the flag
+    // parser, so argv_save keeps the original pointers for later freeing.
+    a->argv.push_back(str);
+    a->argv_save.push_back(str);
+    a->argc++;
+  }
+}
+
+// Like s.find_first_of(x, pos), but return s.size() when find_first_of() would
+// return string::npos. This avoids if-statements elsewhere.
+static size_t FindFirstOf(const string& s, const char* x, size_t pos) {
+  size_t result = s.find_first_of(x, pos);
+  return result == string::npos ? s.size() : result;
+}
+
+// Like s.find_first_not_of(x, pos), but return s.size() when
+// find_first_not_of() would return string::npos. This avoids if-statements
+// elsewhere.
+static size_t FindFirstNotOf(const string& s, const char* x, size_t pos) {
+  size_t result = s.find_first_not_of(x, pos);
+  return result == string::npos ? s.size() : result;
+}
+
+// Given a string containing flags, parse them into the XLA command line flags.
+// The parse is best effort, and gives up on the first syntax error.
+static void ParseArgvFromString(const string& flag_str, EnvArgv* a) {
+  size_t b = FindFirstNotOf(flag_str, kWS, 0);
+  while (b != flag_str.size() && flag_str[b] == '-') {
+    // b is the index of the start of a flag.
+    // Set e to the index just past the end of the flag.
+    size_t e = b;
+    while (e != flag_str.size() && isascii(flag_str[e]) &&
+           (strchr("-_", flag_str[e]) != nullptr || isalnum(flag_str[e]))) {
+      e++;
+    }
+    if (e != flag_str.size() && flag_str[e] == '=' &&
+        e + 1 != flag_str.size() && strchr("'\"", flag_str[e + 1]) != nullptr) {
+      // A flag of the form --flag="something in double or single quotes"
+      int c;
+      e++;  // point just past '='
+      size_t eflag = e;
+      char quote = flag_str[e];
+      e++;  // point just past quote
+      // Put in value the string with quotes removed.
+      string value;
+      for (; e != flag_str.size() && (c = flag_str[e]) != quote; e++) {
+        if (quote == '"' && c == '\\' && e + 1 != flag_str.size()) {
+          // Handle backslash in double quoted strings. They are literal in
+          // single-quoted strings.
+          e++;
+          c = flag_str[e];
+        }
+        value += c;
+      }
+      if (e != flag_str.size()) {  // skip final " or '
+        e++;
+      }
+      // Append "--flag=" (everything up to the opening quote, [b, eflag))
+      // followed by the unquoted value.
+      AppendToEnvArgv(flag_str.data() + b, eflag - b, value.data(),
+                      value.size(), a);
+    } else {  // A flag without a quoted value.
+      e = FindFirstOf(flag_str, kWS, e);
+      AppendToEnvArgv(flag_str.data() + b, e - b, "", 0, a);
+    }
+    b = FindFirstNotOf(flag_str, kWS, e);
+  }
+}
+
+// Call ParseArgvFromString(..., a) on a string derived from the setting of an
+// environment variable kEnvVar, or a file it points to.
+static void SetArgvFromEnv(EnvArgv* a) {
+  if (!a->initialized) {
+    static const char kDummyArgv[] = "<argv[0]>";
+    AppendToEnvArgv(kDummyArgv, strlen(kDummyArgv), nullptr, 0,
+                    a);  // dummy argv[0]
+    const char* env = getenv(kEnvVar);
+    if (env == nullptr || env[0] == '\0') {
+      // nothing
+    } else if (env[strspn(env, kWS)] == '-') {  // flags in env var value
+      ParseArgvFromString(env, a);
+    } else {  // assume it's a file name
+      FILE* fp = fopen(env, "r");
+      if (fp != nullptr) {
+        string str;
+        char buf[512];
+        int n;
+        while ((n = fread(buf, 1, sizeof(buf), fp)) > 0) {
+          str.append(buf, n);
+        }
+        fclose(fp);
+        ParseArgvFromString(str, a);
+      }
+    }
+    AppendToEnvArgv(nullptr, 0, nullptr, 0, a);  // add trailing nullptr to *a.
+    a->initialized = true;
+  }
+}
+
+// The simulated argv[] parsed from the environment.
+static EnvArgv* env_argv;
+
+// Used to protect accesses to env_argv.
+static tensorflow::mutex env_argv_mu(tensorflow::LINKER_INITIALIZED);
+
+// Call Flags::Parse(argc, argv, flag_list) against any as yet unrecognized
+// flags passed in from the environment.
+bool ParseFlagsFromEnv(const std::vector<tensorflow::Flag>& flag_list) {
+  env_argv_mu.lock();
+  if (env_argv == nullptr) {
+    env_argv = new EnvArgv;
+  }
+  SetArgvFromEnv(env_argv);  // a no-op if already initialized
+  bool result =
+      tensorflow::Flags::Parse(&env_argv->argc, &env_argv->argv[0], flag_list);
+  env_argv_mu.unlock();
+  return result;
+}
+
+// Testing only.
+// Reset the env_argv struct so that subsequent calls to ParseFlagsFromEnv()
+// will parse the environment variable (or the file it points to) anew, and set
+// *pargc, and *pargv to point to the internal locations of the argc and argv
+// constructed from the environment.
+void ResetFlagsFromEnvForTesting(int** pargc, std::vector<char*>** pargv) {
+  env_argv_mu.lock();
+  if (env_argv == nullptr) {
+    env_argv = new EnvArgv;
+  }
+  if (!env_argv->argv_save.empty()) {
+    // argv_save is nullptr-terminated; free each strdup'ed element.
+    for (int i = 0; env_argv->argv_save[i] != nullptr; i++) {
+      free(env_argv->argv_save[i]);
+    }
+  }
+  env_argv->initialized = false;
+  env_argv->argc = 0;
+  env_argv->argv.clear();
+  env_argv->argv_save.clear();
+  env_argv_mu.unlock();
+  // env_argv itself is never freed, so these pointers remain valid after the
+  // lock is released.
+  *pargc = &env_argv->argc;
+  *pargv = &env_argv->argv;
+}
+
+}  // namespace legacy_flags
+}  // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h b/tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h
new file mode 100644
index 0000000000..b54482ad2b
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h
@@ -0,0 +1,66 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_PARSE_FLAGS_FROM_ENV_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_PARSE_FLAGS_FROM_ENV_H_
+
+// This module exports ParseFlagsFromEnv(), which allows other modules to parse
+// flags from the environment variable TF_XLA_FLAGS, or (if the first
+// non-whitespace in the variable value is not '-'), a file named by that
+// environment variable. The accepted syntax is that flags arguments are of
+// the form --flag=value or (for boolean flags) --flag, and are whitespace
+// separated. The <value> may be one of:
+// - <non-whitespace, non-nul not starting with single-quote or double-quote>
+// in which case the effective value is the string itself
+// - <single-quote><characters string not containing nul or
+// single-quote><single_quote> in which case the effective value is the
+// string with the single-quotes removed
+// - <double-quote><character string not containing nul or unescaped
+//   double-quote><double_quote> in which case the effective value is the
+// string with the double-quotes removed, and escaped sequences of
+// <backslash><char> replaced by <char>.
+//
+// Flags values inconsistent with the type of the flag will be rejected by the
+// flag parser.
+//
+// Examples:
+// TF_XLA_FLAGS="--foo=bar --wombat='value with a space'"
+//
+// TF_XLA_FLAGS=/tmp/flagfile
+// where /tmp/flagfile might contain
+// --some_flag="This is a string containing a \" and a '."
+// --another_flag=wombats
+
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Call tensorflow::Flags::Parse(argc, argv, flag_list) against any as yet
+// unrecognized flags passed in from the environment, and return its
+// return value.
+bool ParseFlagsFromEnv(const std::vector<tensorflow::Flag>& flag_list);
+
+// Used only for testing. Not to be used by clients.
+void ResetFlagsFromEnvForTesting(int** pargc, std::vector<char*>** pargv);
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_PARSE_FLAGS_FROM_ENV_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/parse_flags_from_env_test.cc b/tensorflow/compiler/xla/legacy_flags/parse_flags_from_env_test.cc
new file mode 100644
index 0000000000..7a966ce241
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/parse_flags_from_env_test.cc
@@ -0,0 +1,190 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Test for parse_flags_from_env.cc
+
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/lib/strings/stringprintf.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/test.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Test that XLA flags can be set from the environment.
+// Failure messages are accompanied by the text in msg[].
+static void TestParseFlagsFromEnv(const char* msg) {
+ // Initialize module under test.
+ int* pargc;
+ std::vector<char*>* pargv;
+ ResetFlagsFromEnvForTesting(&pargc, &pargv);
+
+ // Ensure that environment variable can be parsed when
+ // no flags are expected.
+ std::vector<tensorflow::Flag> empty_flag_list;
+ bool parsed_ok = ParseFlagsFromEnv(empty_flag_list);
+ CHECK(parsed_ok) << msg;
+ const std::vector<char*>& argv_first = *pargv;
+ CHECK_NE(argv_first[0], nullptr) << msg;
+ int i = 0;
+ while (argv_first[i] != nullptr) {
+ i++;
+ }
+ CHECK_EQ(i, *pargc) << msg;
+
+ // Check that actual flags can be parsed.
+ bool simple = false;
+ string with_value;
+ string embedded_quotes;
+ string single_quoted;
+ string double_quoted;
+ std::vector<tensorflow::Flag> flag_list = {
+ tensorflow::Flag("simple", &simple, ""),
+ tensorflow::Flag("with_value", &with_value, ""),
+ tensorflow::Flag("embedded_quotes", &embedded_quotes, ""),
+ tensorflow::Flag("single_quoted", &single_quoted, ""),
+ tensorflow::Flag("double_quoted", &double_quoted, ""),
+ };
+ parsed_ok = ParseFlagsFromEnv(flag_list);
+ CHECK_EQ(*pargc, 1) << msg;
+ const std::vector<char*>& argv_second = *pargv;
+ CHECK_NE(argv_second[0], nullptr) << msg;
+ CHECK_EQ(argv_second[1], nullptr) << msg;
+ CHECK(parsed_ok) << msg;
+ CHECK(simple) << msg;
+ CHECK_EQ(with_value, "a_value") << msg;
+ CHECK_EQ(embedded_quotes, "single'double\"") << msg;
+ CHECK_EQ(single_quoted, "single quoted \\\\ \n \"") << msg;
+ CHECK_EQ(double_quoted, "double quoted \\ \n '\"") << msg;
+}
+
+// The flags settings to test.
+static const char kTestFlagString[] =
+ "--simple "
+ "--with_value=a_value "
+ "--embedded_quotes=single'double\" "
+ "--single_quoted='single quoted \\\\ \n \"' "
+ "--double_quoted=\"double quoted \\\\ \n '\\\"\" ";
+
+// Test that the environment variable is parsed correctly.
+TEST(ParseFlagsFromEnv, Basic) {
+ // Prepare environment.
+ setenv("TF_XLA_FLAGS", kTestFlagString, true /*overwrite*/);
+ TestParseFlagsFromEnv("(flags in environment variable)");
+}
+
+// Test that a file named by the environment variable is parsed correctly.
+TEST(ParseFlagsFromEnv, File) {
+ // environment variables where tmp dir may be specified.
+ static const char* kTempVars[] = {"TEST_TMPDIR", "TMP"};
+ static const char kTempDir[] = "/tmp"; // default temp dir if all else fails.
+ const char* tmp_dir = nullptr;
+ for (int i = 0; i != TF_ARRAYSIZE(kTempVars) && tmp_dir == nullptr; i++) {
+ tmp_dir = getenv(kTempVars[i]);
+ }
+ if (tmp_dir == nullptr) {
+ tmp_dir = kTempDir;
+ }
+ string tmp_file = tensorflow::strings::Printf("%s/parse_flags_from_env.%d",
+ tmp_dir, getpid());
+ FILE* fp = fopen(tmp_file.c_str(), "w");
+ CHECK_NE(fp, nullptr) << "can't write to " << tmp_file;
+ for (int i = 0; kTestFlagString[i] != '\0'; i++) {
+ putc(kTestFlagString[i], fp);
+ }
+ fflush(fp);
+ CHECK_EQ(ferror(fp), 0) << "writes failed to " << tmp_file;
+ fclose(fp);
+ // Prepare environment.
+ setenv("TF_XLA_FLAGS", tmp_file.c_str(), true /*overwrite*/);
+ TestParseFlagsFromEnv("(flags in file)");
+ unlink(tmp_file.c_str());
+}
+
+// Name of the test binary.
+static const char* binary_name;
+
+// Test that when we use both the environment variable and actual
+// command line flags (when the latter is possible), the latter win.
+TEST(ParseFlagsFromEnv, EnvAndFlag) {
+ // TODO(m3b): convert to Subprocess when CL 137771604 is finished.
+ static struct {
+ const char* env;
+ const char* arg;
+ const char* expected_value;
+ } test[] = {
+ {nullptr, nullptr, "1\n"},
+ {nullptr, "--int_flag=2", "2\n"},
+ {"--int_flag=3", nullptr, "3\n"},
+ {"--int_flag=3", "--int_flag=2", "2\n"}, // flag beats environment
+ };
+ for (int i = 0; i != TF_ARRAYSIZE(test); i++) {
+ if (test[i].env != nullptr) {
+ setenv("TF_XLA_FLAGS", test[i].env, true /*overwrite*/);
+ }
+ tensorflow::SubProcess child;
+ std::vector<string> argv;
+ argv.push_back(binary_name);
+ argv.push_back("--recursing");
+ if (test[i].arg != nullptr) {
+ argv.push_back(test[i].arg);
+ }
+ child.SetProgram(binary_name, argv);
+ child.SetChannelAction(tensorflow::CHAN_STDOUT, tensorflow::ACTION_PIPE);
+ CHECK(child.Start()) << "test " << i;
+ string stdout_str;
+ int child_status = child.Communicate(nullptr, &stdout_str, nullptr);
+ CHECK_EQ(child_status, 0) << "test " << i;
+ CHECK_EQ(stdout_str, test[i].expected_value) << "test " << i;
+ }
+}
+
+} // namespace legacy_flags
+} // namespace xla
+
+int main(int argc, char* argv[]) {
+ // Save name of binary so that it may invoke itself.
+ xla::legacy_flags::binary_name = argv[0];
+ bool recursing = false;
+ xla::int32 int_flag = 1;
+ const std::vector<tensorflow::Flag> flag_list = {
+ tensorflow::Flag("recursing", &recursing,
+ "Whether the binary is being invoked recusively."),
+ tensorflow::Flag("int_flag", &int_flag, "An integer flag to test with"),
+ };
+ xla::string usage = tensorflow::Flags::Usage(argv[0], flag_list);
+ bool parse_ok = xla::legacy_flags::ParseFlagsFromEnv(flag_list);
+ if (!parse_ok) {
+ LOG(QFATAL) << "can't parse from environment\n" << usage;
+ }
+ parse_ok = tensorflow::Flags::Parse(&argc, argv, flag_list);
+ if (!parse_ok) {
+ LOG(QFATAL) << usage;
+ }
+ if (recursing) {
+ printf("%d\n", int_flag);
+ exit(0);
+ }
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/compiler/xla/legacy_flags/service_flags.cc b/tensorflow/compiler/xla/legacy_flags/service_flags.cc
new file mode 100644
index 0000000000..41cb8d8bdf
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/service_flags.cc
@@ -0,0 +1,100 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's service module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/compiler/xla/legacy_flags/service_flags.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static ServiceFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new ServiceFlags;
+ flags->xla_hlo_profile = false;
+ flags->xla_log_hlo_text = "";
+ flags->xla_generate_hlo_graph = "";
+ flags->xla_hlo_graph_addresses = false;
+ flags->xla_hlo_graph_layout = false;
+ flags->xla_hlo_graph_for_compute_constant = false;
+ flags->xla_dump_computations_to = "";
+ flags->xla_dump_hlo_text_to = "";
+ flags->xla_dump_executions_to = "";
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag(
+ "xla_hlo_profile", &flags->xla_hlo_profile,
+ "Instrument the computation to collect per-HLO cycle counts"),
+ tensorflow::Flag(
+ "xla_log_hlo_text", &flags->xla_log_hlo_text,
+ "If non-empty, print the text format of "
+ "HLO modules whose name partially matches this regex. E.g. "
+ "xla_log_hlo_text=.* will dump the text for every module."),
+ tensorflow::Flag(
+ "xla_generate_hlo_graph", &flags->xla_generate_hlo_graph,
+ "If non-empty, dump graph of HLO modules whose name partially "
+ "matches this regex. E.g. --xla_generate_hlo_graph=.* will dump "
+ "the graph of every module."),
+ tensorflow::Flag("xla_hlo_graph_addresses",
+ &flags->xla_hlo_graph_addresses,
+ "Show addresses of HLO ops in graph"),
+ tensorflow::Flag("xla_hlo_graph_layout", &flags->xla_hlo_graph_layout,
+ "Show layout of HLO ops in graph"),
+ tensorflow::Flag(
+ "xla_hlo_graph_for_compute_constant",
+ &flags->xla_hlo_graph_for_compute_constant,
+ "If true, include hlo dumps of graphs from ComputeConstant."
+ "Such graphs still need to be matched via xla_generate_hlo_graph."),
+ tensorflow::Flag("xla_dump_computations_to",
+ &flags->xla_dump_computations_to,
+ "Dumps computations that XLA executes into the provided "
+ "directory path"),
+ tensorflow::Flag("xla_dump_hlo_text_to", &flags->xla_dump_hlo_text_to,
+ "Dumps HLO modules that XLA executes into the provided "
+ "directory path"),
+ tensorflow::Flag("xla_dump_executions_to", &flags->xla_dump_executions_to,
+ "Dumps parameters and results of computations that XLA "
+ "executes into the provided directory path"),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's service module.
+void AppendServiceFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the ServiceFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+ServiceFlags* GetServiceFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/service_flags.h b/tensorflow/compiler/xla/legacy_flags/service_flags.h
new file mode 100644
index 0000000000..d982506944
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/service_flags.h
@@ -0,0 +1,69 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_SERVICE_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_SERVICE_FLAGS_H_
+
+// Legacy flags for XLA's service module.
+
+#include <vector>
+
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's service module.
+void AppendServiceFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's service module.
+typedef struct {
+ bool xla_hlo_profile; // Instrument the computation to collect per-HLO cycle
+ // counts
+ string xla_log_hlo_text; // If non-empty, print the text format of the HLO
+ // modules whose name partially
+ // matches this regex. E.g. xla_log_hlo_text=.*
+ // will dump the text for every module.
+ string xla_generate_hlo_graph; // If non-empty, dump graph of HLO modules
+ // whose name partially matches this regex.
+ // E.g. --xla_generate_hlo_graph=.* will dump
+ // the graph of every module.
+ bool xla_hlo_graph_addresses; // Show addresses of HLO ops in graph
+ bool xla_hlo_graph_layout; // Show layout of HLO ops in graph
+ bool xla_hlo_graph_for_compute_constant; // If true, include hlo dumps of
+ // graphs from ComputeConstant.
+ // Such graphs still need to be
+ // matched via
+ // xla_generate_hlo_graph.
+ string xla_dump_hlo_text_to; // Dumps HLO text for each HLO module that is
+ // executed into the provided directory path
+ string xla_dump_computations_to; // Dumps computations that XLA executes
+ // into the provided directory path
+ // Dumps parameters and results of computations that XLA executes into
+ // the provided directory path
+ string xla_dump_executions_to;
+} ServiceFlags;
+
+// Return a pointer to the ServiceFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+ServiceFlags* GetServiceFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_SERVICE_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/stream_assignment_flags.cc b/tensorflow/compiler/xla/legacy_flags/stream_assignment_flags.cc
new file mode 100644
index 0000000000..6506175777
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/stream_assignment_flags.cc
@@ -0,0 +1,63 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's stream_assignment module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/compiler/xla/legacy_flags/stream_assignment_flags.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static StreamAssignmentFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new StreamAssignmentFlags;
+ flags->xla_gpu_disable_multi_streaming = false;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag("xla_gpu_disable_multi_streaming",
+ &flags->xla_gpu_disable_multi_streaming,
+ "Disable multi-streaming in XLA's GPU backend"),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's stream_assignment
+// module.
+void AppendStreamAssignmentFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the StreamAssignmentFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+StreamAssignmentFlags* GetStreamAssignmentFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/stream_assignment_flags.h b/tensorflow/compiler/xla/legacy_flags/stream_assignment_flags.h
new file mode 100644
index 0000000000..a98f9b3458
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/stream_assignment_flags.h
@@ -0,0 +1,47 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_STREAM_ASSIGNMENT_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_STREAM_ASSIGNMENT_FLAGS_H_
+
+// Legacy flags for XLA's stream_assignment module.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's stream_assignment
+// module.
+void AppendStreamAssignmentFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's stream_assignment module.
+typedef struct {
+ bool xla_gpu_disable_multi_streaming; // Disable multi-streaming in XLA's GPU
+ // backend
+} StreamAssignmentFlags;
+
+// Return a pointer to the StreamAssignmentFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+StreamAssignmentFlags* GetStreamAssignmentFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_STREAM_ASSIGNMENT_FLAGS_H_
diff --git a/tensorflow/compiler/xla/legacy_flags/util_flags.cc b/tensorflow/compiler/xla/legacy_flags/util_flags.cc
new file mode 100644
index 0000000000..e6df19ddd2
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/util_flags.cc
@@ -0,0 +1,62 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Legacy flags for XLA's util module.
+
+#include <mutex> // NOLINT(build/c++11): only using std::call_once, not mutex.
+#include <vector>
+
+#include "tensorflow/compiler/xla/legacy_flags/parse_flags_from_env.h"
+#include "tensorflow/compiler/xla/legacy_flags/util_flags.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Pointers to the parsed value of the flags and flag descriptors, initialized
+// via flags_init.
+static UtilFlags* flags;
+static std::vector<tensorflow::Flag>* flag_list;
+static std::once_flag flags_init;
+
+// Allocate *flags. Called via call_once(&flags_init,...).
+static void AllocateFlags() {
+ flags = new UtilFlags;
+ flags->xla_status_add_backtrace = false;
+ flag_list = new std::vector<tensorflow::Flag>({
+ tensorflow::Flag("xla_status_add_backtrace",
+ &flags->xla_status_add_backtrace,
+ "add backtraces to XLA-produced status values"),
+ });
+ ParseFlagsFromEnv(*flag_list);
+}
+
+// Append to *append_to flag definitions associated with XLA's util module.
+void AppendUtilFlags(std::vector<tensorflow::Flag>* append_to) {
+ std::call_once(flags_init, &AllocateFlags);
+ append_to->insert(append_to->end(), flag_list->begin(), flag_list->end());
+}
+
+// Return a pointer to the UtilFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+UtilFlags* GetUtilFlags() {
+ std::call_once(flags_init, &AllocateFlags);
+ return flags;
+}
+
+} // namespace legacy_flags
+} // namespace xla
diff --git a/tensorflow/compiler/xla/legacy_flags/util_flags.h b/tensorflow/compiler/xla/legacy_flags/util_flags.h
new file mode 100644
index 0000000000..03bffcd726
--- /dev/null
+++ b/tensorflow/compiler/xla/legacy_flags/util_flags.h
@@ -0,0 +1,45 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_UTIL_FLAGS_H_
+#define TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_UTIL_FLAGS_H_
+
+// Legacy flags for the XLA's util module.
+
+#include <vector>
+
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace xla {
+namespace legacy_flags {
+
+// Append to *flag_list flag definitions associated with XLA's util module.
+void AppendUtilFlags(std::vector<tensorflow::Flag>* flag_list);
+
+// The values of flags associated with XLA's util module.
+typedef struct {
+ bool xla_status_add_backtrace; // add backtraces to XLA-produced statuses
+} UtilFlags;
+
+// Return a pointer to the UtilFlags struct;
+// repeated calls return the same pointer.
+// This should be called only after Flags::Parse() has returned.
+UtilFlags* GetUtilFlags();
+
+} // namespace legacy_flags
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_LEGACY_FLAGS_UTIL_FLAGS_H_