author    Yunxing Dai <yunxing@google.com>  2018-08-21 17:08:12 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-08-21 17:12:22 -0700
commit    3cb3a450ed845c4602080f43d7bb6cfade298a22 (patch)
tree      21e5a40fcb72737bd2cb00829bd049c5173d20e0 /tensorflow/compiler/xla/service/cpu
parent    95d718a8a41370f31ccb3b32aaac7fd00b0291e4 (diff)
[XLA] gtl::optional->absl::optional
PiperOrigin-RevId: 209686671
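
Note: the change is a mechanical spelling migration from the gtl alias to the Abseil type it forwards to. A minimal before/after sketch of the pattern, assuming only the Abseil optional header (the function and values are illustrative, not from the patch):

    #include "absl/types/optional.h"

    // Before: tensorflow::gtl::optional<int> MaybeAnswer(bool);
    // After, spelled directly against Abseil:
    absl::optional<int> MaybeAnswer(bool have_answer) {
      if (!have_answer) {
        return absl::nullopt;  // disengaged optional, as at the rewritten call sites
      }
      return 42;  // engaged optional via implicit conversion
    }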
Diffstat (limited to 'tensorflow/compiler/xla/service/cpu')
 tensorflow/compiler/xla/service/cpu/cpu_layout_assignment.cc | 4 ++--
 tensorflow/compiler/xla/service/cpu/cpu_options.cc           | 9 ++++-----
 tensorflow/compiler/xla/service/cpu/cpu_options.h            | 5 ++---
 tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc        | 2 +-
 tensorflow/compiler/xla/service/cpu/dot_op_emitter.h         | 2 +-
5 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_layout_assignment.cc b/tensorflow/compiler/xla/service/cpu/cpu_layout_assignment.cc
index aa872d5ec9..69acca86bf 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_layout_assignment.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_layout_assignment.cc
@@ -34,8 +34,8 @@ namespace cpu {
// instruction stream.
namespace {
-using ::tensorflow::gtl::nullopt;
-using ::tensorflow::gtl::optional;
+using ::absl::nullopt;
+using ::absl::optional;
using ShouldMakeOperandColMajorCache =
tensorflow::gtl::FlatMap<const HloInstruction*, bool>;
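
Note: because cpu_layout_assignment.cc pulls the names in through using-declarations, only those two lines change; every unqualified use of optional and nullopt below them compiles as before. A minimal sketch of why (the function is illustrative, not from this file):

    #include "absl/types/optional.h"

    using ::absl::nullopt;
    using ::absl::optional;

    // Downstream code spells the names unqualified, so swapping the
    // using-declarations is the whole migration for this file.
    optional<bool> CachedDecision(const bool* cached) {
      if (cached == nullptr) return nullopt;
      return *cached;
    }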
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_options.cc b/tensorflow/compiler/xla/service/cpu/cpu_options.cc
index 3ed7876715..b6039b465e 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_options.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_options.cc
@@ -45,8 +45,7 @@ bool VectorizedReduceDisabled(const HloModuleConfig& config) {
return extra_options_map.count(kXlaOptimizeForSizeCpuOption) > 0;
}
-tensorflow::gtl::optional<int64> LlvmIrGemvTilingFactor(
- const HloModuleConfig& config) {
+absl::optional<int64> LlvmIrGemvTilingFactor(const HloModuleConfig& config) {
const auto& extra_options_map =
config.debug_options().xla_backend_extra_options();
auto it = extra_options_map.find(kLlvmIrDotTilingFactor);
@@ -55,7 +54,7 @@ tensorflow::gtl::optional<int64> LlvmIrGemvTilingFactor(
tensorflow::strings::safe_strto64(it->second, &tiling_factor)) {
return tiling_factor;
}
- return tensorflow::gtl::nullopt;
+ return absl::nullopt;
}
bool EnableExperimentalLlvmIrGemm(const HloModuleConfig& config) {
@@ -71,13 +70,13 @@ static tensorflow::StringPiece RemoveSuffix(tensorflow::StringPiece str,
return str.substr(0, str.size() - suffix.size());
}
-tensorflow::gtl::optional<std::tuple<int64, int64, int64>> LlvmIrGemmTileSize(
+absl::optional<std::tuple<int64, int64, int64>> LlvmIrGemmTileSize(
const HloModuleConfig& config) {
const auto& extra_options_map =
config.debug_options().xla_backend_extra_options();
auto it = extra_options_map.find(kLlvmIrGemmTileSize);
if (it == extra_options_map.end()) {
- return tensorflow::gtl::nullopt;
+ return absl::nullopt;
}
std::vector<string> tile_components =
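
Note: the recurring pattern in cpu_options.cc is to probe the backend's extra-options map and return absl::nullopt when a key is absent or unparsable. A standalone sketch of that shape, with stand-ins: the map type and absl::SimpleAtoi are assumptions here, while the real code reads config.debug_options().xla_backend_extra_options() and parses with tensorflow::strings::safe_strto64.

    #include <cstdint>
    #include <map>
    #include <string>

    #include "absl/strings/numbers.h"
    #include "absl/types/optional.h"

    absl::optional<int64_t> ParseInt64Option(
        const std::map<std::string, std::string>& extra_options,
        const std::string& key) {
      auto it = extra_options.find(key);
      if (it == extra_options.end()) {
        return absl::nullopt;  // key absent
      }
      int64_t value;
      if (!absl::SimpleAtoi(it->second, &value)) {
        return absl::nullopt;  // key present but not an integer
      }
      return value;  // engaged optional holding the parsed value
    }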
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_options.h b/tensorflow/compiler/xla/service/cpu/cpu_options.h
index 429b9e16cb..47c7eb13b6 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_options.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_options.h
@@ -27,9 +27,8 @@ namespace options {
bool OptimizeForSizeRequested(const HloModuleConfig& config);
bool VectorizedReduceDisabled(const HloModuleConfig& config);
bool EnableExperimentalLlvmIrGemm(const HloModuleConfig& config);
-tensorflow::gtl::optional<int64> LlvmIrGemvTilingFactor(
- const HloModuleConfig& config);
-tensorflow::gtl::optional<std::tuple<int64, int64, int64>> LlvmIrGemmTileSize(
+absl::optional<int64> LlvmIrGemvTilingFactor(const HloModuleConfig& config);
+absl::optional<std::tuple<int64, int64, int64>> LlvmIrGemmTileSize(
const HloModuleConfig& config);
} // namespace options
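
Note: call sites are unaffected by the rename because absl::optional, like the old alias, converts contextually to bool. A self-contained usage sketch with hypothetical stand-ins for the two declarations above:

    #include <cstdint>
    #include <iostream>
    #include <tuple>

    #include "absl/types/optional.h"

    // Stand-ins for LlvmIrGemvTilingFactor / LlvmIrGemmTileSize.
    absl::optional<int64_t> FakeTilingFactor() { return 8; }
    absl::optional<std::tuple<int64_t, int64_t, int64_t>> FakeGemmTileSize() {
      return absl::nullopt;
    }

    int main() {
      if (auto factor = FakeTilingFactor()) {
        std::cout << "gemv tiling factor: " << *factor << "\n";
      }
      if (!FakeGemmTileSize()) {
        std::cout << "no gemm tile size configured\n";
      }
      return 0;
    }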
diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
index 414f185fdf..797392c265 100644
--- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
@@ -1620,7 +1620,7 @@ bool PotentiallyImplementedAsEigenDot(
// For vector-matrix dot products, it is always profitable to make the Rhs
// column major.
-tensorflow::gtl::optional<int64> ProfitableToMakeDotOperandColumnMajor(
+absl::optional<int64> ProfitableToMakeDotOperandColumnMajor(
const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kDot && hlo.shape().dimensions_size() == 2 &&
hlo.shape().dimensions(0) == 1) {
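
Note: the logic visible in this hunk checks for a vector-matrix dot (a 2-D output whose first dimension is 1), where the comment above says making the Rhs column major is always profitable. A sketch of that decision under stand-in types (DotShape replaces the HloInstruction shape queries):

    #include <cstdint>

    #include "absl/types/optional.h"

    // Stand-in for the shape queries used in dot_op_emitter.cc.
    struct DotShape {
      int64_t rank;       // number of output dimensions
      int64_t first_dim;  // size of dimension 0
    };

    // A [1, N] output means a vector-matrix product, so operand 1 (the Rhs)
    // should be column major; otherwise report nullopt.
    absl::optional<int64_t> ColumnMajorOperandIndex(const DotShape& shape) {
      if (shape.rank == 2 && shape.first_dim == 1) {
        return 1;
      }
      return absl::nullopt;
    }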
diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h
index aeead3844b..05322faa75 100644
--- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h
+++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h
@@ -38,7 +38,7 @@ bool PotentiallyImplementedAsEigenDot(
// Returns the index for an operand to `hlo` that should ideally be column
// major. Returns nullopt if there is no such operand or if `hlo` is not a dot
// or a fusion containing a dot.
-tensorflow::gtl::optional<int64> ProfitableToMakeDotOperandColumnMajor(
+absl::optional<int64> ProfitableToMakeDotOperandColumnMajor(
const HloInstruction& hlo);
// Returns true to indicate that we can generate a tiled LLVM IR implementation