diff options
author | 2018-08-21 17:08:12 -0700 | |
---|---|---|
committer | 2018-08-21 17:12:22 -0700 | |
commit | 3cb3a450ed845c4602080f43d7bb6cfade298a22 (patch) | |
tree | 21e5a40fcb72737bd2cb00829bd049c5173d20e0 /tensorflow/compiler/xla/service/gpu/gpu_executable.cc | |
parent | 95d718a8a41370f31ccb3b32aaac7fd00b0291e4 (diff) |
[XLA] gtl::optional->absl::optional
PiperOrigin-RevId: 209686671
Diffstat (limited to 'tensorflow/compiler/xla/service/gpu/gpu_executable.cc')
-rw-r--r-- | tensorflow/compiler/xla/service/gpu/gpu_executable.cc | 2 |
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc index a1fbd8022d..88be63e267 100644 --- a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc +++ b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc @@ -112,7 +112,7 @@ Status GpuExecutable::ExecuteThunks( // // TODO(jlebar): Should we cache the results of HloInstruction::ToString(), // since we expect it to be an expensive call? - tensorflow::gtl::optional<ScopedAnnotation> op_annotation; + absl::optional<ScopedAnnotation> op_annotation; if (top_level_annotation.IsEnabled()) { op_annotation.emplace( thunk->hlo_instruction() != nullptr |