about summary refs log tree commit diff homepage
path: root/tensorflow/compiler/xla/service/hlo_runner.cc
diff options
context:
space:
mode:
authorGravatar Justin Lebar <jlebar@google.com>2018-01-26 17:12:23 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-01-26 17:16:06 -0800
commit0b164dd43bbf76547836a9ae6ae424b9cda65968 (patch)
tree522a3767ecc75b69d2c9cf42b5f9a21af76b3de3 /tensorflow/compiler/xla/service/hlo_runner.cc
parentf8da6cc63ae1fd71de1ab5d9e91884872b249e55 (diff)
[XLA] Add a DeviceAllocator* argument to compilation.
In a later change, the GPU backend will use this allocator to reserve scratch memory when trying out different convolution algorithms during compilation.

PiperOrigin-RevId: 183469579
Diffstat (limited to 'tensorflow/compiler/xla/service/hlo_runner.cc')
-rw-r--r--tensorflow/compiler/xla/service/hlo_runner.cc6
1 file changed, 4 insertions, 2 deletions
diff --git a/tensorflow/compiler/xla/service/hlo_runner.cc b/tensorflow/compiler/xla/service/hlo_runner.cc
index 204a8bf748..e281538848 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.cc
+++ b/tensorflow/compiler/xla/service/hlo_runner.cc
@@ -121,12 +121,14 @@ StatusOr<std::unique_ptr<Literal>> HloRunner::ExecuteInternal(
if (run_hlo_passes) {
TF_ASSIGN_OR_RETURN(
module, backend().compiler()->RunHloPasses(
- std::move(module), backend().default_stream_executor()));
+ std::move(module), backend().default_stream_executor(),
+ /*device_allocator=*/nullptr));
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<Executable> executable,
backend().compiler()->RunBackend(std::move(module),
- backend().default_stream_executor()));
+ backend().default_stream_executor(),
+ /*device_allocator=*/nullptr));
se::Stream stream(backend().default_stream_executor());
stream.Init();