about summary refs log tree commit diff homepage
path: root/tensorflow/compiler/xla/service/hlo_runner.cc
diff options
context:
space:
mode:
author    A. Unique TensorFlower <gardener@tensorflow.org>    2017-11-17 14:16:09 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>     2017-11-17 14:20:28 -0800
commit 3f888e1539db5551cfcf9ee837a0555c224e0018 (patch)
tree   5f2df45e666fc15e370e6c029bf0712ee65d53ed /tensorflow/compiler/xla/service/hlo_runner.cc
parent d79dd4993061670c1ec5ea01db3022f28d72d0a3 (diff)
Add a Compiler::BuildExecutable interface that compiles the given Hlo module without optimizations.
PiperOrigin-RevId: 176158846
Diffstat (limited to 'tensorflow/compiler/xla/service/hlo_runner.cc')
-rw-r--r--  tensorflow/compiler/xla/service/hlo_runner.cc | 19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/tensorflow/compiler/xla/service/hlo_runner.cc b/tensorflow/compiler/xla/service/hlo_runner.cc
index 63f2b1296e..6b6d48233a 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.cc
+++ b/tensorflow/compiler/xla/service/hlo_runner.cc
@@ -114,11 +114,16 @@ HloRunner::~HloRunner() {
StatusOr<se::DeviceMemoryBase> HloRunner::Execute(
std::unique_ptr<HloModule> module,
tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> arguments,
- Shape* result_shape) {
+ Shape* result_shape, bool run_hlo_passes) {
+ if (run_hlo_passes) {
+ TF_ASSIGN_OR_RETURN(
+ module, backend().compiler()->RunHloPasses(
+ std::move(module), backend().default_stream_executor()));
+ }
TF_ASSIGN_OR_RETURN(
std::unique_ptr<Executable> executable,
- backend().compiler()->Compile(std::move(module),
- backend().default_stream_executor()));
+ backend().compiler()->RunBackend(std::move(module),
+ backend().default_stream_executor()));
se::Stream stream(backend().default_stream_executor());
stream.Init();
@@ -193,10 +198,12 @@ StatusOr<std::unique_ptr<Literal>> HloRunner::TransferFromDevice(
StatusOr<std::unique_ptr<Literal>> HloRunner::ExecuteAndTransfer(
std::unique_ptr<HloModule> module,
- tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> arguments) {
+ tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> arguments,
+ bool run_hlo_passes) {
Shape result_shape;
- TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase device_base,
- Execute(std::move(module), arguments, &result_shape));
+ TF_ASSIGN_OR_RETURN(
+ se::DeviceMemoryBase device_base,
+ Execute(std::move(module), arguments, &result_shape, run_hlo_passes));
return TransferFromDevice(result_shape, device_base);
}