author A. Unique TensorFlower <gardener@tensorflow.org> 2018-04-12 10:14:02 -0700
committer TensorFlower Gardener <gardener@tensorflow.org> 2018-04-12 10:16:34 -0700
commit ffbf77de81d0b7b4b169c92d0d9fbbdef5b8842a (patch)
tree 8d68eedf28bdcac55516b6a3a176d56f6cef0fa2
parent 8a247976484173059aedc17bfd8d770b8d1a70e1 (diff)
Introduced a tool to run an HLO module in a replicated fashion, by infeeding random data and outfeeding the data generated at each step.
The arguments of the computation can either be read from the session module or randomly generated. The tool uses the raw transfer manager API to infeed and outfeed the data.
PiperOrigin-RevId: 192628605
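As a quick orientation, here is a minimal sketch of how the new ExecuteReplicated() API might be driven; the RunReplicated helper, its parameters, and the platform lookup are illustrative assumptions, not part of this change:

#include "tensorflow/compiler/xla/service/hlo_runner.h"
#include "tensorflow/compiler/xla/service/platform_util.h"

// Illustrative only: runs `module` on `num_replicas` devices, feeding
// `infeed_data` to every replica and collecting outfeed results into
// `*outfeed_values` (pass nullptr to read and discard them).
xla::StatusOr<std::vector<std::unique_ptr<xla::Literal>>> RunReplicated(
    std::unique_ptr<xla::HloModule> module, const xla::Literal& infeed_data,
    const xla::Shape& outfeed_shape, xla::int64 num_replicas,
    std::vector<std::unique_ptr<xla::Literal>>* outfeed_values) {
  xla::HloRunner runner(xla::PlatformUtil::GetDefaultPlatform().ValueOrDie());
  xla::HloRunner::ReplicatedExecuteOptions options;
  options.num_replicas = num_replicas;
  options.infeed = &infeed_data;       // same literal fed to every replica
  options.infeed_steps = 100;          // should match iterations-per-loop
  options.outfeed_shape = outfeed_shape;
  options.outfeed_values = outfeed_values;
  options.run_hlo_passes = false;      // saved modules are already optimized
  return runner.ExecuteReplicated(std::move(module), options);
}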
-rw-r--r-- tensorflow/compiler/xla/service/BUILD | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_runner.cc | 189
-rw-r--r-- tensorflow/compiler/xla/service/hlo_runner.h | 66
-rw-r--r-- tensorflow/compiler/xla/shape_util.h | 5
-rw-r--r-- tensorflow/compiler/xla/tests/test_utils.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tests/test_utils.h | 3
6 files changed, 221 insertions(+), 48 deletions(-)
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index db91e80407..65203fa2a0 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -2535,6 +2535,7 @@ cc_library(
srcs = ["hlo_runner.cc"],
hdrs = ["hlo_runner.h"],
deps = [
+ ":computation_placer",
":executable",
":hlo",
":transfer_manager",
@@ -2551,6 +2552,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"//third_party/eigen3",
+ "@com_google_absl//absl/memory",
],
)
diff --git a/tensorflow/compiler/xla/service/hlo_runner.cc b/tensorflow/compiler/xla/service/hlo_runner.cc
index ec7d8210a7..2e834a79d9 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.cc
+++ b/tensorflow/compiler/xla/service/hlo_runner.cc
@@ -16,21 +16,16 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_runner.h"
-#include <set>
#include <string>
#include <utility>
+#include "absl/memory/memory.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
-#include "tensorflow/compiler/xla/service/backend.h"
-#include "tensorflow/compiler/xla/service/executable.h"
-#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/transfer_manager.h"
#include "tensorflow/compiler/xla/shape_util.h"
-#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/tools/parser/hlo_parser.h"
-#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/common_runtime/eigen_thread_pool.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
@@ -91,15 +86,6 @@ HloRunner::ReadModuleFromHloTextFile(const std::string& filename,
return tools::Parse(hlo_string, config);
}
-// Define this in .cc file to avoid having to include eigen or forward declare
-// these types in the header.
-struct HloRunner::EigenThreadPoolWrapper {
- std::unique_ptr<EigenThreadPoolWrapper> pool;
- std::unique_ptr<Eigen::ThreadPoolDevice> device;
-};
-
-HloRunner::HloRunner() {}
-
HloRunner::HloRunner(se::Platform* platform) {
BackendOptions backend_options;
backend_options.set_platform(platform);
@@ -113,32 +99,14 @@ StatusOr<std::unique_ptr<Literal>> HloRunner::Execute(
std::unique_ptr<HloModule> module,
const tensorflow::gtl::ArraySlice<Literal*> arguments,
bool run_hlo_passes) {
- if (run_hlo_passes) {
- TF_ASSIGN_OR_RETURN(
- module, backend().compiler()->RunHloPasses(
- std::move(module), backend().default_stream_executor(),
- /*device_allocator=*/nullptr));
- }
- TF_ASSIGN_OR_RETURN(
- std::unique_ptr<Executable> executable,
- backend().compiler()->RunBackend(std::move(module),
- backend().default_stream_executor(),
- /*device_allocator=*/nullptr));
-
+ TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> executable,
+ CreateExecutable(std::move(module), run_hlo_passes));
se::Stream stream(backend().default_stream_executor());
stream.Init();
- ExecutableRunOptions run_options;
- run_options.set_device_ordinal(backend().default_device_ordinal());
- run_options.set_stream(&stream);
- run_options.set_allocator(backend().memory_allocator());
- run_options.set_inter_op_thread_pool(backend().inter_op_thread_pool());
- run_options.set_intra_op_thread_pool(
- backend().eigen_intra_op_thread_pool_device());
-
- ServiceExecutableRunOptions service_run_options(
- run_options, backend().StreamBorrower(),
- backend().inter_op_thread_pool());
+ ServiceExecutableRunOptions service_run_options(GetServiceRunOptionsForDevice(
+ backend().default_device_ordinal(), &stream, nullptr));
+ const ExecutableRunOptions& run_options = service_run_options.run_options();
// Copy arguments to device.
std::vector<std::unique_ptr<ScopedShapedBuffer>> argument_buffers;
@@ -178,10 +146,153 @@ StatusOr<std::unique_ptr<Literal>> HloRunner::Execute(
return result_literal;
}
+StatusOr<std::vector<std::unique_ptr<Literal>>> HloRunner::ExecuteReplicated(
+ std::unique_ptr<HloModule> module,
+ const ReplicatedExecuteOptions& options) {
+ TF_ASSIGN_OR_RETURN(
+ std::unique_ptr<Executable> executable,
+ CreateExecutable(std::move(module), options.run_hlo_passes));
+ TF_ASSIGN_OR_RETURN(
+ DeviceAssignment device_assignment,
+ backend().computation_placer()->AssignDevices(options.num_replicas, 1));
+ std::vector<std::unique_ptr<se::Stream>> streams;
+ std::vector<ServiceExecutableRunOptions> service_run_options;
+ std::vector<std::unique_ptr<ScopedShapedBuffer>> argument_buffers;
+ // Plus one so we can safely get &argument_buffer_ptrs[0] in case there are
+ // no arguments.
+ std::vector<const ShapedBuffer*> argument_buffer_ptrs(
+ options.num_replicas * options.arguments.size() + 1);
+ std::vector<tensorflow::gtl::ArraySlice<const ShapedBuffer*>>
+ argument_buffer_slices;
+ int64 index = 0;
+ for (int64 i = 0; i < options.num_replicas; ++i) {
+ int64 device = device_assignment(i, 0);
+ TF_ASSIGN_OR_RETURN(se::StreamExecutor * executor,
+ backend().stream_executor(device));
+ streams.push_back(absl::make_unique<se::Stream>(executor));
+ streams.back()->Init();
+ service_run_options.emplace_back(GetServiceRunOptionsForDevice(
+ device, streams.back().get(), &device_assignment));
+
+ // Copy arguments to device.
+ for (const Literal* argument : options.arguments) {
+ TF_ASSIGN_OR_RETURN(
+ std::unique_ptr<ScopedShapedBuffer> argument_buffer,
+ backend().transfer_manager()->AllocateScopedShapedBuffer(
+ argument->shape(), backend().memory_allocator(), device));
+ TF_RETURN_IF_ERROR(backend().transfer_manager()->TransferLiteralToDevice(
+ executor, *argument, *argument_buffer));
+ argument_buffers.push_back(std::move(argument_buffer));
+ argument_buffer_ptrs[index++] = argument_buffers.back().get();
+ }
+ argument_buffer_slices.emplace_back(
+ &argument_buffer_ptrs[index - options.arguments.size()],
+ options.arguments.size());
+ }
+
+ std::unique_ptr<tensorflow::thread::ThreadPool> pool;
+ int64 num_threads = (options.infeed != nullptr) ? options.num_replicas : 0;
+ if (ShapeUtil::IsInitialized(options.outfeed_shape)) {
+ num_threads += options.num_replicas;
+ }
+ if (num_threads > 0) {
+ pool = absl::make_unique<tensorflow::thread::ThreadPool>(
+ tensorflow::Env::Default(), "infeed_outfeed",
+ /*num_threads=*/num_threads);
+ }
+ if (options.infeed != nullptr) {
+ for (int64 i = 0; i < options.num_replicas; ++i) {
+ int64 device = device_assignment(i, 0);
+ pool->Schedule([this, device, &options]() {
+ se::StreamExecutor* executor =
+ backend().stream_executor(device).ValueOrDie();
+ VLOG(1) << "Starting infeed on device " << device;
+ for (int64 step = 1;
+ options.infeed_steps < 0 || step <= options.infeed_steps; ++step) {
+ TF_CHECK_OK(backend().transfer_manager()->TransferLiteralToInfeed(
+ executor, *options.infeed));
+ if (step % 100 == 0) {
+ VLOG(1) << "Infeed step " << step;
+ }
+ }
+ });
+ }
+ }
+ if (ShapeUtil::IsInitialized(options.outfeed_shape)) {
+ for (int64 i = 0; i < options.num_replicas; ++i) {
+ int64 device = device_assignment(i, 0);
+ pool->Schedule([this, device, &options]() {
+ se::StreamExecutor* executor =
+ backend().stream_executor(device).ValueOrDie();
+ VLOG(1) << "Starting outfeed on device " << device;
+ for (int64 step = 1;
+ options.infeed_steps < 0 || step <= options.infeed_steps; ++step) {
+ auto literal = absl::make_unique<Literal>();
+ TF_CHECK_OK(backend().transfer_manager()->TransferLiteralFromOutfeed(
+ executor, options.outfeed_shape, literal.get()));
+ if (options.outfeed_values != nullptr) {
+ options.outfeed_values->push_back(std::move(literal));
+ }
+ if (step % 100 == 0) {
+ VLOG(1) << "Outfeed step " << step;
+ }
+ }
+ });
+ }
+ }
+
+ LOG(INFO) << "Replicated execution started";
+ TF_ASSIGN_OR_RETURN(std::vector<std::unique_ptr<ShapedBuffer>> results,
+ executable->ExecuteOnStreams(service_run_options,
+ argument_buffer_slices));
+ LOG(INFO) << "Replicated execution terminated";
+
+ std::vector<std::unique_ptr<Literal>> exec_results;
+ for (int64 i = 0; i < options.num_replicas; ++i) {
+ TF_ASSIGN_OR_RETURN(std::unique_ptr<ScopedShapedBuffer> result,
+ ScopedShapedBuffer::MakeScoped(
+ results[i].get(), backend().memory_allocator()));
+ TF_ASSIGN_OR_RETURN(std::unique_ptr<Literal> literal,
+ backend().transfer_manager()->TransferLiteralFromDevice(
+ streams[i]->parent(), *result));
+ exec_results.push_back(std::move(literal));
+ }
+ return std::move(exec_results);
+}
+
+StatusOr<std::unique_ptr<Executable>> HloRunner::CreateExecutable(
+ std::unique_ptr<HloModule> module, bool run_hlo_passes) {
+ if (run_hlo_passes) {
+ TF_ASSIGN_OR_RETURN(
+ module, backend().compiler()->RunHloPasses(
+ std::move(module), backend().default_stream_executor(),
+ backend().memory_allocator()));
+ }
+ return backend().compiler()->RunBackend(std::move(module),
+ backend().default_stream_executor(),
+ backend().memory_allocator());
+}
+
+ServiceExecutableRunOptions HloRunner::GetServiceRunOptionsForDevice(
+ int64 device, se::Stream* stream, DeviceAssignment* device_assignment) {
+ ExecutableRunOptions run_options;
+ run_options.set_device_ordinal(device);
+ run_options.set_stream(stream);
+ run_options.set_allocator(backend().memory_allocator());
+ run_options.set_inter_op_thread_pool(backend().inter_op_thread_pool());
+ run_options.set_intra_op_thread_pool(
+ backend().eigen_intra_op_thread_pool_device());
+ if (device_assignment != nullptr) {
+ run_options.set_device_assignment(device_assignment);
+ }
+ return ServiceExecutableRunOptions(run_options, backend().StreamBorrower(),
+ backend().inter_op_thread_pool());
+}
+
Backend& HloRunner::backend() {
if (!backend_) {
backend_ = Backend::CreateDefaultBackend().ConsumeValueOrDie();
- VLOG(1) << "executing on platform " << backend().platform()->Name();
+ VLOG(1) << "Executing on platform " << backend().platform()->Name();
}
return *backend_;
}
diff --git a/tensorflow/compiler/xla/service/hlo_runner.h b/tensorflow/compiler/xla/service/hlo_runner.h
index 06ce22a5b9..f54fb44766 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.h
+++ b/tensorflow/compiler/xla/service/hlo_runner.h
@@ -16,12 +16,16 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HLO_RUNNER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_HLO_RUNNER_H_
+#include <map>
#include <memory>
+#include <set>
#include <string>
#include <vector>
#include "tensorflow/compiler/xla/service/backend.h"
#include "tensorflow/compiler/xla/service/compiler.h"
+#include "tensorflow/compiler/xla/service/computation_placer.h"
+#include "tensorflow/compiler/xla/service/executable.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -40,9 +44,43 @@ namespace xla {
// file), or parsed from a hlo textual IR string.
class HloRunner {
public:
- HloRunner();
-
- HloRunner(::perftools::gputools::Platform* platform);
+ // The options used to configure an ExecuteReplicated() call.
+ struct ReplicatedExecuteOptions {
+ // The number of devices the HLO module should be replicated onto.
+ int64 num_replicas = 1;
+
+ // The arguments to be fed to each replica. Since this is used for a
+ // replicated execution, all the arguments are the same for all replicas.
+ std::vector<const Literal*> arguments;
+
+ // If the HLO module being run has an infeed instruction, this will be the
+ // data which will be fed to it, for as many as infeed_steps steps.
+ const Literal* infeed = nullptr;
+
+ // The number of times the infeed literal should be fed to the HLO module.
+ // For a clean exit, this should match the iterations-per-loop parameter
+ // used when generating the HLO module proto (that is usually the main
+ // while loop boundary counter). A value higher than iterations-per-loop
+ // would leave the infeed threads feeding a computation that has already
+ // completed, while a lower value would make the ExecuteReplicated() call
+ // hang (the computation would wait for infeed data that never arrives).
+ int64 infeed_steps = -1;
+
+ // The shape of the outfeed operation. If left uninitialized, the HLO
+ // module is assumed not to generate any outfeed.
+ Shape outfeed_shape;
+
+ // A pointer to a vector where the outfeed values will be stored. If
+ // nullptr, the values will be read and discarded.
+ std::vector<std::unique_ptr<Literal>>* outfeed_values = nullptr;
+
+ // Whether the HLO passes should be run on the input module. Saved modules
+ // have usually already been through the HLO pass pipeline, so running the
+ // passes again will likely cause errors.
+ bool run_hlo_passes = false;
+ };
+
+ explicit HloRunner(::perftools::gputools::Platform* platform);
~HloRunner();
@@ -86,6 +124,13 @@ class HloRunner {
return Execute(std::move(module), argument_pointers, run_hlo_passes);
}
+ // Executes a given HLO module on a set of replicas, and returns a vector
+ // of execution results, one per replica, indexed by replica number.
+ StatusOr<std::vector<std::unique_ptr<Literal>>> ExecuteReplicated(
+ std::unique_ptr<HloModule> module,
+ const ReplicatedExecuteOptions& options);
+
// If backend is not created in the constructor, creates and returns the
// default backend. If creation fails, crashes the program.
//
@@ -94,9 +139,18 @@ class HloRunner {
Backend& backend();
private:
- struct EigenThreadPoolWrapper;
-
- std::unique_ptr<EigenThreadPoolWrapper> thread_pool_wrapper_;
+ // Creates an executable object given an HLO module. If run_hlo_passes is
+ // true, the HLO passes will be run on the module before compilation.
+ StatusOr<std::unique_ptr<Executable>> CreateExecutable(
+ std::unique_ptr<HloModule> module, bool run_hlo_passes);
+
+ // Creates a ServiceExecutableRunOptions object to configure a run on the
+ // given device, using the provided stream object. If device_assignment is
+ // not nullptr, it will be used to configure the replication parameters.
+ // Replicated executions should always pass a device_assignment.
+ ServiceExecutableRunOptions GetServiceRunOptionsForDevice(
+ int64 device, ::perftools::gputools::Stream* stream,
+ DeviceAssignment* device_assignment);
std::unique_ptr<Backend> backend_;
};
diff --git a/tensorflow/compiler/xla/shape_util.h b/tensorflow/compiler/xla/shape_util.h
index 1375f981a8..6d228eff46 100644
--- a/tensorflow/compiler/xla/shape_util.h
+++ b/tensorflow/compiler/xla/shape_util.h
@@ -319,6 +319,11 @@ class ShapeUtil {
// Returns an empty tuple shape. Can be used to indicate side-effects.
static Shape MakeNil() { return MakeTupleShape({}); }
+ // Checks whether the shape is initialized.
+ static bool IsInitialized(const Shape& shape) {
+ return shape.element_type() != PRIMITIVE_TYPE_INVALID;
+ }
+
// Constructs a new shape with the given element type and sequence of
// dimensions.
static Shape MakeShape(PrimitiveType element_type,
diff --git a/tensorflow/compiler/xla/tests/test_utils.cc b/tensorflow/compiler/xla/tests/test_utils.cc
index e30d115fae..cda1989fad 100644
--- a/tensorflow/compiler/xla/tests/test_utils.cc
+++ b/tensorflow/compiler/xla/tests/test_utils.cc
@@ -340,8 +340,8 @@ StatusOr<std::vector<std::unique_ptr<Literal>>> MakeFakeArguments(
}
Status VerifyHloModule(const perftools::gputools::Platform& platform,
- HloModule* const module) {
- return HloVerifier().Run(module).status();
+ HloModule* const module, bool allow_mixed_precision) {
+ return HloVerifier(allow_mixed_precision).Run(module).status();
}
} // namespace xla
diff --git a/tensorflow/compiler/xla/tests/test_utils.h b/tensorflow/compiler/xla/tests/test_utils.h
index 0fb024ffb0..b5ab779574 100644
--- a/tensorflow/compiler/xla/tests/test_utils.h
+++ b/tensorflow/compiler/xla/tests/test_utils.h
@@ -69,7 +69,8 @@ StatusOr<std::vector<std::unique_ptr<Literal>>> MakeFakeArguments(
// Check that a given module satisfies various constraints before trying to
// execute it.
Status VerifyHloModule(const perftools::gputools::Platform& platform,
- HloModule* const module);
+ HloModule* const module,
+ bool allow_mixed_precision = false);
} // namespace xla
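
For the "randomly generated" argument path mentioned in the commit message, here is a hedged sketch of how the existing MakeFakeArguments() helper could populate ReplicatedExecuteOptions::arguments; the AddRandomArguments wrapper below is hypothetical wiring, not code from this patch:

#include "tensorflow/compiler/xla/service/hlo_runner.h"
#include "tensorflow/compiler/xla/tests/test_utils.h"

// Hypothetical helper: fills options->arguments with random literals matching
// the entry computation's parameter shapes. The literals in *fake_args own
// the data and must outlive the ExecuteReplicated() call, since
// options->arguments holds raw pointers only.
xla::Status AddRandomArguments(
    xla::HloModule* module,
    std::vector<std::unique_ptr<xla::Literal>>* fake_args,
    xla::HloRunner::ReplicatedExecuteOptions* options) {
  auto args_or = xla::MakeFakeArguments(module);
  if (!args_or.ok()) {
    return args_or.status();
  }
  *fake_args = args_or.ConsumeValueOrDie();
  for (const std::unique_ptr<xla::Literal>& arg : *fake_args) {
    options->arguments.push_back(arg.get());
  }
  return xla::Status::OK();
}

Since the same literals are fed to every replica, a single MakeFakeArguments() call suffices regardless of num_replicas.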