path: root/tensorflow/compiler/plugin
author    Kay Zhu <kayzhu@google.com>  2017-07-26 21:55:47 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2017-07-26 22:01:49 -0700
commit    631a364cd1ddd822cf4b8712a5388d5ea39ecd7e (patch)
tree      0f548e342516dab2b80639fb4bfecb82103db975 /tensorflow/compiler/plugin
parent    a52470172324e77dff9f548cb40e45d6a3a156b5 (diff)
[XLA] Add Reduce, DynamicSlice and DynamicSliceUpdate to HloEvaluator.
- Reduce is disabled explicitly for constant folding, as not all types of embedded computation can be currently supported by the evaluator.
- Added support to evaluate HloModule to HloEvaluator.
- Minor signature change to Evaluate().

PiperOrigin-RevId: 163299238
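
The signature change is easiest to see in isolation: Evaluate() now takes the computation by const reference instead of by pointer, so callers dereference their HloComputation*. Below is a minimal, hypothetical C++ sketch of a caller written against that updated signature; the helper name RunOnEvaluator, the header paths, and the use of std::vector for the argument literals are illustrative assumptions, not part of this commit.

#include <memory>
#include <vector>

#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"

namespace xla {

// Hypothetical caller: evaluates `computation` on the HloEvaluator with the
// given argument literals and returns the resulting literal (or an error).
StatusOr<std::unique_ptr<Literal>> RunOnEvaluator(
    const HloComputation& computation,
    const std::vector<const Literal*>& arg_literals) {
  HloEvaluator evaluator;
  // Before this commit the computation was passed as a pointer; now it is
  // passed by const reference, matching the `*computation` dereference in
  // the hunk below.
  return evaluator.Evaluate(computation, arg_literals);
}

}  // namespace xla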
Diffstat (limited to 'tensorflow/compiler/plugin')
-rw-r--r--  tensorflow/compiler/plugin/executor/executable.cc  |  11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/tensorflow/compiler/plugin/executor/executable.cc b/tensorflow/compiler/plugin/executor/executable.cc
index 4673a90e0a..38479d688d 100644
--- a/tensorflow/compiler/plugin/executor/executable.cc
+++ b/tensorflow/compiler/plugin/executor/executable.cc
@@ -30,8 +30,8 @@ ExecutorExecutable::ExecutorExecutable(std::unique_ptr<HloModule> hlo_module)
ExecutorExecutable::~ExecutorExecutable() {}
-static se::DeviceMemoryBase AllocateSingleOutput(sep::ExecutorExecutor* executor,
- const Literal& literal) {
+static se::DeviceMemoryBase AllocateSingleOutput(
+ sep::ExecutorExecutor* executor, const Literal& literal) {
int64 size(xla::ShapeUtil::ByteSizeOf(literal.shape()));
void* buf = executor->Allocate(size);
const void* src = literal.InternalData();
@@ -39,8 +39,8 @@ static se::DeviceMemoryBase AllocateSingleOutput(sep::ExecutorExecutor* executor
return se::DeviceMemoryBase(buf, size);
}
-static se::DeviceMemoryBase AllocateOutputBuffer(sep::ExecutorExecutor* executor,
- const Literal& literal) {
+static se::DeviceMemoryBase AllocateOutputBuffer(
+ sep::ExecutorExecutor* executor, const Literal& literal) {
const Shape& shape = literal.shape();
if (shape.element_type() != xla::TUPLE) {
return AllocateSingleOutput(executor, literal);
@@ -96,7 +96,7 @@ StatusOr<se::DeviceMemoryBase> ExecutorExecutable::ExecuteOnStream(
// Execute the graph using the evaluator
HloEvaluator evaluator;
TF_ASSIGN_OR_RETURN(std::unique_ptr<Literal> output,
- evaluator.Evaluate(computation, arg_literals_ptrs));
+ evaluator.Evaluate(*computation, arg_literals_ptrs));
// Copy the result into the return buffer
perftools::gputools::StreamExecutor* executor(stream->parent());
@@ -139,6 +139,5 @@ StatusOr<se::DeviceMemoryBase> ExecutorExecutable::ExecuteAsyncOnStream(
return ShapeUtil::ByteSizeOf(shape, sizeof(void*));
}
-
} // namespace executorplugin
} // namespace xla