author     A. Unique TensorFlower <gardener@tensorflow.org>  2017-07-01 08:23:11 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2017-07-01 08:27:28 -0700
commit     818b768cdabbb5137e7d522a34c9754872fd50b7 (patch)
tree       e1ec24a1b18e637c9f8c9cbcf95987650d93ab9c /tensorflow/compiler/plugin
parent     9999dd321a3b8ec8f0c6d8a3aae3682c45c57976 (diff)
Minor cleanup: remove unused constructions
PiperOrigin-RevId: 160736783
Diffstat (limited to 'tensorflow/compiler/plugin')
-rw-r--r--  tensorflow/compiler/plugin/executor/compiler.cc    | 17
-rw-r--r--  tensorflow/compiler/plugin/executor/executable.cc  | 15
-rw-r--r--  tensorflow/compiler/plugin/executor/executor.cc    | 14
3 files changed, 16 insertions, 30 deletions
diff --git a/tensorflow/compiler/plugin/executor/compiler.cc b/tensorflow/compiler/plugin/executor/compiler.cc
index 3a84f08c00..72fe7ba451 100644
--- a/tensorflow/compiler/plugin/executor/compiler.cc
+++ b/tensorflow/compiler/plugin/executor/compiler.cc
@@ -18,7 +18,6 @@ limitations under the License.
#include "tensorflow/compiler/plugin/executor/compiler.h"
#include "tensorflow/compiler/plugin/executor/executable.h"
-
#include "tensorflow/compiler/xla/service/algebraic_simplifier.h"
#include "tensorflow/compiler/xla/service/flatten_call_graph.h"
#include "tensorflow/compiler/xla/service/hlo_constant_folding.h"
@@ -30,18 +29,15 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/inliner.h"
#include "tensorflow/compiler/xla/service/reshape_mover.h"
#include "tensorflow/compiler/xla/status_macros.h"
-
+#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/stream_executor/lib/initialize.h"
#include "tensorflow/stream_executor/lib/strcat.h"
-#include "tensorflow/core/lib/core/errors.h"
+namespace xla {
+namespace executorplugin {
namespace se = ::perftools::gputools;
namespace sep = ::perftools::gputools::executorplugin;
-namespace port = ::perftools::gputools::port;
-
-namespace xla {
-namespace executorplugin {
/*
* Run optimization passes on the module. The graph is transformed by
@@ -111,12 +107,11 @@ ExecutorCompiler::ShapeSizeBytesFunction() const {
return ExecutorExecutable::ShapeSizeBytes;
}
-
-} // namespace executorplugin
-} // namespace xla
-
REGISTER_MODULE_INITIALIZER(executor_compiler, {
xla::Compiler::RegisterCompilerFactory(sep::kExecutorPlatformId, []() {
return xla::MakeUnique<xla::executorplugin::ExecutorCompiler>();
});
});
+
+} // namespace executorplugin
+} // namespace xla
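Note on the hunk above: moving REGISTER_MODULE_INITIALIZER inside the xla::executorplugin namespace does not change behavior, because the registration lambda still names every type with full qualification. A minimal, self-contained sketch of this static-registration pattern, assuming a toy Registry() map in place of the real XLA compiler registry (none of the names below are the actual API):

#include <functional>
#include <map>
#include <memory>
#include <string>

struct Compiler { virtual ~Compiler() = default; };
struct ExecutorCompiler : Compiler {};

// Toy registry keyed by a platform name; the real code keys on a platform id.
std::map<std::string, std::function<std::unique_ptr<Compiler>()>>& Registry() {
  static auto* registry =
      new std::map<std::string, std::function<std::unique_ptr<Compiler>()>>();
  return *registry;
}

// File-scope initializer that runs before main(), standing in for
// REGISTER_MODULE_INITIALIZER: it stores a factory lambda for later lookup.
static const bool registered = [] {
  Registry()["executor"] = [] { return std::make_unique<ExecutorCompiler>(); };
  return true;
}();

int main() {
  std::unique_ptr<Compiler> c = Registry()["executor"]();  // build via factory
}

The enclosing namespace only affects how the macro's helper symbols are spelled, not when the initializer runs, which is why the move is a pure cleanup.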
diff --git a/tensorflow/compiler/plugin/executor/executable.cc b/tensorflow/compiler/plugin/executor/executable.cc
index 79eea9af3f..4673a90e0a 100644
--- a/tensorflow/compiler/plugin/executor/executable.cc
+++ b/tensorflow/compiler/plugin/executor/executable.cc
@@ -15,18 +15,16 @@ limitations under the License.
#include "tensorflow/compiler/plugin/executor/executable.h"
#include "tensorflow/compiler/plugin/executor/executor.h"
-
-#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
-
#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
#include "tensorflow/compiler/xla/shape_util.h"
-namespace se = ::perftools::gputools;
-namespace sep = ::perftools::gputools::executorplugin;
-
namespace xla {
namespace executorplugin {
+namespace se = ::perftools::gputools;
+namespace sep = ::perftools::gputools::executorplugin;
+
ExecutorExecutable::ExecutorExecutable(std::unique_ptr<HloModule> hlo_module)
: Executable(std::move(hlo_module), ShapeSizeBytes) {}
@@ -90,15 +88,14 @@ StatusOr<se::DeviceMemoryBase> ExecutorExecutable::ExecuteOnStream(
arg_literals_ptrs.push_back(arg_literals.back().get());
// Copy in the data from the stream_executor buffers
- void* buffer = arg_literals.back().get()->MutableInternalData();
+ void* buffer = arg_literals.back()->MutableInternalData();
memcpy(buffer, arguments[p].opaque(),
ShapeUtil::ByteSizeOf(param->shape()));
}
// Execute the graph using the evaluator
HloEvaluator evaluator;
- std::unique_ptr<Literal> output;
- TF_ASSIGN_OR_RETURN(output,
+ TF_ASSIGN_OR_RETURN(std::unique_ptr<Literal> output,
evaluator.Evaluate(computation, arg_literals_ptrs));
// Copy the result into the return buffer
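Two of the edits above are pure simplifications: dropping the redundant .get() before operator-> on a std::unique_ptr, and letting TF_ASSIGN_OR_RETURN declare the output variable instead of a separate declaration followed by assignment. A small stand-alone illustration of the first point, with Literal as a stand-in struct rather than the XLA class:

#include <iostream>
#include <memory>

struct Literal {
  void* MutableInternalData() { return buffer; }
  char buffer[16];
};

int main() {
  auto lit = std::make_unique<Literal>();
  // unique_ptr::operator-> forwards to the owned pointer, so the explicit
  // .get() in the old code added nothing.
  void* via_get = lit.get()->MutableInternalData();
  void* direct  = lit->MutableInternalData();
  std::cout << (via_get == direct) << "\n";  // prints 1
}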
diff --git a/tensorflow/compiler/plugin/executor/executor.cc b/tensorflow/compiler/plugin/executor/executor.cc
index e72c2711f7..908b996bc9 100644
--- a/tensorflow/compiler/plugin/executor/executor.cc
+++ b/tensorflow/compiler/plugin/executor/executor.cc
@@ -14,14 +14,12 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/plugin/executor/executor.h"
-#include "tensorflow/compiler/plugin/executor/platform_id.h"
-
-#include "tensorflow/compiler/xla/status_macros.h"
#include <stdlib.h>
#include <string.h>
-namespace se = ::perftools::gputools;
+#include "tensorflow/compiler/plugin/executor/platform_id.h"
+#include "tensorflow/compiler/xla/status_macros.h"
namespace perftools {
namespace gputools {
@@ -37,10 +35,7 @@ ExecutorExecutor::ExecutorExecutor(const PluginConfig &plugin_config)
ExecutorExecutor::~ExecutorExecutor() {}
-void *ExecutorExecutor::Allocate(uint64 size) {
- void *buf = new char[size];
- return buf;
-}
+void *ExecutorExecutor::Allocate(uint64 size) { return new char[size]; }
void *ExecutorExecutor::AllocateSubBuffer(DeviceMemoryBase *parent,
uint64 offset_bytes,
@@ -126,8 +121,7 @@ DeviceDescription *ExecutorExecutor::PopulateDeviceDescription() const {
builder.set_device_memory_size(static_cast<uint64>(4) * 1024 * 1024 * 1024);
builder.set_clock_rate_ghz(static_cast<float>(CLOCKS_PER_SEC) / 1e9);
- auto built = builder.Build();
- return built.release();
+ return builder.Build().release();
}
} // namespace executorplugin
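
Both executor.cc changes collapse a named temporary into a direct return: Allocate() returns new char[size] immediately, and PopulateDeviceDescription() releases ownership from the unique_ptr returned by Build() without binding it to a local first. A self-contained sketch of the second idiom (Builder and DeviceDescription below are simplified stand-ins, not the StreamExecutor classes):

#include <memory>

struct DeviceDescription { float clock_rate_ghz = 0; };

struct Builder {
  DeviceDescription desc;
  // Build() hands out ownership through a unique_ptr, as the diff implies the
  // real builder does.
  std::unique_ptr<DeviceDescription> Build() const {
    return std::make_unique<DeviceDescription>(desc);
  }
};

// Returning Build().release() directly is equivalent to binding the result to
// a local unique_ptr and releasing it; the caller owns the raw pointer.
DeviceDescription* PopulateDeviceDescription() {
  Builder builder;
  builder.desc.clock_rate_ghz = 1.0f;
  return builder.Build().release();
}

int main() {
  std::unique_ptr<DeviceDescription> desc(PopulateDeviceDescription());
}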