From 26b83da42fb47015aabd6ba1aa8e6d41ff8763dc Mon Sep 17 00:00:00 2001
From: Sanjoy Das
Date: Thu, 8 Mar 2018 23:28:53 -0800
Subject: Remove a layer of templatization

With this change

 - HloTestBase always calls HloRunner with an array of non-owning
   Literal pointers as arguments

 - HloRunner no longer has a general LiteralPtr, but just provides
   explicit overloads for std::unique_ptr<Literal> and Literal*

This was prompted by a dependent change that needs to call
HloTestBase::RunAndCompare with Literal* arguments.

PiperOrigin-RevId: 188446331
---
 tensorflow/compiler/xla/service/hlo_runner.h | 37 +++++++++++-----------------
 1 file changed, 15 insertions(+), 22 deletions(-)

diff --git a/tensorflow/compiler/xla/service/hlo_runner.h b/tensorflow/compiler/xla/service/hlo_runner.h
index cbaebc68be..06ce22a5b9 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.h
+++ b/tensorflow/compiler/xla/service/hlo_runner.h
@@ -27,6 +27,7 @@ limitations under the License.
 #include "tensorflow/compiler/xla/status_macros.h"
 #include "tensorflow/compiler/xla/statusor.h"
 #include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/util.h"
 #include "tensorflow/compiler/xla/xla_data.pb.h"
 #include "tensorflow/core/lib/gtl/array_slice.h"
 #include "tensorflow/core/platform/stream_executor_no_cuda.h"
@@ -64,17 +65,27 @@ class HloRunner {
       const std::string& filename, const DebugOptions& debug_options);
 
   // Executes the given module with given literals as input and returns the
-  // result as a Literal. The LiteralPtr type accepts Literal* or
-  // std::unique_ptr<Literal>.
+  // result as a Literal.
   //
   // If run_hlo_passes is false, the module will be executed without Hlo
   // optimization.
-  template <typename LiteralPtr>
   StatusOr<std::unique_ptr<Literal>> Execute(
       std::unique_ptr<HloModule> module,
-      const tensorflow::gtl::ArraySlice<LiteralPtr> arguments,
+      const tensorflow::gtl::ArraySlice<Literal*> arguments,
       bool run_hlo_passes = true);
 
+  StatusOr<std::unique_ptr<Literal>> Execute(
+      std::unique_ptr<HloModule> module,
+      const tensorflow::gtl::ArraySlice<std::unique_ptr<Literal>> arguments,
+      bool run_hlo_passes = true) {
+    // Construct a vector of plain pointers for the arguments.
+    std::vector<Literal*> argument_pointers;
+    c_transform(
+        arguments, std::back_inserter(argument_pointers),
+        [](const std::unique_ptr<Literal>& literal) { return literal.get(); });
+    return Execute(std::move(module), argument_pointers, run_hlo_passes);
+  }
+
   // If backend is not created in the constructor, creates and returns the
   // default backend. If creation fails, crashes the program.
   //
@@ -83,11 +94,6 @@ class HloRunner {
   Backend& backend();
 
  private:
-  StatusOr<std::unique_ptr<Literal>> ExecuteInternal(
-      std::unique_ptr<HloModule> module,
-      const tensorflow::gtl::ArraySlice<Literal*> arguments,
-      bool run_hlo_passes = true);
-
   struct EigenThreadPoolWrapper;
   std::unique_ptr<EigenThreadPoolWrapper> thread_pool_wrapper_;
 
@@ -95,19 +101,6 @@ class HloRunner {
   std::unique_ptr<Backend> backend_;
 };
 
-template <typename LiteralPtr>
-StatusOr<std::unique_ptr<Literal>> HloRunner::Execute(
-    std::unique_ptr<HloModule> module,
-    const tensorflow::gtl::ArraySlice<LiteralPtr> arguments,
-    bool run_hlo_passes) {
-  // Construct a vector of plain pointers for the arguments.
-  std::vector<Literal*> argument_pointers;
-  for (const auto& argument : arguments) {
-    argument_pointers.push_back(&*argument);
-  }
-  return ExecuteInternal(std::move(module), argument_pointers, run_hlo_passes);
-}
-
 }  // namespace xla
 
 #endif  // TENSORFLOW_COMPILER_XLA_SERVICE_HLO_RUNNER_H_
-- 
cgit v1.2.3
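
Note: for readers skimming the patch, the following is a minimal, self-contained sketch of the pattern it applies — replacing a template parameterized on the pointer type with two explicit overloads, where the owning-pointer overload strips ownership and forwards to the raw-pointer one. The Runner, Literal, and Run names below are illustrative stand-ins, not the actual XLA types; the real change uses HloRunner::Execute with StatusOr, ArraySlice, and XLA's c_transform helper from util.h, which this sketch approximates with std::vector, std::string, and std::transform.

// Illustrative sketch only; not the XLA API.
#include <algorithm>
#include <iostream>
#include <iterator>
#include <memory>
#include <string>
#include <vector>

struct Literal {
  std::string value;
};

class Runner {
 public:
  // Overload taking non-owning pointers; this is where the work happens.
  std::string Run(const std::vector<Literal*>& arguments) {
    std::string result;
    for (const Literal* literal : arguments) {
      result += literal->value + " ";
    }
    return result;
  }

  // Overload taking owning pointers: build a vector of plain pointers and
  // forward to the Literal* overload, mirroring the c_transform call in the
  // patch.
  std::string Run(const std::vector<std::unique_ptr<Literal>>& arguments) {
    std::vector<Literal*> argument_pointers;
    std::transform(arguments.begin(), arguments.end(),
                   std::back_inserter(argument_pointers),
                   [](const std::unique_ptr<Literal>& literal) {
                     return literal.get();
                   });
    return Run(argument_pointers);
  }
};

int main() {
  Runner runner;

  // A caller that owns its literals (the std::unique_ptr<Literal> case).
  std::vector<std::unique_ptr<Literal>> owned;
  owned.push_back(std::make_unique<Literal>(Literal{"a"}));
  owned.push_back(std::make_unique<Literal>(Literal{"b"}));
  std::cout << runner.Run(owned) << "\n";

  // A caller that only borrows literals (the Literal* case, as HloTestBase
  // now does after this change).
  Literal c{"c"};
  std::vector<Literal*> borrowed = {&c};
  std::cout << runner.Run(borrowed) << "\n";
  return 0;
}

Keeping the raw-pointer overload as the one that does the actual work matches the commit description: HloTestBase always passes non-owning Literal pointers, and the std::unique_ptr<Literal> overload exists only as a convenience that delegates to it.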