diff options
author | 2017-11-13 21:05:09 -0800 | |
---|---|---|
committer | 2017-11-13 21:09:38 -0800 | |
commit | b10d5b2545eb2302c370c3d472099b3afd6baba5 (patch) | |
tree | d2676588a791015e5abd3302ae36450a56eebd14 /tensorflow/compiler/xla/service/llvm_compiler.cc | |
parent | fe3a35a12304c4a6eeaffc208e3c8a006d80455f (diff) |
[XLA:CPU/GPU] Implement multi-module compilation for the CPU and GPU backends
For CPU and GPU this is a simple wrapper around the single-module Compile method
since the CPU and GPU backends do not perform cross-module optimizations and
analyses.
PiperOrigin-RevId: 175631791
Diffstat (limited to 'tensorflow/compiler/xla/service/llvm_compiler.cc')
-rw-r--r-- | tensorflow/compiler/xla/service/llvm_compiler.cc | 37 |
1 file changed, 37 insertions, 0 deletions
diff --git a/tensorflow/compiler/xla/service/llvm_compiler.cc b/tensorflow/compiler/xla/service/llvm_compiler.cc new file mode 100644 index 0000000000..ba0304fb8c --- /dev/null +++ b/tensorflow/compiler/xla/service/llvm_compiler.cc @@ -0,0 +1,37 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/compiler/xla/service/llvm_compiler.h" + +namespace xla { +StatusOr<std::vector<std::unique_ptr<Executable>>> LLVMCompiler::Compile( + std::vector<std::unique_ptr<HloModule>> modules, + std::vector<std::vector<perftools::gputools::StreamExecutor*>> + stream_execs) { + std::vector<std::unique_ptr<Executable>> result; + for (size_t i = 0; i < modules.size(); i++) { + if (stream_execs[i].size() != 1) { + return Unimplemented( + "Model partitioning not implemented for the CPU/GPU compilers!"); + } + + TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> executable, + Compile(std::move(modules[i]), stream_execs[i][0])); + result.push_back(std::move(executable)); + } + + return {std::move(result)}; +} +} // namespace xla |