diff options
author | Peter Hawkins <phawkins@google.com> | 2017-01-09 12:04:37 -0800 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2017-01-09 12:26:35 -0800 |
commit | 1e67c90e2caceeff82d09793d1ef5fa0300d219b (patch) | |
tree | 6567ea8b0fa01fcfcd608b7e4c636865d33c7032 /tensorflow/compiler/xla/service/cpu/compiler_functor.h | |
parent | 7ad7e4dfae4344d6b955b5eb61dc4b6bb792f1b3 (diff) |
Initial open-source release of XLA: Accelerated Linear Algebra.
XLA is a compiler-based linear algebra execution engine that targets CPUs, GPUs and custom accelerators.
XLA is still experimental; we are releasing it early to get the community involved.
Change: 143990941
Diffstat (limited to 'tensorflow/compiler/xla/service/cpu/compiler_functor.h')
-rw-r--r-- | tensorflow/compiler/xla/service/cpu/compiler_functor.h | 69 |
1 files changed, 69 insertions, 0 deletions
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_COMPILER_FUNCTOR_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_COMPILER_FUNCTOR_H_

#include "external/llvm/include/llvm/IR/LegacyPassManager.h"
#include "external/llvm/include/llvm/IR/Module.h"
#include "external/llvm/include/llvm/Object/ObjectFile.h"
#include "external/llvm/include/llvm/Target/TargetMachine.h"
#include "tensorflow/compiler/xla/service/cpu/disassembler.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {
namespace cpu {

// Functor class for compiling an LLVM module down to an object file. For use by
// Orc JIT compile layer.
//
// An instance is constructed once with a target machine, a disassembler, an
// optimization level, and the set of vector intrinsics available to generated
// code; operator() is then invoked per module to produce an object file.
class CompilerFunctor {
 public:
  // Describes the set of vector intrinsics available to the generated code.
  struct VectorIntrinsics {
    // True if SSE vector intrinsics may be emitted.
    bool sse_intrinsics;
    // True if AVX vector intrinsics may be emitted.
    bool avx_intrinsics;
  };

  // Returns a VectorIntrinsics where all intrinsics are available.
  static VectorIntrinsics AllIntrinsics();

  // Constructs a functor bound to the given target machine and options.
  //
  // `disassembler` must be non-null; CHECK_NOTNULL (from
  // tensorflow/core/platform/logging.h) aborts the process otherwise.
  // NOTE(review): `target_machine` and `disassembler` are stored as raw
  // pointers, so they are presumably non-owning and must outlive this
  // object — confirm with callers.
  // NOTE(review): `opt_level` is taken as int but stored as unsigned
  // (opt_level_ below); negative values would wrap — presumably callers
  // only pass small non-negative levels.
  explicit CompilerFunctor(llvm::TargetMachine* target_machine,
                           const Disassembler* disassembler, int opt_level,
                           const VectorIntrinsics& available_intrinsics)
      : target_machine_(target_machine),
        disassembler_(CHECK_NOTNULL(disassembler)),
        opt_level_(opt_level),
        available_intrinsics_(available_intrinsics) {}

  // Compile a Module to an ObjectFile.
  // The non-const reference parameter is required by the LLVM compile-layer
  // interface, hence the NOLINT.
  llvm::object::OwningBinary<llvm::object::ObjectFile> operator()(
      llvm::Module& module) const;  // NOLINT

 private:
  // Populates the given pass managers based on the optimization level.
  void AddOptimizationPasses(
      llvm::legacy::PassManagerBase* module_passes,
      llvm::legacy::FunctionPassManager* function_passes) const;

  // Raw, presumably non-owning pointers — see constructor note above.
  llvm::TargetMachine* target_machine_;
  const Disassembler* disassembler_;
  // Stored unsigned; initialized from the constructor's int parameter.
  const unsigned opt_level_;
  const VectorIntrinsics available_intrinsics_;
};

}  // namespace cpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_CPU_COMPILER_FUNCTOR_H_