diff options
author | Justin Lebar <jlebar@google.com> | 2018-04-17 21:04:35 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2018-04-17 21:07:05 -0700 |
commit | d77a621a571d8ab0d69f2682586674e6dff4ec4e (patch) | |
tree | 08b29db3c5889725a596b25928aa2ea098be042a /tensorflow/compiler/xla/service/backend.h | |
parent | 41e2cd187b31e9e6d88bc042e21e73f7be0ed729 (diff) |
[XLA] Convert XLA to use xla::se as a namespace alias for ::stream_executor.
PiperOrigin-RevId: 193301997
Diffstat (limited to 'tensorflow/compiler/xla/service/backend.h')
-rw-r--r-- | tensorflow/compiler/xla/service/backend.h | 34 |
1 file changed, 14 insertions, 20 deletions
diff --git a/tensorflow/compiler/xla/service/backend.h b/tensorflow/compiler/xla/service/backend.h index b5ca483b72..d32a0a400d 100644 --- a/tensorflow/compiler/xla/service/backend.h +++ b/tensorflow/compiler/xla/service/backend.h @@ -44,8 +44,8 @@ namespace xla { class BackendOptions { public: // Set the platform backing the backend, or nullptr for the default platform. - BackendOptions& set_platform(perftools::gputools::Platform* platform); - perftools::gputools::Platform* platform() const; + BackendOptions& set_platform(se::Platform* platform); + se::Platform* platform() const; // Sets the thread pool size for parallel execution of an individual operator. // The default value of -1 will result in initializing the thread pool with @@ -54,7 +54,7 @@ class BackendOptions { int intra_op_parallelism_threads() const; private: - perftools::gputools::Platform* platform_ = nullptr; + se::Platform* platform_ = nullptr; int intra_op_parallelism_threads_ = -1; }; @@ -66,7 +66,7 @@ class BackendOptions { // StreamPtr stream = backend->BorrowStream().ConsumeValueOrDie(); class Backend { public: - using StreamPtr = Pool<perftools::gputools::Stream>::SmartPtr; + using StreamPtr = Pool<se::Stream>::SmartPtr; // Creates a new backend. static StatusOr<std::unique_ptr<Backend>> CreateBackend( @@ -79,7 +79,7 @@ class Backend { ~Backend(); // Accessors for the various objects. - perftools::gputools::Platform* platform() const { return platform_; } + se::Platform* platform() const { return platform_; } Compiler* compiler() const { return compiler_; } DeviceMemoryAllocator* memory_allocator() const { return memory_allocator_.get(); @@ -96,19 +96,17 @@ class Backend { // Returns stream executors of all supported devices for this backend. The // executors are ordered by the device ordinal. 
- const std::vector<perftools::gputools::StreamExecutor*>& stream_executors() - const { + const std::vector<se::StreamExecutor*>& stream_executors() const { return stream_executors_; } // Returns the stream executor for the given device ordinal. - StatusOr<perftools::gputools::StreamExecutor*> stream_executor( - int device_ordinal) const; + StatusOr<se::StreamExecutor*> stream_executor(int device_ordinal) const; // Returns the stream executor for the default device ordinal. This stream // executor can only be used when the number of computations is 1 (replication // can be > 1). - perftools::gputools::StreamExecutor* default_stream_executor() const { + se::StreamExecutor* default_stream_executor() const { CHECK(!stream_executors_.empty()); return stream_executors_[0]; } @@ -117,8 +115,7 @@ class Backend { // internal pool, or by constructing/initializating it, and returns the result // to the caller. StatusOr<StreamPtr> BorrowStream(int device_ordinal); - StatusOr<StreamPtr> BorrowStream( - perftools::gputools::StreamExecutor* executor); + StatusOr<StreamPtr> BorrowStream(se::StreamExecutor* executor); // Returns a function to borrow a stream, as `BorrowStream` above does. 
// Purely for convenience, the caller could rather make this anonymous @@ -157,29 +154,26 @@ class Backend { private: struct EigenThreadPoolWrapper; - Backend(perftools::gputools::Platform* platform, Compiler* compiler, - tensorflow::gtl::ArraySlice<perftools::gputools::StreamExecutor*> - stream_executors, + Backend(se::Platform* platform, Compiler* compiler, + tensorflow::gtl::ArraySlice<se::StreamExecutor*> stream_executors, TransferManager* transfer_manager, ComputationPlacer* computation_placer, int intra_op_parallelism_threads); Backend(const Backend&) = delete; Backend& operator=(const Backend&) = delete; - perftools::gputools::Platform* platform_; + se::Platform* platform_; Compiler* compiler_; TransferManager* transfer_manager_; ComputationPlacer* computation_placer_; // Vector of stream executors. stream_executors_[0] is the default executor. - std::vector<perftools::gputools::StreamExecutor*> stream_executors_; + std::vector<se::StreamExecutor*> stream_executors_; tensorflow::mutex mu_; // Mapping from stream executor to stream pools, used by `BorrowStream` above. - std::map<perftools::gputools::StreamExecutor*, - Pool<perftools::gputools::Stream>> - stream_pools_ GUARDED_BY(mu_); + std::map<se::StreamExecutor*, Pool<se::Stream>> stream_pools_ GUARDED_BY(mu_); // The default memory allocator to use. std::unique_ptr<StreamExecutorMemoryAllocator> memory_allocator_; |