diff options
author | 2018-04-17 14:26:16 -0700 | |
---|---|---|
committer | 2018-04-17 14:28:51 -0700 | |
commit | 4764bf2986e2779d5c80b5aca08d72d5c878818b (patch) | |
tree | 1f7c389ec28de120568b85670e6d364857c6d7ba /tensorflow/compiler/xla/executable_run_options.h | |
parent | 72df3d60faa8bbf42bb3f5c7ed38887215fad037 (diff) |
[StreamExecutor] Rename ::perftools::gputools -> ::stream_executor, part 1.
Step 1 of re-namespace'ing StreamExecutor into ::stream_executor.
This moves everything inside of stream_executor/..., and leaves a
namespace alias into ::perftools::gputools. The next steps will clean
up users to use the new namespace.
This is mostly a mechanical change, but it also includes a bunch of
non-mechanical changes that ideally would be split out into separate
patches. Unfortunately they all sort of need to be shoved in here for
various reasons:
- forward declarations need to be in the same namespace as the actual
types, so we need to change all forward declarations of
StreamExecutor types in this one patch.
- Uses of these forward declarations need to be changed to the new
namespace (or otherwise we need to add a namespace alias to the
relevant header, but this is pretty ugly).
- Various initialization code needs to live in StreamExecutor's "real"
namespace, so all this needs to be changed.
PiperOrigin-RevId: 193256128
Diffstat (limited to 'tensorflow/compiler/xla/executable_run_options.h')
-rw-r--r-- | tensorflow/compiler/xla/executable_run_options.h | 24 |
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/tensorflow/compiler/xla/executable_run_options.h b/tensorflow/compiler/xla/executable_run_options.h index 2c1d9ffff1..1a095a82cc 100644 --- a/tensorflow/compiler/xla/executable_run_options.h +++ b/tensorflow/compiler/xla/executable_run_options.h @@ -16,29 +16,31 @@ limitations under the License. #ifndef TENSORFLOW_COMPILER_XLA_EXECUTABLE_RUN_OPTIONS_H_ #define TENSORFLOW_COMPILER_XLA_EXECUTABLE_RUN_OPTIONS_H_ -// Intentionally forward declared so that ExecutableRunOptions can be linked +// These classes are forward declared so that ExecutableRunOptions can be linked // into an XLA-compiled binary without having to link all of the pointed-to // objects (e.g., for an ahead-of-time compiled CPU binary, the gpu tools don't // need to be linked). -namespace perftools { -namespace gputools { +namespace stream_executor { class Stream; class Platform; -} -} +} // namespace stream_executor namespace tensorflow { namespace thread { class ThreadPool; -} -} +} // namespace thread +} // namespace tensorflow namespace Eigen { struct ThreadPoolDevice; -} +} // namespace Eigen namespace xla { +// TODO(b/77980417): Once the perftools::gputools -> stream_executor migration +// is complete, add "using namespace se = stream_executor" here and +// s/stream_executor/se::/ to match our idiom elsewhere. + class DeviceMemoryAllocator; class DeviceAssignment; class ExecutionProfile; @@ -61,8 +63,8 @@ class ExecutableRunOptions { // If set, this is the stream to run the computation on. The platform of the // stream must match the platform the executable was built for. A value of // nullptr indicates the option has not been set. - ExecutableRunOptions& set_stream(perftools::gputools::Stream* stream); - perftools::gputools::Stream* stream() const; + ExecutableRunOptions& set_stream(stream_executor::Stream* stream); + stream_executor::Stream* stream() const; // Sets the thread pool on which to run parallel CPU backend // computations. Does not take ownership. 
@@ -91,7 +93,7 @@ class ExecutableRunOptions { DeviceMemoryAllocator* allocator_ = nullptr; int device_ordinal_ = -1; DeviceAssignment* device_assignment_ = nullptr; - perftools::gputools::Stream* stream_ = nullptr; + stream_executor::Stream* stream_ = nullptr; tensorflow::thread::ThreadPool* inter_op_thread_pool_ = nullptr; const Eigen::ThreadPoolDevice* intra_op_thread_pool_ = nullptr; ExecutionProfile* execution_profile_ = nullptr; |