about summary refs log tree commit diff homepage
path: root/tensorflow/compiler/xla/service/transfer_manager.cc
diff options
context:
space:
mode:
author: Justin Lebar <jlebar@google.com> 2018-04-17 21:04:35 -0700
committer: TensorFlower Gardener <gardener@tensorflow.org> 2018-04-17 21:07:05 -0700
commit: d77a621a571d8ab0d69f2682586674e6dff4ec4e (patch)
tree: 08b29db3c5889725a596b25928aa2ea098be042a /tensorflow/compiler/xla/service/transfer_manager.cc
parent: 41e2cd187b31e9e6d88bc042e21e73f7be0ed729 (diff)
[XLA] Convert XLA to use xla::se as a namespace alias for ::stream_executor.
PiperOrigin-RevId: 193301997
Diffstat (limited to 'tensorflow/compiler/xla/service/transfer_manager.cc')
-rw-r--r-- tensorflow/compiler/xla/service/transfer_manager.cc | 19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/tensorflow/compiler/xla/service/transfer_manager.cc b/tensorflow/compiler/xla/service/transfer_manager.cc
index 2f36e2b16e..be8231b73c 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/transfer_manager.cc
@@ -25,24 +25,20 @@ limitations under the License.
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
-namespace se = ::perftools::gputools;
-
namespace xla {
/* static */ tensorflow::mutex
TransferManager::platform_transfer_manager_mutex_(
tensorflow::LINKER_INITIALIZED);
-/* static */ std::map<perftools::gputools::Platform::Id,
- TransferManager::State>*
+/* static */ std::map<se::Platform::Id, TransferManager::State>*
TransferManager::GetPlatformTransferManagers() {
- static auto* r =
- new std::map<perftools::gputools::Platform::Id, TransferManager::State>;
+ static auto* r = new std::map<se::Platform::Id, TransferManager::State>;
return r;
}
Status TransferManager::TransferArrayToDevice(
- perftools::gputools::StreamExecutor* executor, const Literal& literal,
- const perftools::gputools::DeviceMemoryBase& dest) {
+ se::StreamExecutor* executor, const Literal& literal,
+ const se::DeviceMemoryBase& dest) {
const Shape on_device_shape = HostShapeToDeviceShape(literal.shape());
TF_RET_CHECK(ShapeUtil::IsArray(on_device_shape))
<< "On-device representation of "
@@ -61,8 +57,8 @@ Status TransferManager::TransferArrayToDevice(
}
StatusOr<std::unique_ptr<Literal>> TransferManager::TransferArrayFromDevice(
- perftools::gputools::StreamExecutor* executor, const Shape& shape,
- const perftools::gputools::DeviceMemoryBase& source) {
+ se::StreamExecutor* executor, const Shape& shape,
+ const se::DeviceMemoryBase& source) {
TF_RET_CHECK(ShapeUtil::Equal(HostShapeToDeviceShape(shape), shape))
<< "Shape " << ShapeUtil::HumanString(shape)
<< " has a differently shaped representation on-device: "
@@ -112,8 +108,7 @@ StatusOr<std::unique_ptr<Literal>> TransferManager::TransferArrayFromDevice(
}
Status TransferManager::WriteTupleIndexTables(
- perftools::gputools::StreamExecutor* executor,
- const ShapedBuffer& device_buffer) {
+ se::StreamExecutor* executor, const ShapedBuffer& device_buffer) {
VLOG(2) << "Writing tuple index tables for " << device_buffer;
TF_RET_CHECK(executor->device_ordinal() == device_buffer.device_ordinal());