author    Justin Lebar <jlebar@google.com> 2018-04-19 17:18:10 -0700
committer TensorFlower Gardener <gardener@tensorflow.org> 2018-04-19 17:22:23 -0700
commit 4e17a3f1496b398afe632b002b0589b7346b2e3f (patch)
tree   3fadd04a5e74f698e2bd2c2e2a5cc21f230b9b51 /tensorflow/compiler/xla/service/transfer_manager.cc
parent 2d8da1d12a5fbeaa99e1cdd761b735a02020611b (diff)
[XLA] De-unique_ptr-ify ShapedBuffer and ScopedShapedBuffer.
These are already notionally equivalent to T* and unique_ptr<T>, so having a unique_ptr of a {Scoped,}ShapedBuffer is redundant. Also clean up the ScopedShapedBuffer API a bit.

PiperOrigin-RevId: 193599773
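To illustrate the API change, here is a hypothetical call site before and after this commit; the transfer_manager, shape, and allocator variables are placeholders for the sketch, not code from this tree:

// Before: the allocation came back behind a unique_ptr.
// TF_ASSIGN_OR_RETURN(std::unique_ptr<ScopedShapedBuffer> buffer,
//                     transfer_manager->AllocateScopedShapedBuffer(
//                         shape, allocator, /*device_ordinal=*/0));

// After: a plain value with move semantics. The buffer still owns its
// device memory and returns it to `allocator` when it is destroyed.
TF_ASSIGN_OR_RETURN(ScopedShapedBuffer buffer,
                    transfer_manager->AllocateScopedShapedBuffer(
                        shape, allocator, /*device_ordinal=*/0));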
Diffstat (limited to 'tensorflow/compiler/xla/service/transfer_manager.cc')
-rw-r--r-- tensorflow/compiler/xla/service/transfer_manager.cc | 21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/tensorflow/compiler/xla/service/transfer_manager.cc b/tensorflow/compiler/xla/service/transfer_manager.cc
index be8231b73c..98d0111d04 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/transfer_manager.cc
@@ -175,7 +175,7 @@ Status TransferManager::TransferBufferToDevice(
return Status::OK();
}
-StatusOr<std::unique_ptr<ShapedBuffer>> TransferManager::AllocateShapedBuffer(
+StatusOr<ShapedBuffer> TransferManager::AllocateShapedBuffer(
const Shape& on_host_shape, DeviceMemoryAllocator* allocator,
int device_ordinal) {
if (!LayoutUtil::HasLayout(on_host_shape)) {
@@ -187,31 +187,30 @@ StatusOr<std::unique_ptr<ShapedBuffer>> TransferManager::AllocateShapedBuffer(
const Shape on_device_shape = HostShapeToDeviceShape(on_host_shape);
TF_RET_CHECK(LayoutUtil::HasLayout(on_device_shape));
- auto shaped_buffer = WrapUnique(new ShapedBuffer(
- on_host_shape, on_device_shape, allocator->platform(), device_ordinal));
+ ShapedBuffer shaped_buffer(on_host_shape, on_device_shape,
+ allocator->platform(), device_ordinal);
// Allocate an appropriate sized buffer for each element in the shape
// including the tuple pointer arrays.
- for (auto& pair : shaped_buffer->buffers()) {
+ for (auto& pair : shaped_buffer.buffers()) {
const ShapeIndex& index = pair.first;
se::DeviceMemoryBase& memory_base = pair.second;
const Shape& subshape = ShapeUtil::GetSubshape(on_device_shape, index);
TF_ASSIGN_OR_RETURN(memory_base,
- allocator->Allocate(shaped_buffer->device_ordinal(),
+ allocator->Allocate(shaped_buffer.device_ordinal(),
GetByteSizeRequirement(subshape)));
}
return std::move(shaped_buffer);
}
-StatusOr<std::unique_ptr<ScopedShapedBuffer>>
-TransferManager::AllocateScopedShapedBuffer(const Shape& on_host_shape,
- DeviceMemoryAllocator* allocator,
- int device_ordinal) {
+StatusOr<ScopedShapedBuffer> TransferManager::AllocateScopedShapedBuffer(
+ const Shape& on_host_shape, DeviceMemoryAllocator* allocator,
+ int device_ordinal) {
TF_ASSIGN_OR_RETURN(
- std::unique_ptr<ShapedBuffer> unscoped_buffer,
+ ShapedBuffer unscoped_buffer,
AllocateShapedBuffer(on_host_shape, allocator, device_ordinal));
- return ScopedShapedBuffer::MakeScoped(unscoped_buffer.get(), allocator);
+ return ScopedShapedBuffer(std::move(unscoped_buffer), allocator);
}
} // namespace xla
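The last hunk replaces ScopedShapedBuffer::MakeScoped with a constructor that takes the ShapedBuffer by value. A minimal sketch of that ownership hand-off, with placeholder shapes and allocator:

ShapedBuffer unscoped(host_shape, device_shape, allocator->platform(),
                      /*device_ordinal=*/0);
// After the move, `unscoped` no longer owns the device allocations;
// `scoped` frees them through `allocator` when it goes out of scope.
ScopedShapedBuffer scoped(std::move(unscoped), allocator);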