author     A. Unique TensorFlower <gardener@tensorflow.org>    2018-10-04 10:10:58 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>     2018-10-04 10:14:59 -0700
commit     5e1b45d0a8aa3f268745cdc683c26d9ebdd1ea8b (patch)
tree       5a1a6ea4219e82fdef746560ea0a1aae416528d7 /tensorflow/compiler
parent     c2552cd33c05fa84f280e766e33ba01308ffbcb2 (diff)
Automated rollback of commit f22037abf5a6f4581f5fb6013f72f91747f22965
PiperOrigin-RevId: 215757701
Diffstat (limited to 'tensorflow/compiler')
-rw-r--r--  tensorflow/compiler/jit/xla_device_context.cc                | 15
-rw-r--r--  tensorflow/compiler/jit/xla_device_context.h                 |  3
-rw-r--r--  tensorflow/compiler/xla/service/generic_transfer_manager.cc  |  2
-rw-r--r--  tensorflow/compiler/xla/service/generic_transfer_manager.h   |  7
-rw-r--r--  tensorflow/compiler/xla/service/transfer_manager.h           | 16
5 files changed, 10 insertions(+), 33 deletions(-)
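
In summary, this rollback removes the TransferToDeviceHint enum and the hint argument that the rolled-back commit had added to TransferLiteralToDeviceAsync, and XlaTransferManager::TransferLiteralToDevice likewise loses its buffer_is_fresh parameter. A minimal sketch of the virtual interface as it reads after this patch, reconstructed from the transfer_manager.h hunk below (surrounding declarations elided, not part of the patch itself):

    // Post-rollback declaration in xla::TransferManager (sketch).
    // Transfers the literal into previously allocated device memory on the
    // given stream; the TransferToDeviceHint parameter added by commit
    // f22037abf5a6f4581f5fb6013f72f91747f22965 is no longer present.
    virtual Status TransferLiteralToDeviceAsync(
        se::Stream* stream, const LiteralSlice& literal,
        const ShapedBuffer& device_buffer) = 0;
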
diff --git a/tensorflow/compiler/jit/xla_device_context.cc b/tensorflow/compiler/jit/xla_device_context.cc
index e083652978..af83c792e5 100644
--- a/tensorflow/compiler/jit/xla_device_context.cc
+++ b/tensorflow/compiler/jit/xla_device_context.cc
@@ -75,9 +75,8 @@ XlaTransferManager::XlaTransferManager(
}
}
-Status XlaTransferManager::TransferLiteralToDevice(const Tensor& host_tensor,
- Tensor* device_tensor,
- bool buffer_is_fresh) const {
+Status XlaTransferManager::TransferLiteralToDevice(
+ const Tensor& host_tensor, Tensor* device_tensor) const {
xla::Shape xla_shape;
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(host_tensor.dtype(),
host_tensor.shape(), &xla_shape));
@@ -98,11 +97,8 @@ Status XlaTransferManager::TransferLiteralToDevice(const Tensor& host_tensor,
// synchronized.
host_to_device_stream_->ThenWaitFor(stream_.get());
}
- xla::TransferManager::TransferToDeviceHint hint =
- buffer_is_fresh ? xla::TransferManager::kBufferUndefined
- : xla::TransferManager::kNoHint;
TF_RETURN_IF_ERROR(transfer_manager_->TransferLiteralToDeviceAsync(
- host_to_device_stream_.get(), *literal, shaped_buffer, hint));
+ host_to_device_stream_.get(), *literal, shaped_buffer));
if (UseMultipleStreams()) {
auto event = std::make_shared<se::Event>(stream_->parent());
TF_RET_CHECK(event->Init()) << "Event failed to initialize!";
@@ -169,7 +165,6 @@ void XlaTransferManager::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
return;
}
TensorShape shape = shape_or_status.ValueOrDie();
- bool buffer_is_fresh = false;
if (!xla_tensor->has_shaped_buffer()) {
Status s =
xla_tensor->AllocateShapedBuffer(device_tensor->dtype(), shape, client_,
@@ -178,7 +173,6 @@ void XlaTransferManager::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
done(s);
return;
}
- buffer_is_fresh = true;
}
Status status;
@@ -189,8 +183,7 @@ void XlaTransferManager::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
"Tensor::CopyFrom failed when copying from CPU to XLA device"));
return;
}
- status = TransferLiteralToDevice(reshaped_cpu_tensor, device_tensor,
- buffer_is_fresh);
+ status = TransferLiteralToDevice(reshaped_cpu_tensor, device_tensor);
} else {
se::DeviceMemoryBase dev_dst_ptr =
XlaTensor::DeviceMemoryFromTensor(*device_tensor);
diff --git a/tensorflow/compiler/jit/xla_device_context.h b/tensorflow/compiler/jit/xla_device_context.h
index a4c0c296fc..df82421294 100644
--- a/tensorflow/compiler/jit/xla_device_context.h
+++ b/tensorflow/compiler/jit/xla_device_context.h
@@ -67,8 +67,7 @@ class XlaTransferManager {
private:
Status TransferLiteralToDevice(const Tensor& host_tensor,
- Tensor* device_tensor,
- bool buffer_is_fresh) const;
+ Tensor* device_tensor) const;
void TransferLiteralFromDevice(Tensor* host_tensor,
const Tensor& device_tensor,
const StatusCallback& done) const;
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.cc b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
index f92fde7f46..bec02e14f9 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
@@ -98,7 +98,7 @@ Status GenericTransferManager::TransferLiteralFromDeviceInternal(
Status GenericTransferManager::TransferLiteralToDeviceAsync(
se::Stream* stream, const LiteralSlice& literal,
- const ShapedBuffer& device_buffer, TransferToDeviceHint /*hint*/) {
+ const ShapedBuffer& device_buffer) {
const Shape& shape = literal.shape();
VLOG(2) << "transferring literal shape to device: "
<< ShapeUtil::HumanString(shape)
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.h b/tensorflow/compiler/xla/service/generic_transfer_manager.h
index b1cba82b9f..86c8b1c145 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.h
@@ -45,10 +45,9 @@ class GenericTransferManager : public TransferManager {
MutableBorrowingLiteral literal,
std::function<void(Status)> done) override;
- Status TransferLiteralToDeviceAsync(se::Stream* stream,
- const LiteralSlice& literal,
- const ShapedBuffer& device_buffer,
- TransferToDeviceHint hint) override;
+ Status TransferLiteralToDeviceAsync(
+ se::Stream* stream, const LiteralSlice& literal,
+ const ShapedBuffer& device_buffer) override;
Status TransferLiteralToInfeed(se::StreamExecutor* executor,
const LiteralSlice& literal) override;
diff --git a/tensorflow/compiler/xla/service/transfer_manager.h b/tensorflow/compiler/xla/service/transfer_manager.h
index 9199e32d0f..f952e64af2 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.h
+++ b/tensorflow/compiler/xla/service/transfer_manager.h
@@ -89,16 +89,6 @@ class TransferManager {
const LiteralSlice& literal,
const ShapedBuffer& device_buffer);
- // Hint type given to TransferLiteralToDeviceAsync.
- enum TransferToDeviceHint {
- // No hint available.
- kNoHint,
-
- // The destination buffer is undefined on the device, meaning it can be
- // transferred to eagerly rather than waiting for Stream ordering.
- kBufferUndefined,
- };
-
// Transfers the given literal into the previously allocated device memory
// represented by the given ShapedBuffer using the given executor. The shape
// of the ShapedBuffer and DeviceShape(literal.shape()) must be compatible,
@@ -106,13 +96,9 @@ class TransferManager {
//
// This operation is performed asynchronously on the given stream. It returns
// once the transfer is enqueued.
- //
- // The optional hint can allow implementations to optimize transfers. It is
- // not mandatory for an implementation to obey the hint.
virtual Status TransferLiteralToDeviceAsync(
se::Stream* stream, const LiteralSlice& literal,
- const ShapedBuffer& device_buffer,
- TransferToDeviceHint hint = kNoHint) = 0;
+ const ShapedBuffer& device_buffer) = 0;
// Convenience methods for transferring an array to or from the device at a
// known address. This avoids having to construct a ShapedBuffer just to