-rw-r--r--  tensorflow/compiler/plugin/executor/transfer_manager.cc       3
-rw-r--r--  tensorflow/compiler/tf2xla/xla_op_kernel.cc                   4
-rw-r--r--  tensorflow/compiler/xla/literal_util.cc                       2
-rw-r--r--  tensorflow/compiler/xla/literal_util.h                        6
-rw-r--r--  tensorflow/compiler/xla/service/cpu_transfer_manager.cc       8
-rw-r--r--  tensorflow/compiler/xla/service/generic_transfer_manager.cc   2
6 files changed, 8 insertions(+), 17 deletions(-)
diff --git a/tensorflow/compiler/plugin/executor/transfer_manager.cc b/tensorflow/compiler/plugin/executor/transfer_manager.cc
index 51c5deeea5..32d6a0c04d 100644
--- a/tensorflow/compiler/plugin/executor/transfer_manager.cc
+++ b/tensorflow/compiler/plugin/executor/transfer_manager.cc
@@ -75,8 +75,7 @@ Status ExecutorTransferManager::TransferLiteralFromDevice(
executor, source, ShapeUtil::ByteSizeOf(device_shape),
literal->MutableInternalData()));
if (!ShapeUtil::Equal(literal_shape, device_shape)) {
- literal->Swap(
- literal->Relayout(literal_shape.layout()).get());
+ *literal = std::move(*literal->Relayout(literal_shape.layout()));
}
TF_RET_CHECK(ShapeUtil::Equal(literal_shape, literal->shape()));
return Status::OK();
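A minimal sketch of the idiom introduced here, using hypothetical Buffer/Relayouted/UpdateInPlace names rather than the real xla::Literal API: a factory returns a std::unique_ptr, and the caller move-assigns through the dereferenced pointer instead of calling a Swap helper.

// Hypothetical types for illustration only; not the XLA API.
#include <memory>
#include <utility>
#include <vector>

struct Buffer {
  std::vector<char> bytes;  // movable payload standing in for literal storage
};

std::unique_ptr<Buffer> Relayouted(const Buffer& src) {
  auto out = std::make_unique<Buffer>();
  out->bytes = src.bytes;  // stand-in for a real relayout
  return out;
}

void UpdateInPlace(Buffer* dest) {
  // The temporary unique_ptr lives until the end of the full expression, so
  // moving out of it is safe; dest steals its storage without a deep copy.
  *dest = std::move(*Relayouted(*dest));
}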
diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.cc b/tensorflow/compiler/tf2xla/xla_op_kernel.cc
index e36eafa6e4..459d9edb87 100644
--- a/tensorflow/compiler/tf2xla/xla_op_kernel.cc
+++ b/tensorflow/compiler/tf2xla/xla_op_kernel.cc
@@ -126,7 +126,7 @@ Status XlaOpKernelContext::ConstantInputReshaped(
"Error evaluating ", context_->op_kernel().name(), " input ", index,
": ", computed.status().error_message());
}
- constant_literal->Swap(computed.ValueOrDie().get());
+ *constant_literal = std::move(*computed.ValueOrDie());
return Status::OK();
}
@@ -195,7 +195,7 @@ Status XlaOpKernelContext::ConstantInputAsInt64Literal(int index,
return Status::OK();
case xla::S64:
- out->Swap(&literal);
+ *out = std::move(literal);
return Status::OK();
default:
diff --git a/tensorflow/compiler/xla/literal_util.cc b/tensorflow/compiler/xla/literal_util.cc
index 6190bd624d..d19e28cdb1 100644
--- a/tensorflow/compiler/xla/literal_util.cc
+++ b/tensorflow/compiler/xla/literal_util.cc
@@ -662,7 +662,7 @@ string Literal::ToString() const {
std::vector<Shape> shape;
for (auto& tuple_element : elements) {
shape.push_back(tuple_element->shape());
- literal->add_tuple_literals()->Swap(tuple_element.get());
+ *literal->add_tuple_literals() = std::move(*tuple_element);
}
*literal->mutable_shape() = ShapeUtil::MakeTupleShape(shape);
return literal;
diff --git a/tensorflow/compiler/xla/literal_util.h b/tensorflow/compiler/xla/literal_util.h
index 6451345918..ccbc09af88 100644
--- a/tensorflow/compiler/xla/literal_util.h
+++ b/tensorflow/compiler/xla/literal_util.h
@@ -166,12 +166,6 @@ class Literal {
const Shape& shape() const { return shape_; }
Shape* mutable_shape() { return &shape_; }
- void Swap(Literal* other) {
- Literal temp = *this;
- *this = *other;
- *other = temp;
- }
-
// Creates a new literal of a given rank. To minimize ambiguity (for users
// and the compiler) these CreateR[0-2] methods should explicitly specify the
// native type. For example:
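The deleted helper swapped by value, which costs three deep copies; once call sites move-assign instead, a single move transfers the underlying storage. A minimal sketch of the difference, assuming a hypothetical Payload type (not xla::Literal itself):

#include <utility>
#include <vector>

struct Payload {
  std::vector<float> data;  // deep-copied on copy assignment, stolen on move
};

// In the style of the removed Swap helper: copies data three times.
void CopySwap(Payload* a, Payload* b) {
  Payload temp = *a;
  *a = *b;
  *b = temp;
}

// What call sites use after this change: a single move, no copies.
void MoveInto(Payload* dest, Payload&& src) {
  *dest = std::move(src);
}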
diff --git a/tensorflow/compiler/xla/service/cpu_transfer_manager.cc b/tensorflow/compiler/xla/service/cpu_transfer_manager.cc
index bf43c04ae2..b1b0cfdbe7 100644
--- a/tensorflow/compiler/xla/service/cpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/cpu_transfer_manager.cc
@@ -182,9 +182,8 @@ Status CpuTransferManager::TransferLiteralFromOutfeed(
tensorflow::gtl::ArraySlice<int64> dimensions(
tensorflow::bit_cast<const int64*>(literal_shape.dimensions().data()),
literal_shape.dimensions().size());
- auto empty =
- Literal::CreateFromDimensions(literal_shape.element_type(), dimensions);
- literal->Swap(empty.get());
+ *literal = std::move(*Literal::CreateFromDimensions(
+ literal_shape.element_type(), dimensions));
TF_ASSIGN_OR_RETURN(Shape received_shape,
TransferArrayBufferFromOutfeed(
executor, literal->MutableInternalData(), size));
@@ -235,8 +234,7 @@ Status CpuTransferManager::TransferLiteralFromOutfeed(
for (int64 i = 0; i < literal_shape.tuple_shapes_size(); ++i) {
*elements[i]->mutable_shape() = received_shape.tuple_shapes(i);
}
- auto result = Literal::MakeTupleOwned(std::move(elements));
- literal->Swap(result.get());
+ *literal = std::move(*Literal::MakeTupleOwned(std::move(elements)));
TF_RET_CHECK(ShapeUtil::Equal(literal->shape(), literal_shape));
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.cc b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
index 69195c45ed..5cae034d08 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
@@ -87,7 +87,7 @@ Status GenericTransferManager::TransferLiteralFromDevice(
executor, source, /*size=*/ShapeUtil::ByteSizeOf(device_shape),
/*destination=*/literal->MutableInternalData()));
if (!ShapeUtil::Equal(literal_shape, device_shape)) {
- literal->Swap(literal->Relayout(literal_shape.layout()).get());
+ *literal = std::move(*literal->Relayout(literal_shape.layout()));
}
TF_RET_CHECK(ShapeUtil::Equal(literal_shape, literal->shape()));
return Status::OK();