author Noah Eisen <ncteisen@google.com> 2018-06-20 14:58:02 -0700
committer TensorFlower Gardener <gardener@tensorflow.org> 2018-06-20 15:02:01 -0700
commit eacbaabf6d0983d61c99e1bb17658cd80a24f1ee (patch)
tree deaeb91c53003480e4b350b2e01bc68cd59cf7e5 /tensorflow/core/distributed_runtime
parent 2cd247d20422a41c33e0f4be265eba2df537ed3b (diff)
Rename tensor_data_is_large to share_tensor_slice_memory
PiperOrigin-RevId: 201422113
Diffstat (limited to 'tensorflow/core/distributed_runtime')
-rw-r--r-- tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc | 24 ++++++++++++++++--------
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc b/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc
index d0684f1833..159435fd7d 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc
@@ -26,6 +26,8 @@ limitations under the License.
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/protobuf/worker.pb.h"
+// (Omitted internal-only flag)
+
namespace tensorflow {
namespace grpc {
@@ -168,15 +170,20 @@ void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val,
(header.size() +
VarLengthEncodingSize(RecvTensorResponse::kTensorFieldNumber,
overall_tensor_proto_bytesize));
- // If "tensor_data_is_large == false", we copy the tensor data to the
- // end of the buffer we are preparing that holds the rest of the
+ // If "share_tensor_slice_memory == false", we copy the tensor data to
+ // the end of the buffer we are preparing that holds the rest of the
// RecvTensorResponse protocol buffer.
//
- // If "tensor_data_is_large == true", we arrange to share the backing
- // store of the data by creating a slice that also points to the
+ // If "share_tensor_slice_memory == true", we arrange to share the
+ // backing store of the data by creating a slice that also points to the
// backing store, with appropriate reference counts to keep the
// backing store alive as needed.
- bool tensor_data_is_large = (tdata.size() > kLargeTensorBytes);
+ //
+ // We enable this behavior if the tensor is large.
+ bool share_tensor_slice_memory = (tdata.size() > kLargeTensorBytes);
+
+ // (Omitted internal-only conditional)
+
size_t encoder_size = expected_size - tdata.size();
// Encode all but the actual "tdata", but including the tag and
// varlength header for the "tdata"
@@ -201,10 +208,11 @@ void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val,
::grpc::Slice slices[2];
int num_slices = 0;
{
- size_t slice_len = e.size() + (tensor_data_is_large ? 0 : tdata.size());
+ size_t slice_len =
+ e.size() + (share_tensor_slice_memory ? 0 : tdata.size());
slices[0] = ::grpc::Slice(slice_len);
memcpy(const_cast<uint8_t*>(slices[0].begin()), e.data(), e.size());
- if (!tensor_data_is_large) {
+ if (!share_tensor_slice_memory) {
// (E)
memcpy(const_cast<uint8_t*>(slices[0].begin()) + e.size(), tdata.data(),
tdata.size());
@@ -212,7 +220,7 @@ void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val,
num_slices += 1;
}
- if (tensor_data_is_large) {
+ if (share_tensor_slice_memory) {
// (E) Encode tensor data, but by sharing backing store
const TensorBuffer* buf = DMAHelper::buffer(&val);
buf->Ref();
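
The comments rewritten in the first hunk describe the two encoding paths the renamed flag selects between: copy the tensor bytes into the response buffer, or share the tensor's backing store zero-copy via a slice that holds a reference on it. The sketch below illustrates that pattern outside of TensorFlow. RefCountedBuffer is a hypothetical stand-in for tensorflow::TensorBuffer, and MakeSharedSlice/MakeCopiedSlice are illustrative helper names; the two ::grpc::Slice constructors used (one allocating a slice of a given length, one wrapping an existing buffer with a destroy callback and user_data pointer) are real gRPC C++ API.

#include <atomic>
#include <cstdint>
#include <cstring>

#include <grpcpp/support/slice.h>

// Hypothetical stand-in for tensorflow::TensorBuffer: a heap-allocated,
// reference-counted backing store that must stay alive as long as any
// slice points into it.
class RefCountedBuffer {
 public:
  explicit RefCountedBuffer(size_t n) : data_(new char[n]), size_(n) {}
  void Ref() { refs_.fetch_add(1, std::memory_order_relaxed); }
  void Unref() {
    if (refs_.fetch_sub(1, std::memory_order_acq_rel) == 1) delete this;
  }
  char* data() const { return data_; }
  size_t size() const { return size_; }

 private:
  ~RefCountedBuffer() { delete[] data_; }  // destroyed only via Unref()
  char* const data_;
  const size_t size_;
  std::atomic<int> refs_{1};
};

// share_tensor_slice_memory == true: no copy. Take a reference on the
// backing store and hand gRPC a destroy callback that drops it once the
// transport has finished with the bytes.
::grpc::Slice MakeSharedSlice(RefCountedBuffer* buf) {
  buf->Ref();
  return ::grpc::Slice(
      buf->data(), buf->size(),
      /*destroy=*/[](void* backing) {
        static_cast<RefCountedBuffer*>(backing)->Unref();
      },
      /*user_data=*/buf);
}

// share_tensor_slice_memory == false: copy the payload into a slice that
// gRPC owns outright, so the backing store can be released immediately.
::grpc::Slice MakeCopiedSlice(const RefCountedBuffer& buf) {
  ::grpc::Slice s(buf.size());
  std::memcpy(const_cast<uint8_t*>(s.begin()), buf.data(), buf.size());
  return s;
}

The size threshold exists because the zero-copy path pins the entire backing store until gRPC releases the slice, and for small tensors a memcpy is cheaper than that bookkeeping; kLargeTensorBytes in the real file is the cutoff between the two.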