diff options
author | Mingxing Tan <tanmingxing@google.com> | 2018-06-21 12:03:01 -0700 |
---|---|---|
committer | Mingxing Tan <tanmingxing@google.com> | 2018-06-21 12:03:01 -0700 |
commit | ba86a8ed1e2b1617f40f25ad0107e8448e9e0848 (patch) | |
tree | b9fed8c18eab093ec13279e2195d4137c7f4ada1 /tensorflow/core/distributed_runtime | |
parent | 9d2d40079c273e8de8644136b452715c0146b907 (diff) | |
parent | 7b4080564c268a54a5c0b877b28e67faaadff268 (diff) |
Merge commit for internal changes
Diffstat (limited to 'tensorflow/core/distributed_runtime')
-rw-r--r-- | tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc | 24 |
1 file changed, 16 insertions, 8 deletions
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc b/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc index d0684f1833..159435fd7d 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc @@ -26,6 +26,8 @@ limitations under the License. #include "tensorflow/core/platform/env.h" #include "tensorflow/core/protobuf/worker.pb.h" +// (Omitted internal-only flag) + namespace tensorflow { namespace grpc { @@ -168,15 +170,20 @@ void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val, (header.size() + VarLengthEncodingSize(RecvTensorResponse::kTensorFieldNumber, overall_tensor_proto_bytesize)); - // If "tensor_data_is_large == false", we copy the tensor data to the - // end of the buffer we are preparing that holds the rest of the + // If "share_tensor_slice_memory == false", we copy the tensor data to + // the end of the buffer we are preparing that holds the rest of the // RecvTensorResponse protocol buffer. // - // If "tensor_data_is_large == true", we arrange to share the backing - // store of the data by creating a slice that also points to the + // If "share_tensor_slice_memory == true", we arrange to share the + // backing store of the data by creating a slice that also points to the // backing store, with appropriate reference counts to keep the // backing store alive as needed. - bool tensor_data_is_large = (tdata.size() > kLargeTensorBytes); + // + // We enable this behavior if the tensor is large. + bool share_tensor_slice_memory = (tdata.size() > kLargeTensorBytes); + + // (Omitted internal-only conditional) + size_t encoder_size = expected_size - tdata.size(); // Encode all but the actual "tdata", but including the tag and @@ -201,10 +208,11 @@ void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val, ::grpc::Slice slices[2]; int num_slices = 0; { - size_t slice_len = e.size() + (tensor_data_is_large ? 0 : tdata.size());
+ size_t slice_len = + e.size() + (share_tensor_slice_memory ? 0 : tdata.size()); slices[0] = ::grpc::Slice(slice_len); memcpy(const_cast<uint8_t*>(slices[0].begin()), e.data(), e.size()); - if (!tensor_data_is_large) { + if (!share_tensor_slice_memory) { // (E) memcpy(const_cast<uint8_t*>(slices[0].begin()) + e.size(), tdata.data(), tdata.size()); @@ -212,7 +220,7 @@ void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val, num_slices += 1; } - if (tensor_data_is_large) { + if (share_tensor_slice_memory) { // (E) Encode tensor data, but by sharing backing store const TensorBuffer* buf = DMAHelper::buffer(&val); buf->Ref(); |