Diffstat (limited to 'tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc')
-rw-r--r--  tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc b/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc
index df226a92a5..9121303b0e 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc
@@ -38,9 +38,9 @@ static void unref_tensorbuffer(void* raw) {
void EncodeRecvTensorResponseToByteBuffer(const RecvTensorResponse& proto,
::grpc::ByteBuffer* result) {
size_t len = proto.ByteSizeLong();
- gpr_slice s = gpr_slice_malloc(len);
+ grpc_slice s = grpc_slice_malloc(len);
proto.SerializeWithCachedSizesToArray(
- reinterpret_cast<uint8*>(GPR_SLICE_START_PTR(s)));
+ reinterpret_cast<uint8*>(GRPC_SLICE_START_PTR(s)));
::grpc::Slice slice(s, ::grpc::Slice::STEAL_REF);
*result = ::grpc::ByteBuffer(&slice, 1);
}
@@ -68,12 +68,12 @@ void EncodeRecvTensorResponseToByteBuffer(const RecvTensorResponse& proto,
// E: <actual data for val's representation>
//
// If the tensor data is up to "kLargeTensorBytes", then A
-// through E will all be encoded into "*result" in a single gpr_slice.
+// through E will all be encoded into "*result" in a single grpc_slice.
//
// If the tensor data is larger than "kLargeTensorBytes", then A through
-// D2 will be encoded in one gpr_slice, and E will be encoded in a second
-// gpr_slice that points to the backing store for the tensor data, to avoid
-// copying the tensor data (and the gpr_slice setup will be arranged so as
+// D2 will be encoded in one grpc_slice, and E will be encoded in a second
+// grpc_slice that points to the backing store for the tensor data, to avoid
+// copying the tensor data (and the grpc_slice setup will be arranged so as
// to dereference the underlying tensor data buffer when it is no longer
// needed in the "*result" ByteBuffer).
static int VarLengthEncodingSize(uint32 tag, size_t bytes) {
@@ -209,11 +209,11 @@ void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val,
int num_slices = 0;
{
size_t slice_len = e.size() + (tensor_data_is_large ? 0 : tdata.size());
- gpr_slice s0 = gpr_slice_malloc(slice_len);
- memcpy(GPR_SLICE_START_PTR(s0), e.data(), e.size());
+ grpc_slice s0 = grpc_slice_malloc(slice_len);
+ memcpy(GRPC_SLICE_START_PTR(s0), e.data(), e.size());
if (!tensor_data_is_large) {
// (E)
- memcpy(GPR_SLICE_START_PTR(s0) + e.size(), tdata.data(), tdata.size());
+ memcpy(GRPC_SLICE_START_PTR(s0) + e.size(), tdata.data(), tdata.size());
}
slices[0] = ::grpc::Slice(s0, ::grpc::Slice::STEAL_REF);
num_slices += 1;
@@ -230,7 +230,7 @@ void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val,
// hypothetical grpc_slice-related changes (e.g. the
// implementation could decide to destroy 0-length slices
// eagerly). In practice, this does not happen with the current
- // implementation, and the gpr_slice interface at the moment does
+ // implementation, and the grpc_slice interface at the moment does
// not allow us to do the Tensor-unreferencing in the right way
// (since the Tensor pointer is different than the backing store
// array pointer).
@@ -245,13 +245,13 @@ void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val,
const TensorBuffer* buf = DMAHelper::buffer(&val);
buf->Ref();
- gpr_slice s1 = gpr_slice_new(
+ grpc_slice s1 = grpc_slice_new(
const_cast<void*>(static_cast<const void*>(tdata.data())),
tdata.size(), do_nothing);
slices[1] = ::grpc::Slice(s1, ::grpc::Slice::STEAL_REF);
- gpr_slice s2 =
- gpr_slice_new(const_cast<TensorBuffer*>(buf), 0, unref_tensorbuffer);
+ grpc_slice s2 =
+ grpc_slice_new(const_cast<TensorBuffer*>(buf), 0, unref_tensorbuffer);
slices[2] = ::grpc::Slice(s2, ::grpc::Slice::STEAL_REF);
num_slices += 2;
}
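
For context, a minimal standalone sketch of the renamed C-core slice API that this change adopts: grpc_slice_malloc plus GRPC_SLICE_START_PTR for the copying path, and grpc_slice_new for the zero-copy path with a destroy callback. The helper names CopyIntoSlice, WrapWithoutCopy, and DoNothingDestroy are hypothetical illustrations, not part of this file; only the grpc_slice calls themselves mirror the diff above.

    #include <string.h>
    #include <grpc/slice.h>

    // Copying path: allocate a refcounted slice and fill its backing storage
    // (mirrors the grpc_slice_malloc + GRPC_SLICE_START_PTR usage in the diff).
    static grpc_slice CopyIntoSlice(const void* data, size_t len) {
      grpc_slice s = grpc_slice_malloc(len);
      memcpy(GRPC_SLICE_START_PTR(s), data, len);
      return s;  // caller owns one ref; release with grpc_slice_unref(s)
    }

    // Zero-copy path: wrap an existing buffer without copying; gRPC invokes the
    // destroy callback once the last reference to the slice is dropped
    // (mirrors the grpc_slice_new usage in the diff).
    static void DoNothingDestroy(void* /*backing*/) {}
    static grpc_slice WrapWithoutCopy(void* data, size_t len) {
      return grpc_slice_new(data, len, DoNothingDestroy);
    }

In the file itself, each resulting grpc_slice is then handed to ::grpc::Slice(s, ::grpc::Slice::STEAL_REF), which takes over the slice's reference before the slices are assembled into the outgoing ::grpc::ByteBuffer.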