diff options
Diffstat (limited to 'tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc')
-rw-r--r--  tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
index fdac89eb24..f51656d565 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
@@ -23,7 +23,9 @@ limitations under the License.
 #include "tensorflow/core/common_runtime/device.h"
 #include "tensorflow/core/common_runtime/device_mgr.h"
 #include "tensorflow/core/common_runtime/dma_helper.h"
+#if GOOGLE_CUDA
 #include "tensorflow/core/common_runtime/gpu/gpu_util.h"
+#endif // GOOGLE_CUDA
 #include "tensorflow/core/common_runtime/local_device.h"
 #include "tensorflow/core/common_runtime/process_util.h"
 #include "tensorflow/core/common_runtime/step_stats_collector.h"
@@ -429,10 +431,12 @@ class GrpcWorkerService : public AsyncServiceInterface {
       // device type*.
       //
       const size_t bytes = is_dead ? 0 : val.TotalBytes();
       const bool on_host = send_args.alloc_attrs.on_host();
-      const DeviceContext* send_dev_context = send_args.device_context;
       {
         // Non-DMA cases.
         if (src_dev->tensorflow_gpu_device_info() && (!on_host)) {
+#if GOOGLE_CUDA
+          const DeviceContext* send_dev_context =
+              send_args.device_context;
           RecvTensorResponse* tmp = new RecvTensorResponse;
           tmp->set_is_dead(is_dead);
           CHECK(send_dev_context)
@@ -460,6 +464,10 @@ class GrpcWorkerService : public AsyncServiceInterface {
           GPUUtil::SetProtoFromGPU(val, src_dev, send_dev_context,
                                    tmp->mutable_tensor(), is_dead,
                                    response_ready);
+#else
+          call->SendResponse(ToGrpcStatus(
+              errors::Internal("No GPU device in process")));
+#endif // GOOGLE_CUDA
         } else {
           grpc::EncodeTensorToByteBuffer(is_dead, val, &call->response);
           call->SendResponse(ToGrpcStatus(Status::OK()));