diff options
author | Wen-Heng (Jack) Chung <whchung@gmail.com> | 2018-07-20 19:03:55 +0000 |
---|---|---|
committer | Wen-Heng (Jack) Chung <whchung@gmail.com> | 2018-08-31 16:34:32 +0000 |
commit | 1b166c7e6f30bf7179f31764b3615e63025a7472 (patch) | |
tree | 11042dc030a62eb7518b65b5a28a9e45e4f00097 /tensorflow/contrib/tensorrt | |
parent | 9af748444002eabc2e2ffa7ec5ad67f593f05c19 (diff) |
Rename CUDA GPU ID to platform GPU ID
Rename CUDA GPU ID to platform GPU ID so the notion is applicable on both the CUDA
and ROCm platforms.
Diffstat (limited to 'tensorflow/contrib/tensorrt')
-rw-r--r-- | tensorflow/contrib/tensorrt/convert/convert_graph.cc | 8 | ||||
-rw-r--r-- | tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc | 13 |
2 files changed, 11 insertions, 10 deletions
diff --git a/tensorflow/contrib/tensorrt/convert/convert_graph.cc b/tensorflow/contrib/tensorrt/convert/convert_graph.cc index b019c99882..f29f4d6deb 100644 --- a/tensorflow/contrib/tensorrt/convert/convert_graph.cc +++ b/tensorflow/contrib/tensorrt/convert/convert_graph.cc @@ -780,12 +780,12 @@ std::pair<int, tensorflow::Allocator*> GetDeviceAndAllocator( // If device is not set, use the first found GPU device for the conversion. for (int tf_gpu_id_value = 0; tf_gpu_id_value < 100; ++tf_gpu_id_value) { TfGpuId tf_gpu_id(tf_gpu_id_value); - CudaGpuId cuda_gpu_id; - Status s = GpuIdManager::TfToCudaGpuId(tf_gpu_id, &cuda_gpu_id); + PlatformGpuId platform_gpu_id; + Status s = GpuIdManager::TfToPlatformGpuId(tf_gpu_id, &platform_gpu_id); if (s.ok()) { VLOG(1) << "Found TF GPU " << tf_gpu_id.value() << " at cuda device " - << cuda_gpu_id.value(); - cuda_device_id = cuda_gpu_id.value(); + << platform_gpu_id.value(); + cuda_device_id = platform_gpu_id.value(); GPUOptions gpu_options; // If the TF to Cuda gpu id mapping exist, the device and corresponding // allocator must have been initialized already, so the diff --git a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc index 2b42d81f47..88cf8d5980 100644 --- a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc +++ b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc @@ -565,21 +565,22 @@ tensorflow::Status TRTEngineOp::AllocateCalibrationResources( new TRTInt8Calibrator(device_buffers_, batch_size, name())); const string label(name()); auto segment_graph = &segment_graph_; - const int cuda_gpu_id = ctx->device()->tensorflow_gpu_device_info()->gpu_id; - if (cuda_gpu_id < 0) { + const int platform_gpu_id = + ctx->device()->tensorflow_gpu_device_info()->gpu_id; + if (platform_gpu_id < 0) { LOG(ERROR) << "Can't get gpu_device_info from context->device()"; return tensorflow::errors::InvalidArgument( "Context->device doesn't contain device info!"); } const int64 
workspace_size_bytes = workspace_size_; cres->thr_.reset(new std::thread([cres, label, segment_graph, shapes, - cuda_gpu_id, workspace_size_bytes]() { - VLOG(0) << "Starting calibration thread on device " << cuda_gpu_id + platform_gpu_id, workspace_size_bytes]() { + VLOG(0) << "Starting calibration thread on device " << platform_gpu_id << ", Calibration Resource @ " << cres; - auto err = cudaSetDevice(cuda_gpu_id); + auto err = cudaSetDevice(platform_gpu_id); if (err != cudaSuccess) { // TODO(aaroey): should return error here. - LOG(ERROR) << "Couldn't set cuda device to " << cuda_gpu_id + LOG(ERROR) << "Couldn't set cuda device to " << platform_gpu_id << " in calibration thread"; } // ConvertGraphDefToEngine() will try to build the engine. This thread |