diff options
author | Akshay Modi <nareshmodi@google.com> | 2018-07-16 16:14:29 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2018-07-16 16:17:50 -0700 |
commit | 67e0f5d68729d508469a2c811a5b021c17942a7f (patch) | |
tree | 42e02261c03707f5ee0c7a4c465260368cfe122d | |
parent | 973d80e8ed664e881e5a15903690fd767bb53b22 (diff) |
Add a method to check if a tensor handle is on the host CPU.
PiperOrigin-RevId: 204825266
-rw-r--r-- | tensorflow/c/eager/c_api.cc | 12 | ||||
-rw-r--r-- | tensorflow/core/common_runtime/eager/tensor_handle.h | 6 |
2 files changed, 12 insertions, 6 deletions
diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc index 82ca2be2cf..6c510536d6 100644 --- a/tensorflow/c/eager/c_api.cc +++ b/tensorflow/c/eager/c_api.cc @@ -664,17 +664,17 @@ TFE_TensorHandle* TFE_NewTensorHandle(const tensorflow::Tensor& t) { const tensorflow::Tensor* TFE_TensorHandleUnderlyingTensorInHostMemory( TFE_TensorHandle* h, TF_Status* status) { - tensorflow::Device* d = nullptr; - tensorflow::Device* op_device = nullptr; - const tensorflow::Tensor* t = nullptr; - status->status = h->handle->TensorAndDevice(&t, &d, &op_device); - if (!status->status.ok()) return nullptr; - if (d != nullptr) { + if (!h->handle->OnHostCPU()) { status->status = tensorflow::errors::FailedPrecondition( "TFE_TensorHandle is placed in device (not host) memory. Cannot return " "a tensorflow::Tensor"); return nullptr; } + tensorflow::Device* d = nullptr; + tensorflow::Device* op_device = nullptr; + const tensorflow::Tensor* t = nullptr; + status->status = h->handle->TensorAndDevice(&t, &d, &op_device); + if (!status->status.ok()) return nullptr; return t; } diff --git a/tensorflow/core/common_runtime/eager/tensor_handle.h b/tensorflow/core/common_runtime/eager/tensor_handle.h index 5580d37234..1bc9c6531a 100644 --- a/tensorflow/core/common_runtime/eager/tensor_handle.h +++ b/tensorflow/core/common_runtime/eager/tensor_handle.h @@ -140,6 +140,12 @@ class TensorHandle : public core::RefCounted { remote_shape_ = std::move(remote_shape); } + bool OnHostCPU() { + mutex_lock ml(ctx_mutex_); + return device_ == nullptr || + (ctx_ == nullptr || ctx_->HostCPU() == device_); + } + private: // If the contents of the Tensor pointed to by this handle is yet to be // computed by a EagerNode, this function will block till that compuatation is |