about summary refs log tree commit diff homepage
path: root/tensorflow
diff options
context:
space:
mode:
author A. Unique TensorFlower <gardener@tensorflow.org> 2017-12-09 10:07:17 -0800
committer TensorFlower Gardener <gardener@tensorflow.org> 2017-12-09 10:11:03 -0800
commit 37641276d8e6ff9617478f78afaefaf1a5c28332 (patch)
tree 8347a0318b73a8730b24f6b644e69fed69d7c20a /tensorflow
parent fefa1c222fb883ebf119d35151fd67f91a73a07f (diff)
Replace StreamExecutorInterface::BlockHostUntilDone with BlockHostUntilDoneWithStatus
All known overrides of StreamExecutorInterface::BlockHostUntilDone are changed by this CL. PiperOrigin-RevId: 178492517
Diffstat (limited to 'tensorflow')
-rw-r--r--  tensorflow/compiler/xla/service/interpreter/executor.cc       4
-rw-r--r--  tensorflow/compiler/xla/service/interpreter/executor.h        2
-rw-r--r--  tensorflow/stream_executor/cuda/cuda_driver.cc               13
-rw-r--r--  tensorflow/stream_executor/cuda/cuda_driver.h                 2
-rw-r--r--  tensorflow/stream_executor/cuda/cuda_gpu_executor.cc          2
-rw-r--r--  tensorflow/stream_executor/cuda/cuda_gpu_executor.h           2
-rw-r--r--  tensorflow/stream_executor/host/host_gpu_executor.cc          4
-rw-r--r--  tensorflow/stream_executor/host/host_gpu_executor.h           2
-rw-r--r--  tensorflow/stream_executor/stream_executor_internal.cc       19
-rw-r--r--  tensorflow/stream_executor/stream_executor_internal.h         3
10 files changed, 17 insertions(+), 36 deletions(-)
diff --git a/tensorflow/compiler/xla/service/interpreter/executor.cc b/tensorflow/compiler/xla/service/interpreter/executor.cc
index 0bb3259ef4..511de87b1b 100644
--- a/tensorflow/compiler/xla/service/interpreter/executor.cc
+++ b/tensorflow/compiler/xla/service/interpreter/executor.cc
@@ -100,9 +100,9 @@ bool InterpreterExecutor::StopTimer(Stream *stream, Timer *timer) {
return true;
}
-bool InterpreterExecutor::BlockHostUntilDone(Stream *stream) {
+port::Status InterpreterExecutor::BlockHostUntilDoneWithStatus(Stream *stream) {
AsExecutorStream(stream)->BlockUntilDone();
- return true;
+ return port::Status::OK();
}
DeviceDescription *InterpreterExecutor::PopulateDeviceDescription() const {
diff --git a/tensorflow/compiler/xla/service/interpreter/executor.h b/tensorflow/compiler/xla/service/interpreter/executor.h
index c59b2ccb15..d3753a6a65 100644
--- a/tensorflow/compiler/xla/service/interpreter/executor.h
+++ b/tensorflow/compiler/xla/service/interpreter/executor.h
@@ -157,7 +157,7 @@ class InterpreterExecutor : public internal::StreamExecutorInterface {
bool StartTimer(Stream *stream, Timer *timer) override;
bool StopTimer(Stream *stream, Timer *timer) override;
- bool BlockHostUntilDone(Stream *stream) override;
+ port::Status BlockHostUntilDoneWithStatus(Stream *stream) override;
int PlatformDeviceCount() override { return 1; }
diff --git a/tensorflow/stream_executor/cuda/cuda_driver.cc b/tensorflow/stream_executor/cuda/cuda_driver.cc
index b6a96ed3e5..a017ff64d4 100644
--- a/tensorflow/stream_executor/cuda/cuda_driver.cc
+++ b/tensorflow/stream_executor/cuda/cuda_driver.cc
@@ -1115,19 +1115,20 @@ CUDADriver::ContextGetSharedMemConfig(CudaContext* context) {
return true;
}
-/* static */ bool CUDADriver::SynchronizeStream(CudaContext* context,
- CUstream stream) {
+/* static */ port::Status CUDADriver::SynchronizeStream(CudaContext *context,
+ CUstream stream) {
ScopedActivateContext activated{context};
CHECK(stream != nullptr);
CUresult res = cuStreamSynchronize(stream);
if (res != CUDA_SUCCESS) {
- LOG(ERROR) << "could not synchronize on CUDA stream: " << ToString(res)
- << " :: " << port::CurrentStackTrace();
- return false;
+ port::Status status = port::InternalError(
+ port::StrCat("could not synchronize on CUDA stream: ", ToString(res)));
+ LOG(ERROR) << status << " :: " << port::CurrentStackTrace();
+ return status;
}
VLOG(2) << "successfully synchronized stream " << stream << " on context "
<< context;
- return true;
+ return port::Status::OK();
}
/* static */ bool CUDADriver::IsStreamIdle(CudaContext *context,
diff --git a/tensorflow/stream_executor/cuda/cuda_driver.h b/tensorflow/stream_executor/cuda/cuda_driver.h
index 68494aba65..4002ba2021 100644
--- a/tensorflow/stream_executor/cuda/cuda_driver.h
+++ b/tensorflow/stream_executor/cuda/cuda_driver.h
@@ -304,7 +304,7 @@ class CUDADriver {
// amount of time?
//
// http://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__STREAM.html#group__CUDA__STREAM_1g15e49dd91ec15991eb7c0a741beb7dad
- static bool SynchronizeStream(CudaContext* context, CUstream stream);
+ static port::Status SynchronizeStream(CudaContext* context, CUstream stream);
// Blocks the calling thread until the operations associated with the context
// have been completed, via cuCtxSynchronize.
diff --git a/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc b/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc
index 60eaaba21c..7f8a7ca7c7 100644
--- a/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc
+++ b/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc
@@ -664,7 +664,7 @@ bool CUDAExecutor::StopTimer(Stream *stream, Timer *timer) {
return AsCUDATimer(timer)->Stop(AsCUDAStream(stream));
}
-bool CUDAExecutor::BlockHostUntilDone(Stream *stream) {
+port::Status CUDAExecutor::BlockHostUntilDoneWithStatus(Stream *stream) {
return CUDADriver::SynchronizeStream(context_, AsCUDAStreamValue(stream));
}
diff --git a/tensorflow/stream_executor/cuda/cuda_gpu_executor.h b/tensorflow/stream_executor/cuda/cuda_gpu_executor.h
index 8ff4a30d62..5adbb59856 100644
--- a/tensorflow/stream_executor/cuda/cuda_gpu_executor.h
+++ b/tensorflow/stream_executor/cuda/cuda_gpu_executor.h
@@ -152,7 +152,7 @@ class CUDAExecutor : public internal::StreamExecutorInterface {
Event::Status PollForEventStatus(Event *event) override;
- bool BlockHostUntilDone(Stream *stream) override;
+ port::Status BlockHostUntilDoneWithStatus(Stream *stream) override;
int PlatformDeviceCount() override { return CUDADriver::GetDeviceCount(); }
diff --git a/tensorflow/stream_executor/host/host_gpu_executor.cc b/tensorflow/stream_executor/host/host_gpu_executor.cc
index 0af2c8cc3d..1fd8eeb881 100644
--- a/tensorflow/stream_executor/host/host_gpu_executor.cc
+++ b/tensorflow/stream_executor/host/host_gpu_executor.cc
@@ -177,9 +177,9 @@ bool HostExecutor::StopTimer(Stream *stream, Timer *timer) {
return true;
}
-bool HostExecutor::BlockHostUntilDone(Stream *stream) {
+port::Status HostExecutor::BlockHostUntilDoneWithStatus(Stream *stream) {
AsHostStream(stream)->BlockUntilDone();
- return true;
+ return port::Status::OK();
}
DeviceDescription *HostExecutor::PopulateDeviceDescription() const {
diff --git a/tensorflow/stream_executor/host/host_gpu_executor.h b/tensorflow/stream_executor/host/host_gpu_executor.h
index 77b07e4a57..e884554a15 100644
--- a/tensorflow/stream_executor/host/host_gpu_executor.h
+++ b/tensorflow/stream_executor/host/host_gpu_executor.h
@@ -139,7 +139,7 @@ class HostExecutor : public internal::StreamExecutorInterface {
bool StopTimer(Stream *stream, Timer *timer) override;
- bool BlockHostUntilDone(Stream *stream) override;
+ port::Status BlockHostUntilDoneWithStatus(Stream *stream) override;
int PlatformDeviceCount() override { return 1; }
diff --git a/tensorflow/stream_executor/stream_executor_internal.cc b/tensorflow/stream_executor/stream_executor_internal.cc
index 25b579fc16..273d970b6f 100644
--- a/tensorflow/stream_executor/stream_executor_internal.cc
+++ b/tensorflow/stream_executor/stream_executor_internal.cc
@@ -15,10 +15,6 @@ limitations under the License.
#include "tensorflow/stream_executor/stream_executor_internal.h"
-#include "tensorflow/stream_executor/lib/error.h"
-#include "tensorflow/stream_executor/lib/statusor.h"
-#include "tensorflow/stream_executor/lib/stringprintf.h"
-
namespace perftools {
namespace gputools {
namespace internal {
@@ -41,21 +37,6 @@ StreamExecutorFactory* MakeOpenCLExecutorImplementation() {
StreamExecutorFactory MakeHostExecutorImplementation;
-// TODO(b/70298427) There are two similar methods:
-// bool BlockHostUntilDone(Stream*);
-// Status BlockHostUntilDoneWithStatus(Stream*);
-//
-// The intention is to replace all implementations of the bool version with the
-// Status version. In the meantime, just implement one in terms of the other.
-port::Status StreamExecutorInterface::BlockHostUntilDoneWithStatus(
- Stream* stream) {
- if (!BlockHostUntilDone(stream)) {
- return port::Status(port::error::INTERNAL,
- "Failed to block host until done.");
- }
- return port::Status::OK();
-}
-
} // namespace internal
} // namespace gputools
} // namespace perftools
diff --git a/tensorflow/stream_executor/stream_executor_internal.h b/tensorflow/stream_executor/stream_executor_internal.h
index d2426f46e2..0a9bef71d0 100644
--- a/tensorflow/stream_executor/stream_executor_internal.h
+++ b/tensorflow/stream_executor/stream_executor_internal.h
@@ -219,8 +219,7 @@ class StreamExecutorInterface {
virtual void DeallocateTimer(Timer *timer) = 0;
virtual bool StartTimer(Stream *stream, Timer *timer) = 0;
virtual bool StopTimer(Stream *stream, Timer *timer) = 0;
- virtual bool BlockHostUntilDone(Stream *stream) = 0;
- virtual port::Status BlockHostUntilDoneWithStatus(Stream *stream);
+ virtual port::Status BlockHostUntilDoneWithStatus(Stream *stream) = 0;
virtual int PlatformDeviceCount() = 0;
virtual port::Status EnablePeerAccessTo(StreamExecutorInterface *other) = 0;
virtual bool CanEnablePeerAccessTo(StreamExecutorInterface *other) = 0;