path: root/tensorflow/stream_executor/dnn.h
author    Wen-Heng (Jack) Chung <whchung@gmail.com>  2018-07-11 18:35:43 +0000
committer Wen-Heng (Jack) Chung <whchung@gmail.com>  2018-07-11 20:34:36 +0000
commit    456aaa2fdbf821296a31f5493955f4653ae119dd (patch)
tree      68f2f260cb66dd135a9c207006d721910d272e36 /tensorflow/stream_executor/dnn.h
parent    135e419e780423a888ddd45e479129493336c52b (diff)
[ROCm] Interface changes for pooling APIs in StreamExecutor
Due to the design of MIOpen, the DNN library on the ROCm platform, an instance of ScratchAllocator has to be passed into the pooling routines. This commit makes the corresponding interface changes and updates the CUDA StreamExecutor implementation accordingly.
Diffstat (limited to 'tensorflow/stream_executor/dnn.h')
-rw-r--r-- tensorflow/stream_executor/dnn.h | 21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/tensorflow/stream_executor/dnn.h b/tensorflow/stream_executor/dnn.h
index 9eca5abe1a..75705e2b49 100644
--- a/tensorflow/stream_executor/dnn.h
+++ b/tensorflow/stream_executor/dnn.h
@@ -1552,14 +1552,16 @@ class DnnSupport {
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<float>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<float>* output_data) = 0;
+ DeviceMemory<float>* output_data,
+ ScratchAllocator* workspace_allocator = nullptr) = 0;
virtual bool DoPoolForward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<double>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<double>* output_data) {
+ DeviceMemory<double>* output_data,
+ ScratchAllocator* workspace_allocator = nullptr) {
LOG(FATAL) << "DoPoolForward not implemented for double.";
return false;
}
@@ -1569,7 +1571,8 @@ class DnnSupport {
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<Eigen::half>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<Eigen::half>* output_data) {
+ DeviceMemory<Eigen::half>* output_data,
+ ScratchAllocator* workspace_allocator = nullptr) {
LOG(FATAL) << "DoPoolForward not implemented for float16.";
return false;
}
@@ -1582,7 +1585,8 @@ class DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<double>& output_data,
const DeviceMemory<double>& input_diff_data,
- DeviceMemory<double>* output_diff_data) {
+ DeviceMemory<double>* output_diff_data,
+ ScratchAllocator* workspace_allocator = nullptr) {
LOG(FATAL) << "DoPoolBackward not implemented.";
return false;
}
@@ -1594,7 +1598,8 @@ class DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<float>& output_data,
const DeviceMemory<float>& input_diff_data,
- DeviceMemory<float>* output_diff_data) {
+ DeviceMemory<float>* output_diff_data,
+ ScratchAllocator* workspace_allocator = nullptr) {
LOG(FATAL) << "DoPoolBackward not implemented.";
return false;
}
@@ -1606,7 +1611,8 @@ class DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<Eigen::half>& output_data,
const DeviceMemory<Eigen::half>& input_diff_data,
- DeviceMemory<Eigen::half>* output_diff_data) {
+ DeviceMemory<Eigen::half>* output_diff_data,
+ ScratchAllocator* workspace_allocator = nullptr) {
LOG(FATAL) << "DoPoolBackward not implemented.";
return false;
}
@@ -1653,7 +1659,8 @@ class DnnSupport {
const DeviceMemory<float>& raw_data,
const DeviceMemory<float>& normalized_data,
const DeviceMemory<float>& normalized_variable_gradient,
- DeviceMemory<float>* raw_variable_gradient) {
+ DeviceMemory<float>* raw_variable_gradient,
+ ScratchAllocator* workspace_allocator = nullptr) {
return false;
}
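For illustration, a backend that needs scratch space for pooling (as MIOpen does) could obtain it through the new workspace_allocator parameter. The sketch below is hypothetical: MyDnnSupport, GetPoolingWorkspaceSize, and LaunchPoolingKernel are made-up placeholders, and it assumes ScratchAllocator's AllocateBytes(stream, byte_size) interface returning a StatusOr<DeviceMemory<uint8>>; only the DoPoolForward signature comes from the diff above.

// Hypothetical sketch, not TensorFlow code: shows how an overriding backend
// might consume the workspace_allocator argument introduced in this change.
bool MyDnnSupport::DoPoolForward(
    Stream* stream, const dnn::PoolingDescriptor& pooling_dimensions,
    const dnn::BatchDescriptor& input_dimensions,
    const DeviceMemory<float>& input_data,
    const dnn::BatchDescriptor& output_dimensions,
    DeviceMemory<float>* output_data,
    ScratchAllocator* workspace_allocator) {
  DeviceMemory<uint8> workspace;  // stays empty if no scratch is available
  if (workspace_allocator != nullptr) {
    // Backend-specific query for the required scratch size (placeholder).
    int64 size =
        GetPoolingWorkspaceSize(pooling_dimensions, output_dimensions);
    auto allocated = workspace_allocator->AllocateBytes(stream, size);
    if (allocated.ok()) {
      workspace = allocated.ValueOrDie();
    }
    // On allocation failure, fall through and run the no-workspace path.
  }
  return LaunchPoolingKernel(stream, pooling_dimensions, input_dimensions,
                             input_data, output_dimensions, output_data,
                             workspace);
}

Because the new parameter defaults to nullptr, existing call sites keep compiling unchanged; only backends that actually need a workspace, such as MIOpen, have to consult the allocator.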