author    RJ Ryan <rjryan@google.com>  2016-07-12 23:19:39 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>  2016-07-13 00:32:20 -0700
commit    35df3ed43edabbc4ad1b2439bbc7de8917026d6e (patch)
tree      8898a5bdbf2363131f49617a02baebe378f17ebd /tensorflow/stream_executor/stream.h
parent    8e69f9864fcdab1e7b9f4d9ff86d27491f3877fc (diff)
Roll-forward of "Local Response Normalization GPU support via Stream Executor."
Move the AsDeviceMemory function into a StreamExecutorUtil class rather than GPUUtil, since it is independent of GPUs. Make lrn_op use the new version of that function.
Change: 127289319
Diffstat (limited to 'tensorflow/stream_executor/stream.h')
-rw-r--r--  tensorflow/stream_executor/stream.h | 15
1 file changed, 15 insertions(+), 0 deletions(-)
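For context on how the two Stream methods added below are meant to be called, here is a minimal usage sketch. The helper function and its parameter names are hypothetical and not part of this commit; the descriptor setters, DataLayout values, and the perftools::gputools namespace are taken from stream_executor's dnn.h of this era.

#include "tensorflow/stream_executor/stream.h"

namespace gpu = perftools::gputools;

// Hypothetical helper (not part of this commit): run forward LRN on `stream`
// over a batch of `rows` x `cols` images with `depth` feature maps each.
void RunLrnForward(gpu::Stream* stream, int batch, int rows, int cols,
                   int depth, float bias, int depth_radius, float alpha,
                   float beta, const gpu::DeviceMemory<float>& input_data,
                   gpu::DeviceMemory<float>* output_data) {
  // Describe the tensor shape and layout the normalization operates over.
  gpu::dnn::BatchDescriptor dimensions;
  dimensions.set_count(batch)
      .set_height(rows)
      .set_width(cols)
      .set_feature_map_count(depth)
      .set_layout(gpu::dnn::DataLayout::kBatchYXDepth);

  // Describe the LRN parameters (bias, radius, alpha, beta).
  gpu::dnn::NormalizeDescriptor normalize_descriptor;
  normalize_descriptor.set_bias(bias)
      .set_range(depth_radius)
      .set_alpha(alpha)
      .set_beta(beta);

  // Normalize across feature maps, with the tensor dimensions made explicit.
  stream->ThenNormalizeWithDimensions(normalize_descriptor, dimensions,
                                      input_data, output_data);
}

The backward variant, ThenNormalizeBackwardWithDimensions, takes the same two descriptors plus the original input, the forward output, and the incoming gradient, and writes the gradient with respect to the raw input into raw_variable_gradient.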
diff --git a/tensorflow/stream_executor/stream.h b/tensorflow/stream_executor/stream.h
index dabc9f98e3..4d514804e5 100644
--- a/tensorflow/stream_executor/stream.h
+++ b/tensorflow/stream_executor/stream.h
@@ -453,6 +453,21 @@ class Stream {
const DeviceMemory<float> &input_data,
DeviceMemory<float> *output_data);
+ // Similar to ThenNormalize, but normalizes across feature maps and allows for
+ // specifying the dimensions of the tensor.
+ Stream &ThenNormalizeWithDimensions(
+ const dnn::NormalizeDescriptor &normalize_descriptor,
+ const dnn::BatchDescriptor &dimensions,
+ const DeviceMemory<float> &input_data, DeviceMemory<float> *output_data);
+
+ Stream &ThenNormalizeBackwardWithDimensions(
+ const dnn::NormalizeDescriptor &normalize_descriptor,
+ const dnn::BatchDescriptor &dimensions,
+ const DeviceMemory<float> &raw_data,
+ const DeviceMemory<float> &normalized_data,
+ const DeviceMemory<float> &normalized_variable_gradient,
+ DeviceMemory<float> *raw_variable_gradient);
+
Stream &ThenActivate(dnn::ActivationMode activation_mode,
const dnn::BatchDescriptor &dimensions,
const DeviceMemory<float> &input_data,