diff options
author | Justin Lebar <jlebar@google.com> | 2017-12-17 10:43:11 -0800 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2017-12-18 11:21:02 -0800 |
commit | 483e439c7494dbe30a660b90a3bca1349a1bf8fd (patch) | |
tree | 2a1610815b659c89465475e8e7c1c41f032db336 /tensorflow/stream_executor/dnn.h | |
parent | 01da208158687c575a9c459cb62e3c5f90968bd2 (diff) |
[StreamExecutor] Change "variance" to "inv_var" in BatchNormalizationBackward.
This parameter is not the variance of the data, but rather is
1/sqrt(variance + epsilon). Neglecting epsilon, this is the inverse
standard deviation.
"inv_stddev" might be a better name, but "inv_var" is certainly better
than plain "variance", and it matches nvidia's name for this parameter,
which I think may override the desire for a more precise name.
No functional change.
PiperOrigin-RevId: 179352839
Diffstat (limited to 'tensorflow/stream_executor/dnn.h')
-rw-r--r-- | tensorflow/stream_executor/dnn.h | 9 |
1 files changed, 5 insertions, 4 deletions
diff --git a/tensorflow/stream_executor/dnn.h b/tensorflow/stream_executor/dnn.h index 73b96de438..07314a0ff7 100644 --- a/tensorflow/stream_executor/dnn.h +++ b/tensorflow/stream_executor/dnn.h @@ -908,8 +908,8 @@ class DnnSupport { // the running variance. // reserve_space_1: saved mean, to be reused in the backward gradient // computation. - // reserve_space_2: saved variance, to be reused in the backward gradient - // computation. + // reserve_space_2: saved inv_var (1/sqrt(epsilon + variance), to be reused + // in the backward gradient computation. // is_training: Set to true for training, false for inference. // var_to_inv_var: a function to convert the variance to inverted variance // for cuDNN v4 forward inference. @@ -957,6 +957,7 @@ class DnnSupport { // y_backprop: gradient with regard to output y. // x: input data. // scale: scaling parameters. + // inv_var: 1/sqrt(epsilon + variance) of x. // x_desc: dimensions of the input data, which is the same as the dimensions // of the output. // scale_offset_desc: dimensions of scale and offset. 
@@ -967,7 +968,7 @@ class DnnSupport { virtual bool DoBatchNormalizationBackward( Stream* stream, const DeviceMemory<float>& y_backprop, const DeviceMemory<float>& x, const DeviceMemory<float>& scale, - const DeviceMemory<float>& mean, const DeviceMemory<float>& variance, + const DeviceMemory<float>& mean, const DeviceMemory<float>& inv_var, const dnn::BatchDescriptor& x_desc, const dnn::BatchDescriptor& scale_offset_desc, const double epsilon, DeviceMemory<float>* x_backprop, DeviceMemory<float>* scale_backprop, @@ -981,7 +982,7 @@ class DnnSupport { virtual bool DoBatchNormalizationBackward( Stream* stream, const DeviceMemory<Eigen::half>& y_backprop, const DeviceMemory<Eigen::half>& x, const DeviceMemory<float>& scale, - const DeviceMemory<float>& mean, const DeviceMemory<float>& variance, + const DeviceMemory<float>& mean, const DeviceMemory<float>& inv_var, const dnn::BatchDescriptor& x_desc, const dnn::BatchDescriptor& scale_offset_desc, const double epsilon, DeviceMemory<Eigen::half>* x_backprop, |