author     2017-09-25 19:35:53 -0700
committer  2017-09-25 19:39:42 -0700
commit     e2e3a943c0a28b7656325acb3fcd035743d55ea0 (patch)
tree       f4b909d5410bdf3b94012392909e7805cd27a2a7 /tensorflow/python/ops/nn_grad.py
parent     df22044be98c8b707601e03fe22ded53bcc28c7e (diff)
Merge changes from github.
END_PUBLIC
---
Commit 1e1b3d902 authored by Pete Warden<pete@petewarden.com>
Committed by gunan<gunan@google.com>:
Changed output directory for Pi CI build to fix permissions problem with nightlies (#13257)
* Fix for RTLD_GLOBAL breakage of Pi builds, and removed Eigen version change for Pi that's no longer needed
* Fixed Pi Zero OpenBLAS build problems and tidied up directories used
* More robust checks in Pi build script
* Changed output directory for Pi CI build to fix permissions problem
---
Commit fe3a2e65c authored by Yan Facai (颜发才)<facai.yan@gmail.com>
Committed by drpngx<drpngx@users.noreply.github.com>:
check invalid string type for dest_nodes in extract_sub_graph (#13057)
* BUG: check str type
* TST: add unit test
* CLN: remove list check
* CLN: use warning
* CLN: 2 indent
* CLN: raise TypeError if not list
* CLN: check string only
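A minimal stand-alone sketch of the guard this commit describes, assuming the `graph_util.extract_sub_graph` signature and a `six`-based string check; the body is elided and only the validation is shown.

import six

def extract_sub_graph(graph_def, dest_nodes):
  # A plain string is iterable, so passing "my_node" instead of
  # ["my_node"] would silently be treated as a list of characters;
  # reject it with a TypeError, as the final revision of the fix does.
  if isinstance(dest_nodes, six.string_types):
    raise TypeError("dest_nodes must be a list.")
  # ... the real implementation walks graph_def backwards from dest_nodes ...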
---
Commit 225ab7629 authored by Jean Wanka<jm.wanka@gmail.com>
Committed by Jean Wanka<jm.wanka@gmail.com>:
Fix polynomial decay with cycle for global step=0
For polynomial decay with cycle=True, the learning rate at step 0
becomes NaN because its calculation divides by 0. This change fixes
that by setting the multiplier for the decay steps to one when
global_step=0.
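A hand-written sketch of the failure mode and the fix; the names mirror `tf.train.polynomial_decay`, but this stand-alone function is illustrative, not the library code.

import math

def polynomial_decay(lr, global_step, decay_steps, end_lr=0.0001,
                     power=1.0, cycle=True):
  if cycle:
    # Pre-fix behavior: ceil(0 / decay_steps) == 0, so decay_steps
    # becomes 0 and the ratio below is 0/0 = NaN at global_step=0.
    # The fix forces the multiplier to one in that case.
    multiplier = math.ceil(global_step / decay_steps) if global_step else 1.0
    decay_steps = decay_steps * multiplier
  step = min(global_step, decay_steps)
  return (lr - end_lr) * (1 - step / decay_steps) ** power + end_lr

print(polynomial_decay(0.1, global_step=0, decay_steps=1000))  # 0.1, not NaN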
---
Commit 286f57061 authored by Bjarke Hammersholt Roune<broune@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Make Service::TransferToClient not attempt to manipulate the literal when the transfer failed, preventing a crash and allowing the caller to see the reason for the failed transfer.
PiperOrigin-RevId: 169770126
---
Commit e0501bc4d authored by Yong Tang<yong.tang.github@outlook.com>
Committed by Shanqing Cai<cais@google.com>:
Fix GRUBlockCell parameter naming inconsistency (#13153)
* Fix GRUBlockCell parameter naming inconsistency
This fix addresses the issue in 13137, where the parameter
`cell_size` is used instead of `num_units`, which is inconsistent
with other RNN cells. This fix adds support for `num_units` while
maintaining backward compatibility for `cell_size`.
This fixes 13137.
Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
* Add `@deprecated_args` for 'cell_size' in `GRUBlockCell`
This commit adds `@deprecated_args` for 'cell_size' in `GRUBlockCell`
Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
* Address review comment
Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
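The backward-compatible signature described above, as a minimal stand-alone sketch; the real `GRUBlockCell` lives in `tf.contrib.rnn` and additionally uses the `@deprecated_args` decorator, so this constructor only illustrates the aliasing.

class GRUBlockCell(object):
  def __init__(self, num_units=None, cell_size=None):
    # `cell_size` is the deprecated spelling kept for backward
    # compatibility; exactly one of the two names may be given.
    if cell_size is not None:
      if num_units is not None:
        raise ValueError("Only one of num_units or cell_size may be set.")
      num_units = cell_size
    if num_units is None:
      raise ValueError("num_units must be set.")
    self._cell_size = num_units

cell = GRUBlockCell(cell_size=64)   # old spelling still works
cell = GRUBlockCell(num_units=64)   # preferred spelling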
---
Commit 02a2eba05 authored by Pete Warden<pete@petewarden.com>
Committed by gunan<gunan@google.com>:
Fix for RTLD_GLOBAL breakage of Pi builds, and removed Eigen version change that's no longer needed (#13251)
* Fix for RTLD_GLOBAL breakage of Pi builds, and removed Eigen version change for Pi that's no longer needed
* Fixed Pi Zero OpenBLAS build problems and tidied up directories used
* More robust checks in Pi build script
---
Commit 8ef722253 authored by Sanjoy Das<sanjoy@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Remove a redundant setName.
The EmitComputation should have emitted a function with the right name, so use a
CHECK instead.
PiperOrigin-RevId: 169764856
---
Commit 1b94147dc authored by Neal Wu<wun@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Fix broken GitHub links in tensorflow and tensorflow_models resulting from The Great Models Move (a.k.a. the research subfolder)
PiperOrigin-RevId: 169763373
---
Commit b1ada5f0c authored by Justine Tunney<jart@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Fix TensorBoard `python -m` invocation in docs
PiperOrigin-RevId: 169758752
---
Commit 2957cd894 authored by Mustafa Ispir<ispir@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
Local run option of estimator training.
PiperOrigin-RevId: 169756384
---
Commit 1dc2fe7ac authored by Gunhan Gulsoy<gunan@google.com>
Committed by TensorFlower Gardener<gardener@tensorflow.org>:
BEGIN_PUBLIC
Automated g4 rollback of changelist 166264198
PiperOrigin-RevId: 169998124
Diffstat (limited to 'tensorflow/python/ops/nn_grad.py')
-rw-r--r--  tensorflow/python/ops/nn_grad.py  110
1 file changed, 67 insertions, 43 deletions
diff --git a/tensorflow/python/ops/nn_grad.py b/tensorflow/python/ops/nn_grad.py
index 54627b6fd9..c3e8d403ba 100644
--- a/tensorflow/python/ops/nn_grad.py
+++ b/tensorflow/python/ops/nn_grad.py
@@ -736,64 +736,85 @@ def _FusedBatchNormGrad(op, *grad):
   else:
     pop_mean = op.inputs[3]
     pop_var = op.inputs[4]
-    if data_format == b"NHWC":
-      reduce_axis = [0, 1, 2]
-    else:
-      reduce_axis = [0, 2, 3]
-      shape = [1, array_ops.size(pop_mean), 1, 1]
-      pop_mean = array_ops.reshape(pop_mean, shape)
-      pop_var = array_ops.reshape(pop_var, shape)
-      scale = array_ops.reshape(scale, shape)
-
-    grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
-    var_rsqrt = math_ops.rsqrt(pop_var + epsilon)
-    grad_scale = math_ops.reduce_sum(
-        grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)
-    grad_x = grad_y * scale * var_rsqrt
-    return grad_x, grad_scale, grad_offset, None, None
+    if data_format == b"NCHW":
+      x = array_ops.transpose(x, [0, 2, 3, 1])
+      grad_y = array_ops.transpose(grad_y, [0, 2, 3, 1])
+    dx, dscale, doffset, _, _ = gen_nn_ops.fused_batch_norm_grad(
+        grad_y,
+        x,
+        scale,
+        pop_mean,
+        pop_var,
+        epsilon=epsilon,
+        data_format='NHWC',
+        is_training=is_training)
+    if data_format == b"NCHW":
+      dx = array_ops.transpose(dx, [0, 3, 1, 2])
+    return dx, dscale, doffset, None, None
 
 
-def _BatchNormGrad(grad_y, x, scale, epsilon, data_format):
+def _BatchNormGrad(grad_y, x, scale, pop_mean, pop_var, epsilon, data_format,
+                   is_training=True):
   """Returns the gradients for the 3 inputs of BatchNorm.
 
   Args:
     grad_y: A `Tensor` of 4 dimensions for gradient for y.
     x: A `Tensor` of 4 dimensions for x.
     scale: A `Tensor` of 1 dimension for scaling.
+    pop_mean: A `Tensor` of 1 dimension for the population mean. Only used
+      when is_training=False.
+    pop_var: A `Tensor` of 1 dimension for the population variance. Only used
+      when is_training=False.
     epsilon: A small float number added to the variance of x.
     data_format: The data format for input. Either b"NHWC" or b"NCHW".
+    is_training: A bool value to indicate the operation is for training
+      (default) or inference.
 
   Returns:
     A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient
     for x, grad_scale the gradient for scale, and grad_offset the gradient
     for offset.
   """
-  if data_format == b"NHWC":
-    keep_dims = False
-    reduce_axis = [0, 1, 2]
+  if is_training:
+    if data_format == b"NHWC":
+      keep_dims = False
+      reduce_axis = [0, 1, 2]
+    else:
+      keep_dims = True
+      reduce_axis = [0, 2, 3]
+      shape = [1, array_ops.size(scale), 1, 1]
+      scale = array_ops.reshape(scale, shape)
+    mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keep_dims=keep_dims)
+    mean_x = math_ops.reduce_mean(x, reduce_axis, keep_dims=keep_dims)
+    var_x = math_ops.reduce_mean(
+        math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)),
+        reduce_axis,
+        keep_dims=keep_dims)
+    grad_y_offset = grad_y - mean_grad_y
+    x_offset = x - mean_x
+    mean = math_ops.reduce_mean(
+        grad_y * x_offset, axis=reduce_axis, keep_dims=keep_dims)
+    grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (
+        grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
+    grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(
+        grad_y * x_offset, axis=reduce_axis, keep_dims=keep_dims)
+    if data_format == b"NCHW":
+      grad_scale = array_ops.squeeze(grad_scale)
+    grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
+    return grad_x, grad_scale, grad_offset
   else:
-    keep_dims = True
-    reduce_axis = [0, 2, 3]
-    shape = [1, array_ops.size(scale), 1, 1]
-    scale = array_ops.reshape(scale, shape)
-  mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keep_dims=keep_dims)
-  mean_x = math_ops.reduce_mean(x, reduce_axis, keep_dims=keep_dims)
-  var_x = math_ops.reduce_mean(
-      math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)),
-      reduce_axis,
-      keep_dims=keep_dims)
-  grad_y_offset = grad_y - mean_grad_y
-  x_offset = x - mean_x
-  mean = math_ops.reduce_mean(
-      grad_y * x_offset, axis=reduce_axis, keep_dims=keep_dims)
-  grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (
-      grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
-  grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(
-      grad_y * x_offset, axis=reduce_axis, keep_dims=keep_dims)
-  if data_format == b"NCHW":
-    grad_scale = array_ops.squeeze(grad_scale)
-  grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
-  return grad_x, grad_scale, grad_offset
+    if data_format == b"NHWC":
+      reduce_axis = [0, 1, 2]
+    else:
+      reduce_axis = [0, 2, 3]
+      shape = [1, array_ops.size(pop_mean), 1, 1]
+      pop_mean = array_ops.reshape(pop_mean, shape)
+      pop_var = array_ops.reshape(pop_var, shape)
+      scale = array_ops.reshape(scale, shape)
+
+    grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
+    var_rsqrt = math_ops.rsqrt(pop_var + epsilon)
+    grad_scale = math_ops.reduce_sum(
+        grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)
+    grad_x = grad_y * scale * var_rsqrt
+    return grad_x, grad_scale, grad_offset
 
 
 @ops.RegisterGradient("FusedBatchNormGrad")
@@ -813,14 +834,17 @@ def _FusedBatchNormGradGrad(op, *grad):
   """
   data_format = op.get_attr("data_format")
   epsilon = op.get_attr("epsilon")
+  is_training = op.get_attr("is_training")
   grad_y = op.inputs[0]
   x = op.inputs[1]
   scale = op.inputs[2]
+  pop_mean = op.inputs[3]
+  pop_var = op.inputs[4]
   grad_grad_x = grad[0]
   grad_grad_scale = grad[1]
   grad_grad_offset = grad[2]
-  grad_x, grad_scale, grad_offset = _BatchNormGrad(grad_y, x, scale, epsilon,
-                                                   data_format)
+  grad_x, grad_scale, grad_offset = _BatchNormGrad(
+      grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training)
   grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset]
   grad_grad_y, grad_x, grad_scale = gradients_impl.gradients(
       [grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial)
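The net effect of the nn_grad.py diff above is that gradients of fused batch norm in inference mode (is_training=False) are routed through `gen_nn_ops.fused_batch_norm_grad` (with an NHWC round-trip for NCHW inputs), and `_BatchNormGrad` gains the population statistics it needs for the second-order case. A minimal sketch exercising that path through the public TF 1.x API; the shapes and values are illustrative.

import numpy as np
import tensorflow as tf  # TF 1.x, matching this commit

x = tf.constant(np.random.rand(2, 4, 4, 3).astype(np.float32))
scale = tf.ones([3])
offset = tf.zeros([3])
pop_mean = tf.zeros([3])
pop_var = tf.ones([3])

# is_training=False selects the inference branch rewritten above.
y, _, _ = tf.nn.fused_batch_norm(
    x, scale, offset, mean=pop_mean, variance=pop_var,
    epsilon=0.001, data_format='NHWC', is_training=False)
grads = tf.gradients(y, [x, scale, offset])

with tf.Session() as sess:
  print([g.shape for g in sess.run(grads)])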