author      2016-11-08 10:19:12 -0800
committer   2016-11-08 16:22:47 -0800
commit      5b92bf6221ed1c494233102b89fee483fa7c0e85 (patch)
tree        6cb05b71883fa7b11b441564b9451d86275c0ded
parent      6bce62eed137e97d72b58ecf57db6c49c9e33a2a (diff)
Explicitly set `zero_debias` in moving averages to the default. This CL is a noop.
Change: 138532885
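For context: `assign_moving_average(variable, value, decay)` updates the variable in place via `variable -= (1 - decay) * (variable - value)`, i.e. an exponential moving average. Because such an accumulator is typically zero-initialized, early estimates are biased toward zero; `zero_debias=True` corrects for this by dividing the raw estimate by `1 - decay**t`, the bias correction popularized by Adam (Kingma & Ba, 2014). The sketch below illustrates that arithmetic only; it is not TensorFlow's implementation, and the helper name is invented for this example.

def moving_average_estimates(values, decay, zero_debias):
    """Illustrative sketch (not TensorFlow's code) of the moving-average
    arithmetic behind assign_moving_average.

    A zero-initialized accumulator is updated as
    biased -= (1 - decay) * (biased - value); with zero_debias the biased
    estimate is divided by (1 - decay**t) before being reported.
    """
    biased = 0.0
    estimates = []
    for t, value in enumerate(values, start=1):
        biased -= (1.0 - decay) * (biased - value)  # EMA core update
        estimates.append(biased / (1.0 - decay**t) if zero_debias else biased)
    return estimates

# For a constant signal the debiased estimate is exact immediately,
# while the raw estimate only converges toward it as t grows:
print(moving_average_estimates([1.0] * 3, 0.9, zero_debias=True))   # ~[1.0, 1.0, 1.0]
print(moving_average_estimates([1.0] * 3, 0.9, zero_debias=False))  # ~[0.1, 0.19, 0.271]

Passing `zero_debias=False` explicitly, as this change does, simply pins the behavior the parameter's default already provided, which is why the commit message calls the CL a noop.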
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers.py      | 16
-rw-r--r--  tensorflow/contrib/layers/python/layers/optimizers.py  |  2
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index b0cf453f14..e4bcff3d2f 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -320,9 +320,9 @@ def _fused_batch_norm(
       def _force_updates():
         """Internal function forces updates moving_vars if is_training."""
         update_moving_mean = moving_averages.assign_moving_average(
-            moving_mean, mean, decay)
+            moving_mean, mean, decay, zero_debias=False)
         update_moving_variance = moving_averages.assign_moving_average(
-            moving_variance, variance, decay)
+            moving_variance, variance, decay, zero_debias=False)
         with ops.control_dependencies(
             [update_moving_mean, update_moving_variance]):
           return array_ops.identity(outputs)
@@ -332,9 +332,9 @@ def _fused_batch_norm(
       def _delay_updates():
         """Internal function that delay updates moving_vars if is_training."""
         update_moving_mean = moving_averages.assign_moving_average(
-            moving_mean, mean, decay)
+            moving_mean, mean, decay, zero_debias=False)
         update_moving_variance = moving_averages.assign_moving_average(
-            moving_variance, variance, decay)
+            moving_variance, variance, decay, zero_debias=False)
         return update_moving_mean, update_moving_variance
       update_mean, update_variance = utils.smart_cond(is_training,
                                                       _delay_updates,
@@ -564,9 +564,9 @@ def batch_norm(
       def _force_updates():
         """Internal function forces updates moving_vars if is_training."""
         update_moving_mean = moving_averages.assign_moving_average(
-            moving_mean, mean, decay)
+            moving_mean, mean, decay, zero_debias=False)
         update_moving_variance = moving_averages.assign_moving_average(
-            moving_variance, variance, decay)
+            moving_variance, variance, decay, zero_debias=False)
         with ops.control_dependencies([update_moving_mean,
                                        update_moving_variance]):
           return array_ops.identity(mean), array_ops.identity(variance)
@@ -577,9 +577,9 @@ def batch_norm(
       def _delay_updates():
         """Internal function that delay updates moving_vars if is_training."""
         update_moving_mean = moving_averages.assign_moving_average(
-            moving_mean, mean, decay)
+            moving_mean, mean, decay, zero_debias=False)
         update_moving_variance = moving_averages.assign_moving_average(
-            moving_variance, variance, decay)
+            moving_variance, variance, decay, zero_debias=False)
         return update_moving_mean, update_moving_variance
 
       update_mean, update_variance = utils.smart_cond(is_training,
diff --git a/tensorflow/contrib/layers/python/layers/optimizers.py b/tensorflow/contrib/layers/python/layers/optimizers.py
index 6cb7e91b73..94adb8bf98 100644
--- a/tensorflow/contrib/layers/python/layers/optimizers.py
+++ b/tensorflow/contrib/layers/python/layers/optimizers.py
@@ -298,7 +298,7 @@ def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
         name, shape=value.get_shape(), dtype=value.dtype,
         initializer=init_ops.zeros_initializer, trainable=False)
     return moving_averages.assign_moving_average(
-        moving_average_variable, value, decay)
+        moving_average_variable, value, decay, zero_debias=False)
 
   # quicker adaptation at the beginning
   if global_step is not None:
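A usage note on the `_delay_updates` path above: instead of forcing the updates, `batch_norm` returns the update ops, which by default are placed in the `UPDATE_OPS` graph collection, so training code must run them explicitly. A minimal TF1-style sketch follows; the placeholder shapes, loss, and learning rate are invented for illustration.

import tensorflow as tf  # TF 1.x-era API assumed (tf.contrib existed then)

inputs = tf.placeholder(tf.float32, [None, 8])
targets = tf.placeholder(tf.float32, [None, 8])
net = tf.contrib.layers.batch_norm(inputs, is_training=True)
loss = tf.reduce_mean(tf.square(net - targets))

# The delayed moving_mean/moving_variance updates from the diff above land
# in GraphKeys.UPDATE_OPS by default; make the train op depend on them so
# they actually run on each training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)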