author | 2017-11-21 23:32:43 -0800
---|---
committer | 2017-11-21 23:36:22 -0800
commit | c6d603f02e1a98f871912cda6716cdcbed6b439e (patch)
tree | 4658788770c03d8a5bfd48eee59c247559344f08 /tensorflow/python/layers/normalization.py
parent | f93c48dc061d23495a4425fcad17d55159cb02b1 (diff)
Merge changes from github.
PiperOrigin-RevId: 176615107
Diffstat (limited to 'tensorflow/python/layers/normalization.py')
-rw-r--r-- | tensorflow/python/layers/normalization.py | 22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/tensorflow/python/layers/normalization.py b/tensorflow/python/layers/normalization.py
index 9d9b2b3941..4d5fb97845 100644
--- a/tensorflow/python/layers/normalization.py
+++ b/tensorflow/python/layers/normalization.py
@@ -26,6 +26,7 @@ import numpy as np
 
 from tensorflow.python.eager import context
 from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import tensor_shape
 from tensorflow.python.layers import base
@@ -239,6 +240,12 @@ class BatchNormalization(base.Layer):
         raise ValueError('Unsupported axis, fused batch norm only supports '
                          'axis == [1] or axis == [3]')
 
+    # Raise parameters of fp16 batch norm to fp32
+    if self.dtype == dtypes.float16:
+      param_dtype = dtypes.float32
+    else:
+      param_dtype = self.dtype or dtypes.float32
+
     axis_to_dim = {x: input_shape[x].value for x in self.axis}
     for x in axis_to_dim:
       if axis_to_dim[x] is None:
@@ -262,6 +269,7 @@ class BatchNormalization(base.Layer):
     if self.scale:
       self.gamma = self.add_variable(name='gamma',
                                      shape=param_shape,
+                                     dtype=param_dtype,
                                      initializer=self.gamma_initializer,
                                      regularizer=self.gamma_regularizer,
                                      constraint=self.gamma_constraint,
@@ -269,11 +277,14 @@ class BatchNormalization(base.Layer):
     else:
       self.gamma = None
       if self.fused:
-        self._gamma_const = array_ops.constant(1.0, shape=param_shape)
+        self._gamma_const = array_ops.constant(1.0,
+                                               dtype=param_dtype,
+                                               shape=param_shape)
 
     if self.center:
       self.beta = self.add_variable(name='beta',
                                     shape=param_shape,
+                                    dtype=param_dtype,
                                     initializer=self.beta_initializer,
                                     regularizer=self.beta_regularizer,
                                     constraint=self.beta_constraint,
@@ -281,7 +292,9 @@ class BatchNormalization(base.Layer):
     else:
       self.beta = None
       if self.fused:
-        self._beta_const = array_ops.constant(0.0, shape=param_shape)
+        self._beta_const = array_ops.constant(0.0,
+                                              dtype=param_dtype,
+                                              shape=param_shape)
 
     # Disable variable partitioning when creating the moving mean and variance
     try:
@@ -293,12 +306,14 @@ class BatchNormalization(base.Layer):
       self.moving_mean = self.add_variable(
           name='moving_mean',
           shape=param_shape,
+          dtype=param_dtype,
           initializer=self.moving_mean_initializer,
           trainable=False)
 
       self.moving_variance = self.add_variable(
           name='moving_variance',
           shape=param_shape,
+          dtype=param_dtype,
           initializer=self.moving_variance_initializer,
           trainable=False)
 
@@ -314,6 +329,7 @@ class BatchNormalization(base.Layer):
         def _renorm_variable(name, shape):
           var = self.add_variable(name=name,
                                   shape=shape,
+                                  dtype=param_dtype,
                                   initializer=init_ops.zeros_initializer(),
                                   trainable=False)
           return var
@@ -356,7 +372,6 @@ class BatchNormalization(base.Layer):
 
   def _fused_batch_norm(self, inputs, training):
     """Returns the output of fused batch norm."""
-    # TODO(reedwm): Add support for fp16 inputs.
     beta = self.beta if self.center else self._beta_const
     gamma = self.gamma if self.scale else self._gamma_const
 
@@ -752,6 +767,7 @@ def batch_normalization(inputs,
       virtual_batch_size=virtual_batch_size,
       adjustment=adjustment,
       name=name,
+      dtype=inputs.dtype.base_dtype,
       _reuse=reuse,
       _scope=name)
   return layer.apply(inputs, training=training)
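For context: this change keeps the batch-norm parameters (gamma, beta, and the moving statistics) in float32 even when the layer itself runs on float16 inputs, avoiding the precision loss of accumulating running statistics in half precision, and it drops the fp16 TODO from the fused path now that fp16 inputs are handled. A minimal sketch of the resulting behavior, assuming the TF 1.4-era `tf.layers` API (shapes and names below are illustrative, not from the patch):

```python
import tensorflow as tf

# Half-precision activations, e.g. from a mixed-precision model (NHWC).
inputs = tf.placeholder(tf.float16, shape=[None, 32, 32, 64])

# The layer's compute dtype is inferred from the inputs (float16), but
# after this change gamma/beta/moving_mean/moving_variance are created
# in float32 via param_dtype.
outputs = tf.layers.batch_normalization(inputs, axis=3, fused=True,
                                        training=True)

for var in tf.global_variables():
  # Expect float32 for every batch-norm parameter despite fp16 inputs.
  print(var.name, var.dtype.base_dtype)
```

Raising only the parameters to fp32, rather than the whole layer, follows the usual mixed-precision pattern: the fused kernel still consumes fp16 activations for speed, while the small per-channel vectors stay in fp32 so the running mean and variance do not degrade.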