Diffstat (limited to 'tensorflow/python/ops/nn_fused_batchnorm_test.py')
-rw-r--r--  tensorflow/python/ops/nn_fused_batchnorm_test.py | 119
1 file changed, 61 insertions(+), 58 deletions(-)
diff --git a/tensorflow/python/ops/nn_fused_batchnorm_test.py b/tensorflow/python/ops/nn_fused_batchnorm_test.py
index 1fcd0384da..e72d34d1f7 100644
--- a/tensorflow/python/ops/nn_fused_batchnorm_test.py
+++ b/tensorflow/python/ops/nn_fused_batchnorm_test.py
@@ -335,22 +335,22 @@ class BatchNormalizationTest(test.TestCase):
 
   def testInference(self):
     x_shape = [1, 1, 6, 1]
-    if test.is_gpu_available(cuda_only=True):
-      for dtype in [np.float16, np.float32]:
+    for dtype in [np.float16, np.float32]:
+      if test.is_gpu_available(cuda_only=True):
         self._test_inference(
             x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC')
         self._test_inference(
             x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW')
-    self._test_inference(
-        x_shape, np.float32, [1], np.float32, use_gpu=False, data_format='NHWC')
+      self._test_inference(
+          x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC')
 
     x_shape = [1, 1, 6, 2]
     if test.is_gpu_available(cuda_only=True):
       for dtype in [np.float16, np.float32]:
         self._test_inference(
             x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC')
-    self._test_inference(
-        x_shape, np.float32, [2], np.float32, use_gpu=False, data_format='NHWC')
+        self._test_inference(
+            x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC')
 
     x_shape = [1, 2, 1, 6]
     if test.is_gpu_available(cuda_only=True):
@@ -359,33 +359,33 @@ class BatchNormalizationTest(test.TestCase):
             x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW')
 
     x_shape = [27, 131, 127, 6]
-    if test.is_gpu_available(cuda_only=True):
-      for dtype in [np.float16, np.float32]:
+    for dtype in [np.float16, np.float32]:
+      if test.is_gpu_available(cuda_only=True):
         self._test_inference(
             x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW')
         self._test_inference(
             x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
-    self._test_inference(
-        x_shape, np.float32, [6], np.float32, use_gpu=False, data_format='NHWC')
+      self._test_inference(
+          x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC')
 
   def testTraining(self):
     x_shape = [1, 1, 6, 1]
-    if test.is_gpu_available(cuda_only=True):
-      for dtype in [np.float16, np.float32]:
+    for dtype in [np.float16, np.float32]:
+      if test.is_gpu_available(cuda_only=True):
         self._test_training(
             x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC')
         self._test_training(
             x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW')
-    self._test_training(
-        x_shape, np.float32, [1], np.float32, use_gpu=False, data_format='NHWC')
+      self._test_training(
+          x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC')
 
     x_shape = [1, 1, 6, 2]
-    if test.is_gpu_available(cuda_only=True):
-      for dtype in [np.float16, np.float32]:
+    for dtype in [np.float16, np.float32]:
+      if test.is_gpu_available(cuda_only=True):
         self._test_training(
             x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC')
-    self._test_training(
-        x_shape, np.float32, [2], np.float32, use_gpu=False, data_format='NHWC')
+      self._test_training(
+          x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC')
 
     x_shape = [1, 2, 1, 6]
     if test.is_gpu_available(cuda_only=True):
@@ -394,20 +394,20 @@ class BatchNormalizationTest(test.TestCase):
             x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW')
 
     x_shape = [27, 131, 127, 6]
-    if test.is_gpu_available(cuda_only=True):
-      for dtype in [np.float16, np.float32]:
+    for dtype in [np.float16, np.float32]:
+      if test.is_gpu_available(cuda_only=True):
         self._test_training(
             x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW')
         self._test_training(
             x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
-    self._test_training(
-        x_shape, np.float32, [6], np.float32, use_gpu=False, data_format='NHWC')
+      self._test_training(
+          x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC')
 
   def testBatchNormGrad(self):
     for is_training in [True, False]:
       x_shape = [1, 1, 6, 1]
-      if test.is_gpu_available(cuda_only=True):
-        for dtype in [np.float16, np.float32]:
+      for dtype in [np.float16, np.float32]:
+        if test.is_gpu_available(cuda_only=True):
           self._test_gradient(
               x_shape,
               dtype, [1],
@@ -422,17 +422,17 @@ class BatchNormalizationTest(test.TestCase):
               use_gpu=True,
               data_format='NCHW',
               is_training=is_training)
-      self._test_gradient(
-          x_shape,
-          np.float32, [1],
-          np.float32,
-          use_gpu=False,
-          data_format='NHWC',
-          is_training=is_training)
+        self._test_gradient(
+            x_shape,
+            dtype, [1],
+            np.float32,
+            use_gpu=False,
+            data_format='NHWC',
+            is_training=is_training)
 
       x_shape = [1, 1, 6, 2]
-      if test.is_gpu_available(cuda_only=True):
-        for dtype in [np.float16, np.float32]:
+      for dtype in [np.float16, np.float32]:
+        if test.is_gpu_available(cuda_only=True):
           self._test_gradient(
               x_shape,
               dtype, [2],
@@ -440,13 +440,13 @@ class BatchNormalizationTest(test.TestCase):
               use_gpu=True,
               data_format='NHWC',
               is_training=is_training)
-      self._test_gradient(
-          x_shape,
-          np.float32, [2],
-          np.float32,
-          use_gpu=False,
-          data_format='NHWC',
-          is_training=is_training)
+        self._test_gradient(
+            x_shape,
+            dtype, [2],
+            np.float32,
+            use_gpu=False,
+            data_format='NHWC',
+            is_training=is_training)
 
       x_shape = [1, 2, 1, 6]
       if test.is_gpu_available(cuda_only=True):
@@ -460,8 +460,8 @@ class BatchNormalizationTest(test.TestCase):
               is_training=is_training)
 
       x_shape = [5, 7, 11, 4]
-      if test.is_gpu_available(cuda_only=True):
-        for dtype in [np.float16, np.float32]:
+      for dtype in [np.float16, np.float32]:
+        if test.is_gpu_available(cuda_only=True):
           self._test_gradient(
               x_shape,
               dtype, [7],
@@ -476,13 +476,13 @@ class BatchNormalizationTest(test.TestCase):
               use_gpu=True,
               data_format='NHWC',
               is_training=is_training)
-      self._test_gradient(
-          x_shape,
-          np.float32, [4],
-          np.float32,
-          use_gpu=False,
-          data_format='NHWC',
-          is_training=is_training)
+        self._test_gradient(
+            x_shape,
+            dtype, [4],
+            np.float32,
+            use_gpu=False,
+            data_format='NHWC',
+            is_training=is_training)
 
   def _testBatchNormGradGrad(self, config):
     shape = config['shape']
@@ -506,15 +506,14 @@ class BatchNormalizationTest(test.TestCase):
             data_format='NCHW',
             is_training=is_training,
             err_tolerance=err_tolerance)
-      if dtype != np.float16:
-        self._test_grad_grad(
-            shape,
-            np.float32, [shape[3]],
-            np.float32,
-            use_gpu=False,
-            data_format='NHWC',
-            is_training=is_training,
-            err_tolerance=err_tolerance)
+      self._test_grad_grad(
+          shape,
+          dtype, [shape[3]],
+          np.float32,
+          use_gpu=False,
+          data_format='NHWC',
+          is_training=is_training,
+          err_tolerance=err_tolerance)
 
   def testBatchNormGradGrad(self):
     configs = [{
@@ -526,6 +525,10 @@ class BatchNormalizationTest(test.TestCase):
         'err_tolerance': 1e-3,
         'dtype': np.float32,
     }, {
+        'shape': [2, 3, 4, 5],
+        'err_tolerance': 1e-2,
+        'dtype': np.float16,
+    }, {
         'shape': [2, 3, 2, 2],
         'err_tolerance': 2e-3,
         'dtype': np.float16,
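
Every hunk above applies the same restructuring: the `for dtype in [np.float16, np.float32]` loop is hoisted out from under the `test.is_gpu_available(cuda_only=True)` check (or the CPU call is moved inside the loop), so the `use_gpu=False` path now exercises float16 as well as float32 instead of being pinned to `np.float32`. The `if dtype != np.float16:` guard in `_testBatchNormGradGrad` is dropped for the same reason, and a new float16 config (shape `[2, 3, 4, 5]`, `err_tolerance` 1e-2) is added to cover the looser-precision case. The sketch below shows the before/after control flow in isolation; `gpu_available` and `run_case` are hypothetical stand-ins for `test.is_gpu_available(cuda_only=True)` and the `_test_*` helpers, not part of the actual test file.

import numpy as np


def gpu_available():
  # Hypothetical stand-in for test.is_gpu_available(cuda_only=True).
  return False


def run_case(dtype, use_gpu, data_format):
  # Hypothetical stand-in for the _test_inference/_test_training helpers.
  print('dtype=%s use_gpu=%s data_format=%s'
        % (dtype.__name__, use_gpu, data_format))


def before():
  # Old shape: the dtype loop lives under the GPU check, so on a CPU-only
  # machine float16 never runs; the CPU call is pinned to np.float32.
  if gpu_available():
    for dtype in [np.float16, np.float32]:
      run_case(dtype, use_gpu=True, data_format='NHWC')
  run_case(np.float32, use_gpu=False, data_format='NHWC')


def after():
  # New shape: the dtype loop is hoisted above the GPU check and the CPU
  # call moves inside it, so float16 and float32 both run on CPU whether
  # or not a GPU is present.
  for dtype in [np.float16, np.float32]:
    if gpu_available():
      run_case(dtype, use_gpu=True, data_format='NHWC')
    run_case(dtype, use_gpu=False, data_format='NHWC')


if __name__ == '__main__':
  after()  # on a CPU-only machine this prints two cases: float16 and float32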