aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/python/ops/nn_fused_batchnorm_test.py
diff options
context:
space:
mode:
Diffstat (limited to 'tensorflow/python/ops/nn_fused_batchnorm_test.py')
-rw-r--r--tensorflow/python/ops/nn_fused_batchnorm_test.py24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/tensorflow/python/ops/nn_fused_batchnorm_test.py b/tensorflow/python/ops/nn_fused_batchnorm_test.py
index 35b4ec3134..e366c76770 100644
--- a/tensorflow/python/ops/nn_fused_batchnorm_test.py
+++ b/tensorflow/python/ops/nn_fused_batchnorm_test.py
@@ -139,66 +139,66 @@ class BatchNormalizationTest(tf.test.TestCase):
def testInference(self):
  """Exercise fused batch-norm inference over shapes, devices and layouts.

  CPU (use_gpu=False) cases always run; GPU cases run only when a CUDA
  device is present.  NOTE(review): cuda_only=True presumably because the
  fused NCHW kernel is CUDA-specific — confirm against the kernel registry.
  """
  have_cuda = tf.test.is_gpu_available(cuda_only=True)

  # Trivial channel dimension.
  x_shape = [1, 1, 6, 1]
  if have_cuda:
    self._test_inference(x_shape, [1], use_gpu=True, data_format='NHWC')
    self._test_inference(x_shape, [1], use_gpu=True, data_format='NCHW')
  self._test_inference(x_shape, [1], use_gpu=False, data_format='NHWC')

  # Two channels, NHWC only.
  x_shape = [1, 1, 6, 2]
  if have_cuda:
    self._test_inference(x_shape, [2], use_gpu=True, data_format='NHWC')
  self._test_inference(x_shape, [2], use_gpu=False, data_format='NHWC')

  # NCHW layout (GPU-only path).
  x_shape = [1, 2, 1, 6]
  if have_cuda:
    self._test_inference(x_shape, [2], use_gpu=True, data_format='NCHW')

  # Larger, non-trivial spatial dimensions.
  x_shape = [27, 131, 127, 6]
  if have_cuda:
    self._test_inference(x_shape, [131], use_gpu=True, data_format='NCHW')
    self._test_inference(x_shape, [6], use_gpu=True, data_format='NHWC')
  self._test_inference(x_shape, [6], use_gpu=False, data_format='NHWC')
def testTraining(self):
  """Exercise fused batch-norm training mode over shapes, devices, layouts.

  Mirrors testInference: CPU cases run unconditionally; GPU cases are
  gated on a CUDA device (cuda_only=True — NOTE(review): presumably the
  fused NCHW kernel is CUDA-only; confirm).
  """
  have_cuda = tf.test.is_gpu_available(cuda_only=True)

  # Trivial channel dimension.
  x_shape = [1, 1, 6, 1]
  if have_cuda:
    self._test_training(x_shape, [1], use_gpu=True, data_format='NHWC')
    self._test_training(x_shape, [1], use_gpu=True, data_format='NCHW')
  self._test_training(x_shape, [1], use_gpu=False, data_format='NHWC')

  # Two channels, NHWC only.
  x_shape = [1, 1, 6, 2]
  if have_cuda:
    self._test_training(x_shape, [2], use_gpu=True, data_format='NHWC')
  self._test_training(x_shape, [2], use_gpu=False, data_format='NHWC')

  # NCHW layout (GPU-only path).
  x_shape = [1, 2, 1, 6]
  if have_cuda:
    self._test_training(x_shape, [2], use_gpu=True, data_format='NCHW')

  # Larger, non-trivial spatial dimensions.
  x_shape = [27, 131, 127, 6]
  if have_cuda:
    self._test_training(x_shape, [131], use_gpu=True, data_format='NCHW')
    self._test_training(x_shape, [6], use_gpu=True, data_format='NHWC')
  self._test_training(x_shape, [6], use_gpu=False, data_format='NHWC')
def testBatchNormGrad(self):
  """Check fused batch-norm gradients over shapes, devices and layouts.

  Same gating scheme as the forward-pass tests: CPU cases always run;
  GPU cases require a CUDA device (cuda_only=True — NOTE(review):
  presumably because the fused NCHW kernel is CUDA-only; confirm).
  """
  have_cuda = tf.test.is_gpu_available(cuda_only=True)

  # Trivial channel dimension.
  x_shape = [1, 1, 6, 1]
  if have_cuda:
    self._test_gradient(x_shape, [1], use_gpu=True, data_format='NHWC')
    self._test_gradient(x_shape, [1], use_gpu=True, data_format='NCHW')
  self._test_gradient(x_shape, [1], use_gpu=False, data_format='NHWC')

  # Two channels, NHWC only.
  x_shape = [1, 1, 6, 2]
  if have_cuda:
    self._test_gradient(x_shape, [2], use_gpu=True, data_format='NHWC')
  self._test_gradient(x_shape, [2], use_gpu=False, data_format='NHWC')

  # NCHW layout (GPU-only path).
  x_shape = [1, 2, 1, 6]
  if have_cuda:
    self._test_gradient(x_shape, [2], use_gpu=True, data_format='NCHW')

  # Modest multi-dimensional shape (gradient checks are expensive, so
  # this is smaller than the forward-pass stress shape).
  x_shape = [7, 9, 13, 6]
  if have_cuda:
    self._test_gradient(x_shape, [9], use_gpu=True, data_format='NCHW')
    self._test_gradient(x_shape, [6], use_gpu=True, data_format='NHWC')
  self._test_gradient(x_shape, [6], use_gpu=False, data_format='NHWC')