path: root/tensorflow/python
author     Benoit Steiner <benoit.steiner.goog@gmail.com>    2016-05-31 09:51:31 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>   2016-05-31 11:04:18 -0700
commit     07792c757457e8ecf62c8d37038e458484eab78a (patch)
tree       fbfd0c191977e400c9b5accb4641a951f7dfd272 /tensorflow/python
parent     3ce1d20108cfc190553bac98c17a53b23457f8bd (diff)
Added support for convolutions of 16-bit floats on CPU
Change: 123659102
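For context, a minimal sketch of what this change enables (illustrative shapes and values, not taken from the patch): a float16 convolution evaluated on CPU.

import numpy as np
import tensorflow as tf

# Pin the ops to CPU; before this change a half-precision conv
# would only run on a suitable GPU.
with tf.Session() as sess, tf.device("/cpu:0"):
  x = tf.constant(np.ones([1, 4, 4, 1]), dtype=tf.float16)  # NHWC input
  f = tf.constant(np.ones([2, 2, 1, 1]), dtype=tf.float16)  # HWIO filter
  conv = tf.nn.conv2d(x, f, strides=[1, 1, 1, 1], padding="VALID")
  # Each 2x2 window of ones sums to 4.0.
  print(sess.run(conv))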
Diffstat (limited to 'tensorflow/python')
-rw-r--r--  tensorflow/python/kernel_tests/conv_ops_test.py  27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py
index de3b8d0691..ec723ae045 100644
--- a/tensorflow/python/kernel_tests/conv_ops_test.py
+++ b/tensorflow/python/kernel_tests/conv_ops_test.py
@@ -163,13 +163,13 @@ def GetTestConfigs():
class Conv2DTest(tf.test.TestCase):
- def _DtypesToTest(self):
- if test_util.CudaSupportsHalfMatMulAndConv():
+ def _DtypesToTest(self, use_gpu):
+ if use_gpu and not test_util.CudaSupportsHalfMatMulAndConv():
+ return [tf.float32]
+ else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [tf.float32, tf.float16]
- else:
- return [tf.float32]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, strides,
padding, data_format, dtype, use_gpu):
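The helper now keys its dtype list off the target device; a standalone sketch of that logic (names here are illustrative, not the test's own):

def dtypes_to_test(use_gpu, cuda_supports_half):
  # On GPU, float16 is only tested when the CUDA build supports
  # half-precision matmul/conv; on CPU, both dtypes are always
  # exercised after this patch.
  if use_gpu and not cuda_supports_half:
    return ["float32"]
  # float32 first: its gradients serve as the fp16 reference.
  return ["float32", "float16"]

assert dtypes_to_test(use_gpu=False, cuda_supports_half=False) == ["float32", "float16"]
assert dtypes_to_test(use_gpu=True, cuda_supports_half=False) == ["float32"]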
@@ -255,10 +255,9 @@ class Conv2DTest(tf.test.TestCase):
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides,
padding, expected):
- for dtype in self._DtypesToTest():
- print(dtype)
- tensors = []
- for (data_format, use_gpu) in GetTestConfigs():
+ tensors = []
+ for (data_format, use_gpu) in GetTestConfigs():
+ for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(tensor_in_sizes,
filter_in_sizes,
strides,
@@ -274,7 +273,10 @@ class Conv2DTest(tf.test.TestCase):
value = values[i]
print("expected = ", expected)
print("actual = ", value)
- self.assertAllCloseAccordingToType(expected, np.ravel(value))
+ tol = 1e-5
+ if value.dtype == np.float16:
+ tol = 1e-3
+ self.assertAllClose(expected, np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
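The looser bound reflects float16's roughly three decimal digits of precision, versus seven or so for float32. A sketch of the dtype-dependent comparison (hypothetical helper mirroring the change above):

import numpy as np

def assert_close(expected, actual):
  # Half precision warrants ~1e-3; float32 comfortably meets 1e-5.
  tol = 1e-3 if actual.dtype == np.float16 else 1e-5
  np.testing.assert_allclose(np.ravel(actual), expected, rtol=tol, atol=tol)

assert_close([4.0], np.array([[4.002]], dtype=np.float16))  # passes within 1e-3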
def testConv2D1x1Filter(self):
@@ -360,7 +362,7 @@ class Conv2DTest(tf.test.TestCase):
# strides=[4, 4], padding="SAME",
# expected=[72, 112, 392, 432])
- # Testing for backprops
+ # Testing for backprops
def _RunAndVerifyBackpropInput(self, input_sizes, filter_sizes, output_sizes,
strides, padding, expected, data_format,
use_gpu):
@@ -506,7 +508,7 @@ class Conv2DTest(tf.test.TestCase):
# numbers from 1.
x0 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
- for dtype in self._DtypesToTest():
+ for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
t0 = tf.constant(x0, shape=input_sizes, dtype=dtype)
t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
@@ -635,7 +637,7 @@ class Conv2DTest(tf.test.TestCase):
# a problem in the way Eigen's Conv2DGrad works for double.
# So we disable the DOUBLE path. We should re-enable this
# when double support returns for CPU and/or GPU.
- for dtype in self._DtypesToTest():
+ for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.test_session(use_gpu=use_gpu):
input_tensor = tf.constant(input_data, shape=input_shape,
dtype=dtype, name="input")
@@ -935,7 +937,6 @@ class Conv2DTest(tf.test.TestCase):
strides=[1, 1, 1, 1], padding="SAME")
-
# This is only a very simple test. More comprehensive tests live in
# //learning/dist_belief/experimental/brain_compatibility/conv_nn_test.py
# where we compare the numeric results of the depthwise conv op with the