author    Bixia Zheng <bixia@google.com>  2018-04-13 15:15:44 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-04-13 15:18:31 -0700
commit    1298c3240aa9f36b79ea7f0e772edfff87381771
tree      2a38699c7d6bdf08d8db36db5a2da3e98366b9ef
parent    aedc409605be54f9c7cb67f7b49bdc123d65a8fb
[TF] Enable half precision XLA compiler tests for the gpu backend.
Modify some tests to allow a larger error tolerance for half precision, and enable the half precision SpaceToBatchNDTest for the cpu backend.

PiperOrigin-RevId: 192831909
-rw-r--r--  tensorflow/compiler/tests/build_defs.bzl           |  2
-rw-r--r--  tensorflow/compiler/tests/ftrl_test.py             | 14
-rw-r--r--  tensorflow/compiler/tests/image_ops_test.py        |  3
-rw-r--r--  tensorflow/compiler/tests/spacetobatch_op_test.py  | 23
-rw-r--r--  tensorflow/python/framework/test_util.py           |  4
5 files changed, 32 insertions, 14 deletions
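
Most of the hunks below loosen per-dtype tolerances rather than change behavior. As background, here is a minimal, simplified sketch of how a dtype-dependent assertion can select half_rtol/half_atol for float16 inputs; this is an illustration only, not the actual test_util.py implementation:

    import numpy as np

    def assert_all_close_according_to_type(a, b, rtol=1e-6, atol=1e-6,
                                            float_rtol=1e-6, float_atol=1e-6,
                                            half_rtol=1e-3, half_atol=1e-3):
      # Simplified sketch: widen the tolerance when the inputs are low precision.
      a, b = np.asarray(a), np.asarray(b)
      if a.dtype == np.float32:
        rtol, atol = max(rtol, float_rtol), max(atol, float_atol)
      elif a.dtype == np.float16:
        rtol, atol = max(rtol, half_rtol), max(atol, half_atol)
      np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)

    # float16 comparisons get the looser half_rtol used in the hunks below.
    assert_all_close_according_to_type(
        np.float16([-2.6026, -4.297]), np.float16([-2.603, -4.2969]),
        float_rtol=1e-5, half_rtol=1e-2)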
diff --git a/tensorflow/compiler/tests/build_defs.bzl b/tensorflow/compiler/tests/build_defs.bzl
index 45b6a6eb86..7b114d4f85 100644
--- a/tensorflow/compiler/tests/build_defs.bzl
+++ b/tensorflow/compiler/tests/build_defs.bzl
@@ -56,7 +56,7 @@ def tf_xla_py_test(name, srcs=[], deps=[], tags=[], data=[], main=None,
elif backend == "gpu":
backend_args += [
"--test_device=XLA_GPU",
- "--types=DT_FLOAT,DT_DOUBLE,DT_INT32,DT_INT64,DT_BOOL,DT_COMPLEX64,DT_BFLOAT16"
+ "--types=DT_HALF,DT_FLOAT,DT_DOUBLE,DT_INT32,DT_INT64,DT_BOOL,DT_COMPLEX64,DT_BFLOAT16"
]
backend_tags += ["requires-gpu-sm35"]
elif backend in plugins:
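
The --types flag consumed by the XLA test harness is a comma-separated list of TensorFlow DataType names; adding DT_HALF makes float16 one of the dtypes the GPU tests iterate over. A minimal sketch, using a hypothetical parse_types helper rather than the real xla_test.py code, of how such a flag can be mapped to TF dtypes:

    from tensorflow.core.framework import types_pb2
    from tensorflow.python.framework import dtypes

    def parse_types(types_flag):
      # Hypothetical helper: turn "DT_HALF,DT_FLOAT,..." into a set of TF dtypes.
      return {dtypes.as_dtype(types_pb2.DataType.Value(name))
              for name in types_flag.split(",")}

    gpu_types = parse_types("DT_HALF,DT_FLOAT,DT_DOUBLE,DT_INT32,DT_INT64,"
                            "DT_BOOL,DT_COMPLEX64,DT_BFLOAT16")
    assert dtypes.float16 in gpu_types  # DT_HALF is float16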
diff --git a/tensorflow/compiler/tests/ftrl_test.py b/tensorflow/compiler/tests/ftrl_test.py
index f9db4cf201..8e6407dffd 100644
--- a/tensorflow/compiler/tests/ftrl_test.py
+++ b/tensorflow/compiler/tests/ftrl_test.py
@@ -134,9 +134,15 @@ class FtrlOptimizerTest(XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
- np.array([-2.60260963, -4.29698515]), var0.eval(), float_rtol=1e-5)
+ np.array([-2.60260963, -4.29698515]),
+ var0.eval(),
+ float_rtol=1e-5,
+ half_rtol=1e-2)
self.assertAllCloseAccordingToType(
- np.array([-0.28432083, -0.56694895]), var1.eval(), float_rtol=1e-5)
+ np.array([-0.28432083, -0.56694895]),
+ var1.eval(),
+ float_rtol=1e-5,
+ half_rtol=1e-2)
def testFtrlwithoutRegularization2(self):
for dtype in self.float_types:
@@ -272,8 +278,8 @@ class FtrlOptimizerTest(XLATestCase):
with self.test_session(), self.test_scope():
val2, val3 = self.equivAdagradTest_AdagradPart(steps, dtype)
- self.assertAllCloseAccordingToType(val0, val2, rtol=1e-4)
- self.assertAllCloseAccordingToType(val1, val3, rtol=1e-4)
+ self.assertAllCloseAccordingToType(val0, val2, rtol=1e-4, half_rtol=1e-2)
+ self.assertAllCloseAccordingToType(val1, val3, rtol=1e-4, half_rtol=1e-2)
def testEquivGradientDescentwithoutRegularization(self):
steps = 5
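
The half_rtol=1e-2 used above reflects float16's limited precision: the format keeps roughly three significant decimal digits, so error accumulated over several optimizer steps can easily exceed the 1e-5 tolerance used for float32. A quick numpy illustration (the constant is one of the expected values from the hunk; the rest is generic):

    import numpy as np

    print(np.finfo(np.float16).eps)  # ~0.000977, i.e. about 3 decimal digits

    # Even a single rounding of an expected value costs a few 1e-4 of relative
    # error, so accumulated error approaching 1e-2 over several steps is plausible.
    x = np.float16(-2.60260963)
    print(abs(float(x) + 2.60260963) / 2.60260963)  # on the order of 1e-4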
diff --git a/tensorflow/compiler/tests/image_ops_test.py b/tensorflow/compiler/tests/image_ops_test.py
index 3bc41b7cfd..12791ef8ac 100644
--- a/tensorflow/compiler/tests/image_ops_test.py
+++ b/tensorflow/compiler/tests/image_ops_test.py
@@ -65,7 +65,8 @@ class RGBToHSVTest(XLATestCase):
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
- self.assertAllCloseAccordingToType(batch2, inp, bfloat16_atol=0.03)
+ self.assertAllCloseAccordingToType(
+ batch2, inp, bfloat16_atol=0.03, half_rtol=0.02)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
diff --git a/tensorflow/compiler/tests/spacetobatch_op_test.py b/tensorflow/compiler/tests/spacetobatch_op_test.py
index ef47187477..f37c34156f 100644
--- a/tensorflow/compiler/tests/spacetobatch_op_test.py
+++ b/tensorflow/compiler/tests/spacetobatch_op_test.py
@@ -163,17 +163,26 @@ class SpaceToBatchNDTest(XLATestCase):
# error.
if dtype == dtypes.bfloat16.as_numpy_dtype:
continue
- # TODO(b/77694432): Half test failed on CPU, last ran on 04-06-2018.
- if dtype == np.float16 and self.device == "XLA_CPU":
- continue
+ if dtype == np.float16:
+ actual_inputs = np.array(inputs).astype(dtype)
+ actual_paddings = np.array(paddings).astype(dtype)
+ expected_outputs = np.array(outputs).astype(dtype)
+ else:
+ actual_inputs = inputs
+ actual_paddings = paddings
+ expected_outputs = outputs
placeholder = array_ops.placeholder(dtype)
# outputs = space_to_batch(inputs)
- x_tf = array_ops.space_to_batch_nd(placeholder, block_shape, paddings)
- self.assertAllEqual(sess.run(x_tf, {placeholder: inputs}), outputs)
+ x_tf = array_ops.space_to_batch_nd(placeholder, block_shape,
+ actual_paddings)
+ self.assertAllEqual(
+ sess.run(x_tf, {placeholder: actual_inputs}), expected_outputs)
# inputs = batch_to_space(outputs)
placeholder = array_ops.placeholder(dtype)
- x_tf = array_ops.batch_to_space_nd(placeholder, block_shape, paddings)
- self.assertAllEqual(sess.run(x_tf, {placeholder: outputs}), inputs)
+ x_tf = array_ops.batch_to_space_nd(placeholder, block_shape,
+ actual_paddings)
+ self.assertAllEqual(
+ sess.run(x_tf, {placeholder: expected_outputs}), actual_inputs)
def _testDirect(self, input_shape, block_shape, paddings):
inputs = np.arange(np.prod(input_shape), dtype=np.float32)
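
The casts introduced above matter because assertAllEqual is an exact, elementwise comparison: once the op runs in float16, the expected values must be rounded through the same dtype or bitwise equality can fail. A small illustration with hypothetical values (not taken from the test):

    import numpy as np

    outputs = [[0.1, 1.3], [2.7, 3.9]]               # hypothetical expected values
    result = np.array(outputs).astype(np.float16)    # stand-in for the op's output

    # Exact comparison against float64 literals fails because 0.1 etc. round.
    print(np.array_equal(result, np.array(outputs)))                     # False
    # Casting the expectations to the same dtype restores exact equality.
    print(np.array_equal(result, np.array(outputs).astype(np.float16)))  # True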
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index eea27d76c6..70e70abc06 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -1380,7 +1380,9 @@ class TensorFlowTestCase(googletest.TestCase):
" %s" % (a.shape, b.shape, msg))
same = (a == b)
- if a.dtype == np.float32 or a.dtype == np.float64:
+ if (a.dtype in [
+ np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
+ ]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
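
With the widened dtype check, NaN now compares equal to NaN for float16 and bfloat16 arrays as well, not just float32/float64. A plain-numpy sketch of the NaN-aware equality (bfloat16 is omitted here because it requires TensorFlow's numpy dtype):

    import numpy as np

    def nan_aware_equal(a, b):
      # Mirror the check above: NaN positions count as equal if both are NaN.
      same = (a == b)
      if a.dtype in (np.float16, np.float32, np.float64):
        same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
      return bool(np.all(same))

    a = np.array([1.0, np.nan], dtype=np.float16)
    b = np.array([1.0, np.nan], dtype=np.float16)
    print(nan_aware_equal(a, b))    # True; plain (a == b).all() would be False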