author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-08-30 21:12:31 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-08-30 21:16:21 -0700
commit    348367a88e02a9e1750738b11a8e0784b2eb6b65 (patch)
tree      29ee99e854185fc57cdaf1a0787f49dd0ac0b6f4 /tensorflow/compiler/tests
parent    e1f0d2ef46690157045d0509a4117de5ed0c6187 (diff)
Rollback of a rollback with fixes included. See below for details of the original change.
This CL fixes two additional CI tests that broke due to the changed bfloat16 behavior.
==================================================
Automated rollback of commit 37b2b0eb613b6c3c66b96374851cfd95050346a0

PiperOrigin-RevId: 211031073
Diffstat (limited to 'tensorflow/compiler/tests')
-rw-r--r--  tensorflow/compiler/tests/adam_test.py        7
-rw-r--r--  tensorflow/compiler/tests/ftrl_test.py        7
-rw-r--r--  tensorflow/compiler/tests/reduce_ops_test.py  2
3 files changed, 10 insertions(+), 6 deletions(-)
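The common thread in all three diffs below is bfloat16's reduced precision: it keeps float32's 8-bit exponent but has only a 7-bit mantissa, so roughly three significant decimal digits survive a round trip. A minimal sketch of that rounding error, using the same internal dtypes module the diffs import (the sample value is arbitrary):

import numpy as np
from tensorflow.python.framework import dtypes

bf16 = dtypes.bfloat16.as_numpy_dtype  # numpy scalar type for bfloat16

# Worst-case relative rounding error for bfloat16 is about 2**-8 (~3.9e-3).
x = np.float32(1.2345678)
print(np.float32(bf16(x)))               # 1.234375 -- only ~3 digits survive
print(abs(np.float32(bf16(x)) - x) / x)  # ~1.6e-4 here; up to ~3.9e-3 in general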
diff --git a/tensorflow/compiler/tests/adam_test.py b/tensorflow/compiler/tests/adam_test.py
index 0d2e4d0296..df0f21471a 100644
--- a/tensorflow/compiler/tests/adam_test.py
+++ b/tensorflow/compiler/tests/adam_test.py
@@ -22,6 +22,7 @@ import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
@@ -53,7 +54,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
- if dtype == np.float16:
+ if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
@@ -95,7 +96,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
def testTensorLearningRate(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
- if dtype == np.float16:
+ if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
@@ -137,7 +138,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
def testSharing(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
- if dtype == np.float16:
+ if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
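The new bfloat16 skip above mirrors the existing float16 skip: with only ~3 significant digits, a learning-rate-sized Adam step can vanish entirely once the variable is stored in bfloat16, so the test's precision requirements cannot be met. A rough sketch of that effect (the 1e-3 step size is a hypothetical stand-in for a typical learning rate, not a value from this test):

import numpy as np
from tensorflow.python.framework import dtypes

bf16 = dtypes.bfloat16.as_numpy_dtype
# bfloat16 values just below 1.0 are spaced 2**-8 (~3.9e-3) apart, so an
# update of ~1e-3 applied to a weight near 1.0 rounds away completely.
w = np.float32(1.0)
step = np.float32(0.001)           # hypothetical learning-rate-sized update
print(np.float32(bf16(w - step)))  # 1.0 -- the update is lost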
diff --git a/tensorflow/compiler/tests/ftrl_test.py b/tensorflow/compiler/tests/ftrl_test.py
index b1deb7f6a7..f1b87a5ffb 100644
--- a/tensorflow/compiler/tests/ftrl_test.py
+++ b/tensorflow/compiler/tests/ftrl_test.py
@@ -29,7 +29,6 @@ from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
-
class FtrlOptimizerTest(xla_test.XLATestCase):
def initVariableAndGradient(self, dtype):
@@ -196,7 +195,11 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
- np.array([-7.66718769, -10.91273689]), var0.eval(), rtol=1e-4)
+ np.array([-7.66718769, -10.91273689]),
+ var0.eval(),
+ rtol=1e-4,
+ bfloat16_rtol=1e-1,
+ bfloat16_atol=1e-1)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), var1.eval(), rtol=1e-4)
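The added bfloat16_rtol/bfloat16_atol arguments only take effect when the compared values are bfloat16; the float32/float64 comparisons keep rtol=1e-4. The looser tolerance is unavoidable because the reference values themselves are not representable in bfloat16, as this minimal sketch (reusing the expected values from the hunk above) shows:

import numpy as np
from tensorflow.python.framework import dtypes

bf16 = dtypes.bfloat16.as_numpy_dtype
expected = np.array([-7.66718769, -10.91273689], dtype=np.float32)
# Even a perfectly computed result, once stored as bfloat16, shifts by ~1e-2:
rounded = expected.astype(bf16).astype(np.float32)
print(np.abs(rounded - expected))  # roughly [0.011, 0.025], far beyond rtol=1e-4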
diff --git a/tensorflow/compiler/tests/reduce_ops_test.py b/tensorflow/compiler/tests/reduce_ops_test.py
index 5ae5b1bc1d..132c59c32c 100644
--- a/tensorflow/compiler/tests/reduce_ops_test.py
+++ b/tensorflow/compiler/tests/reduce_ops_test.py
@@ -219,7 +219,7 @@ class ReduceOpPrecisionTest(xla_test.XLATestCase):
bf16_max = np.float32(dtypes.bfloat16.max)
f32_max = dtypes.float32.max
- value = min(bf16_max, f32_max - bf16_max)
+ value = min(bf16_max, f32_max - bf16_max) / 2
self._testReduceSum(
dtypes.bfloat16.as_numpy_dtype(value), dtypes.bfloat16.as_numpy_dtype,
itertools.permutations([bf16_max, value, bf16_max * (-1.0)], 3))
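One plausible reading of the halved value: the test inputs are materialized as bfloat16, where the old value rounds up to 2**120, and a partial sum of bf16_max + 2**120 equals 2**128, which overflows even float32. A numeric sketch of that failure mode (not taken from the test itself, which exercises the reduction through XLA):

import numpy as np
from tensorflow.python.framework import dtypes

bf16 = dtypes.bfloat16.as_numpy_dtype
bf16_max = np.float32(dtypes.bfloat16.max)     # ~3.3895e38
f32_max = dtypes.float32.max                   # ~3.4028e38

old_value = min(bf16_max, f32_max - bf16_max)  # ~1.329e36, the previous test value
# As a bfloat16 input, old_value rounds up to 2**120, and the partial sum
# bf16_max + 2**120 == 2**128 overflows float32 to inf:
print(np.float32(bf16(old_value)) + bf16_max)  # inf

# The halved value keeps the same partial sum representable:
new_value = old_value / 2
print(np.float32(bf16(new_value)) + bf16_max)  # ~3.396e38, still finite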