author:    Peter Hawkins <phawkins@google.com>  2017-07-19 10:47:49 -0700
committer: TensorFlower Gardener <gardener@tensorflow.org>  2017-07-19 10:51:52 -0700
commit:    fbd7059017d24b8c185ef7e6ad04857bc0616731 (patch)
tree:      767565399c83e634414ae50c2a791dc258232da8 /tensorflow/compiler
parent:    c6ec24290259c09099de22eca7ed5351a9fde811 (diff)
[TF:XLA] Relax numeric tolerances of some tests.
PiperOrigin-RevId: 162505321
Diffstat (limited to 'tensorflow/compiler')
-rw-r--r--  tensorflow/compiler/tests/adagrad_test.py      18
-rw-r--r--  tensorflow/compiler/tests/binary_ops_test.py   11
-rw-r--r--  tensorflow/compiler/tests/ftrl_test.py         30
-rw-r--r--  tensorflow/compiler/tests/pooling_ops_test.py   2
4 files changed, 37 insertions, 24 deletions
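
Note: the change relaxes per-test tolerances only; the comparison logic itself is untouched. For context, the assertAllClose family of assertions follows numpy-style closeness semantics: an element passes when |actual - expected| <= atol + rtol * |expected|. A minimal sketch of that check in plain numpy (not the TensorFlow test-framework code):

import numpy as np

def is_all_close(actual, expected, rtol=1e-6, atol=1e-6):
  # An element passes when its absolute error fits the combined
  # absolute-plus-relative bound, as in np.testing.assert_allclose.
  return bool(np.all(np.abs(actual - expected) <= atol + rtol * np.abs(expected)))

# Relaxing rtol from 1e-6 to 1e-5 lets a value near 2.7156791687
# deviate by roughly 2.7e-5 instead of 2.7e-6 and still pass.
print(is_all_close(np.float32(2.715679), 2.715679168701172, rtol=1e-5))  # True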
diff --git a/tensorflow/compiler/tests/adagrad_test.py b/tensorflow/compiler/tests/adagrad_test.py
index a5c5885b42..9a93b32164 100644
--- a/tensorflow/compiler/tests/adagrad_test.py
+++ b/tensorflow/compiler/tests/adagrad_test.py
@@ -49,9 +49,11 @@ class AdagradOptimizerTest(XLATestCase):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
- np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
+ np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval(),
+ float_rtol=1e-5)
self.assertAllCloseAccordingToType(
- np.array([2.715679168701172, 3.715679168701172]), var1.eval())
+ np.array([2.715679168701172, 3.715679168701172]), var1.eval(),
+ float_rtol=1e-5)
def testTensorLearningRate(self):
for dtype in self.float_types:
@@ -73,9 +75,11 @@ class AdagradOptimizerTest(XLATestCase):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
- np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
+ np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval(),
+ float_rtol=1e-5)
self.assertAllCloseAccordingToType(
- np.array([2.715679168701172, 3.715679168701172]), var1.eval())
+ np.array([2.715679168701172, 3.715679168701172]), var1.eval(),
+ float_rtol=1e-5)
def testSharing(self):
for dtype in self.float_types:
@@ -107,9 +111,11 @@ class AdagradOptimizerTest(XLATestCase):
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
- np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
+ np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval(),
+ float_rtol=1e-5)
self.assertAllCloseAccordingToType(
- np.array([2.715679168701172, 3.715679168701172]), var1.eval())
+ np.array([2.715679168701172, 3.715679168701172]), var1.eval(),
+ float_rtol=1e-5)
if __name__ == "__main__":
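
Note that float_rtol loosens only the float32 comparisons: assertAllCloseAccordingToType selects its tolerance from the dtype under test, so float16 runs of the same tests keep their own, looser defaults. A simplified sketch of that dispatch (a hypothetical rtol_for_dtype helper, not the actual framework code):

import numpy as np

def rtol_for_dtype(dtype, rtol=1e-6, float_rtol=1e-6, half_rtol=1e-3):
  # Hypothetical condensation of the per-dtype dispatch: the dtype-specific
  # tolerance applies, but never tighter than the generic rtol.
  if dtype == np.float16:
    return max(rtol, half_rtol)
  if dtype == np.float32:
    return max(rtol, float_rtol)
  return rtol

print(rtol_for_dtype(np.float32, float_rtol=1e-5))  # 1e-05 after this change
print(rtol_for_dtype(np.float64, float_rtol=1e-5))  # 1e-06, unchanged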
diff --git a/tensorflow/compiler/tests/binary_ops_test.py b/tensorflow/compiler/tests/binary_ops_test.py
index 7221a0a3c7..9eaede7f40 100644
--- a/tensorflow/compiler/tests/binary_ops_test.py
+++ b/tensorflow/compiler/tests/binary_ops_test.py
@@ -555,17 +555,18 @@ class BinaryOpsTest(XLATestCase):
self._testBinary(
math_ops.matmul,
np.array(
- [[[[1000, 100], [10, 1]], [[2000, 200], [20, 2]]],
- [[[3000, 300], [30, 3]], [[4000, 400], [40, 4]]]],
+ [[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],
+ [[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]],
dtype=np.float32),
np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[11, 22], [33, 44]],
[[55, 66], [77, 88]]]],
dtype=np.float32),
expected=np.array(
- [[[[1300, 2400], [13, 24]], [[11400, 13600], [114, 136]]],
- [[[42900, 79200], [429, 792]], [[250800, 299200], [2508, 2992]]]],
+ [[[[46, 66], [13, 24]], [[11.75, 14], [114, 136]]],
+ [[[198, 286], [429, 792]], [[118.25, 137.5], [2508, 2992]]]],
dtype=np.float32))
+
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((2, 2, 0)),
@@ -581,7 +582,7 @@ class BinaryOpsTest(XLATestCase):
# Regression test for b/31472796.
if hasattr(np, "matmul"):
- x = np.arange(0, 3 * 5 * 16 * 7, dtype=np.float32).reshape((3, 5, 16, 7))
+ x = np.arange(0, 3 * 5 * 2 * 7, dtype=np.float32).reshape((3, 5, 2, 7))
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
x, x,
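
The matmul operands were rescaled from values in the thousands to small mixed-magnitude values, so the float32 products no longer lose the low-order digits that made the old expectations brittle. The new expected arrays follow directly from the inputs and can be reproduced with numpy:

import numpy as np

lhs = np.array([[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],
                [[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]], dtype=np.float32)
rhs = np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                [[[11, 22], [33, 44]], [[55, 66], [77, 88]]]], dtype=np.float32)

# Batched matrix product over the last two axes; e.g. the first block is
# [[7, 13], [10, 1]] @ [[1, 2], [3, 4]] = [[46, 66], [13, 24]].
print(np.matmul(lhs, rhs))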
diff --git a/tensorflow/compiler/tests/ftrl_test.py b/tensorflow/compiler/tests/ftrl_test.py
index 7918276849..7e3871312c 100644
--- a/tensorflow/compiler/tests/ftrl_test.py
+++ b/tensorflow/compiler/tests/ftrl_test.py
@@ -134,9 +134,9 @@ class FtrlOptimizerTest(XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
- np.array([-2.60260963, -4.29698515]), var0.eval())
+ np.array([-2.60260963, -4.29698515]), var0.eval(), float_rtol=1e-5)
self.assertAllCloseAccordingToType(
- np.array([-0.28432083, -0.56694895]), var1.eval())
+ np.array([-0.28432083, -0.56694895]), var1.eval(), float_rtol=1e-5)
def testFtrlwithoutRegularization2(self):
for dtype in self.float_types:
@@ -189,8 +189,10 @@ class FtrlOptimizerTest(XLATestCase):
ftrl_update.run()
# Validate updated params
- self.assertAllClose(np.array([-7.66718769, -10.91273689]), var0.eval())
- self.assertAllClose(np.array([-0.93460727, -1.86147261]), var1.eval())
+ self.assertAllClose(np.array([-7.66718769, -10.91273689]), var0.eval(),
+ rtol=1e-4)
+ self.assertAllClose(np.array([-0.93460727, -1.86147261]), var1.eval(),
+ rtol=1e-4)
def testFtrlWithL1_L2(self):
for dtype in self.float_types:
@@ -215,8 +217,10 @@ class FtrlOptimizerTest(XLATestCase):
ftrl_update.run()
# Validate updated params
- self.assertAllClose(np.array([-0.24059935, -0.46829352]), var0.eval())
- self.assertAllClose(np.array([-0.02406147, -0.04830509]), var1.eval())
+ self.assertAllClose(np.array([-0.24059935, -0.46829352]), var0.eval(),
+ rtol=1e-5)
+ self.assertAllClose(np.array([-0.02406147, -0.04830509]), var1.eval(),
+ rtol=1e-5)
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.
@@ -248,8 +252,10 @@ class FtrlOptimizerTest(XLATestCase):
ftrl_update.run()
# Validate updated params
- self.assertAllClose(np.array([-0.21931979, -0.40642974]), var0.eval())
- self.assertAllClose(np.array([-0.0282721, -0.07188385]), var1.eval())
+ self.assertAllClose(np.array([-0.21931979, -0.40642974]), var0.eval(),
+ rtol=1e-4)
+ self.assertAllClose(np.array([-0.0282721, -0.07188385]), var1.eval(),
+ rtol=1e-4)
# When variables are initialized with Zero, FTRL-Proximal has two properties:
# 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical
@@ -266,8 +272,8 @@ class FtrlOptimizerTest(XLATestCase):
with self.test_session(), self.test_scope():
val2, val3 = self.equivAdagradTest_AdagradPart(steps, dtype)
- self.assertAllClose(val0, val2)
- self.assertAllClose(val1, val3)
+ self.assertAllClose(val0, val2, rtol=1e-4)
+ self.assertAllClose(val1, val3, rtol=1e-4)
def testEquivGradientDescentwithoutRegularization(self):
steps = 5
@@ -278,8 +284,8 @@ class FtrlOptimizerTest(XLATestCase):
val2, val3 = self.equivGradientDescentTest_GradientDescentPart(
steps, dtype)
- self.assertAllClose(val0, val2)
- self.assertAllClose(val1, val3)
+ self.assertAllClose(val0, val2, rtol=1e-5)
+ self.assertAllClose(val1, val3, rtol=1e-5)
if __name__ == "__main__":
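
The relaxed FTRL bounds remain tight in absolute terms: under rtol=1e-4 a reference value of -10.91273689 may deviate by about 1.1e-3, which comfortably absorbs float32 accumulation noise from a few optimizer steps. The margin is easy to check:

import numpy as np

expected = np.array([-7.66718769, -10.91273689])
# Largest absolute deviation each element may show under rtol=1e-4
# (ignoring the small atol term in the allclose criterion).
print(1e-4 * np.abs(expected))  # [0.00076672 0.00109127]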
diff --git a/tensorflow/compiler/tests/pooling_ops_test.py b/tensorflow/compiler/tests/pooling_ops_test.py
index 52290e6354..7c19a99c4e 100644
--- a/tensorflow/compiler/tests/pooling_ops_test.py
+++ b/tensorflow/compiler/tests/pooling_ops_test.py
@@ -376,7 +376,7 @@ class PoolGradTest(XLATestCase):
self.assertAllClose(
expected_input_gradient_vals.flatten(),
actual.flatten(),
- rtol=1e-5,
+ rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual, inputs)
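
The pooling-gradient change relaxes only the relative term; atol stays at 1e-6, so near-zero gradient entries are still compared tightly while large ones get the looser relative bound:

import numpy as np

def bound(expected, rtol=1e-4, atol=1e-6):
  # Combined tolerance from the pooling gradient check: atol dominates
  # for near-zero gradients, rtol for large ones.
  return atol + rtol * np.abs(expected)

print(bound(0.0))    # 1e-06: zero entries stay tightly checked
print(bound(100.0))  # 0.010001: large entries use the relaxed bound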