diff options
author | 2018-06-19 09:08:00 +0100 | |
---|---|---|
committer | 2018-06-19 09:08:00 +0100 | |
commit | df1134694f47b9a924df1bf48f673392ee1d3e15 (patch) | |
tree | 3f8800cf38364389e1c0f18880eaf8f8b488a3f2 /tensorflow/compiler/tests/adam_test.py | |
parent | fb704f74c4f697a1d85cb18e24a48f1a86d0825b (diff) |
Add comments explaining why the test is skipped for f16
Diffstat (limited to 'tensorflow/compiler/tests/adam_test.py')
-rw-r--r-- | tensorflow/compiler/tests/adam_test.py | 3 |
1 file changed, 3 insertions, 0 deletions
diff --git a/tensorflow/compiler/tests/adam_test.py b/tensorflow/compiler/tests/adam_test.py index ee56a38f94..b904e6676b 100644 --- a/tensorflow/compiler/tests/adam_test.py +++ b/tensorflow/compiler/tests/adam_test.py @@ -52,6 +52,7 @@ class AdamOptimizerTest(XLATestCase): def testBasic(self): for dtype in self.float_types: + # TODO: test fails for float16 due to excessive precision requirements. if dtype == np.float16: continue with self.test_session(), self.test_scope(): @@ -93,6 +94,7 @@ class AdamOptimizerTest(XLATestCase): def testTensorLearningRate(self): for dtype in self.float_types: + # TODO: test fails for float16 due to excessive precision requirements. if dtype == np.float16: continue with self.test_session(), self.test_scope(): @@ -134,6 +136,7 @@ class AdamOptimizerTest(XLATestCase): def testSharing(self): for dtype in self.float_types: + # TODO: test fails for float16 due to excessive precision requirements. if dtype == np.float16: continue with self.test_session(), self.test_scope(): |