author     Benoit Steiner <benoit.steiner.goog@gmail.com>    2016-05-26 18:49:57 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>   2016-05-26 20:04:16 -0700
commit     bb0190f6c26bf11f601102dfe2166a68a7833020 (patch)
tree       a39b07e64f9f8c37d2691cfa91f125ceddcbed60 /tensorflow/python/kernel_tests/softmax_op_test.py
parent     3c402f5fc156ec7d9d3b37a9dc91005852b62083 (diff)
Added support for fp16 to the softmax operation
Change: 123382264
Diffstat (limited to 'tensorflow/python/kernel_tests/softmax_op_test.py')
-rw-r--r--   tensorflow/python/kernel_tests/softmax_op_test.py   10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/tensorflow/python/kernel_tests/softmax_op_test.py b/tensorflow/python/kernel_tests/softmax_op_test.py
index b7fb75941b..4c5a68b87f 100644
--- a/tensorflow/python/kernel_tests/softmax_op_test.py
+++ b/tensorflow/python/kernel_tests/softmax_op_test.py
@@ -50,13 +50,13 @@ class SoftmaxTest(tf.test.TestCase):
       else:
         tf_softmax = tf.nn.softmax(np_features, name=name)
       out = tf_softmax.eval()
-    self.assertAllClose(np_softmax, out)
+    self.assertAllCloseAccordingToType(np_softmax, out)
     self.assertShapeEqual(np_softmax, tf_softmax)
     if not log:
       # Bonus check: the softmaxes should add to one in each
       # batch element.
-      self.assertAllClose(np.ones(out.shape[0]),
-                          np.sum(out, axis=1))
+      self.assertAllCloseAccordingToType(np.ones(out.shape[0]),
+                                         np.sum(out, axis=1))
 
   def _testAll(self, features):
     self._testSoftmax(features, use_gpu=False)
@@ -118,6 +118,10 @@ class SoftmaxTest(tf.test.TestCase):
     self._testAll(
         np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
 
+  def testHalf(self):
+    self._testAll(
+        np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
+
   def testDouble(self):
     self._testSoftmax(
         np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
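
For context (not part of the commit), the switch from `assertAllClose` to `assertAllCloseAccordingToType` is what lets the new `testHalf` case pass: half-precision results cannot match a higher-precision reference at a fixed tight tolerance, so the comparison tolerance has to scale with the dtype. The sketch below is a minimal NumPy-only illustration of that point; the `softmax` helper and the 1e-3 tolerance are illustrative assumptions, not code from the TensorFlow test.

```python
import numpy as np

def softmax(x, dtype):
    """Row-wise softmax computed entirely in the given dtype (illustrative helper)."""
    x = x.astype(dtype)
    # Subtract the row max before exponentiating for numerical stability.
    e = np.exp(x - np.max(x, axis=1, keepdims=True))
    return e / np.sum(e, axis=1, keepdims=True)

features = np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]])
ref = softmax(features, np.float32)    # higher-precision reference
half = softmax(features, np.float16)   # same computation in half precision

# float16 only carries about 3 decimal digits, so a fixed tolerance like 1e-6
# would spuriously fail; a tolerance on the order of 1e-3 suits fp16.
np.testing.assert_allclose(ref, half.astype(np.float32), rtol=1e-3, atol=1e-3)
print("fp16 softmax matches the float32 reference within fp16 tolerance")
```

In TensorFlow's test base class, `assertAllCloseAccordingToType` applies looser tolerances for lower-precision inputs (on the order of 1e-3 for float16), which is the behavior the hand-picked tolerance above imitates.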