Diffstat (limited to 'tensorflow/python/kernel_tests')
 tensorflow/python/kernel_tests/in_topk_op_test.py | 14 ++++++++
 tensorflow/python/kernel_tests/relu_op_test.py    | 91 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 105 insertions(+), 0 deletions(-)
diff --git a/tensorflow/python/kernel_tests/in_topk_op_test.py b/tensorflow/python/kernel_tests/in_topk_op_test.py
index 4a4686d1b9..37e9a8e3d1 100644
--- a/tensorflow/python/kernel_tests/in_topk_op_test.py
+++ b/tensorflow/python/kernel_tests/in_topk_op_test.py
@@ -20,7 +20,9 @@ from __future__ import print_function
import numpy as np
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
+from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
@@ -69,6 +71,18 @@ class InTopKTest(test.TestCase):
"target.*out of range"):
nn_ops.in_top_k(predictions, target, 2).eval()
+  def testTensorK(self):
+    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
+    target = [0, 2]
+    k = constant_op.constant(3)
+    np_ans = np.array([False, True])
+    with self.test_session():
+      # TODO(yongtang): Switch this test to nn_ops.in_top_k once
+      # nn_ops.in_top_k points to _in_top_kv2.
+      precision = gen_nn_ops._in_top_kv2(predictions, target, k)
+      out = precision.eval()
+      self.assertAllClose(np_ans, out)
+      self.assertShapeEqual(np_ans, precision)
if __name__ == "__main__":
  test.main()
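
Note on the op under test: in_top_k(predictions, targets, k) returns, per batch row, whether targets[i] is among the k highest-scoring classes; the V2 kernel exercised above (gen_nn_ops._in_top_kv2) differs from the original only in taking k as a tensor instead of a Python int attribute. The following is a minimal NumPy sketch of the semantics the test asserts; it is illustrative only, not TensorFlow's kernel, and it skips the real op's handling of non-finite prediction values.

    import numpy as np

    def in_top_k_ref(predictions, targets, k):
      # A target counts as "in the top k" when fewer than k classes score
      # strictly higher than it, so ties at the k-th position still count.
      predictions = np.asarray(predictions)
      targets = np.asarray(targets)
      target_scores = predictions[np.arange(len(targets)), targets]
      num_higher = np.sum(predictions > target_scores[:, None], axis=1)
      return num_higher < k

    # Mirrors testTensorK: class 0 has only the 4th best score in row 0,
    # while class 2 has the 2nd best score in row 1.
    print(in_top_k_ref([[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]],
                       [0, 2], 3))   # -> [False  True]
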
diff --git a/tensorflow/python/kernel_tests/relu_op_test.py b/tensorflow/python/kernel_tests/relu_op_test.py
index 63ac743843..8cd1f52d80 100644
--- a/tensorflow/python/kernel_tests/relu_op_test.py
+++ b/tensorflow/python/kernel_tests/relu_op_test.py
@@ -320,6 +320,97 @@ class EluTest(test.TestCase):
    self.assertLess(err, 1e-6)
+class SeluTest(test.TestCase):
+
+  def _npSelu(self, np_features):
+    scale = 1.0507009873554804934193349852946
+    scale_alpha = 1.7580993408473768599402175208123
+    return np.where(np_features < 0, scale_alpha * (np.exp(np_features) - 1),
+                    scale * np_features)
+
+  def testNpSelu(self):
+    self.assertAllClose(
+        np.array([[-1.0433095, 0.73549069, -0.6917582, 0.3152103, -0.16730527],
+                  [0.1050701, -0.45566732, 0.5253505, -0.88505305, 0.9456309]]),
+        self._npSelu(
+            np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]
+                     ])))
+
+  def _testSelu(self, np_features, use_gpu=False):
+    np_selu = self._npSelu(np_features)
+    with self.test_session(use_gpu=use_gpu):
+      selu = nn_ops.selu(np_features)
+      tf_selu = selu.eval()
+    self.assertAllClose(np_selu, tf_selu)
+    self.assertShapeEqual(np_selu, selu)
+
+  def testNumbers(self):
+    for t in [np.float16, np.float32, np.float64]:
+      self._testSelu(
+          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
+          use_gpu=False)
+      self._testSelu(
+          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
+          use_gpu=True)
+
+  def testGradientFloat32(self):
+    with self.test_session():
+      x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
+      x = constant_op.constant(x_val, name="x")
+      y = nn_ops.selu(x, name="selu")
+      x_init = np.asarray(x_val, dtype=np.float32, order="F")
+      err = gradient_checker.compute_gradient_error(
+          x, [2, 5], y, [2, 5], x_init_value=x_init)
+    print("selu (float32) gradient err = ", err)
+    self.assertLess(err, 1e-4)
+
+  def testGradientFloat64(self):
+    with self.test_session():
+      x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
+      x = constant_op.constant(x_val, dtype=dtypes.float64, name="x")
+      y = nn_ops.selu(x, name="selu")
+      x_init = np.asarray(x_val, dtype=np.float64, order="F")
+      err = gradient_checker.compute_gradient_error(
+          x, [2, 5], y, [2, 5], x_init_value=x_init)
+    print("selu (float64) gradient err = ", err)
+    self.assertLess(err, 1e-6)
+
+  def testGradGradFloat32(self):
+    with self.test_session():
+      x = constant_op.constant(
+          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+          shape=[2, 5],
+          name="x")
+      y = nn_ops.selu(x, name="selu")
+      z = gradients_impl.gradients(y, x)
+      x_init = np.asarray(
+          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
+          dtype=np.float32,
+          order="F")
+      err = gradient_checker.compute_gradient_error(
+          x, [2, 5], z[0], [2, 5], x_init_value=x_init)
+    print("selu (float32) gradient of gradient err = ", err)
+    self.assertLess(err, 1e-4)
+
+  def testGradGradFloat64(self):
+    with self.test_session():
+      x = constant_op.constant(
+          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+          shape=[2, 5],
+          dtype=dtypes.float64,
+          name="x")
+      y = nn_ops.selu(x, name="selu")
+      z = gradients_impl.gradients(y, x)
+      x_init = np.asarray(
+          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
+          dtype=np.float64,
+          order="F")
+      err = gradient_checker.compute_gradient_error(
+          x, [2, 5], z[0], [2, 5], x_init_value=x_init)
+    print("selu (float64) gradient of gradient err = ", err)
+    self.assertLess(err, 1e-6)
+
+
class CreluTest(test.TestCase):
  def testCreluShape(self):
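
For context on the constants hard-coded in _npSelu above: SELU (Klambauer et al., "Self-Normalizing Neural Networks", 2017) is defined as selu(x) = scale * x for x > 0 and scale * alpha * (exp(x) - 1) otherwise, with alpha ≈ 1.6732632423543773 and scale ≈ 1.0507009873554805; the test folds the product scale * alpha into the single constant scale_alpha. The following minimal NumPy sketch, illustrative only and not part of the patch, checks that relation and the derivative that the gradient-checker tests verify numerically.

    import numpy as np

    SCALE = 1.0507009873554804934193349852946   # lambda in the SELU paper
    ALPHA = 1.6732632423543772848170429916717   # alpha in the SELU paper

    def selu(x):
      # scale * x for x > 0, scale * alpha * (exp(x) - 1) otherwise.
      return np.where(x > 0, SCALE * x, SCALE * ALPHA * (np.exp(x) - 1.0))

    def selu_grad(x):
      # d/dx selu(x): scale for x > 0, scale * alpha * exp(x) otherwise;
      # this is what testGradientFloat32/64 check against finite differences.
      return np.where(x > 0, SCALE, SCALE * ALPHA * np.exp(x))

    # scale_alpha in _npSelu is just the folded product scale * alpha.
    assert np.isclose(SCALE * ALPHA, 1.7580993408473768599402175208123)

    x = np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7, 0.9]])
    print(selu(x))       # matches the expected values in testNpSelu
    print(selu_grad(x))
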