diff options
author | Manjunath Kudlur <keveman@gmail.com> | 2015-11-06 16:27:58 -0800 |
---|---|---|
committer | Manjunath Kudlur <keveman@gmail.com> | 2015-11-06 16:27:58 -0800 |
commit | f41959ccb2d9d4c722fe8fc3351401d53bcf4900 (patch) | |
tree | ef0ca22cb2a5ac4bdec9d080d8e0788a53ed496d /tensorflow/python/kernel_tests/transpose_op_test.py |
TensorFlow: Initial commit of TensorFlow library.
TensorFlow is an open source software library for numerical computation
using data flow graphs.
Base CL: 107276108
Diffstat (limited to 'tensorflow/python/kernel_tests/transpose_op_test.py')
-rw-r--r-- | tensorflow/python/kernel_tests/transpose_op_test.py | 176 |
1 file changed, 176 insertions, 0 deletions
"""Functional tests for Transpose op."""
import itertools
import tensorflow.python.platform

import numpy as np
import tensorflow as tf

from tensorflow.python.kernel_tests.gradient_checker import ComputeGradient


class TransposeTest(tf.test.TestCase):
  """Tests tf.transpose against NumPy, on CPU and GPU, including gradients."""

  def _np_transpose(self, x, perm):
    """Returns the NumPy reference transpose of a copy of `x`."""
    return np.copy(x).transpose(perm)

  def _compareWithDevice(self, x, p, use_gpu):
    """Checks tf.transpose(x, p) against NumPy on one device.

    For float32/float64 inputs, also compares the theoretical and numeric
    Jacobians of the op (float32 uses looser tolerances than float64).

    Args:
      x: numpy ndarray input.
      p: permutation (list or int32 ndarray).
      use_gpu: whether to place the op on the GPU.

    Returns:
      A (tf_result, theoretical_jacobian) pair; the jacobian is None for
      non-float dtypes.
    """
    np_ans = self._np_transpose(x, p)
    with self.test_session(use_gpu=use_gpu):
      inx = tf.convert_to_tensor(x)
      y = tf.transpose(inx, p)
      tf_ans = y.eval()
      self.assertAllEqual(np_ans, tf_ans)
      self.assertShapeEqual(np_ans, y)

      jacob_t = None
      # Gradient check (inside the session so ComputeGradient can evaluate).
      xs = list(np.shape(x))
      ys = list(np.shape(tf_ans))
      if x.dtype == np.float32:
        jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
      elif x.dtype == np.float64:
        jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
        self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)

    return tf_ans, jacob_t

  def _compareCpu(self, x, p):
    """CPU comparison; see _compareWithDevice."""
    return self._compareWithDevice(x, p, use_gpu=False)

  def _compareGpu(self, x, p):
    """GPU comparison; see _compareWithDevice."""
    return self._compareWithDevice(x, p, use_gpu=True)

  def _samplePerms(self, x, count=2):
    """Returns `count` random permutations of range(ndim(x)), as int32."""
    n = np.ndim(x)
    # Generate all permutations of [0, 1, ... n-1] in random order, then
    # keep the first `count` of them.
    all_perm = np.random.permutation(
        [p for p in itertools.permutations(range(n))]).astype(np.int32)
    return all_perm[0:count]

  def _compare(self, x, use_gpu=False):
    """Runs the CPU (and optionally GPU) check on sampled permutations."""
    for p in self._samplePerms(x):
      self._compareCpu(x, p)
      if use_gpu:
        self._compareGpu(x, p)

  def _compare_cpu_gpu(self, x):
    """Checks that CPU and GPU agree on results and gradients."""
    for p in self._samplePerms(x):
      tf_a_cpu, tf_g_cpu = self._compareCpu(x, p)
      tf_a_gpu, tf_g_gpu = self._compareGpu(x, p)
      assert tf_g_cpu is not None
      assert tf_g_gpu is not None
      if x.dtype == np.float32:
        self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-3, 1e-3)
        self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-3, 1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-6, 1e-6)
        self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-6, 1e-6)

  def _testCpu(self, x):
    self._compare(x, use_gpu=False)

  def test1D(self):
    self._compareCpu(np.arange(0., 2), [0])

  def testNop(self):
    self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])

  def testSimple(self):
    self._compareCpu(np.arange(0, 8).reshape([2, 4]).astype(np.float32),
                     np.array([1, 0]).astype(np.int32))

  def testFloat(self):
    self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float32))
    self._compare_cpu_gpu(
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32))

  def testDouble(self):
    self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float64))
    self._compare_cpu_gpu(
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float64))

  def testSComplex(self):
    # Builtin complex(1, 2) — np.complex was just an alias for it and the
    # alias was removed in NumPy 1.24.
    self._testCpu(complex(1, 2) * np.arange(0, 21).reshape(
        [3, 7]).astype(np.complex64))
    self._testCpu(complex(1, 2) * np.arange(0, 210).reshape(
        [2, 3, 5, 7]).astype(np.complex64))

  def testInt8(self):
    self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
    self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))

  def testInt16(self):
    self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
    self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))

  def testInt32(self):
    self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
    self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))

  def testInt64(self):
    self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
    self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))

  def testTranspose2DAuto(self):
    """With no perm argument, tf.transpose reverses the axes."""
    x_np = [[1, 2, 3], [4, 5, 6]]
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        x_tf = tf.transpose(x_np).eval()
        self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])

  def testTransposeShapes(self):
    """Static shape inference for transposed placeholders."""
    self.assertEqual([], tf.transpose(
        tf.placeholder(tf.int32, shape=[])).get_shape().dims)
    self.assertEqual([100], tf.transpose(
        tf.placeholder(tf.int32, shape=[100])).get_shape().dims)
    self.assertEqual([37, 100], tf.transpose(
        tf.placeholder(tf.int32, shape=[100, 37])).get_shape().dims)
    self.assertEqual([100, 37], tf.transpose(
        tf.placeholder(tf.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
    self.assertEqual([15, 37, 100], tf.transpose(
        tf.placeholder(tf.int32, shape=[100, 37, 15])).get_shape().dims)
    self.assertEqual([15, 100, 37], tf.transpose(
        tf.placeholder(tf.int32,
                       shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
    # Unknown input shape yields an unknown output shape.
    self.assertEqual(tf.TensorShape(None), tf.transpose(
        tf.placeholder(tf.int32)).get_shape())

  def _testError(self, x, p, err):
    """Asserts that evaluating tf.transpose(x, p) raises an op error
    matching `err`."""
    with self.test_session():
      with self.assertRaisesOpError(err):
        tf.transpose(x, p).eval()

  def testError(self):
    # A non-vector perm is rejected at graph-construction time.
    with self.assertRaises(ValueError):
      tf.transpose(np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
    # Ranks above the supported maximum fail at run time.
    self._testError(np.arange(0., 2 ** 10).reshape([2] * 10),
                    range(10),
                    "not implemented")
    # An out-of-range axis index is rejected at graph-construction time.
    with self.assertRaises(IndexError):
      tf.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
    # A perm that repeats an axis (and so omits another) fails at run time.
    self._testError(np.arange(0., 30).reshape([2, 3, 5]),
                    [0, 1, 1],
                    "2 is missing")

if __name__ == "__main__":
  tf.test.main()