author     Gunhan Gulsoy <gunan@google.com>  2016-09-09 22:34:51 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>  2016-09-09 23:47:03 -0700
commit     60efa7994acb2c38cc855f2915ceff6e9304779e (patch)
tree       e3d245f8dce8839aef08f027294c1fc924d09a05 /tensorflow/python/kernel_tests/batch_matmul_op_test.py
parent     9ae831a6ce29dc7983a69484ef44307468778119 (diff)
Clean up another batch of tensorflow tests that are using use_gpu.
Change: 132750089
Diffstat (limited to 'tensorflow/python/kernel_tests/batch_matmul_op_test.py')
-rw-r--r--  tensorflow/python/kernel_tests/batch_matmul_op_test.py  111
1 file changed, 51 insertions(+), 60 deletions(-)
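The pattern applied throughout this change: instead of running every test body twice through a Python-level loop over use_gpu, each helper now opens a single session with use_gpu=True, so TensorFlow places ops on the GPU when one is available and falls back to the CPU otherwise. A minimal sketch of the before/after shape of the helpers (class and assertion details are illustrative, not copied from the file):

    import tensorflow as tf

    class PlacementSketch(tf.test.TestCase):

      def _compare_old(self, z, expected, use_gpu=False):
        # Old pattern: callers loop `for use_gpu in [False, True]`,
        # so every test body runs once per device setting.
        with self.test_session(use_gpu=use_gpu):
          self.assertAllClose(expected, z.eval())

      def _compare_new(self, z, expected):
        # New pattern: one session that is allowed to use the GPU; on
        # CPU-only machines, or for ops without a GPU kernel, the op
        # simply runs on the CPU.
        with self.test_session(use_gpu=True):
          self.assertAllClose(expected, z.eval())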
diff --git a/tensorflow/python/kernel_tests/batch_matmul_op_test.py b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
index 198f3db236..0b9338887a 100644
--- a/tensorflow/python/kernel_tests/batch_matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
@@ -71,8 +71,8 @@ class BatchMatmulOpTest(tf.test.TestCase):
# Compares tf.batch_matmul(x, y, adj_x, adj_y) against the NumPy
# reference _npBatchMatmul(x, y, adj_x, adj_y).
- def _compare(self, x, y, adj_x, adj_y, use_gpu=False):
- with self.test_session(use_gpu=use_gpu):
+ def _compare(self, x, y, adj_x, adj_y):
+ with self.test_session(use_gpu=True):
z0 = tf.batch_matmul(x, y, adj_x=adj_x, adj_y=adj_y)
z0_val = z0.eval()
z1 = self._npBatchMatmul(x, y, adj_x, adj_y)
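_npBatchMatmul, the NumPy reference that z0 is checked against, is defined earlier in the file and not shown in this diff. A plausible sketch of such a reference, given only to make the comparison concrete (an assumption, not the file's actual helper):

    import numpy as np

    def np_batch_matmul(x, y, adj_x, adj_y):
      # adj_x/adj_y request the conjugate transpose of the two innermost
      # dimensions, mirroring tf.batch_matmul's arguments.
      if adj_x:
        x = np.conj(np.swapaxes(x, -1, -2))
      if adj_y:
        y = np.conj(np.swapaxes(y, -1, -2))
      # Multiply each innermost matrix pair, preserving leading batch dims.
      return np.einsum('...ij,...jk->...ik', x, y)

For real-valued inputs np.conj is a no-op, so one helper of this shape covers both the float and complex cases exercised below.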
@@ -88,37 +88,34 @@ class BatchMatmulOpTest(tf.test.TestCase):
return np.array(vals, dtype=np.float32)
def testSimpleFloat(self):
- for use_gpu in [False, True]:
- self._compare(self._randFloat([7, 2, 3]), self._randFloat([7, 3, 5]),
- False, False, use_gpu)
- self._compare(self._randFloat([7, 2, 3]), self._randFloat([7, 5, 3]),
- False, True, use_gpu)
- self._compare(self._randFloat([7, 3, 2]), self._randFloat([7, 3, 5]),
- True, False, use_gpu)
- self._compare(self._randFloat([7, 3, 2]), self._randFloat([7, 5, 3]),
- True, True, use_gpu)
+ self._compare(self._randFloat([7, 2, 3]), self._randFloat([7, 3, 5]),
+ False, False)
+ self._compare(self._randFloat([7, 2, 3]), self._randFloat([7, 5, 3]),
+ False, True)
+ self._compare(self._randFloat([7, 3, 2]), self._randFloat([7, 3, 5]),
+ True, False)
+ self._compare(self._randFloat([7, 3, 2]), self._randFloat([7, 5, 3]),
+ True, True)
def testLargeFloat(self):
- for use_gpu in [False, True]:
- self._compare(self._randFloat([10, 64, 75]),
- self._randFloat([10, 75, 30]), False, False, use_gpu)
- self._compare(self._randFloat([10, 75, 64]),
- self._randFloat([10, 75, 30]), True, False, use_gpu)
- self._compare(self._randFloat([10, 64, 75]),
- self._randFloat([10, 30, 75]), False, True, use_gpu)
- self._compare(self._randFloat([10, 75, 64]),
- self._randFloat([10, 30, 75]), True, True, use_gpu)
+ self._compare(self._randFloat([10, 64, 75]),
+ self._randFloat([10, 75, 30]), False, False)
+ self._compare(self._randFloat([10, 75, 64]),
+ self._randFloat([10, 75, 30]), True, False)
+ self._compare(self._randFloat([10, 64, 75]),
+ self._randFloat([10, 30, 75]), False, True)
+ self._compare(self._randFloat([10, 75, 64]),
+ self._randFloat([10, 30, 75]), True, True)
def testHighNDims(self):
- for use_gpu in [False, True]:
- self._compare(self._randFloat([5, 7, 2, 3]),
- self._randFloat([5, 7, 3, 5]), False, False, use_gpu)
- self._compare(self._randFloat([5, 7, 3, 2]),
- self._randFloat([5, 7, 3, 5]), True, False, use_gpu)
- self._compare(self._randFloat([5, 7, 2, 3]),
- self._randFloat([5, 7, 5, 3]), False, True, use_gpu)
- self._compare(self._randFloat([5, 7, 3, 2]),
- self._randFloat([5, 7, 5, 3]), True, True, use_gpu)
+ self._compare(self._randFloat([5, 7, 2, 3]),
+ self._randFloat([5, 7, 3, 5]), False, False)
+ self._compare(self._randFloat([5, 7, 3, 2]),
+ self._randFloat([5, 7, 3, 5]), True, False)
+ self._compare(self._randFloat([5, 7, 2, 3]),
+ self._randFloat([5, 7, 5, 3]), False, True)
+ self._compare(self._randFloat([5, 7, 3, 2]),
+ self._randFloat([5, 7, 5, 3]), True, True)
# Returns a random complex numpy array of "shape".
def _randComplex(self, shape):
@@ -128,27 +125,24 @@ class BatchMatmulOpTest(tf.test.TestCase):
return np.array(vals, dtype=np.complex64).reshape(shape)
def testSimpleComplex(self):
- for use_gpu in [False, True]:
- self._compare(self._randComplex([7, 2, 3]),
- self._randComplex([7, 3, 5]), False, False, use_gpu)
- self._compare(self._randComplex([7, 2, 3]),
- self._randComplex([7, 5, 3]), False, True, use_gpu)
- self._compare(self._randComplex([7, 3, 2]),
- self._randComplex([7, 3, 5]), True, False, use_gpu)
- self._compare(self._randComplex([7, 3, 2]),
- self._randComplex([7, 5, 3]), True, True, use_gpu)
+ self._compare(self._randComplex([7, 2, 3]),
+ self._randComplex([7, 3, 5]), False, False)
+ self._compare(self._randComplex([7, 2, 3]),
+ self._randComplex([7, 5, 3]), False, True)
+ self._compare(self._randComplex([7, 3, 2]),
+ self._randComplex([7, 3, 5]), True, False)
+ self._compare(self._randComplex([7, 3, 2]),
+ self._randComplex([7, 5, 3]), True, True)
def testLargeComplex(self):
- for use_gpu in [False, True]:
- self._compare(self._randComplex([10, 64, 75]),
- self._randComplex([10, 75, 30]), False,
- False, use_gpu)
- self._compare(self._randComplex([10, 64, 75]),
- self._randComplex([10, 30, 75]), False, True, use_gpu)
- self._compare(self._randComplex([10, 75, 64]),
- self._randComplex([10, 75, 30]), True, False, use_gpu)
- self._compare(self._randComplex([10, 75, 64]),
- self._randComplex([10, 30, 75]), True, True, use_gpu)
+ self._compare(self._randComplex([10, 64, 75]),
+ self._randComplex([10, 75, 30]), False, False)
+ self._compare(self._randComplex([10, 64, 75]),
+ self._randComplex([10, 30, 75]), False, True)
+ self._compare(self._randComplex([10, 75, 64]),
+ self._randComplex([10, 75, 30]), True, False)
+ self._compare(self._randComplex([10, 75, 64]),
+ self._randComplex([10, 30, 75]), True, True)
def testEmpty(self):
self._compare(np.zeros([0, 3, 2]).astype(np.float32),
@@ -165,10 +159,10 @@ class BatchMatmulGradientTest(tf.test.TestCase):
# loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the
# gradient checker.
- def _checkGrad(self, x, y, adj_x, adj_y, use_gpu):
+ def _checkGrad(self, x, y, adj_x, adj_y):
assert 3 == x.ndim
assert 3 == y.ndim
- with self.test_session(use_gpu=use_gpu):
+ with self.test_session(use_gpu=True):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
z = tf.batch_matmul(inx, iny, adj_x, adj_y)
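The gradient checker referred to in the comment is, in this era of TensorFlow, tf.test.compute_gradient_error, which measures the maximum difference between the analytic Jacobian and a numerically estimated one. A hedged sketch of that style of check, separate from the file's own _checkGrad (shapes and tolerance are illustrative):

    import numpy as np
    import tensorflow as tf

    x = np.random.normal(size=[7, 2, 3]).astype(np.float32)
    y = np.random.normal(size=[7, 3, 5]).astype(np.float32)

    with tf.Session():
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      z = tf.batch_matmul(inx, iny)  # shape [7, 2, 5]
      # Max elementwise error between the analytic and numerically
      # estimated Jacobians of z with respect to inx.
      err = tf.test.compute_gradient_error(inx, x.shape, z, [7, 2, 5],
                                           x_init_value=x)
      assert err < 1e-2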
@@ -194,22 +188,19 @@ class BatchMatmulGradientTest(tf.test.TestCase):
# n, k] y is a 3D tensor of shape [b, k, m] the batched matmul
# computes z of shape [b, n, m], where z[i, :, :] = x[i, :, :]
# matmul y[i, :, :]
- def _compare(self, b, n, k, m, use_gpu):
+ def _compare(self, b, n, k, m):
x = np.random.normal(0, 1, b * n * k).astype(np.float32).reshape([b, n, k])
y = np.random.normal(0, 1, b * k * m).astype(np.float32).reshape([b, k, m])
- self._checkGrad(x, y, False, False, use_gpu)
- self._checkGrad(x.reshape([b, k, n]), y, True, False, use_gpu)
- self._checkGrad(x, y.reshape([b, m, k]), False, True, use_gpu)
- self._checkGrad(x.reshape([b, k, n]), y.reshape([b, m, k]), True, True,
- use_gpu)
+ self._checkGrad(x, y, False, False)
+ self._checkGrad(x.reshape([b, k, n]), y, True, False)
+ self._checkGrad(x, y.reshape([b, m, k]), False, True)
+ self._checkGrad(x.reshape([b, k, n]), y.reshape([b, m, k]), True, True)
def testSmall(self):
- for use_gpu in [False, True]:
- self._compare(1, 2, 3, 5, use_gpu)
+ self._compare(1, 2, 3, 5)
def testMedium(self):
- for use_gpu in [False, True]:
- self._compare(3, 4, 7, 10, use_gpu)
+ self._compare(3, 4, 7, 10)
# Can't do testLarge using very large inputs because the gradient
# checker would take far too long.
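To pin down the shape bookkeeping described in the comment at the top of the last hunk ([b, n, k] matmul [b, k, m] gives [b, n, m]), a plain NumPy check at the testSmall sizes:

    import numpy as np

    b, n, k, m = 1, 2, 3, 5  # the testSmall configuration
    x = np.random.normal(0, 1, b * n * k).astype(np.float32).reshape([b, n, k])
    y = np.random.normal(0, 1, b * k * m).astype(np.float32).reshape([b, k, m])

    # z[i, :, :] = x[i, :, :] matmul y[i, :, :] for each batch index i.
    z = np.array([np.dot(x[i], y[i]) for i in range(b)])
    assert z.shape == (b, n, m)  # (1, 2, 5)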