8 files changed, 86 insertions(+), 136 deletions(-)
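The pattern removed across all eight files is the same: each test class carried a `use_gpu`/`_use_gpu` class attribute defaulting to False, plus a trailing `*GpuTest` subclass that flipped it to True so every test body ran twice, once per device. The replacement opens each session with `use_gpu=True`, which allows GPU placement when a GPU kernel and device are available and falls back to CPU otherwise, so a single test covers whichever device the machine has. A minimal before/after sketch (hypothetical class and test names, not taken from the patch):

    # Hedged sketch of the refactor; ExampleDiagTest/testVector are invented.
    import numpy as np

    from tensorflow.python.framework import constant_op
    from tensorflow.python.ops import array_ops
    from tensorflow.python.platform import test


    class ExampleDiagTest(test.TestCase):

      # Before: a class attribute plus a GPU subclass duplicated every test.
      #
      #   class ExampleDiagTest(test.TestCase):
      #     _use_gpu = False
      #
      #     def testVector(self):
      #       with self.test_session(use_gpu=self._use_gpu):
      #         ...
      #
      #   class ExampleDiagGpuTest(ExampleDiagTest):
      #     _use_gpu = True

      # After: one test; use_gpu=True permits GPU placement when available.
      def testVector(self):
        with self.test_session(use_gpu=True):
          v = constant_op.constant(np.array([1.0, 2.0, 3.0], dtype=np.float32))
          self.assertAllEqual(np.diag([1.0, 2.0, 3.0]),
                              array_ops.matrix_diag(v).eval())


    if __name__ == "__main__":
      test.main()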
diff --git a/tensorflow/contrib/rnn/python/kernel_tests/gru_ops_test.py b/tensorflow/contrib/rnn/python/kernel_tests/gru_ops_test.py
index d2ec648537..baf17431f3 100644
--- a/tensorflow/contrib/rnn/python/kernel_tests/gru_ops_test.py
+++ b/tensorflow/contrib/rnn/python/kernel_tests/gru_ops_test.py
@@ -40,10 +40,9 @@ from tensorflow.python.training import gradient_descent
 
 
 class GRUBlockCellTest(test.TestCase):
-  _use_gpu = False
 
   def testNoneDimsWithDynamicRNN(self):
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
+    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
       batch_size = 4
       cell_size = 5
       input_size = 6
@@ -60,7 +59,7 @@ class GRUBlockCellTest(test.TestCase):
       sess.run(output, feed)
 
   def testBlockGRUToGRUCellSingleStep(self):
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
+    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
       batch_size = 4
       cell_size = 5
       input_size = 6
@@ -93,7 +92,7 @@ class GRUBlockCellTest(test.TestCase):
     self.assertAllClose(block, basic)
 
   def testBlockGRUToGRUCellMultiStep(self):
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
+    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
       batch_size = 2
       cell_size = 3
       input_size = 3
@@ -152,7 +151,7 @@ class GRUBlockCellTest(test.TestCase):
     self.assertAllClose(block_res[1], block_res[1])
 
   def testDerivativeOfBlockGRUToGRUCellSingleStep(self):
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
+    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
       batch_size = 2
       cell_size = 3
       input_size = 4
@@ -222,7 +221,7 @@ class GRUBlockCellTest(test.TestCase):
     cell_size = 3
     input_size = 4
     time_steps = 2
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
+    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
       # Random initializers.
       seed = 1994
       initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
@@ -289,7 +288,7 @@ class GRUBlockCellTest(test.TestCase):
     self.assertAllClose(block, basic)
 
   def testGradient(self):
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
+    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
       batch_size = 1
       cell_size = 3
       input_size = 2
@@ -331,10 +330,6 @@ class GRUBlockCellTest(test.TestCase):
     self.assertLess(error_b_c, eps)
 
 
-class GRUBlockCellGpuTest(GRUBlockCellTest):
-  _use_gpu = True
-
-
 #### Benchmarking GRUBlockCell vs GRUCell.
 
 
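For readers auditing the change, the `test_session` behavior being relied on, as I understand the test harness of this TensorFlow era (treat the config details as an assumption, not a quote from this patch): `use_gpu=False` pins the graph to the CPU device, `use_gpu=True` leaves placement unconstrained, and only `force_gpu=True` hard-requires a GPU. A sketch of the three modes:

    # Hypothetical test illustrating placement modes; math_ops.add is used
    # only as a trivially placeable op.
    from tensorflow.python.ops import math_ops
    from tensorflow.python.platform import test


    class PlacementModesTest(test.TestCase):

      def testPlacementModes(self):
        # use_gpu=False (the removed default path): ops pinned to the CPU.
        with self.test_session(use_gpu=False):
          self.assertAllClose(2.0, math_ops.add(1.0, 1.0).eval())

        # use_gpu=True (what this commit switches to): TensorFlow chooses
        # the device, so the GPU kernel runs when a GPU is present and the
        # CPU kernel otherwise -- the test passes either way.
        with self.test_session(use_gpu=True):
          self.assertAllClose(2.0, math_ops.add(1.0, 1.0).eval())

        # force_gpu=True would instead fail on a CPU-only machine, which is
        # why the rewritten tests do not use it.


    if __name__ == "__main__":
      test.main()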
diff --git a/tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py b/tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
index 1e6c44a115..0ec37411f5 100644
--- a/tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
+++ b/tensorflow/contrib/rnn/python/kernel_tests/lstm_ops_test.py
@@ -37,10 +37,9 @@ block_lstm = lstm_ops._block_lstm  # pylint: disable=protected-access
 
 
 class LSTMBlockCellTest(test.TestCase):
-  _use_gpu = False
 
   def testNoneDimsWithDynamicRNN(self):
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
+    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
       batch_size = 4
       num_steps = 5
       input_dim = 6
@@ -57,7 +56,7 @@ class LSTMBlockCellTest(test.TestCase):
       sess.run(output, feed)
 
   def testLSTMBlockCell(self):
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()) as sess:
+    with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
       with variable_scope.variable_scope(
           "root", initializer=init_ops.constant_initializer(0.5)):
         x = array_ops.zeros([1, 2])
@@ -85,7 +84,7 @@ class LSTMBlockCellTest(test.TestCase):
       self.assertAllClose(res[4], [[0.24024698, 0.24024698]])
 
   def testCompatibleNames(self):
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()):
+    with self.test_session(use_gpu=True, graph=ops.Graph()):
       cell = rnn_cell.LSTMCell(10)
       pcell = rnn_cell.LSTMCell(10, use_peepholes=True)
       inputs = [array_ops.zeros([4, 5])] * 6
@@ -96,7 +95,7 @@ class LSTMBlockCellTest(test.TestCase):
           for v in variables.trainable_variables()
       }
 
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()):
+    with self.test_session(use_gpu=True, graph=ops.Graph()):
       cell = lstm_ops.LSTMBlockCell(10)
       pcell = lstm_ops.LSTMBlockCell(10, use_peephole=True)
       inputs = [array_ops.zeros([4, 5])] * 6
@@ -107,7 +106,7 @@ class LSTMBlockCellTest(test.TestCase):
           for v in variables.trainable_variables()
       }
 
-    with self.test_session(use_gpu=self._use_gpu, graph=ops.Graph()):
+    with self.test_session(use_gpu=True, graph=ops.Graph()):
       cell = lstm_ops.LSTMBlockFusedCell(10)
       pcell = lstm_ops.LSTMBlockFusedCell(10, use_peephole=True)
       inputs = [array_ops.zeros([4, 5])] * 6
@@ -122,7 +121,7 @@ class LSTMBlockCellTest(test.TestCase):
     self.assertEqual(basic_names, fused_names)
 
   def testLSTMBasicToBlockCell(self):
-    with self.test_session(use_gpu=self._use_gpu) as sess:
+    with self.test_session(use_gpu=True) as sess:
       x = array_ops.zeros([1, 2])
       x_values = np.random.randn(1, 2)
 
@@ -172,7 +171,7 @@ class LSTMBlockCellTest(test.TestCase):
       self.assertAllClose(basic, block)
 
   def testLSTMBasicToBlockCellPeeping(self):
-    with self.test_session(use_gpu=self._use_gpu) as sess:
+    with self.test_session(use_gpu=True) as sess:
       x = array_ops.zeros([1, 2])
       x_values = np.random.randn(1, 2)
 
@@ -225,7 +224,7 @@ class LSTMBlockCellTest(test.TestCase):
       self.assertAllClose(basic, block)
 
   def testLSTMBasicToBlock(self):
-    with self.test_session(use_gpu=self._use_gpu) as sess:
+    with self.test_session(use_gpu=True) as sess:
       batch_size = 2
       input_size = 3
       cell_size = 4
@@ -299,7 +298,7 @@ class LSTMBlockCellTest(test.TestCase):
       self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
 
   def testLSTMBasicToBlockPeeping(self):
-    with self.test_session(use_gpu=self._use_gpu) as sess:
+    with self.test_session(use_gpu=True) as sess:
       batch_size = 2
       input_size = 3
       cell_size = 4
@@ -387,7 +386,7 @@ class LSTMBlockCellTest(test.TestCase):
 
   def testLSTMFusedSequenceLengths(self):
     """Verify proper support for sequence lengths in LSTMBlockFusedCell."""
-    with self.test_session(use_gpu=self._use_gpu) as sess:
+    with self.test_session(use_gpu=True) as sess:
       batch_size = 3
       input_size = 4
       cell_size = 5
@@ -469,9 +468,5 @@ class LSTMBlockCellTest(test.TestCase):
       self.assertAllClose(basic, unfused, rtol=1e-2, atol=1e-2)
 
 
-class LSTMBlockCellGpuTest(LSTMBlockCellTest):
-  _use_gpu = True
-
-
 if __name__ == "__main__":
   test.main()
diff --git a/tensorflow/python/kernel_tests/diag_op_test.py b/tensorflow/python/kernel_tests/diag_op_test.py
index 09d6436f43..4744e68051 100644
--- a/tensorflow/python/kernel_tests/diag_op_test.py
+++ b/tensorflow/python/kernel_tests/diag_op_test.py
@@ -30,10 +30,9 @@ from tensorflow.python.platform import tf_logging
 
 
 class MatrixDiagTest(test.TestCase):
-  _use_gpu = False
 
   def testVector(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v = np.array([1.0, 2.0, 3.0])
       mat = np.diag(v)
       v_diag = array_ops.matrix_diag(v)
@@ -41,7 +40,7 @@ class MatrixDiagTest(test.TestCase):
       self.assertAllEqual(v_diag.eval(), mat)
 
   def testBatchVector(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v_batch = np.array([[1.0, 2.0, 3.0],
                           [4.0, 5.0, 6.0]])
       mat_batch = np.array(
@@ -60,14 +59,14 @@ class MatrixDiagTest(test.TestCase):
       array_ops.matrix_diag(0)
 
   def testInvalidShapeAtEval(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v = array_ops.placeholder(dtype=dtypes_lib.float32)
       with self.assertRaisesOpError("input must be at least 1-dim"):
         array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})
 
   def testGrad(self):
     shapes = ((3,), (7, 4))
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       for shape in shapes:
         x = constant_op.constant(np.random.rand(*shape), np.float32)
         y = array_ops.matrix_diag(x)
@@ -78,15 +77,10 @@ class MatrixDiagTest(test.TestCase):
         self.assertLess(error, 1e-4)
 
 
-class MatrixDiagGpuTest(MatrixDiagTest):
-  _use_gpu = True
-
-
 class MatrixSetDiagTest(test.TestCase):
-  _use_gpu = False
 
   def testSquare(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v = np.array([1.0, 2.0, 3.0])
       mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0],
                       [0.0, 1.0, 0.0]])
@@ -99,7 +93,7 @@ class MatrixSetDiagTest(test.TestCase):
       self.assertAllEqual(mat_set_diag, output.eval())
 
   def testRectangular(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v = np.array([3.0, 4.0])
       mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
       expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
@@ -115,7 +109,7 @@ class MatrixSetDiagTest(test.TestCase):
       self.assertAllEqual(expected, output.eval())
 
   def testSquareBatch(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v_batch = np.array([[-1.0, -2.0, -3.0],
                           [-4.0, -5.0, -6.0]])
       mat_batch = np.array(
@@ -138,7 +132,7 @@ class MatrixSetDiagTest(test.TestCase):
       self.assertAllEqual(mat_set_diag_batch, output.eval())
 
   def testRectangularBatch(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v_batch = np.array([[-1.0, -2.0],
                           [-4.0, -5.0]])
       mat_batch = np.array(
@@ -163,7 +157,7 @@ class MatrixSetDiagTest(test.TestCase):
       array_ops.matrix_set_diag([[0]], 0)
 
   def testInvalidShapeAtEval(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v = array_ops.placeholder(dtype=dtypes_lib.float32)
       with self.assertRaisesOpError("input must be at least 2-dim"):
         array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
@@ -173,7 +167,7 @@ class MatrixSetDiagTest(test.TestCase):
 
   def testGrad(self):
     shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8))
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       for shape in shapes:
         x = constant_op.constant(
             np.random.rand(*shape), dtype=dtypes_lib.float32)
@@ -189,7 +183,7 @@ class MatrixSetDiagTest(test.TestCase):
       self.assertLess(error_x_diag, 1e-4)
 
   def testGradWithNoShapeInformation(self):
-    with self.test_session(use_gpu=self._use_gpu) as sess:
+    with self.test_session(use_gpu=True) as sess:
       v = array_ops.placeholder(dtype=dtypes_lib.float32)
       mat = array_ops.placeholder(dtype=dtypes_lib.float32)
       grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
@@ -207,15 +201,10 @@ class MatrixSetDiagTest(test.TestCase):
                           grad_vals[0])
 
 
-class MatrixSetDiagGpuTest(MatrixSetDiagTest):
-  _use_gpu = True
-
-
 class MatrixDiagPartTest(test.TestCase):
-  _use_gpu = False
 
   def testSquare(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v = np.array([1.0, 2.0, 3.0])
       mat = np.diag(v)
       mat_diag = array_ops.matrix_diag_part(mat)
@@ -223,7 +212,7 @@ class MatrixDiagPartTest(test.TestCase):
       self.assertAllEqual(mat_diag.eval(), v)
 
   def testRectangular(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
       mat_diag = array_ops.matrix_diag_part(mat)
       self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
@@ -232,7 +221,7 @@ class MatrixDiagPartTest(test.TestCase):
       self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))
 
   def testSquareBatch(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v_batch = np.array([[1.0, 2.0, 3.0],
                           [4.0, 5.0, 6.0]])
       mat_batch = np.array(
@@ -248,7 +237,7 @@ class MatrixDiagPartTest(test.TestCase):
       self.assertAllEqual(mat_batch_diag.eval(), v_batch)
 
   def testRectangularBatch(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v_batch = np.array([[1.0, 2.0],
                           [4.0, 5.0]])
       mat_batch = np.array(
@@ -266,14 +255,14 @@ class MatrixDiagPartTest(test.TestCase):
       array_ops.matrix_diag_part(0)
 
   def testInvalidShapeAtEval(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       v = array_ops.placeholder(dtype=dtypes_lib.float32)
       with self.assertRaisesOpError("input must be at least 2-dim"):
         array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})
 
   def testGrad(self):
     shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       for shape in shapes:
         x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
         y = array_ops.matrix_diag_part(x)
@@ -284,10 +273,6 @@ class MatrixDiagPartTest(test.TestCase):
         self.assertLess(error, 1e-4)
 
 
-class MatrixDiagPartGpuTest(MatrixDiagPartTest):
-  _use_gpu = True
-
-
 class DiagTest(test.TestCase):
 
   def diagOp(self, diag, dtype, expected_ans, use_gpu=False):
diff --git a/tensorflow/python/kernel_tests/gather_nd_op_test.py b/tensorflow/python/kernel_tests/gather_nd_op_test.py
index 80a9e0be68..877c2fec3a 100644
--- a/tensorflow/python/kernel_tests/gather_nd_op_test.py
+++ b/tensorflow/python/kernel_tests/gather_nd_op_test.py
@@ -32,10 +32,9 @@ from tensorflow.python.platform import test
 
 
 class GatherNdTest(test.TestCase):
-  use_gpu = False
 
   def _testSimpleDtype(self, dtype):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
      params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
      indices = constant_op.constant([[4], [4], [0]])
      gather_nd_t = array_ops.gather_nd(params, indices)
@@ -53,7 +52,7 @@ class GatherNdTest(test.TestCase):
     self._testSimpleDtype("|S")  # byte strings in python2 + 3
 
   def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
       params = np.ones((3, 3), dtype=np.float32)
 
       indices_empty = np.empty((0, 2), dtype=np.int32)
@@ -84,7 +83,7 @@ class GatherNdTest(test.TestCase):
     self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
 
   def testIndexScalar(self):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
       params = np.array(
           [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
       indices = constant_op.constant([4, 1])
@@ -94,7 +93,7 @@ class GatherNdTest(test.TestCase):
       self.assertAllEqual(np.array(7), gather_nd_val)
 
   def testParamsRankLargerThanIndexIndexScalarSlices(self):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
       params = np.array(
           [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
       indices = constant_op.constant([4])
@@ -104,7 +103,7 @@ class GatherNdTest(test.TestCase):
       self.assertAllEqual(np.array([-7, 7]), gather_nd_val)
 
   def testParamsRankLargerThanIndexSlices(self):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
       params = np.array(
           [[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
       indices = constant_op.constant([[4], [4], [0]])
@@ -115,7 +114,7 @@ class GatherNdTest(test.TestCase):
      self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)
 
   def testHigherRankParamsLargerThanIndexSlices(self):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
       params = np.array(
           [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
            [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
          dtype=np.float32).T
@@ -129,7 +128,7 @@ class GatherNdTest(test.TestCase):
     self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)
 
   def testEmptyIndicesLastRankMeansCopyEntireTensor(self):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
       params = np.array(
           [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
            [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
          dtype=np.float32)
@@ -146,7 +145,7 @@ class GatherNdTest(test.TestCase):
                         gather_nd_val)
 
   def testHigherRankParamsAndIndicesLargerThanIndexSlices(self):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
       params = np.array(
           [[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
            [[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
          dtype=np.float32).T
@@ -161,7 +160,7 @@ class GatherNdTest(test.TestCase):
                         gather_nd_val)
 
   def testHigherRankParams(self):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
       shape = (10, 20, 5, 1, 17)
       params = np.random.rand(*shape)
       indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
@@ -173,7 +172,7 @@ class GatherNdTest(test.TestCase):
       self.assertEqual([2000], gather_nd_t.get_shape())
 
   def testHigherRankParamsAndIndices(self):
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
       shape = (10, 20, 5, 1, 17)
       params = np.random.rand(*shape)
       indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
@@ -194,7 +193,7 @@ class GatherNdTest(test.TestCase):
     self.assertEqual(None, shape[0].value)
 
   def testBadIndices(self):
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       params = [0, 1, 2]
       indices = [[[0], [7]]]  # Make this one higher rank
       gather_nd = array_ops.gather_nd(params, indices)
@@ -204,7 +203,7 @@ class GatherNdTest(test.TestCase):
         gather_nd.eval()
 
   def testBadIndicesWithSlices(self):
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       params = [[0, 1, 2]]
       indices = [[[0], [0], [1]]]  # Make this one higher rank
       gather_nd = array_ops.gather_nd(params, indices)
@@ -221,7 +220,7 @@ class GatherNdTest(test.TestCase):
     grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)
     grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       assert np.array_equal(expected_grads, grads.eval())
 
   def testGradientsRank2Slices(self):
@@ -232,7 +231,7 @@ class GatherNdTest(test.TestCase):
     grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
     grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
      self.assertAllEqual(expected_grads, grads.eval())
 
   def testGradientsRank3Elements(self):
@@ -247,7 +246,7 @@ class GatherNdTest(test.TestCase):
     grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     expected_grads = np.array(
         [[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       self.assertAllEqual(expected_grads, grads.eval())
 
   def testGradientsInt64Indices(self):
@@ -262,7 +261,7 @@ class GatherNdTest(test.TestCase):
     grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
     expected_grads = np.array(
         [[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       self.assertAllEqual(expected_grads, grads.eval())
 
   def testGradientsRank2SlicesWithEmptySpace(self):
@@ -283,14 +282,10 @@ class GatherNdTest(test.TestCase):
         [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],
        dtype=np.float64)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       self.assertAllEqual(expected_grads, grads.eval())
 
 
-class GatherNdGpuTest(GatherNdTest):
-  use_gpu = True
-
-
 class GatherNdOpBenchmark(test.Benchmark):
 
   def benchmark_gather_nd_op(self):
diff --git a/tensorflow/python/kernel_tests/linalg_ops_test.py b/tensorflow/python/kernel_tests/linalg_ops_test.py
index 2d31ac85b0..2a562b6e2c 100644
--- a/tensorflow/python/kernel_tests/linalg_ops_test.py
+++ b/tensorflow/python/kernel_tests/linalg_ops_test.py
@@ -34,13 +34,12 @@ def _random_pd_matrix(n, rng):
 
 
 class CholeskySolveTest(test.TestCase):
-  _use_gpu = False
 
   def setUp(self):
     self.rng = np.random.RandomState(0)
 
   def test_works_with_five_different_random_pos_def_matrices(self):
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       for n in range(1, 6):
         for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
           # Create 2 x n x n matrix
@@ -55,17 +54,13 @@ class CholeskySolveTest(test.TestCase):
               rhs, math_ops.matmul(array, x).eval(), atol=atol)
 
 
-class CholeskySolveGpuTest(CholeskySolveTest):
-  _use_gpu = True
-
-
 class EyeTest(test.TestCase):
 
   def test_non_batch_2x2(self):
     num_rows = 2
     dtype = np.float32
     np_eye = np.eye(num_rows).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows, dtype=dtype)
       self.assertAllEqual((num_rows, num_rows), eye.get_shape())
       self.assertAllEqual(np_eye, eye.eval())
@@ -75,7 +70,7 @@ class EyeTest(test.TestCase):
     num_columns = 3
     dtype = np.float32
     np_eye = np.eye(num_rows, num_columns).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
       self.assertAllEqual((num_rows, num_columns), eye.get_shape())
       self.assertAllEqual(np_eye, eye.eval())
@@ -85,7 +80,7 @@ class EyeTest(test.TestCase):
     batch_shape = [1, 3]
     dtype = np.float32
     np_eye = np.eye(num_rows).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
       self.assertAllEqual(batch_shape + [num_rows, num_rows], eye.get_shape())
       eye_v = eye.eval()
@@ -98,7 +93,7 @@ class EyeTest(test.TestCase):
     batch_shape = [1, 3]
     dtype = np.float32
     np_eye = np.eye(num_rows).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       num_rows_ph = array_ops.placeholder(dtypes.int32)
       batch_shape_ph = array_ops.placeholder(dtypes.int32)
       eye = linalg_ops.eye(num_rows_ph, batch_shape=batch_shape_ph, dtype=dtype)
@@ -115,7 +110,7 @@ class EyeTest(test.TestCase):
     batch_shape = [1, 3]
     dtype = np.float32
     np_eye = np.eye(num_rows, num_columns).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows,
                            num_columns=num_columns,
                            batch_shape=batch_shape,
@@ -133,7 +128,7 @@ class EyeTest(test.TestCase):
     batch_shape = [1, 3]
     dtype = np.float32
     np_eye = np.eye(num_rows, num_columns).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       num_rows_ph = array_ops.placeholder(dtypes.int32)
       num_columns_ph = array_ops.placeholder(dtypes.int32)
       batch_shape_ph = array_ops.placeholder(dtypes.int32)
@@ -154,7 +149,7 @@ class EyeTest(test.TestCase):
     num_rows = 0
     dtype = np.int64
     np_eye = np.eye(num_rows).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows, dtype=dtype)
       self.assertAllEqual((num_rows, num_rows), eye.get_shape())
       self.assertAllEqual(np_eye, eye.eval())
@@ -164,7 +159,7 @@ class EyeTest(test.TestCase):
     num_columns = 0
     dtype = np.int64
     np_eye = np.eye(num_rows, num_columns).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
       self.assertAllEqual((num_rows, num_columns), eye.get_shape())
       self.assertAllEqual(np_eye, eye.eval())
@@ -174,7 +169,7 @@ class EyeTest(test.TestCase):
     num_columns = 2
     dtype = np.int64
     np_eye = np.eye(num_rows, num_columns).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
       self.assertAllEqual((num_rows, num_columns), eye.get_shape())
       self.assertAllEqual(np_eye, eye.eval())
@@ -184,7 +179,7 @@ class EyeTest(test.TestCase):
     batch_shape = [1, 3]
     dtype = np.float32
     np_eye = np.eye(num_rows).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
       self.assertAllEqual((1, 3, 0, 0), eye.get_shape())
       eye_v = eye.eval()
@@ -198,7 +193,7 @@ class EyeTest(test.TestCase):
     batch_shape = [1, 3]
     dtype = np.float32
     np_eye = np.eye(num_rows, num_columns).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows,
                            num_columns=num_columns,
                            batch_shape=batch_shape,
@@ -216,7 +211,7 @@ class EyeTest(test.TestCase):
     batch_shape = [1, 3]
     dtype = np.float32
     np_eye = np.eye(num_rows, num_columns).astype(dtype)
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       eye = linalg_ops.eye(num_rows,
                            num_columns=num_columns,
                            batch_shape=batch_shape,
diff --git a/tensorflow/python/kernel_tests/multinomial_op_test.py b/tensorflow/python/kernel_tests/multinomial_op_test.py
index ab082a1efd..d6e1b2b4c0 100644
--- a/tensorflow/python/kernel_tests/multinomial_op_test.py
+++ b/tensorflow/python/kernel_tests/multinomial_op_test.py
@@ -51,11 +51,10 @@ native_sampler = random_ops.multinomial
 
 
 class MultinomialTest(test.TestCase):
-  use_gpu = False
 
   def testSmallEntropy(self):
     random_seed.set_random_seed(1618)
-    with self.test_session(use_gpu=self.use_gpu):
+    with self.test_session(use_gpu=True):
      # A logit value of -10 corresponds to a probability of ~5e-5.
      logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
      num_samples = 1000
@@ -63,7 +62,7 @@ class MultinomialTest(test.TestCase):
     self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
 
   def testOneOpMultipleStepsIndependent(self):
-    with self.test_session(use_gpu=self.use_gpu) as sess:
+    with self.test_session(use_gpu=True) as sess:
       sample_op1, _ = self._make_ops(10)
       # Consecutive runs shouldn't yield identical output.
       sample1a = sess.run(sample_op1)
@@ -71,7 +70,7 @@ class MultinomialTest(test.TestCase):
       self.assertFalse(np.equal(sample1a, sample1b).all())
 
   def testTwoOpsIndependent(self):
-    with self.test_session(use_gpu=self.use_gpu) as sess:
+    with self.test_session(use_gpu=True) as sess:
       sample_op1, sample_op2 = self._make_ops(32)
       sample1, sample2 = sess.run([sample_op1, sample_op2])
       # We expect sample1 and sample2 to be independent.
@@ -79,14 +78,14 @@ class MultinomialTest(test.TestCase):
       self.assertFalse(np.equal(sample1, sample2).all())
 
   def testTwoOpsSameSeedDrawSameSequences(self):
-    with self.test_session(use_gpu=self.use_gpu) as sess:
+    with self.test_session(use_gpu=True) as sess:
       sample_op1, sample_op2 = self._make_ops(1000, seed=1)
       sample1, sample2 = sess.run([sample_op1, sample_op2])
       self.assertAllEqual(sample1, sample2)
 
   def testLargeLogits(self):
     for neg in [True, False]:
-      with self.test_session(use_gpu=self.use_gpu):
+      with self.test_session(use_gpu=True):
         logits = np.array([[1000.] * 5])
         if neg:
           logits *= -1
@@ -147,7 +146,7 @@ class MultinomialTest(test.TestCase):
     Returns:
       Frequencies from sampled classes; shape [batch_size, num_classes].
""" - with self.test_session(use_gpu=self.use_gpu) as sess: + with self.test_session(use_gpu=True) as sess: random_seed.set_random_seed(1618) op = sampler(constant_op.constant(logits), num_samples) d = sess.run(op) @@ -176,7 +175,7 @@ class MultinomialTest(test.TestCase): def testEmpty(self): classes = 5 - with self.test_session(use_gpu=self.use_gpu): + with self.test_session(use_gpu=True): for batch in 0, 3: for samples in 0, 7: x = random_ops.multinomial( @@ -184,24 +183,20 @@ class MultinomialTest(test.TestCase): self.assertEqual(x.shape, (batch, samples)) def testEmptyClasses(self): - with self.test_session(use_gpu=self.use_gpu): + with self.test_session(use_gpu=True): x = random_ops.multinomial(array_ops.zeros([5, 0]), 7) with self.assertRaisesOpError("num_classes should be positive"): x.eval() def testNegativeMinLogits(self): random_seed.set_random_seed(78844) - with self.test_session(use_gpu=self.use_gpu): + with self.test_session(use_gpu=True): logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]]) num_samples = 1000 samples = random_ops.multinomial(logits, num_samples).eval() self.assertAllEqual([[1023] * num_samples], samples) -class MultinomialGpuTest(MultinomialTest): - use_gpu = True - - # Benchmarking code def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters): np.random.seed(1618) # Make it reproducible. diff --git a/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py b/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py index a43f169df0..dd67919f69 100644 --- a/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py +++ b/tensorflow/python/kernel_tests/parameterized_truncated_normal_op_test.py @@ -104,7 +104,6 @@ def z_test(real, expected, i, num_samples): class ParameterizedTruncatedNormalTest(test.TestCase): - _use_gpu = False z_limit = 6.0 # Stop at moment 10 to avoid numerical errors in the theoretical moments. @@ -116,7 +115,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase): # Give up early if we are unable to import it. import scipy.stats # pylint: disable=g-import-not-at-top,unused-variable random_seed.set_random_seed(seed) - with self.test_session(use_gpu=self._use_gpu): + with self.test_session(use_gpu=True): samples = random_ops.parameterized_truncated_normal(shape, mean, stddev, minval, maxval).eval() @@ -140,7 +139,7 @@ class ParameterizedTruncatedNormalTest(test.TestCase): try: import scipy.stats # pylint: disable=g-import-not-at-top random_seed.set_random_seed(seed) - with self.test_session(use_gpu=self._use_gpu): + with self.test_session(use_gpu=True): samples = random_ops.parameterized_truncated_normal(shape, mean, stddev, minval, maxval).eval() @@ -184,10 +183,6 @@ class ParameterizedTruncatedNormalTest(test.TestCase): self.validateKolmogorovSmirnov([10**5], 0.0, 0.1, 0.05, 0.10) -class ParameterizedTruncatedNormalGpuTest(ParameterizedTruncatedNormalTest): - _use_gpu = True - - # Benchmarking code def parameterized_vs_naive(shape, num_iters, use_gpu=False): np.random.seed(1618) # Make it reproducible. diff --git a/tensorflow/python/ops/special_math_ops_test.py b/tensorflow/python/ops/special_math_ops_test.py index 6ada55c518..13cd9b7ba4 100644 --- a/tensorflow/python/ops/special_math_ops_test.py +++ b/tensorflow/python/ops/special_math_ops_test.py @@ -32,13 +32,12 @@ from tensorflow.python.platform import test class LBetaTest(test.TestCase): - _use_gpu = False def test_one_dimensional_arg(self): # Should evaluate to 1 and 1/2. x_one = [1, 1.] 
     x_one_half = [2, 1.]
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_one)).eval())
       self.assertAllClose(
           0.5, math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
@@ -48,7 +47,7 @@ class LBetaTest(test.TestCase):
     # Should evaluate to 1 and 1/2.
     x_one = [1, 1.]
     x_one_half = [2, 1.]
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       ph = array_ops.placeholder(dtypes.float32)
       beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
       self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one}))
@@ -62,7 +61,7 @@ class LBetaTest(test.TestCase):
     #  = Gamma(1) * Gamma(1) * Gamma(1) * Gamma(1) / Gamma(1 + 1 + 1 + 1)
     #  = 1 / 6
     expected_beta_x = 1 / 6 * np.ones((3, 2, 3))
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       x_ph = array_ops.placeholder(dtypes.float32, [3, 2, 3, None])
       beta_ph = math_ops.exp(special_math_ops.lbeta(x_ph))
       self.assertAllClose(expected_beta_x, beta_ph.eval(feed_dict={x_ph: x_}))
@@ -70,7 +69,7 @@ class LBetaTest(test.TestCase):
   def test_two_dimensional_arg(self):
     # Should evaluate to 1/2.
     x_one_half = [[2, 1.], [2, 1.]]
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       self.assertAllClose(
           [0.5, 0.5], math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
       self.assertEqual((2,), special_math_ops.lbeta(x_one_half).get_shape())
@@ -78,7 +77,7 @@ class LBetaTest(test.TestCase):
   def test_two_dimensional_arg_dynamic(self):
     # Should evaluate to 1/2.
     x_one_half = [[2, 1.], [2, 1.]]
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       ph = array_ops.placeholder(dtypes.float32)
       beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
       self.assertAllClose([0.5, 0.5], beta_ph.eval(feed_dict={ph: x_one_half}))
@@ -86,7 +85,7 @@ class LBetaTest(test.TestCase):
   def test_two_dimensional_proper_shape(self):
     # Should evaluate to 1/2.
     x_one_half = [[2, 1.], [2, 1.]]
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       self.assertAllClose(
           [0.5, 0.5], math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
       self.assertEqual(
@@ -96,7 +95,7 @@ class LBetaTest(test.TestCase):
         special_math_ops.lbeta(x_one_half).get_shape())
 
   def test_complicated_shape(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       x = ops.convert_to_tensor(np.random.rand(3, 2, 2))
       self.assertAllEqual(
           (3, 2), array_ops.shape(special_math_ops.lbeta(x)).eval())
@@ -109,13 +108,13 @@ class LBetaTest(test.TestCase):
     # as the answer, always.
     x_a = [5.5]
     x_b = [0.1]
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_a)).eval())
       self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_b)).eval())
       self.assertEqual((), special_math_ops.lbeta(x_a).get_shape())
 
   def test_empty_rank1_returns_negative_infinity(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       x = constant_op.constant([], shape=[0])
       lbeta_x = special_math_ops.lbeta(x)
       expected_result = constant_op.constant(-np.inf, shape=())
@@ -124,7 +123,7 @@ class LBetaTest(test.TestCase):
       self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
 
   def test_empty_rank2_with_zero_last_dim_returns_negative_infinity(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       event_size = 0
       for batch_size in [0, 1, 2]:
         x = constant_op.constant([], shape=[batch_size, event_size])
@@ -135,7 +134,7 @@ class LBetaTest(test.TestCase):
       self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
 
   def test_empty_rank2_with_zero_batch_dim_returns_empty(self):
-    with self.test_session(use_gpu=self._use_gpu):
+    with self.test_session(use_gpu=True):
       batch_size = 0
       for event_size in [0, 1, 2]:
         x = constant_op.constant([], shape=[batch_size, event_size])
@@ -147,10 +146,6 @@ class LBetaTest(test.TestCase):
       self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
 
 
-class LBetaTestGpu(LBetaTest):
-  _use_gpu = True
-
-
 class EinsumTest(test.TestCase):
 
   simple_cases = [
@@ -269,7 +264,7 @@ class EinsumTest(test.TestCase):
     input_tensors = [constant_op.constant(val) for val in input_vals]
     output_tensor = special_math_ops.einsum(axes, *input_tensors)
 
-    with self.test_session():
+    with self.test_session(use_gpu=True):
       output_value = output_tensor.eval()
 
     correct_value = np.einsum(axes, *input_vals)
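One consequence of the new scheme: on a CPU-only machine the `use_gpu=True` tests still pass, but they no longer certify any GPU kernel. If a test genuinely needs to assert GPU behavior, availability can be checked explicitly; a hedged sketch (assuming the `test.is_gpu_available()` helper, which is not part of this patch):

    # Hypothetical guard for a GPU-specific assertion.
    from tensorflow.python.ops import random_ops
    from tensorflow.python.platform import test


    class GpuOnlyCheckTest(test.TestCase):

      def testMultinomialOnGpu(self):
        if not test.is_gpu_available():
          return  # Nothing GPU-specific to verify on a CPU-only machine.
        with self.test_session(use_gpu=True):
          samples = random_ops.multinomial([[1.0, 1.0, 1.0]], 10).eval()
          self.assertEqual((1, 10), samples.shape)


    if __name__ == "__main__":
      test.main()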