Diffstat (limited to 'tensorflow/python/kernel_tests')
-rwxr-xr-x  tensorflow/python/kernel_tests/__init__.py | 0
-rw-r--r--  tensorflow/python/kernel_tests/argmax_op_test.py | 61
-rw-r--r--  tensorflow/python/kernel_tests/array_ops_test.py | 45
-rw-r--r--  tensorflow/python/kernel_tests/attention_ops_test.py | 166
-rw-r--r--  tensorflow/python/kernel_tests/batch_matmul_op_test.py | 195
-rw-r--r--  tensorflow/python/kernel_tests/bcast_ops_test.py | 76
-rw-r--r--  tensorflow/python/kernel_tests/bias_op_test.py | 93
-rw-r--r--  tensorflow/python/kernel_tests/candidate_sampler_ops_test.py | 114
-rw-r--r--  tensorflow/python/kernel_tests/cast_op_test.py | 165
-rw-r--r--  tensorflow/python/kernel_tests/cholesky_op_test.py | 74
-rw-r--r--  tensorflow/python/kernel_tests/clip_ops_test.py | 222
-rw-r--r--  tensorflow/python/kernel_tests/concat_op_test.py | 276
-rw-r--r--  tensorflow/python/kernel_tests/constant_op_test.py | 524
-rw-r--r--  tensorflow/python/kernel_tests/control_flow_ops_py_test.py | 1260
-rw-r--r--  tensorflow/python/kernel_tests/conv_ops_test.py | 1009
-rw-r--r--  tensorflow/python/kernel_tests/cwise_ops_test.py | 1187
-rw-r--r--  tensorflow/python/kernel_tests/decode_csv_op_test.py | 148
-rw-r--r--  tensorflow/python/kernel_tests/decode_raw_op_test.py | 44
-rw-r--r--  tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py | 60
-rw-r--r--  tensorflow/python/kernel_tests/dense_update_ops_test.py | 151
-rw-r--r--  tensorflow/python/kernel_tests/determinant_op_test.py | 72
-rw-r--r--  tensorflow/python/kernel_tests/diag_op_test.py | 80
-rw-r--r--  tensorflow/python/kernel_tests/dynamic_partition_op_test.py | 99
-rw-r--r--  tensorflow/python/kernel_tests/dynamic_stitch_op_test.py | 107
-rw-r--r--  tensorflow/python/kernel_tests/edit_distance_op_test.py | 153
-rw-r--r--  tensorflow/python/kernel_tests/embedding_ops_test.py | 422
-rw-r--r--  tensorflow/python/kernel_tests/fifo_queue_test.py | 1043
-rw-r--r--  tensorflow/python/kernel_tests/gather_op_test.py | 71
-rw-r--r--  tensorflow/python/kernel_tests/gradient_checker.py | 251
-rw-r--r--  tensorflow/python/kernel_tests/gradient_checker_test.py | 178
-rw-r--r--  tensorflow/python/kernel_tests/identity_op_py_test.py | 47
-rw-r--r--  tensorflow/python/kernel_tests/in_topk_op_test.py | 36
-rw-r--r--  tensorflow/python/kernel_tests/init_ops_test.py | 252
-rw-r--r--  tensorflow/python/kernel_tests/io_ops_test.py | 53
-rw-r--r--  tensorflow/python/kernel_tests/linalg_grad_test.py | 49
-rw-r--r--  tensorflow/python/kernel_tests/listdiff_op_test.py | 117
-rw-r--r--  tensorflow/python/kernel_tests/logging_ops_test.py | 50
-rw-r--r--  tensorflow/python/kernel_tests/lookup_table_op_test.py | 195
-rw-r--r--  tensorflow/python/kernel_tests/lrn_op_test.py | 101
-rw-r--r--  tensorflow/python/kernel_tests/matmul_op_test.py | 206
-rw-r--r--  tensorflow/python/kernel_tests/matrix_inverse_op_test.py | 79
-rw-r--r--  tensorflow/python/kernel_tests/numerics_test.py | 91
-rw-r--r--  tensorflow/python/kernel_tests/pack_op_test.py | 47
-rw-r--r--  tensorflow/python/kernel_tests/pad_op_test.py | 140
-rw-r--r--  tensorflow/python/kernel_tests/parsing_ops_test.py | 414
-rw-r--r--  tensorflow/python/kernel_tests/pooling_ops_test.py | 819
-rw-r--r--  tensorflow/python/kernel_tests/random_ops_test.py | 242
-rw-r--r--  tensorflow/python/kernel_tests/random_shuffle_queue_test.py | 1054
-rw-r--r--  tensorflow/python/kernel_tests/reader_ops_test.py | 362
-rw-r--r--  tensorflow/python/kernel_tests/reduction_ops_test.py | 533
-rw-r--r--  tensorflow/python/kernel_tests/relu_op_test.py | 181
-rw-r--r--  tensorflow/python/kernel_tests/reshape_op_test.py | 106
-rw-r--r--  tensorflow/python/kernel_tests/reverse_sequence_op_test.py | 109
-rw-r--r--  tensorflow/python/kernel_tests/save_restore_ops_test.py | 21
-rw-r--r--  tensorflow/python/kernel_tests/scatter_ops_test.py | 49
-rw-r--r--  tensorflow/python/kernel_tests/segment_reduction_ops_test.py | 269
-rw-r--r--  tensorflow/python/kernel_tests/shape_ops_test.py | 389
-rw-r--r--  tensorflow/python/kernel_tests/slice_op_test.py | 235
-rw-r--r--  tensorflow/python/kernel_tests/softmax_op_test.py | 65
-rw-r--r--  tensorflow/python/kernel_tests/softplus_op_test.py | 47
-rw-r--r--  tensorflow/python/kernel_tests/sparse_concat_op_test.py | 260
-rw-r--r--  tensorflow/python/kernel_tests/sparse_matmul_op_test.py | 82
-rw-r--r--  tensorflow/python/kernel_tests/sparse_reorder_op_test.py | 56
-rw-r--r--  tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py | 111
-rw-r--r--  tensorflow/python/kernel_tests/sparsemask_op_test.py | 32
-rw-r--r--  tensorflow/python/kernel_tests/split_op_test.py | 132
-rw-r--r--  tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py | 34
-rw-r--r--  tensorflow/python/kernel_tests/string_to_number_op_test.py | 66
-rw-r--r--  tensorflow/python/kernel_tests/summary_image_op_test.py | 63
-rw-r--r--  tensorflow/python/kernel_tests/summary_ops_test.py | 83
-rw-r--r--  tensorflow/python/kernel_tests/topk_op_test.py | 52
-rw-r--r--  tensorflow/python/kernel_tests/transpose_op_test.py | 176
-rw-r--r--  tensorflow/python/kernel_tests/unique_op_test.py | 22
-rw-r--r--  tensorflow/python/kernel_tests/unpack_op_test.py | 56
-rw-r--r--  tensorflow/python/kernel_tests/variable_ops_test.py | 225
-rw-r--r--  tensorflow/python/kernel_tests/variable_scope_test.py | 160
-rw-r--r--  tensorflow/python/kernel_tests/variables_test.py | 242
-rw-r--r--  tensorflow/python/kernel_tests/where_op_test.py | 43
-rw-r--r--  tensorflow/python/kernel_tests/xent_op_test.py | 110
79 files changed, 16709 insertions, 0 deletions
diff --git a/tensorflow/python/kernel_tests/__init__.py b/tensorflow/python/kernel_tests/__init__.py
new file mode 100755
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tensorflow/python/kernel_tests/__init__.py
diff --git a/tensorflow/python/kernel_tests/argmax_op_test.py b/tensorflow/python/kernel_tests/argmax_op_test.py
new file mode 100644
index 0000000000..2cd6101a87
--- /dev/null
+++ b/tensorflow/python/kernel_tests/argmax_op_test.py
@@ -0,0 +1,61 @@
+"""Tests for tensorflow.ops.argmax_op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+class ArgMaxTest(tf.test.TestCase):
+
+ def _testArg(self, method, x, dimension,
+ expected_values, use_gpu=False, expected_err_re=None):
+ with self.test_session(use_gpu=use_gpu):
+ ans = method(x, dimension=dimension)
+ if expected_err_re is None:
+ tf_ans = ans.eval()
+ self.assertAllEqual(tf_ans, expected_values)
+ self.assertShapeEqual(expected_values, ans)
+ else:
+ with self.assertRaisesOpError(expected_err_re):
+ ans.eval()
+
+ def _testBothArg(self, method, x, dimension,
+ expected_values, expected_err_re=None):
+ self._testArg(method, x, dimension,
+ expected_values, True, expected_err_re)
+ self._testArg(method, x, dimension,
+ expected_values, False, expected_err_re)
+
+ def _testBasic(self, dtype):
+ x = np.asarray(100*np.random.randn(200), dtype=dtype)
+
+ # Check that argmin and argmax match numpy along the primary
+ # dimension
+ self._testBothArg(tf.argmax, x, 0, x.argmax())
+ self._testBothArg(tf.argmin, x, 0, x.argmin())
+
+ def _testDim(self, dtype):
+ x = np.asarray(100*np.random.randn(3, 2, 4, 5, 6), dtype=dtype)
+
+ # Check that argmin and argmax match numpy along all dimensions
+ for dim in range(5):
+ self._testBothArg(tf.argmax, x, dim, x.argmax(dim))
+ self._testBothArg(tf.argmin, x, dim, x.argmin(dim))
+
+ def testFloat(self):
+ self._testBasic(np.float32)
+ self._testDim(np.float32)
+
+ def testDouble(self):
+ self._testBasic(np.float64)
+ self._testDim(np.float64)
+
+ def testInt32(self):
+ self._testBasic(np.int32)
+ self._testDim(np.int32)
+
+ def testInt64(self):
+ self._testBasic(np.int64)
+ self._testDim(np.int64)
+
+if __name__ == "__main__":
+ tf.test.main()
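
For reference, a minimal numpy-only sketch of the behavior the tests above compare against: argmax/argmin return the index of the extreme value along the requested dimension (the values here are illustrative):

    import numpy as np
    x = np.array([[1.0, 5.0, 3.0],
                  [4.0, 2.0, 6.0]])
    print(x.argmax(0))  # [1 0 1] -- per-column index of the max
    print(x.argmax(1))  # [1 2]   -- per-row index of the max
    print(x.argmin(0))  # [0 1 0] -- per-column index of the min
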
diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py
new file mode 100644
index 0000000000..108cc7599e
--- /dev/null
+++ b/tensorflow/python/kernel_tests/array_ops_test.py
@@ -0,0 +1,45 @@
+"""Tests for array_ops."""
+import math
+
+import tensorflow.python.platform
+
+import numpy as np
+
+from tensorflow.python.framework import test_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import googletest
+
+
+class OperatorShapeTest(test_util.TensorFlowTestCase):
+
+ def testExpandScalar(self):
+ scalar = 'hello'
+ scalar_expanded = array_ops.expand_dims(scalar, [0])
+ self.assertEqual(scalar_expanded.get_shape(), (1,))
+
+ def testSqueeze(self):
+ scalar = 'hello'
+ scalar_squeezed = array_ops.squeeze(scalar, ())
+ self.assertEqual(scalar_squeezed.get_shape(), ())
+
+
+class ReverseTest(test_util.TensorFlowTestCase):
+
+ def testReverse0DimAuto(self):
+ x_np = 4
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ x_tf = array_ops.reverse(x_np, []).eval()
+ self.assertAllEqual(x_tf, x_np)
+
+ def testReverse1DimAuto(self):
+ x_np = [1, 4, 9]
+
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ x_tf = array_ops.reverse(x_np, [True]).eval()
+ self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])
+
+
+if __name__ == '__main__':
+ googletest.main()
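
A numpy sketch of the shape semantics exercised above (inputs are illustrative): expand_dims inserts a length-1 dimension, squeeze removes the listed length-1 dimensions, and reverse with [True] flips the first dimension:

    import numpy as np
    print(np.asarray([1, 4, 9])[::-1])                        # [9 4 1]
    print(np.expand_dims(7, 0).shape)                         # (1,)
    print(np.squeeze(np.ones((1, 3, 1)), axis=(0, 2)).shape)  # (3,)
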
diff --git a/tensorflow/python/kernel_tests/attention_ops_test.py b/tensorflow/python/kernel_tests/attention_ops_test.py
new file mode 100644
index 0000000000..5541c541b2
--- /dev/null
+++ b/tensorflow/python/kernel_tests/attention_ops_test.py
@@ -0,0 +1,166 @@
+"""Tests for tensorflow.ops.attention_ops."""
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+from tensorflow.python.ops import attention_ops
+
+
+class ExtractGlimpseTest(tf.test.TestCase):
+
+ def _VerifyValues(
+ self, tensor_in_sizes, glimpse_sizes, offsets, expected_rows,
+ expected_cols):
+ """Verifies the output values of the glimpse extraction kernel.
+
+ Args:
+ tensor_in_sizes: Input tensor dimensions in [input_rows, input_cols].
+ glimpse_sizes: Dimensions of the glimpse in [glimpse_rows, glimpse_cols].
+ offsets: Relative location of the center of the glimpse in the input
+ image expressed as [row_offset, col_offset].
+ expected_rows: A list containing the expected row numbers (None for
+ out-of-bounds entries that are expected to be replaced by uniform
+ random entries in [0,1)).
+ expected_cols: Same as expected_rows, but for column numbers.
+ """
+
+ rows = tensor_in_sizes[0]
+ cols = tensor_in_sizes[1]
+ # Row Tensor with entries by row.
+ # [[ 1 1 1 ... ]
+ # [ 2 2 2 ... ]
+ # [ 3 3 3 ... ]
+ # [ ...
+ # ]
+ t_rows = tf.tile(
+ [[1.0 * r] for r in range(1, rows + 1)], [1, cols],
+ name='tile_rows')
+
+ # Shuffle to switch to a convention of (batch_size, height, width, depth).
+ t_rows_4d = tf.transpose(
+ tf.expand_dims(
+ tf.expand_dims(t_rows, 0), 3), [0, 2, 1, 3])
+
+ # Column Tensor with entries by column.
+ # [[ 1 2 3 4 ... ]
+ # [ 1 2 3 4 ... ]
+ # [ 1 2 3 4 ... ]
+ # [ ... ]
+ # ]
+ t_cols = tf.tile(
+ [[1.0 * r for r in range(1, cols + 1)]],
+ [rows, 1], name='tile_cols')
+
+ # Shuffle to switch to a convention of (batch_size, height, width, depth).
+ t_cols_4d = tf.transpose(
+ tf.expand_dims(
+ tf.expand_dims(t_cols, 0), 3), [0, 2, 1, 3])
+
+ # extract_glimpses from Row and Column Tensor, respectively.
+ # Switch order for glimpse_sizes and offsets to switch from (row, col)
+ # convention to TensorFlow's (height, width) convention.
+ t1 = tf.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
+ t2 = tf.constant([offsets[1], offsets[0]], shape=[1, 2])
+ glimpse_rows = (tf.transpose(
+ attention_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
+ glimpse_cols = (tf.transpose(
+ attention_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
+
+ # Evaluate the TensorFlow graph.
+ with self.test_session() as sess:
+ value_rows, value_cols = sess.run([glimpse_rows, glimpse_cols])
+
+ # Check dimensions of returned glimpse.
+ self.assertEqual(value_rows.shape[1], glimpse_sizes[0])
+ self.assertEqual(value_rows.shape[2], glimpse_sizes[1])
+ self.assertEqual(value_cols.shape[1], glimpse_sizes[0])
+ self.assertEqual(value_cols.shape[2], glimpse_sizes[1])
+
+ # Check entries.
+ min_random_val = 0
+ max_random_val = max(rows, cols)
+ for i in range(0, glimpse_sizes[0]):
+ for j in range(0, glimpse_sizes[1]):
+ if expected_rows[i] is None or expected_cols[j] is None:
+ self.assertGreaterEqual(value_rows[0][i][j][0], min_random_val)
+ self.assertLessEqual(value_rows[0][i][j][0], max_random_val)
+ self.assertGreaterEqual(value_cols[0][i][j][0], min_random_val)
+ self.assertLessEqual(value_cols[0][i][j][0], max_random_val)
+ else:
+ self.assertEqual(value_rows[0][i][j][0], expected_rows[i])
+ self.assertEqual(value_cols[0][i][j][0], expected_cols[j])
+
+ def testCenterGlimpse(self):
+ self._VerifyValues(tensor_in_sizes=[41, 61],
+ glimpse_sizes=[3, 5],
+ offsets=[0.0, 0.0],
+ expected_rows=[20, 21, 22],
+ expected_cols=[29, 30, 31, 32, 33])
+
+ def testLargeCenterGlimpse(self):
+ self._VerifyValues(tensor_in_sizes=[41, 61],
+ glimpse_sizes=[41, 61],
+ offsets=[0.0, 0.0],
+ expected_rows=range(1, 42),
+ expected_cols=range(1, 62))
+
+ def testTooLargeCenterGlimpse(self):
+ self._VerifyValues(tensor_in_sizes=[41, 61],
+ glimpse_sizes=[43, 63],
+ offsets=[0.0, 0.0],
+ expected_rows=[None] + range(1, 42) + [None],
+ expected_cols=[None] + range(1, 62) + [None])
+
+ def testGlimpseFullOverlap(self):
+ self._VerifyValues(tensor_in_sizes=[41, 61],
+ glimpse_sizes=[3, 5],
+ offsets=[0.1, 0.3],
+ expected_rows=[22, 23, 24],
+ expected_cols=[38, 39, 40, 41, 42])
+
+ def testGlimpseFullOverlap2(self):
+ self._VerifyValues(tensor_in_sizes=[41, 61],
+ glimpse_sizes=[11, 3],
+ offsets=[-0.7, -0.7],
+ expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
+ expected_cols=[8, 9, 10])
+
+ def testGlimpseBeforeLeftMargin(self):
+ self._VerifyValues(tensor_in_sizes=[41, 61],
+ glimpse_sizes=[11, 5],
+ offsets=[-0.7, -0.9],
+ expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
+ expected_cols=[1, 2, 3, 4, 5])
+
+ def testGlimpseLowerRightCorner(self):
+ self._VerifyValues(tensor_in_sizes=[41, 61],
+ glimpse_sizes=[7, 5],
+ offsets=[1.0, 1.0],
+ expected_rows=[38, 39, 40, 41, None, None, None],
+ expected_cols=[59, 60, 61, None, None])
+
+ def testGlimpseNoOverlap(self):
+ self._VerifyValues(tensor_in_sizes=[20, 30],
+ glimpse_sizes=[3, 3],
+ offsets=[-2.0, 2.0],
+ expected_rows=[None, None, None],
+ expected_cols=[None, None, None])
+
+ def testGlimpseOnLeftMargin(self):
+ self._VerifyValues(tensor_in_sizes=[41, 61],
+ glimpse_sizes=[11, 7],
+ offsets=[-0.7, -1.0],
+ expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
+ expected_cols=[None, None, None, 1, 2, 3, 4])
+
+ def testGlimpseUpperMargin(self):
+ self._VerifyValues(tensor_in_sizes=[41, 61],
+ glimpse_sizes=[7, 5],
+ offsets=[-1, 0.9],
+ expected_rows=[None, None, None, 1, 2, 3, 4],
+ expected_cols=[56, 57, 58, 59, 60])
+
+
+if __name__ == '__main__':
+ tf.test.main()
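
The expected rows and columns above imply a particular offset convention. A rough sketch, assuming offsets of -1.0, 0.0, and 1.0 map the glimpse center linearly to the top/left edge, the image center, and the bottom/right edge (this mapping is inferred from the expectations, not taken from the op's documentation):

    def glimpse_center(image_extent, offset):
        # Assumed linear map from offset in [-1, 1] to 1-indexed pixels.
        return image_extent / 2.0 * (1.0 + offset)

    print(glimpse_center(41, 0.0))  # 20.5 -> rows 20-22 for a height-3 glimpse
    print(glimpse_center(41, 1.0))  # 41.0 -> rows 38-41 plus out-of-bounds rows
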
diff --git a/tensorflow/python/kernel_tests/batch_matmul_op_test.py b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
new file mode 100644
index 0000000000..8ae37fec3a
--- /dev/null
+++ b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
@@ -0,0 +1,195 @@
+"""Tests for tensorflow.ops.tf.BatchMatMul."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class BatchMatmulOpTest(tf.test.TestCase):
+
+ # Uses numpy to compute batch_matmul(x, y, adj_x, adj_y).
+ def _npBatchMatmul(self, x, y, adj_x, adj_y):
+ assert x.ndim >= 3
+ assert y.ndim >= 3
+ # The output's shape depends on adj_x and adj_y.
+ d0 = x.shape[-2] if not adj_x else x.shape[-1]
+ d2 = y.shape[-1] if not adj_y else y.shape[-2]
+ batch_dims = x.shape[:-2]
+ num = np.prod(batch_dims)
+ z = np.empty(list(batch_dims) + [d0, d2], dtype=x.dtype)
+ xr = x.reshape([num, x.shape[-2], x.shape[-1]])
+ yr = y.reshape([num, y.shape[-2], y.shape[-1]])
+ zr = z.reshape([num, z.shape[-2], z.shape[-1]])
+ for i in range(num):
+ a = np.matrix(xr[i, :, :])
+ if adj_x:
+ a = a.transpose().conj()
+ b = np.matrix(yr[i, :, :])
+ if adj_y:
+ b = b.transpose().conj()
+ zr[i, :, :] = a * b
+ return z
+
+ # Test that _npBatchMatmul works.
+ def testSimpleNpVersion(self):
+ x = np.array([0., 1., 2., 3.]).reshape([1, 2, 2])
+ y = np.array([1., 2., 3., 4.]).reshape([1, 2, 2])
+ z0 = self._npBatchMatmul(x, y, False, False)
+ z1 = np.array([3., 4., 11., 16.]).reshape([1, 2, 2])
+ self.assertTrue(np.array_equal(z0, z1))
+
+ x = np.array([1., (1j), (-1.), (-1j)]).reshape([1, 2, 2])
+ y = x * np.complex(1, 1) # rotate x by 45 degrees and scale by sqrt(2)
+ z0 = self._npBatchMatmul(x, y, False, False)
+ z1 = np.array([2., (2.j), -2., (-2.j)]).reshape([1, 2, 2])
+ self.assertTrue(np.array_equal(z0, z1))
+
+ z0 = self._npBatchMatmul(x, y, False, True)
+ z1 = np.array([(2.-2.j), (-2.+2.j), (-2.+2.j), (2.-2.j)]).reshape([1, 2, 2])
+ self.assertTrue(np.array_equal(z0, z1))
+
+ z0 = self._npBatchMatmul(x, y, True, False)
+ z1 = np.array([(2.+2.j), (-2.+2.j), (2.-2.j), (2.+2.j)]).reshape([1, 2, 2])
+ self.assertTrue(np.array_equal(z0, z1))
+
+ # Compares tf.batch_matmul(x, y, adj_x, adj_y) against
+ # _npBatchMatmul(x, y, adj_x, adj_y).
+ def _compare(self, x, y, adj_x, adj_y, use_gpu=False):
+ with self.test_session(use_gpu=use_gpu):
+ z0 = tf.batch_matmul(x, y, adj_x=adj_x, adj_y=adj_y)
+ z0_val = z0.eval()
+ z1 = self._npBatchMatmul(x, y, adj_x, adj_y)
+ self.assertShapeEqual(z1, z0)
+ if z0_val.size != 0:
+ err = (np.abs(z0_val - z1) / np.maximum(1, np.abs(z0_val))).max()
+ tf.logging.info("error = %f", err)
+ self.assertTrue(err < 1e-4)
+
+ # Returns a random float32 numpy array of the given shape.
+ def _randFloat(self, shape):
+ vals = np.random.normal(0, 1, np.prod(shape)).reshape(shape)
+ return np.array(vals, dtype=np.float32)
+
+ def testSimpleFloat(self):
+ for use_gpu in [False, True]:
+ self._compare(self._randFloat([7, 2, 3]), self._randFloat([7, 3, 5]),
+ False, False, use_gpu)
+ self._compare(self._randFloat([7, 2, 3]), self._randFloat([7, 5, 3]),
+ False, True, use_gpu)
+ self._compare(self._randFloat([7, 3, 2]), self._randFloat([7, 3, 5]),
+ True, False, use_gpu)
+ self._compare(self._randFloat([7, 3, 2]), self._randFloat([7, 5, 3]),
+ True, True, use_gpu)
+
+ def testLargeFloat(self):
+ for use_gpu in [False, True]:
+ self._compare(self._randFloat([10, 64, 75]),
+ self._randFloat([10, 75, 30]), False, False, use_gpu)
+ self._compare(self._randFloat([10, 75, 64]),
+ self._randFloat([10, 75, 30]), True, False, use_gpu)
+ self._compare(self._randFloat([10, 64, 75]),
+ self._randFloat([10, 30, 75]), False, True, use_gpu)
+ self._compare(self._randFloat([10, 75, 64]),
+ self._randFloat([10, 30, 75]), True, True, use_gpu)
+
+ def testHighNDims(self):
+ for use_gpu in [False, True]:
+ self._compare(self._randFloat([5, 7, 2, 3]),
+ self._randFloat([5, 7, 3, 5]), False, False, use_gpu)
+ self._compare(self._randFloat([5, 7, 3, 2]),
+ self._randFloat([5, 7, 3, 5]), True, False, use_gpu)
+ self._compare(self._randFloat([5, 7, 2, 3]),
+ self._randFloat([5, 7, 5, 3]), False, True, use_gpu)
+ self._compare(self._randFloat([5, 7, 3, 2]),
+ self._randFloat([5, 7, 5, 3]), True, True, use_gpu)
+
+ # Returns a random complex64 numpy array of the given shape.
+ def _randComplex(self, shape):
+ real = np.random.normal(0, 1, np.prod(shape))
+ imag = np.random.normal(0, 1, np.prod(shape))
+ vals = [np.complex(v[0], v[1]) for v in zip(real, imag)]
+ return np.array(vals, dtype=np.complex64).reshape(shape)
+
+ def testSimpleComplex(self):
+ self._compare(self._randComplex([7, 2, 3]),
+ self._randComplex([7, 3, 5]), False, False)
+ self._compare(self._randComplex([7, 2, 3]),
+ self._randComplex([7, 5, 3]), False, True)
+ self._compare(self._randComplex([7, 3, 2]),
+ self._randComplex([7, 3, 5]), True, False)
+ self._compare(self._randComplex([7, 3, 2]),
+ self._randComplex([7, 5, 3]), True, True)
+
+ def testLargeComplex(self):
+ self._compare(self._randComplex([10, 64, 75]),
+ self._randComplex([10, 75, 30]), False,
+ False)
+ self._compare(self._randComplex([10, 64, 75]),
+ self._randComplex([10, 30, 75]), False, True)
+ self._compare(self._randComplex([10, 75, 64]),
+ self._randComplex([10, 75, 30]), True, False)
+ self._compare(self._randComplex([10, 75, 64]),
+ self._randComplex([10, 30, 75]), True, True)
+
+ def testEmpty(self):
+ self._compare(np.empty([0, 3, 2]).astype(np.float32),
+ np.empty([0, 2, 4]).astype(np.float32), False, False)
+ self._compare(np.empty([3, 2, 0]).astype(np.float32),
+ np.empty([3, 0, 5]).astype(np.float32), False, False)
+ self._compare(np.empty([3, 0, 2]).astype(np.float32),
+ np.empty([3, 2, 5]).astype(np.float32), False, False)
+ self._compare(np.empty([3, 3, 2]).astype(np.float32),
+ np.empty([3, 2, 0]).astype(np.float32), False, False)
+
+
+class BatchMatmulGradientTest(tf.test.TestCase):
+
+ # loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the
+ # gradient checker.
+ def _checkGrad(self, x, y, adj_x, adj_y):
+ assert 3 == x.ndim
+ assert 3 == y.ndim
+ with self.test_session():
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ z = tf.batch_matmul(inx, iny, adj_x, adj_y)
+ loss = tf.reduce_sum(z)
+ epsilon = 1e-2
+ ((x_jacob_t, x_jacob_n), (y_jacob_t, y_jacob_n)) = gc.ComputeGradient(
+ [inx, iny], [x.shape, y.shape], loss, [1],
+ x_init_value=[x, y], delta=epsilon)
+
+ tf.logging.info("x_jacob_t = %s", x_jacob_t.reshape(x.shape))
+ tf.logging.info("x_jacob_n = %s", x_jacob_n.reshape(x.shape))
+ self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
+ tf.logging.info("y_jacob_t = %s", y_jacob_t.reshape(y.shape))
+ tf.logging.info("y_jacob_n = %s", y_jacob_n.reshape(y.shape))
+ self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=epsilon)
+
+ # Tests the gradient of a batched matmul of x and y: x is a 3D tensor
+ # of shape [b, n, k], y is a 3D tensor of shape [b, k, m], and the
+ # batched matmul computes z of shape [b, n, m], where
+ # z[i, :, :] = x[i, :, :] matmul y[i, :, :].
+ def _compare(self, b, n, k, m):
+ x = np.random.normal(0, 1, b * n * k).astype(np.float32).reshape([b, n, k])
+ y = np.random.normal(0, 1, b * k * m).astype(np.float32).reshape([b, k, m])
+ self._checkGrad(x, y, False, False)
+ self._checkGrad(x.reshape([b, k, n]), y, True, False)
+ self._checkGrad(x, y.reshape([b, m, k]), False, True)
+ self._checkGrad(x.reshape([b, k, n]), y.reshape([b, m, k]), True, True)
+
+ def testSmall(self):
+ self._compare(1, 2, 3, 5)
+
+ def testMedium(self):
+ self._compare(3, 4, 7, 10)
+
+ # Can't add a testLarge with very large inputs because the gradient
+ # checker would take far too long.
+
+
+if __name__ == "__main__":
+ tf.test.main()
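
For readers who prefer einsum notation, the reference computation in _npBatchMatmul is equivalent to this numpy sketch (the flattening of leading batch dimensions is elided):

    import numpy as np
    x = np.random.randn(7, 2, 3)
    y = np.random.randn(7, 3, 5)
    z = np.einsum('bij,bjk->bik', x, y)   # batch matmul, shape (7, 2, 5)
    # adj_x / adj_y conjugate-transpose the last two dims of an operand:
    xa = np.conj(np.swapaxes(x, -1, -2))  # shape (7, 3, 2)
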
diff --git a/tensorflow/python/kernel_tests/bcast_ops_test.py b/tensorflow/python/kernel_tests/bcast_ops_test.py
new file mode 100644
index 0000000000..c62a910496
--- /dev/null
+++ b/tensorflow/python/kernel_tests/bcast_ops_test.py
@@ -0,0 +1,76 @@
+"""Tests for tensorflow.kernels.bcast_ops."""
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
+
+
+class BcastOpsTest(tf.test.TestCase):
+
+ def _GetGradientArgs(self, xs, ys):
+ with self.test_session() as sess:
+ return sess.run(_broadcast_gradient_args(xs, ys))
+
+ def testBasic(self):
+ r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
+ self.assertAllEqual(r0, [])
+ self.assertAllEqual(r1, [0, 1, 2])
+
+ r0, r1 = self._GetGradientArgs([1], [2, 3, 5])
+ self.assertAllEqual(r0, [0, 1, 2])
+ self.assertAllEqual(r1, [])
+
+ r0, r1 = self._GetGradientArgs([2, 3, 5], [5])
+ self.assertAllEqual(r0, [])
+ self.assertAllEqual(r1, [0, 1])
+
+ r0, r1 = self._GetGradientArgs([5], [2, 3, 5])
+ self.assertAllEqual(r0, [0, 1])
+ self.assertAllEqual(r1, [])
+
+ r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 5])
+ self.assertAllEqual(r0, [])
+ self.assertAllEqual(r1, [0])
+
+ r0, r1 = self._GetGradientArgs([3, 5], [2, 3, 5])
+ self.assertAllEqual(r0, [0])
+ self.assertAllEqual(r1, [])
+
+ r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 1])
+ self.assertAllEqual(r0, [])
+ self.assertAllEqual(r1, [0, 2])
+
+ r0, r1 = self._GetGradientArgs([3, 1], [2, 3, 5])
+ self.assertAllEqual(r0, [0, 2])
+ self.assertAllEqual(r1, [])
+
+ r0, r1 = self._GetGradientArgs([2, 1, 5], [3, 1])
+ self.assertAllEqual(r0, [1])
+ self.assertAllEqual(r1, [0, 2])
+
+ r0, r1 = self._GetGradientArgs([3, 1], [2, 1, 5])
+ self.assertAllEqual(r0, [0, 2])
+ self.assertAllEqual(r1, [1])
+
+ def testZeroDims(self):
+ r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 0, 5])
+ self.assertAllEqual(r0, [])
+ self.assertAllEqual(r1, [0, 1])
+
+ r0, r1 = self._GetGradientArgs([3, 0, 5], [2, 0, 3, 0, 5])
+ self.assertAllEqual(r0, [0, 1])
+ self.assertAllEqual(r1, [])
+
+ r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 1, 5])
+ self.assertAllEqual(r0, [])
+ self.assertAllEqual(r1, [0, 1, 3])
+
+ r0, r1 = self._GetGradientArgs([3, 1, 5], [2, 0, 3, 0, 5])
+ self.assertAllEqual(r0, [0, 1, 3])
+ self.assertAllEqual(r1, [])
+
+
+if __name__ == "__main__":
+ tf.test.main()
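
The expectations above encode the usual broadcasting-gradient rule: each operand's gradient must be summed over the output dimensions along which that operand was broadcast. A plain-Python sketch that reproduces the expected (r0, r1) pairs (shapes are right-aligned, exactly as numpy broadcasting aligns them):

    def broadcast_gradient_args(xs, ys):
        n = max(len(xs), len(ys))
        xs = [1] * (n - len(xs)) + list(xs)  # left-pad with 1s
        ys = [1] * (n - len(ys)) + list(ys)
        r0 = [i for i in range(n) if xs[i] == 1 and ys[i] != 1]
        r1 = [i for i in range(n) if ys[i] == 1 and xs[i] != 1]
        return r0, r1

    print(broadcast_gradient_args([2, 3, 5], [1]))     # ([], [0, 1, 2])
    print(broadcast_gradient_args([3, 1], [2, 1, 5]))  # ([0, 2], [1])
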
diff --git a/tensorflow/python/kernel_tests/bias_op_test.py b/tensorflow/python/kernel_tests/bias_op_test.py
new file mode 100644
index 0000000000..f3a26e2490
--- /dev/null
+++ b/tensorflow/python/kernel_tests/bias_op_test.py
@@ -0,0 +1,93 @@
+"""Functional tests for BiasAdd."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker
+
+
+class BiasAddTest(tf.test.TestCase):
+
+ def _npBias(self, inputs, bias):
+ assert len(bias.shape) == 1
+ print inputs.shape
+ print bias.shape
+ assert inputs.shape[-1] == bias.shape[0]
+ return inputs + bias.reshape(([1] * (len(inputs.shape) - 1))
+ + [bias.shape[0]])
+
+ def testNpBias(self):
+ self.assertAllClose(np.array([[11, 22, 33], [41, 52, 63]]),
+ self._npBias(np.array([[10, 20, 30], [40, 50, 60]]),
+ np.array([1, 2, 3])))
+
+ def _testBias(self, np_inputs, np_bias, use_gpu=False):
+ np_val = self._npBias(np_inputs, np_bias)
+ with self.test_session(use_gpu=use_gpu):
+ tf_val = tf.nn.bias_add(np_inputs, np_bias).eval()
+ self.assertAllClose(np_val, tf_val)
+
+ def _testAll(self, np_inputs, np_bias):
+ self._testBias(np_inputs, np_bias, use_gpu=False)
+ if np_inputs.dtype == np.float32 or np_inputs.dtype == np.float64:
+ self._testBias(np_inputs, np_bias, use_gpu=True)
+
+ def testInputDims(self):
+ with self.assertRaises(ValueError):
+ tf.nn.bias_add([1, 2], [1])
+
+ def testBiasVec(self):
+ with self.assertRaises(ValueError):
+ tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
+ tf.reshape([1, 2], shape=[1, 2]))
+
+ def testBiasInputsMatch(self):
+ with self.assertRaises(ValueError):
+ tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
+ tf.reshape([1], shape=[1]))
+
+ def testIntTypes(self):
+ for t in [np.int8, np.int16, np.int32, np.int64]:
+ self._testAll(np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
+ np.array([1, 2, 3]).astype(t))
+
+ def testFloatTypes(self):
+ for t in [np.float32, np.float64]:
+ self._testAll(np.random.rand(4, 3, 3).astype(t),
+ np.random.rand(3).astype(t))
+
+ def testGradientTensor(self):
+ with self.test_session():
+ t = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
+ dtype=tf.float64)
+ b = tf.constant([1.3, 2.4], dtype=tf.float64)
+ bo = tf.nn.bias_add(t, b)
+ err = gradient_checker.ComputeGradientError(t, [3, 2], bo, [3, 2])
+ print "bias add tensor gradient err = ", err
+ self.assertLess(err, 1e-10)
+
+ def testGradientBias(self):
+ with self.test_session():
+ t = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
+ dtype=tf.float64)
+ b = tf.constant([1.3, 2.4], dtype=tf.float64)
+ bo = tf.nn.bias_add(t, b)
+ err = gradient_checker.ComputeGradientError(b, [2], bo, [3, 2])
+ print "bias add bias gradient err = ", err
+ self.assertLess(err, 1e-10)
+
+ def testGradientTensor4D(self):
+ with self.test_session():
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float32)
+ t = tf.constant(x, shape=s, dtype=tf.float32)
+ b = tf.constant([1.3, 2.4], dtype=tf.float32)
+ bo = tf.nn.bias_add(t, b)
+ err = gradient_checker.ComputeGradientError(t, s, bo, s, x_init_value=x)
+ print "bias add tensor gradient err = ", err
+ self.assertLess(err, 1e-3)
+
+
+if __name__ == "__main__":
+ tf.test.main()
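
Note that the explicit reshape in _npBias only makes numpy's trailing-dimension broadcasting explicit; a minimal sketch of the equivalence:

    import numpy as np
    inputs = np.random.rand(4, 3, 3)
    bias = np.random.rand(3)
    # Broadcasting aligns on the last dimension automatically:
    print(np.allclose(inputs + bias, inputs + bias.reshape(1, 1, 3)))  # True
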
diff --git a/tensorflow/python/kernel_tests/candidate_sampler_ops_test.py b/tensorflow/python/kernel_tests/candidate_sampler_ops_test.py
new file mode 100644
index 0000000000..a36b8587d5
--- /dev/null
+++ b/tensorflow/python/kernel_tests/candidate_sampler_ops_test.py
@@ -0,0 +1,114 @@
+"""Tests for CandidateSamplerOp."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class RangeSamplerOpsTest(tf.test.TestCase):
+
+ BATCH_SIZE = 3
+ NUM_TRUE = 2
+ RANGE = 5
+ NUM_SAMPLED = RANGE
+
+ TRUE_LABELS = [[1, 2], [0, 4], [3, 3]]
+
+ def testTrueCandidates(self):
+ with self.test_session() as sess:
+ indices = tf.constant([0, 0, 1, 1, 2, 2])
+ true_candidates_vec = tf.constant([1, 2, 0, 4, 3, 3])
+ true_candidates_matrix = tf.reshape(
+ true_candidates_vec, [self.BATCH_SIZE, self.NUM_TRUE])
+ indices_val, true_candidates_val = sess.run(
+ [indices, true_candidates_matrix])
+
+ self.assertAllEqual(indices_val, [0, 0, 1, 1, 2, 2])
+ self.assertAllEqual(true_candidates_val, self.TRUE_LABELS)
+
+ def testSampledCandidates(self):
+ with self.test_session():
+ true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
+ dtype=tf.int64)
+ sampled_candidates, _, _ = tf.nn.all_candidate_sampler(
+ true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
+ result = sampled_candidates.eval()
+
+ expected_ids = [0, 1, 2, 3, 4]
+ self.assertAllEqual(result, expected_ids)
+ self.assertEqual(sampled_candidates.get_shape(), [self.NUM_SAMPLED])
+
+ def testTrueLogExpectedCount(self):
+ with self.test_session():
+ true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
+ dtype=tf.int64)
+ _, true_expected_count, _ = tf.nn.all_candidate_sampler(
+ true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
+ true_log_expected_count = tf.log(true_expected_count)
+ result = true_log_expected_count.eval()
+
+ self.assertAllEqual(result, [[0.0] * self.NUM_TRUE] * self.BATCH_SIZE)
+ self.assertEqual(true_expected_count.get_shape(), [self.BATCH_SIZE,
+ self.NUM_TRUE])
+ self.assertEqual(true_log_expected_count.get_shape(), [self.BATCH_SIZE,
+ self.NUM_TRUE])
+
+ def testSampledLogExpectedCount(self):
+ with self.test_session():
+ true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
+ dtype=tf.int64)
+ _, _, sampled_expected_count = tf.nn.all_candidate_sampler(
+ true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
+ sampled_log_expected_count = tf.log(sampled_expected_count)
+ result = sampled_log_expected_count.eval()
+
+ self.assertAllEqual(result, [0.0] * self.NUM_SAMPLED)
+ self.assertEqual(sampled_expected_count.get_shape(), [self.NUM_SAMPLED])
+ self.assertEqual(sampled_log_expected_count.get_shape(), [self.NUM_SAMPLED])
+
+ def testAccidentalHits(self):
+ with self.test_session() as sess:
+ true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
+ dtype=tf.int64)
+ sampled_candidates, _, _ = tf.nn.all_candidate_sampler(
+ true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
+ accidental_hits = tf.nn.compute_accidental_hits(
+ true_classes, sampled_candidates, self.NUM_TRUE)
+ indices, ids, weights = sess.run(accidental_hits)
+
+ self.assertEqual(1, accidental_hits[0].get_shape().ndims)
+ self.assertEqual(1, accidental_hits[1].get_shape().ndims)
+ self.assertEqual(1, accidental_hits[2].get_shape().ndims)
+ for index, id_, weight in zip(indices, ids, weights):
+ self.assertTrue(id_ in self.TRUE_LABELS[index])
+ self.assertLess(weight, -1.0e37)
+
+ def testSeed(self):
+
+ def draw(seed):
+ with self.test_session():
+ true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
+ dtype=tf.int64)
+ sampled, _, _ = tf.nn.log_uniform_candidate_sampler(
+ true_classes,
+ self.NUM_TRUE,
+ self.NUM_SAMPLED,
+ True,
+ 5,
+ seed=seed)
+ return sampled.eval()
+ # Non-zero seed. Repeatable.
+ for seed in [1, 12, 123, 1234]:
+ self.assertAllEqual(draw(seed), draw(seed))
+ # A seed of None means a random seed is chosen each run.
+ num_same = 0
+ for _ in range(10):
+ if np.allclose(draw(None), draw(None)):
+ num_same += 1
+ # Accounts for the fact that the same random seed may be picked
+ # twice very rarely.
+ self.assertLessEqual(num_same, 2)
+
+
+if __name__ == "__main__":
+ tf.test.main()
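
A sketch of the collisions testAccidentalHits expects: with all_candidate_sampler every id in [0, 5) is sampled, so every true label collides, and compute_accidental_hits reports the (row, sampled id) pairs:

    true_labels = [[1, 2], [0, 4], [3, 3]]
    sampled = [0, 1, 2, 3, 4]
    hits = [(row, s) for row, labels in enumerate(true_labels)
            for s in sampled if s in labels]
    print(hits)  # [(0, 1), (0, 2), (1, 0), (1, 4), (2, 3)]
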
diff --git a/tensorflow/python/kernel_tests/cast_op_test.py b/tensorflow/python/kernel_tests/cast_op_test.py
new file mode 100644
index 0000000000..21e8f71198
--- /dev/null
+++ b/tensorflow/python/kernel_tests/cast_op_test.py
@@ -0,0 +1,165 @@
+"""Tests for tensorflow.ops.tf.cast."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class CastOpTest(tf.test.TestCase):
+
+ def _toDataType(self, dtype):
+ """Returns TensorFlow data type for numpy type."""
+ if dtype == np.float32:
+ return tf.float32
+ elif dtype == np.float64:
+ return tf.float64
+ elif dtype == np.int32:
+ return tf.int32
+ elif dtype == np.int64:
+ return tf.int64
+ elif dtype == np.bool:
+ return tf.bool
+ else:
+ return None
+
+ def _cast(self, x, dtype, use_gpu=False):
+ with self.test_session(use_gpu=use_gpu):
+ val = tf.constant(x, self._toDataType(np.array([x]).dtype))
+ return tf.cast(val, self._toDataType(dtype), name="cast").eval()
+
+ def _test(self, x, dtype, use_gpu=False):
+ """Tests cast(x) to dtype behaves the same as numpy.astype."""
+ np_ans = x.astype(dtype)
+ tf_ans = self._cast(x, dtype, use_gpu)
+ self.assertAllEqual(np_ans, tf_ans)
+
+ def _testTypes(self, x, use_gpu=False):
+ """Tests cast(x) to different tf."""
+ if use_gpu:
+ type_list = [np.float32, np.float64, np.int64]
+ else:
+ type_list = [np.float32, np.float64, np.int32, np.int64]
+ for from_type in type_list:
+ for to_type in type_list:
+ self._test(x.astype(from_type), to_type, use_gpu)
+
+ self._test(x.astype(np.bool), np.float32, use_gpu)
+ self._test(x.astype(np.uint8), np.float32, use_gpu)
+ if not use_gpu:
+ self._test(x.astype(np.bool), np.int32, use_gpu)
+ self._test(x.astype(np.int32), np.int32, use_gpu)
+
+ def _testAll(self, x):
+ self._testTypes(x, use_gpu=False)
+ if x.dtype == np.float32 or x.dtype == np.float64:
+ self._testTypes(x, use_gpu=True)
+
+ def testBasic(self):
+ self._testAll(np.arange(-10, 10).reshape(2, 10))
+ self._testAll(np.linspace(-10, 10, 17))
+
+ def testSmallValues(self):
+ f4 = np.finfo(np.float32)
+ f8 = np.finfo(np.float64)
+ self._testAll(np.array([0, -1, 1, -f4.resolution, f4.resolution,
+ f8.resolution, -f8.resolution]))
+
+ def testBfloat16(self):
+ a = np.random.uniform(-100, 100, 100).astype(np.float32)
+ with self.test_session(use_gpu=False):
+ b = tf.cast(tf.cast(a, tf.bfloat16), tf.float32)
+ self.assertAllClose(a, b.eval(), rtol=1/128.)
+ with self.test_session(use_gpu=True):
+ b = tf.cast(tf.cast(a, tf.bfloat16), tf.float32)
+ self.assertAllClose(a, b.eval(), rtol=1/128.)
+
+ def testRandom(self):
+ self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
+ self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))
+
+ # Special values like int32max, int64min, inf, -inf, and nan are cast
+ # to integer values in somewhat unexpected ways, and they behave
+ # differently on CPU and GPU.
+ def _compare(self, x, dst_dtype, expected, use_gpu=False):
+ np.testing.assert_equal(self._cast(x, dst_dtype, use_gpu=use_gpu),
+ dst_dtype(expected))
+
+ def testIntToFloatBoundary(self):
+ i4 = np.iinfo(np.int32)
+ i8 = np.iinfo(np.int64)
+
+ self._compare(i4.min, np.float32, i4.min, False)
+ self._compare(i4.max, np.float32, i4.max, False)
+ self._compare(i8.min, np.float32, i8.min, False)
+ self._compare(i8.max, np.float32, i8.max, False)
+ self._compare(i4.min, np.float64, i4.min, False)
+ self._compare(i4.max, np.float64, i4.max, False)
+ self._compare(i8.min, np.float64, i8.min, False)
+ self._compare(i8.max, np.float64, i8.max, False)
+ # NOTE: GPU does not support int32/int64 for casting.
+
+ def testInfNan(self):
+ i4 = np.iinfo(np.int32)
+ i8 = np.iinfo(np.int64)
+
+ self._compare(np.inf, np.float32, np.inf, False)
+ self._compare(np.inf, np.float64, np.inf, False)
+ self._compare(np.inf, np.int32, i4.min, False)
+ self._compare(np.inf, np.int64, i8.min, False)
+ self._compare(-np.inf, np.float32, -np.inf, False)
+ self._compare(-np.inf, np.float64, -np.inf, False)
+ self._compare(-np.inf, np.int32, i4.min, False)
+ self._compare(-np.inf, np.int64, i8.min, False)
+ self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
+ self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
+ self._compare(np.nan, np.int32, i4.min, False)
+ self._compare(np.nan, np.int64, i8.min, False)
+
+ self._compare(np.inf, np.float32, np.inf, True)
+ self._compare(np.inf, np.float64, np.inf, True)
+ self._compare(-np.inf, np.float32, -np.inf, True)
+ self._compare(-np.inf, np.float64, -np.inf, True)
+ self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
+ self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)
+
+ def _OpError(self, x, dtype, err):
+ with self.test_session():
+ with self.assertRaisesOpError(err):
+ tf.cast(x, dtype).eval()
+
+ def testNotImplemented(self):
+ self._OpError(np.arange(0, 10), tf.string,
+ "Cast.*int64.*string.*")
+
+ def testGradients(self):
+ t = [tf.float32, tf.float64]
+ for src_t in t:
+ for dst_t in t:
+ with self.test_session():
+ x = tf.constant(1.0, src_t)
+ z = tf.identity(x)
+ y = tf.cast(z, dst_t)
+ err = gc.ComputeGradientError(x, [1], y, [1])
+ self.assertLess(err, 1e-3)
+
+
+class SparseTensorCastTest(tf.test.TestCase):
+
+ def testCast(self):
+ indices = tf.constant([[0L], [1L], [2L]])
+ values = tf.constant(np.array([1, 2, 3], np.int64))
+ shape = tf.constant([3L])
+ st = tf.SparseTensor(indices, values, shape)
+ st_cast = tf.cast(st, tf.float32)
+ with self.test_session():
+ self.assertAllEqual(st_cast.indices.eval(), [[0L], [1L], [2L]])
+ self.assertAllEqual(st_cast.values.eval(),
+ np.array([1, 2, 3], np.float32))
+ self.assertAllEqual(st_cast.shape.eval(), [3L])
+
+
+if __name__ == "__main__":
+ tf.test.main()
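
The rtol=1/128 in testBfloat16 reflects bfloat16's 7-bit mantissa. A hedged simulation of the round trip by bit masking (real conversions may round rather than truncate):

    import numpy as np

    def to_bfloat16_trunc(x):
        # bfloat16 is float32 with the low 16 mantissa bits dropped.
        bits = np.asarray(x, np.float32).view(np.uint32)
        return (bits & np.uint32(0xFFFF0000)).view(np.float32)

    a = np.float32(1.0 + 1.0 / 256.0)  # needs more mantissa bits than bfloat16 has
    print(to_bfloat16_trunc(a))        # 1.0; the error is well within 1/128
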
diff --git a/tensorflow/python/kernel_tests/cholesky_op_test.py b/tensorflow/python/kernel_tests/cholesky_op_test.py
new file mode 100644
index 0000000000..17e8d116be
--- /dev/null
+++ b/tensorflow/python/kernel_tests/cholesky_op_test.py
@@ -0,0 +1,74 @@
+"""Tests for tensorflow.ops.tf.Cholesky."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class CholeskyOpTest(tf.test.TestCase):
+
+ def _verifyCholesky(self, x):
+ with self.test_session() as sess:
+ # Verify that LL^T == x.
+ if x.ndim == 2:
+ chol = tf.cholesky(x)
+ verification = tf.matmul(chol,
+ chol,
+ transpose_a=False,
+ transpose_b=True)
+ else:
+ chol = tf.batch_cholesky(x)
+ verification = tf.batch_matmul(chol, chol, adj_x=False, adj_y=True)
+ chol_np, verification_np = sess.run([chol, verification])
+ self.assertAllClose(x, verification_np)
+ self.assertShapeEqual(x, chol)
+ # Check that the Cholesky factor is lower triangular and has positive
+ # diagonal elements.
+ if chol_np.shape[-1] > 0:
+ chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
+ chol_np.shape[-1]))
+ for chol_matrix in chol_reshaped:
+ self.assertAllClose(chol_matrix, np.tril(chol_matrix))
+ self.assertTrue((np.diag(chol_matrix) > 0.0).all())
+
+ def testBasic(self):
+ self._verifyCholesky(np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]))
+
+ def testBatch(self):
+ simple_array = np.array([[[1., 0.], [0., 5.]]]) # shape (1, 2, 2)
+ self._verifyCholesky(simple_array)
+ self._verifyCholesky(np.vstack((simple_array, simple_array)))
+ odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
+ self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
+
+ # Generate random positive-definite matrices.
+ matrices = np.random.rand(10, 5, 5)
+ for i in xrange(10):
+ matrices[i] = np.dot(matrices[i].T, matrices[i])
+ self._verifyCholesky(matrices)
+
+ def testNonSquareMatrix(self):
+ with self.assertRaises(ValueError):
+ tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
+
+ def testWrongDimensions(self):
+ tensor3 = tf.constant([1., 2.])
+ with self.assertRaises(ValueError):
+ tf.cholesky(tensor3)
+
+ def testNotInvertible(self):
+ # The input must be positive definite for Cholesky to succeed.
+ with self.test_session():
+ with self.assertRaisesOpError("LLT decomposition was not successful. The "
+ "input might not be valid."):
+ # The matrix below is symmetric but not positive definite
+ self._verifyCholesky(np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1.,
+ 1.]]))
+
+ def testEmpty(self):
+ self._verifyCholesky(np.empty([0, 2, 2]))
+ self._verifyCholesky(np.empty([2, 0, 0]))
+
+
+if __name__ == "__main__":
+ tf.test.main()
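
A numpy sketch of the property _verifyCholesky checks: for a symmetric positive-definite input, the Cholesky factor L is lower triangular with a positive diagonal and satisfies L L^T == x:

    import numpy as np
    a = np.random.rand(5, 5)
    spd = np.dot(a.T, a) + 5 * np.eye(5)  # boosting the diagonal keeps it PD
    L = np.linalg.cholesky(spd)
    print(np.allclose(np.dot(L, L.T), spd))  # True
    print(np.allclose(L, np.tril(L)))        # True: lower triangular
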
diff --git a/tensorflow/python/kernel_tests/clip_ops_test.py b/tensorflow/python/kernel_tests/clip_ops_test.py
new file mode 100644
index 0000000000..46bba7514d
--- /dev/null
+++ b/tensorflow/python/kernel_tests/clip_ops_test.py
@@ -0,0 +1,222 @@
+"""Tests for tensorflow.ops.clip_ops."""
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+
+class ClipTest(tf.test.TestCase):
+
+ # ClipByValue test
+ def testClipByValue(self):
+ with self.test_session():
+ x = tf.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
+ np_ans = [[-4.4, 2.0, 3.0],
+ [4.0, 4.4, 4.4]]
+ clip_value = 4.4
+ ans = tf.clip_by_value(x, -clip_value, clip_value)
+ tf_ans = ans.eval()
+
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testClipByValueNonFinite(self):
+ with self.test_session():
+ x = tf.constant([float('NaN'), float('Inf'), -float('Inf')])
+ np_ans = [float('NaN'), 4.0, -4.0]
+ clip_value = 4.0
+ ans = tf.clip_by_value(x, -clip_value, clip_value)
+ tf_ans = ans.eval()
+
+ self.assertAllClose(np_ans, tf_ans)
+
+ # ClipByNorm tests
+ def testClipByNormClipped(self):
+ # Norm clipping when clip_norm < 5
+ with self.test_session():
+ x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ # Norm of x = sqrt(3^2 + 4^2) = 5
+ np_ans = [[-2.4, 0.0, 0.0],
+ [3.2, 0.0, 0.0]]
+ clip_norm = 4.0
+ ans = tf.clip_by_norm(x, clip_norm)
+ tf_ans = ans.eval()
+
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testClipByNormNotClipped(self):
+ # No norm clipping when clip_norm >= 5
+ with self.test_session():
+ x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ # Norm of x = sqrt(3^2 + 4^2) = 5
+ np_ans = [[-3.0, 0.0, 0.0],
+ [4.0, 0.0, 0.0]]
+ clip_norm = 6.0
+ ans = tf.clip_by_norm(x, clip_norm)
+ tf_ans = ans.eval()
+
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testClipByNormZero(self):
+ # No norm clipping when norm = 0
+ with self.test_session():
+ x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
+ # Norm = 0, no changes
+ np_ans = [[0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0]]
+ clip_norm = 6.0
+ ans = tf.clip_by_norm(x, clip_norm)
+ tf_ans = ans.eval()
+
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testClipByGlobalNormClipped(self):
+ # Norm clipping when clip_norm < 5
+ with self.test_session():
+ x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x1 = tf.constant([1.0, -2.0])
+ # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
+ clip_norm = 4.0
+
+ # Answers are the original tensors scaled by 4.0/5.0
+ np_ans_0 = [[-1.6, 0.0, 0.0],
+ [3.2, 0.0, 0.0]]
+ np_ans_1 = [0.8, -1.6]
+
+ ans, norm = tf.clip_by_global_norm((x0, x1), clip_norm)
+ tf_ans_1 = ans[0].eval()
+ tf_ans_2 = ans[1].eval()
+ tf_norm = norm.eval()
+
+ self.assertAllClose(tf_norm, 5.0)
+ self.assertAllClose(np_ans_0, tf_ans_1)
+ self.assertAllClose(np_ans_1, tf_ans_2)
+
+ def testClipByGlobalNormSupportsNone(self):
+ # Norm clipping when clip_norm < 5
+ with self.test_session():
+ x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x1 = tf.constant([1.0, -2.0])
+ # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
+ clip_norm = 4.0
+
+ # Answers are the original tensors scaled by 4.0/5.0
+ np_ans_0 = [[-1.6, 0.0, 0.0],
+ [3.2, 0.0, 0.0]]
+ np_ans_1 = [0.8, -1.6]
+
+ ans, norm = tf.clip_by_global_norm((x0, None, x1, None), clip_norm)
+ self.assertTrue(ans[1] is None)
+ self.assertTrue(ans[3] is None)
+ tf_ans_1 = ans[0].eval()
+ tf_ans_2 = ans[2].eval()
+ tf_norm = norm.eval()
+
+ self.assertAllClose(tf_norm, 5.0)
+ self.assertAllClose(np_ans_0, tf_ans_1)
+ self.assertAllClose(np_ans_1, tf_ans_2)
+
+ # ClipByGlobalNorm with IndexedSlices test
+ def testClipByGlobalNormWithIndexedSlicesClipped(self):
+ # Norm clipping when clip_norm < 5
+ with self.test_session():
+ x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x1 = tf.IndexedSlices(tf.constant([1.0, -2.0]),
+ tf.constant([3, 4]))
+ # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
+ clip_norm = 4.0
+
+ # Answers are the original tensors scaled by 4.0/5.0
+ np_ans_0 = [[-1.6, 0.0, 0.0],
+ [3.2, 0.0, 0.0]]
+ np_ans_1 = [0.8, -1.6]
+
+ ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
+ tf_ans_1 = ans[0].eval()
+ tf_ans_2 = ans[1].values.eval()
+ tf_norm = norm.eval()
+
+ self.assertAllClose(tf_norm, 5.0)
+ self.assertAllClose(np_ans_0, tf_ans_1)
+ self.assertAllClose(np_ans_1, tf_ans_2)
+
+ def testClipByGlobalNormNotClipped(self):
+ # No norm clipping when clip_norm >= 5
+ with self.test_session():
+ x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ x1 = tf.constant([1.0, -2.0])
+ # Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
+ np_ans_0 = [[-2.0, 0.0, 0.0],
+ [4.0, 0.0, 0.0]]
+ np_ans_1 = [1.0, -2.0]
+ clip_norm = 6.0
+
+ ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
+ tf_ans_1 = ans[0].eval()
+ tf_ans_2 = ans[1].eval()
+ tf_norm = norm.eval()
+
+ self.assertAllClose(tf_norm, 5.0)
+ self.assertAllClose(np_ans_0, tf_ans_1)
+ self.assertAllClose(np_ans_1, tf_ans_2)
+
+ def testClipByGlobalNormZero(self):
+ # No norm clipping when norm = 0
+ with self.test_session():
+ x0 = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
+ x1 = tf.constant([0.0, 0.0])
+ # Norm = 0, no changes
+ np_ans_0 = [[0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0]]
+ np_ans_1 = [0.0, 0.0]
+ clip_norm = 6.0
+
+ ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
+ tf_ans_1 = ans[0].eval()
+ tf_ans_2 = ans[1].eval()
+ tf_norm = norm.eval()
+
+ self.assertAllClose(tf_norm, 0.0)
+ self.assertAllClose(np_ans_0, tf_ans_1)
+ self.assertAllClose(np_ans_1, tf_ans_2)
+
+ def testClipByAverageNormClipped(self):
+ # Norm clipping when clip_norm (0.8) < average norm (0.83333333)
+ with self.test_session():
+ x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
+ np_ans = [[-2.88, 0.0, 0.0],
+ [3.84, 0.0, 0.0]]
+ clip_norm = 0.8
+ ans = tf.clip_by_average_norm(x, clip_norm)
+ tf_ans = ans.eval()
+
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testClipByAverageNormNotClipped(self):
+ # No norm clipping when clip_norm (0.9) >= average norm (0.83333333)
+ with self.test_session():
+ x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
+ # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
+ np_ans = [[-3.0, 0.0, 0.0],
+ [4.0, 0.0, 0.0]]
+ clip_norm = 0.9
+ ans = tf.clip_by_average_norm(x, clip_norm)
+ tf_ans = ans.eval()
+
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testClipByAverageNormZero(self):
+ # No norm clipping when the average norm = 0
+ with self.test_session():
+ x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
+ # Average norm = 0, no changes
+ np_ans = [[0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0]]
+ clip_norm = 0.9
+ ans = tf.clip_by_average_norm(x, clip_norm)
+ tf_ans = ans.eval()
+
+ self.assertAllClose(np_ans, tf_ans)
+
+if __name__ == "__main__":
+ tf.test.main()
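
The global-norm arithmetic asserted above, as a numpy sketch: the norm is taken over all tensors jointly, and each tensor is scaled by clip_norm / max(global_norm, clip_norm):

    import numpy as np
    x0 = np.array([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0])
    x1 = np.array([1.0, -2.0])
    global_norm = np.sqrt(np.sum(x0 ** 2) + np.sum(x1 ** 2))
    print(global_norm)                   # 5.0
    scale = 4.0 / max(global_norm, 4.0)  # clip_norm = 4.0
    print(x1 * scale)                    # [ 0.8 -1.6]
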
diff --git a/tensorflow/python/kernel_tests/concat_op_test.py b/tensorflow/python/kernel_tests/concat_op_test.py
new file mode 100644
index 0000000000..3f6c43f0a6
--- /dev/null
+++ b/tensorflow/python/kernel_tests/concat_op_test.py
@@ -0,0 +1,276 @@
+"""Functional tests for Concat Op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class ConcatOpTest(tf.test.TestCase):
+
+ def testHStack(self):
+ with self.test_session():
+ p1 = tf.placeholder(tf.float32, shape=[4, 4])
+ p2 = tf.placeholder(tf.float32, shape=[4, 4])
+ c = tf.concat(0, [p1, p2])
+ params = {
+ p1: np.random.rand(4, 4).astype("f"),
+ p2: np.random.rand(4, 4).astype("f")
+ }
+ result = c.eval(feed_dict=params)
+
+ self.assertEqual(result.shape, c.get_shape())
+ self.assertAllEqual(result[:4, :], params[p1])
+ self.assertAllEqual(result[4:, :], params[p2])
+
+ def testVStack(self):
+ with self.test_session():
+ p1 = tf.placeholder(tf.float32, shape=[4, 4])
+ p2 = tf.placeholder(tf.float32, shape=[4, 4])
+ c = tf.concat(1, [p1, p2])
+ params = {
+ p1: np.random.rand(4, 4).astype("f"),
+ p2: np.random.rand(4, 4).astype("f")
+ }
+ result = c.eval(feed_dict=params)
+
+ self.assertEqual(result.shape, c.get_shape())
+ self.assertAllEqual(result[:, :4], params[p1])
+ self.assertAllEqual(result[:, 4:], params[p2])
+
+ def testInt32GPU(self):
+ with self.test_session(use_gpu=True):
+ p1 = np.random.rand(2, 3).astype("i")
+ p2 = np.random.rand(2, 3).astype("i")
+ x1 = tf.constant(p1)
+ x2 = tf.constant(p2)
+ c = tf.concat(0, [x1, x2])
+ result = c.eval()
+ self.assertAllEqual(result[:2, :], p1)
+ self.assertAllEqual(result[2:, :], p2)
+
+ def testRefType(self):
+ with self.test_session():
+ p1 = tf.placeholder(tf.float32_ref, shape=[4, 4])
+ p2 = tf.placeholder(tf.float32_ref, shape=[4, 4])
+ c = tf.concat(0, [p1, p2])
+ params = {
+ p1: np.random.rand(4, 4).astype("f"),
+ p2: np.random.rand(4, 4).astype("f")
+ }
+ result = c.eval(feed_dict=params)
+
+ self.assertEqual(result.shape, c.get_shape())
+ self.assertAllEqual(result[:4, :], params[p1])
+ self.assertAllEqual(result[4:, :], params[p2])
+
+ def _testRandom(self, dtype, use_gpu=False):
+ # Random dims of rank 5
+ shape = np.random.randint(1, 5, size=5)
+ # Random number of tensors, but always > 1.
+ num_tensors = np.random.randint(2, 10)
+ # Random dim to concat on
+ concat_dim = np.random.randint(5)
+ params = {}
+ with self.test_session(use_gpu=use_gpu):
+ p = []
+ for i in np.arange(num_tensors):
+ input_shape = shape
+ input_shape[concat_dim] = np.random.randint(1, 5)
+ placeholder = tf.placeholder(dtype, shape=input_shape)
+ p.append(placeholder)
+
+ t = dtype.as_numpy_dtype
+ params[placeholder] = np.random.rand(*input_shape).astype(t)
+
+ c = tf.concat(concat_dim, p)
+ result = c.eval(feed_dict=params)
+
+ self.assertEqual(result.shape, c.get_shape())
+ cur_offset = 0
+
+ for i in np.arange(num_tensors):
+ # The index into the result is the ':' along all dimensions
+ # except the concat_dim. slice(0, size) is used for ':', and
+ # a list of slices is used to index into result.
+ ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]
+ ind[concat_dim] = slice(cur_offset,
+ cur_offset + params[p[i]].shape[concat_dim])
+ cur_offset += params[p[i]].shape[concat_dim]
+ self.assertAllEqual(result[ind], params[p[i]])
+
+ def testRandom(self):
+ self._testRandom(tf.float32)
+ self._testRandom(tf.int16)
+ self._testRandom(tf.int32, use_gpu=True)
+ # Note that the following does not work since bfloat16 is not supported in
+ # numpy.
+ # self._testRandom(tf.bfloat16)
+
+ def _testGradientsSimple(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ inp = []
+ inp_tensors = []
+ for x in [1, 2, 6]:
+ shape = [10, x, 2]
+ t = np.random.rand(*shape).astype("f")
+ inp.append(t)
+ inp_tensors.append(
+ tf.constant([float(y) for y in t.flatten()],
+ shape=shape, dtype=tf.float32))
+ c = tf.concat(1, inp_tensors)
+ output_shape = [10, 9, 2]
+ grad_inp = np.random.rand(*output_shape).astype("f")
+ grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
+ shape=output_shape)
+ grad = tf.gradients([c], inp_tensors, [grad_tensor])
+ concated_grad = tf.concat(1, grad)
+ result = concated_grad.eval()
+
+ self.assertAllEqual(result, grad_inp)
+
+ def testGradientsSimpleAll(self):
+ self._testGradientsSimple(use_gpu=False)
+ self._testGradientsSimple(use_gpu=True)
+
+ def _testGradientsFirstDim(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ inp = []
+ inp_tensors = []
+ for x in [1, 2, 6]:
+ shape = [x, 10, 2]
+ t = np.random.rand(*shape).astype("f")
+ inp.append(t)
+ inp_tensors.append(
+ tf.constant([float(y) for y in t.flatten()],
+ shape=shape, dtype=tf.float32))
+ c = tf.concat(0, inp_tensors)
+ output_shape = [9, 10, 2]
+ grad_inp = np.random.rand(*output_shape).astype("f")
+ grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
+ shape=output_shape)
+ grad = tf.gradients([c], inp_tensors, [grad_tensor])
+ concated_grad = tf.concat(0, grad)
+ result = concated_grad.eval()
+
+ self.assertAllEqual(result, grad_inp)
+
+ def testGradientsFirstDimAll(self):
+ self._testGradientsFirstDim(use_gpu=False)
+ self._testGradientsFirstDim(use_gpu=True)
+
+ def _testGradientsLastDim(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ inp = []
+ inp_tensors = []
+ for x in [1, 2, 6]:
+ shape = [10, 2, x]
+ t = np.random.rand(*shape).astype("f")
+ inp.append(t)
+ inp_tensors.append(
+ tf.constant([float(y) for y in t.flatten()],
+ shape=shape, dtype=tf.float32))
+ c = tf.concat(2, inp_tensors)
+ output_shape = [10, 2, 9]
+ grad_inp = np.random.rand(*output_shape).astype("f")
+ grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
+ shape=output_shape)
+ grad = tf.gradients([c], inp_tensors, [grad_tensor])
+ concated_grad = tf.concat(2, grad)
+ result = concated_grad.eval()
+
+ self.assertAllEqual(result, grad_inp)
+
+ def testGradientsLastDimAll(self):
+ self._testGradientsLastDim(use_gpu=False)
+ self._testGradientsLastDim(use_gpu=True)
+
+ def _RunAndVerifyGradientsRandom(self, use_gpu):
+ # Random dims of rank 5
+ input_shape = np.random.randint(1, 5, size=5)
+ # Random number of tensors
+ num_tensors = np.random.randint(1, 10)
+ # Random dim to concat on
+ concat_dim = np.random.randint(5)
+ concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
+ with self.test_session(use_gpu=use_gpu):
+ inp = []
+ inp_tensors = []
+ for x in concat_dim_sizes:
+ shape = input_shape
+ shape[concat_dim] = x
+ t = np.random.rand(*shape).astype("f")
+ inp.append(t)
+ inp_tensors.append(
+ tf.constant([float(y) for y in t.flatten()],
+ shape=shape, dtype=tf.float32))
+ c = tf.concat(concat_dim, inp_tensors)
+ output_shape = input_shape
+ output_shape[concat_dim] = concat_dim_sizes.sum()
+ grad_inp = np.random.rand(*output_shape).astype("f")
+ grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
+ shape=output_shape)
+ grad = tf.gradients([c], inp_tensors, [grad_tensor])
+ concated_grad = tf.concat(concat_dim, grad)
+ result = concated_grad.eval()
+
+ self.assertAllEqual(result, grad_inp)
+
+ def testGradientsRandom(self):
+ for _ in range(5):
+ self._RunAndVerifyGradientsRandom(use_gpu=False)
+ self._RunAndVerifyGradientsRandom(use_gpu=True)
+
+ def testShapeError(self):
+ # Rank doesn't match.
+ with self.assertRaises(ValueError):
+ tf.concat(1, [tf.constant(10.0, shape=[4, 4, 4, 4]),
+ tf.constant(20.0, shape=[4, 4, 4])])
+
+ # Dimensions don't match in a non-concat dim.
+ with self.assertRaises(ValueError):
+ tf.concat(1, [tf.constant(10.0, shape=[1, 2, 1]),
+ tf.constant(20.0, shape=[3, 2, 1])])
+
+ # concat_dim out of range.
+ with self.assertRaises(ValueError):
+ tf.concat(3, [tf.constant(10.0, shape=[4, 4, 4]),
+ tf.constant(20.0, shape=[4, 4, 4])])
+
+ def testShapeWithUnknownConcatDim(self):
+ p1 = tf.placeholder(tf.float32)
+ c1 = tf.constant(10.0, shape=[4, 4, 4, 4])
+ p2 = tf.placeholder(tf.float32)
+ c2 = tf.constant(20.0, shape=[4, 4, 4, 4])
+ dim = tf.placeholder(tf.int32)
+ concat = tf.concat(dim, [p1, c1, p2, c2])
+ self.assertEqual(4, concat.get_shape().ndims)
+
+ # Rank doesn't match.
+ c3 = tf.constant(30.0, shape=[4, 4, 4])
+ with self.assertRaises(ValueError):
+ tf.concat(dim, [p1, c1, p2, c3])
+
+ def testZeroSize(self):
+ # Verify that concat doesn't crash and burn for zero size inputs
+ np.random.seed(7)
+ for use_gpu in False, True:
+ with self.test_session(use_gpu=use_gpu) as sess:
+ for shape0 in (), (2,):
+ axis = len(shape0)
+ for shape1 in (), (3,):
+ for n0 in 0, 1, 2:
+ for n1 in 0, 1, 2:
+ x0 = np.random.randn(*(shape0 + (n0,) + shape1))
+ x1 = np.random.randn(*(shape0 + (n1,) + shape1))
+ correct = np.concatenate([x0, x1], axis=axis)
+ xs = map(tf.constant, [x0, x1])
+ c = tf.concat(axis, xs)
+ self.assertAllEqual(c.eval(), correct)
+ # Check gradients
+ dc = np.random.randn(*c.get_shape().as_list())
+ dxs = sess.run(tf.gradients(c, xs, dc))
+ self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
+
+
+if __name__ == "__main__":
+ tf.test.main()
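
Note that in this version of the API tf.concat takes concat_dim as its first argument; the result matches numpy's concatenate along the same axis, as in this sketch:

    import numpy as np
    p1 = np.random.rand(4, 4).astype(np.float32)
    p2 = np.random.rand(4, 4).astype(np.float32)
    print(np.concatenate([p1, p2], axis=0).shape)  # (8, 4), like tf.concat(0, [p1, p2])
    print(np.concatenate([p1, p2], axis=1).shape)  # (4, 8), like tf.concat(1, [p1, p2])
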
diff --git a/tensorflow/python/kernel_tests/constant_op_test.py b/tensorflow/python/kernel_tests/constant_op_test.py
new file mode 100644
index 0000000000..92f9b5fe4a
--- /dev/null
+++ b/tensorflow/python/kernel_tests/constant_op_test.py
@@ -0,0 +1,524 @@
+"""Tests for ConstantOp."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.ops import gen_array_ops
+
+
+class ConstantTest(tf.test.TestCase):
+
+ def _testCpu(self, x):
+ np_ans = np.array(x)
+ with self.test_session(use_gpu=False):
+ tf_ans = tf.convert_to_tensor(x).eval()
+ if np_ans.dtype in [np.float32, np.float64, np.complex64]:
+ self.assertAllClose(np_ans, tf_ans)
+ else:
+ self.assertAllEqual(np_ans, tf_ans)
+
+ def _testGpu(self, x):
+ np_ans = np.array(x)
+ with self.test_session(use_gpu=True):
+ tf_ans = tf.convert_to_tensor(x).eval()
+ if np_ans.dtype in [np.float32, np.float64, np.complex64]:
+ self.assertAllClose(np_ans, tf_ans)
+ else:
+ self.assertAllEqual(np_ans, tf_ans)
+
+ def _testAll(self, x):
+ self._testCpu(x)
+ self._testGpu(x)
+
+ def testFloat(self):
+ self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
+ self._testAll(
+ np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
+ self._testAll(np.empty((2, 0, 5)).astype(np.float32))
+
+ def testDouble(self):
+ self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
+ self._testAll(
+ np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
+ self._testAll(np.empty((2, 0, 5)).astype(np.float64))
+
+ def testInt32(self):
+ self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
+ self._testAll(
+ (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
+ self._testAll(np.empty((2, 0, 5)).astype(np.int32))
+
+ def testInt64(self):
+ self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
+ self._testAll(
+ (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
+ self._testAll(np.empty((2, 0, 5)).astype(np.int64))
+
+ def testSComplex(self):
+ self._testAll(
+ np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
+ np.complex64))
+    self._testAll(
+        np.complex(1, 2) *
+        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
+ self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
+
+ def testString(self):
+ self._testCpu(np.array([str(x) for x in np.arange(-15, 15)]).reshape(
+ [2, 3, 5]))
+ self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
+
+ def testStringWithNulls(self):
+ with self.test_session():
+ val = tf.convert_to_tensor("\0\0\0\0").eval()
+ self.assertEqual(len(val), 4)
+ self.assertEqual(val, "\0\0\0\0")
+
+ with self.test_session():
+ val = tf.convert_to_tensor("xx\0xx").eval()
+ self.assertEqual(len(val), 5)
+ self.assertAllEqual(val, "xx\0xx")
+ nested = [["\0\0\0\0", "xx\0xx"], ["\0_\0_\0_\0", "\0"]]
+
+ with self.test_session():
+ val = tf.convert_to_tensor(nested).eval()
+ # NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
+ # numpy array, which loses the null terminators.
+ self.assertEqual(val.tolist(), nested)
+
+ def testExplicitShapeNumPy(self):
+ with tf.Graph().as_default():
+ c = tf.constant(
+ np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
+ shape=[2, 3, 5])
+ self.assertEqual(c.get_shape(), [2, 3, 5])
+
+ def testImplicitShapeNumPy(self):
+ with tf.Graph().as_default():
+ c = tf.constant(
+ np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
+ self.assertEqual(c.get_shape(), [2, 3, 5])
+
+ def testExplicitShapeList(self):
+ with tf.Graph().as_default():
+ c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
+ self.assertEqual(c.get_shape(), [7])
+
+ def testImplicitShapeList(self):
+ with tf.Graph().as_default():
+ c = tf.constant([1, 2, 3, 4, 5, 6, 7])
+ self.assertEqual(c.get_shape(), [7])
+
+ def testExplicitShapeNumber(self):
+ with tf.Graph().as_default():
+ c = tf.constant(1, shape=[1])
+ self.assertEqual(c.get_shape(), [1])
+
+ def testImplicitShapeNumber(self):
+ with tf.Graph().as_default():
+ c = tf.constant(1)
+ self.assertEqual(c.get_shape(), [])
+
+ def testShapeInconsistent(self):
+ with tf.Graph().as_default():
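+      # Providing fewer elements than the shape needs is allowed; the last
+      # value is repeated to fill the remaining entries.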
+ c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
+ self.assertEqual(c.get_shape(), [10])
+
+ # pylint: disable=g-long-lambda
+ def testShapeWrong(self):
+ with tf.Graph().as_default():
+ with self.assertRaisesWithPredicateMatch(
+ ValueError,
+ lambda e: ("Too many elements provided. Needed at most 5, "
+ "but received 7" == str(e))):
+ tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
+ # pylint: enable=g-long-lambda
+
+ def testTooLargeConstant(self):
+ with tf.Graph().as_default():
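+      # 512 * 1024 * 1024 float32 values are 2GB of data, more than a single
+      # NodeDef may hold.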
+ large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
+ with self.assertRaisesRegexp(
+ ValueError,
+ "Cannot create an Operation with a NodeDef larger than 2GB."):
+ c = tf.constant(large_array)
+
+ def testTooLargeGraph(self):
+ with tf.Graph().as_default() as g:
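+      # Each constant below is 1GB, so together they push the GraphDef over
+      # the 2GB serialization limit.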
+ large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
+ c = tf.constant(large_array)
+ d = tf.constant(large_array)
+ with self.assertRaisesRegexp(
+ ValueError, "GraphDef cannot be larger than 2GB."):
+ g.as_graph_def()
+
+ def testSparseValuesRaiseErrors(self):
+ with self.assertRaisesRegexp(ValueError,
+ "setting an array element with a sequence"):
+ c = tf.constant([[1, 2], [3]], dtype=tf.int32)
+
+ with self.assertRaisesRegexp(ValueError, "must be a dense"):
+ c = tf.constant([[1, 2], [3]])
+
+ with self.assertRaisesRegexp(ValueError, "must be a dense"):
+ c = tf.constant([[1, 2], [3], [4, 5]])
+
+
+class AsTensorTest(tf.test.TestCase):
+
+ def testAsTensorForTensorInput(self):
+ with tf.Graph().as_default():
+ t = tf.constant(10.0)
+ x = tf.convert_to_tensor(t)
+ self.assertIs(t, x)
+
+ def testAsTensorForNonTensorInput(self):
+ with tf.Graph().as_default():
+ x = tf.convert_to_tensor(10.0)
+ self.assertTrue(isinstance(x, tf.Tensor))
+
+ def testAsTensorForShapeInput(self):
+ with self.test_session():
+ x = tf.convert_to_tensor(tf.TensorShape([]))
+ self.assertEqual(tf.int32, x.dtype)
+ self.assertAllEqual([], x.eval())
+
+ x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]))
+ self.assertEqual(tf.int32, x.dtype)
+ self.assertAllEqual([1, 2, 3], x.eval())
+
+ x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.int64)
+ self.assertEqual(tf.int64, x.dtype)
+ self.assertAllEqual([1, 2, 3], x.eval())
+
+ x = tf.reshape(tf.zeros([6]), tf.TensorShape([2, 3]))
+ self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval())
+
+ with self.assertRaisesRegexp(ValueError, "partially known"):
+ tf.convert_to_tensor(tf.TensorShape(None))
+
+ with self.assertRaisesRegexp(ValueError, "partially known"):
+ tf.convert_to_tensor(tf.TensorShape([1, None, 64]))
+
+ with self.assertRaises(TypeError):
+ tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.float32)
+
+ def testAsTensorForDimensionInput(self):
+ with self.test_session():
+ x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1])
+ self.assertEqual(tf.int32, x.dtype)
+ self.assertAllEqual(2, x.eval())
+
+ x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.int64)
+ self.assertEqual(tf.int64, x.dtype)
+ self.assertAllEqual(2, x.eval())
+
+ with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
+ tf.convert_to_tensor(tf.TensorShape(None)[1])
+
+ with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
+ tf.convert_to_tensor(tf.TensorShape([1, None, 64])[1])
+
+ with self.assertRaises(TypeError):
+ tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.float32)
+
+
+class IdentityOpTest(tf.test.TestCase):
+
+ def testIdTensor(self):
+ with tf.Graph().as_default():
+ x = tf.constant(2.0, shape=[6], name="input")
+ id_op = tf.identity(x, name="id")
+ self.assertTrue(isinstance(id_op.op.inputs[0], tf.Tensor))
+ self.assertProtoEquals(
+ "name: 'id' op: 'Identity' input: 'input' "
+ "attr { key: 'T' value { type: DT_FLOAT } }", id_op.op.node_def)
+
+
+class ZerosTest(tf.test.TestCase):
+
+ def _Zeros(self, shape):
+ with self.test_session():
+ ret = tf.zeros(shape)
+ self.assertEqual(shape, ret.get_shape())
+ return ret.eval()
+
+ def testConst(self):
+    self.assertTrue(np.array_equal(self._Zeros([2, 3]),
+                                   np.array([[0] * 3] * 2)))
+
+ def testDynamicSizes(self):
+ np_ans = np.array([[0] * 3] * 2)
+ with self.test_session():
+ # Creates a tensor of 2 x 3.
+ d = tf.fill([2, 3], 12., name="fill")
+ # Constructs a tensor of zeros of the same dimensions as "d".
+ z = tf.zeros(tf.shape(d))
+ out = z.eval()
+ self.assertAllEqual(np_ans, out)
+ self.assertShapeEqual(np_ans, d)
+ self.assertShapeEqual(np_ans, z)
+
+ def testDtype(self):
+ with self.test_session():
+ d = tf.fill([2, 3], 12., name="fill")
+ self.assertEqual(d.get_shape(), [2, 3])
+ # Test default type for both constant size and dynamic size
+ z = tf.zeros([2, 3])
+      self.assertEqual(z.dtype, tf.float32)
+      self.assertEqual([2, 3], z.get_shape())
+      z = tf.zeros(tf.shape(d))
+      self.assertEqual(z.dtype, tf.float32)
+      self.assertEqual([2, 3], z.get_shape())
+      # Test explicit type control
+      for dtype in [tf.float32, tf.float64, tf.int32,
+                    tf.uint8, tf.int16, tf.int8,
+                    tf.complex64, tf.int64]:
+        z = tf.zeros([2, 3], dtype=dtype)
+        self.assertEqual(z.dtype, dtype)
+        self.assertEqual([2, 3], z.get_shape())
+        z = tf.zeros(tf.shape(d), dtype=dtype)
+        self.assertEqual(z.dtype, dtype)
+        self.assertEqual([2, 3], z.get_shape())
+
+
+class ZerosLikeTest(tf.test.TestCase):
+
+ def testZerosLike(self):
+ for dtype in [tf.float32, tf.float64, tf.int32,
+ tf.uint8, tf.int16, tf.int8,
+ tf.complex64, tf.int64]:
+ numpy_dtype = dtype.as_numpy_dtype
+ with self.test_session():
+ # Creates a tensor of non-zero values with shape 2 x 3.
+ d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
+ # Constructs a tensor of zeros of the same dimensions and type as "d".
+ z_var = tf.zeros_like(d)
+ # Test that the type is correct
+        self.assertEqual(z_var.dtype, dtype)
+ z_value = z_var.eval()
+
+ # Test that the value is correct
+ self.assertTrue(np.array_equal(z_value, np.array([[0] * 3] * 2)))
+ self.assertEqual([2, 3], z_var.get_shape())
+
+ def testGenZerosLike(self):
+ for dtype in [tf.float32, tf.float64, tf.int32,
+ tf.uint8, tf.int16, tf.int8,
+ tf.complex64, tf.int64]:
+ numpy_dtype = dtype.as_numpy_dtype
+ with self.test_session():
+ # Creates a tensor of non-zero values with shape 2 x 3.
+ d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
+ # Constructs a tensor of zeros of the same dimensions and type as "d".
+ z_var = gen_array_ops._zeros_like(d)
+ # Test that the type is correct
+        self.assertEqual(z_var.dtype, dtype)
+ z_value = z_var.eval()
+
+ # Test that the value is correct
+ self.assertTrue(np.array_equal(z_value, np.array([[0] * 3] * 2)))
+ self.assertEqual([2, 3], z_var.get_shape())
+
+
+class OnesTest(tf.test.TestCase):
+
+ def _Ones(self, shape):
+ with self.test_session():
+ ret = tf.ones(shape)
+ self.assertEqual(shape, ret.get_shape())
+ return ret.eval()
+
+ def testConst(self):
+ self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
+
+ def testDynamicSizes(self):
+ np_ans = np.array([[1] * 3] * 2)
+ with self.test_session():
+ # Creates a tensor of 2 x 3.
+ d = tf.fill([2, 3], 12., name="fill")
+ # Constructs a tensor of ones of the same dimensions as "d".
+ z = tf.ones(tf.shape(d))
+ out = z.eval()
+ self.assertAllEqual(np_ans, out)
+ self.assertShapeEqual(np_ans, d)
+ self.assertShapeEqual(np_ans, z)
+
+ def testDtype(self):
+ with self.test_session():
+ d = tf.fill([2, 3], 12., name="fill")
+ self.assertEqual(d.get_shape(), [2, 3])
+ # Test default type for both constant size and dynamic size
+ z = tf.ones([2, 3])
+      self.assertEqual(z.dtype, tf.float32)
+      self.assertEqual([2, 3], z.get_shape())
+      z = tf.ones(tf.shape(d))
+      self.assertEqual(z.dtype, tf.float32)
+      self.assertEqual([2, 3], z.get_shape())
+      # Test explicit type control
+      for dtype in [tf.float32, tf.float64, tf.int32,
+                    tf.uint8, tf.int16, tf.int8,
+                    tf.complex64, tf.int64]:
+        z = tf.ones([2, 3], dtype=dtype)
+        self.assertEqual(z.dtype, dtype)
+        self.assertEqual([2, 3], z.get_shape())
+        z = tf.ones(tf.shape(d), dtype=dtype)
+        self.assertEqual(z.dtype, dtype)
+        self.assertEqual([2, 3], z.get_shape())
+
+
+class OnesLikeTest(tf.test.TestCase):
+
+ def testOnesLike(self):
+ for dtype in [tf.float32, tf.float64, tf.int32,
+ tf.uint8, tf.int16, tf.int8,
+ tf.complex64, tf.int64]:
+ numpy_dtype = dtype.as_numpy_dtype
+ with self.test_session():
+ # Creates a tensor of non-zero values with shape 2 x 3.
+ d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
+        # Constructs a tensor of ones of the same dimensions and type as "d".
+        z_var = tf.ones_like(d)
+        # Test that the type is correct
+        self.assertEqual(z_var.dtype, dtype)
+ z_value = z_var.eval()
+
+ # Test that the value is correct
+ self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
+ self.assertEqual([2, 3], z_var.get_shape())
+
+ def testGenOnesLike(self):
+ for dtype in [tf.float32, tf.float64, tf.int32,
+ tf.uint8, tf.int16, tf.int8,
+ tf.complex64, tf.int64]:
+ numpy_dtype = dtype.as_numpy_dtype
+ with self.test_session():
+ # Creates a tensor of non-zero values with shape 2 x 3.
+ d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
+        # Constructs a tensor of ones of the same dimensions and type as "d".
+        z_var = tf.ones_like(d)
+        # Test that the type is correct
+        self.assertEqual(z_var.dtype, dtype)
+ z_value = z_var.eval()
+
+ # Test that the value is correct
+ self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
+ self.assertEqual([2, 3], z_var.get_shape())
+
+
+class FillTest(tf.test.TestCase):
+
+ def _compare(self, dims, val, np_ans, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ tf_ans = tf.fill(dims, val, name="fill")
+ out = tf_ans.eval()
+ self.assertAllClose(np_ans, out)
+ # Fill does not set the shape.
+ # self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareAll(self, dims, val, np_ans):
+ self._compare(dims, val, np_ans, False)
+ self._compare(dims, val, np_ans, True)
+
+ def testFillFloat(self):
+ np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
+ self._compareAll([2, 3], np_ans[0][0], np_ans)
+
+ def testFillDouble(self):
+ np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
+ self._compareAll([2, 3], np_ans[0][0], np_ans)
+
+ def testFillInt32(self):
+ np_ans = np.array([[42] * 3] * 2).astype(np.int32)
+ self._compareAll([2, 3], np_ans[0][0], np_ans)
+
+ def testFillInt64(self):
+ np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
+ self._compareAll([2, 3], np_ans[0][0], np_ans)
+
+ def testFillComplex(self):
+ np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
+ self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
+
+ def testFillString(self):
+ np_ans = np.array([["yolo"] * 3] * 2)
+ with self.test_session(use_gpu=False):
+ tf_ans = tf.fill([2, 3], np_ans[0][0], name="fill").eval()
+ self.assertAllEqual(np_ans, tf_ans)
+
+ def testShapeFunctionEdgeCases(self):
+ # Non-vector dimensions.
+ with self.assertRaises(ValueError):
+ tf.fill([[0, 1], [2, 3]], 1.0)
+
+ # Non-scalar value.
+ with self.assertRaises(ValueError):
+ tf.fill([3, 2], [1.0, 2.0])
+
+ # Partial dimension information.
+ f = tf.fill(
+ tf.placeholder(tf.int32, shape=(4,)), 3.0)
+ self.assertEqual([None, None, None, None], f.get_shape().as_list())
+
+
+class PlaceholderTest(tf.test.TestCase):
+
+ def testDtype(self):
+ with self.test_session():
+ p = tf.placeholder(tf.float32, name="p")
+ p_identity = tf.identity(p)
+ feed_array = np.random.rand(10, 10)
+ self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
+ feed_array)
+
+ with self.assertRaisesOpError(
+ "must feed a value for placeholder tensor 'p' with dtype float"):
+ p_identity.eval()
+
+ def testShape(self):
+ with self.test_session():
+ p = tf.placeholder(tf.float32, shape=(10, 10), name="p")
+ p_identity = tf.identity(p)
+ feed_array = np.random.rand(10, 10)
+ self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
+ feed_array)
+
+ with self.assertRaisesOpError(
+ "must feed a value for placeholder tensor 'p' with dtype float and "
+ "shape dim { size: 10 } dim { size: 10 }"):
+ p_identity.eval()
+
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: "Cannot feed value of shape" in e.message):
+ p_identity.eval(feed_dict={p: feed_array[:5, :5]})
+
+ def testPartialShape(self):
+ with self.test_session():
+ p = tf.placeholder(tf.float32, shape=[None, 3], name="p")
+ p_identity = tf.identity(p)
+ feed_array = np.random.rand(10, 3)
+ self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
+ feed_array)
+
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: "Cannot feed value of shape" in e.message):
+ p_identity.eval(feed_dict={p: feed_array[:5, :2]})
+
+ def testControlDependency(self):
+ with self.test_session():
+ p = tf.placeholder(tf.int32, shape=[], name="p")
+ with tf.control_dependencies([p]):
+ c = tf.constant(5, tf.int32)
+ d = tf.mul(p, c)
+ self.assertEqual(10, d.eval(feed_dict={p: 2}))
+
+ def testFillNegative(self):
+ with self.test_session():
+ for shape in (-1,), (2, -1), (-1, 2):
+ with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
+ " must be nonnegative"):
+ tf.fill(shape, 7).eval()
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
new file mode 100644
index 0000000000..adf3552739
--- /dev/null
+++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
@@ -0,0 +1,1260 @@
+# pylint: disable=g-long-lambda
+"""Tests for tensorflow.ops.control_flow_ops."""
+import math
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import gradients
+from tensorflow.python.pywrap_tensorflow import StatusNotOK
+
+
+def check_op_order(graph):
+ """Sanity check on the ordering of op id."""
+
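+  # Merge ops are exempt: a while loop's back edge (NextIteration) feeds a
+  # Merge op, so one Merge input is always created after the Merge itself.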
+ for op in graph.get_operations():
+ for v in op.inputs:
+ assert v.op._id < op._id or op.type == "Merge", (
+ "The id of %s must be less than the id of %s" % (v.op.name, op.name))
+ return True
+
+
+def check_consumers(graph):
+ """Sanity check on the consumer list of the tensors."""
+
+ consumer_count = {}
+ for op in graph.get_operations():
+ for v in op.inputs:
+ cnt = consumer_count.get(v, 0)
+ consumer_count[v] = cnt + 1
+ for k, v in consumer_count.iteritems():
+ if len(k.consumers()) != v:
+ return False
+ return True
+
+
+def isum(s):
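+  """Adds the integers 0 through 9 to s using a While loop (returns s + 45)."""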
+ i = tf.constant(0, name="i")
+ c = lambda i, s: tf.less(i, 10)
+ b = lambda i, s: [tf.add(i, 1), tf.add(i, s)]
+ _, r_s = control_flow_ops.While(c, b, [i, s])
+ return r_s
+
+
+class ControlFlowTest(tf.test.TestCase):
+
+ def testRefIdentity(self):
+ with self.test_session():
+ v = tf.Variable(7)
+
+ v = control_flow_ops._Identity(v)
+ op = tf.assign(v, 9)
+ v2 = control_flow_ops.with_dependencies([op], v)
+
+ self.assertTrue(check_op_order(v.graph))
+ self.assertTrue(isinstance(v2, tf.Tensor))
+ tf.initialize_all_variables().run()
+ self.assertEqual(9, v2.eval())
+
+ def testRefEnter(self):
+ with self.test_session():
+ v = tf.Variable(7)
+
+ enter_v = control_flow_ops._Enter(v, "foo_1")
+ nine = tf.constant(9)
+ enter_nine = control_flow_ops.enter(nine, "foo_1")
+ op = tf.assign(enter_v, enter_nine)
+ v2 = control_flow_ops.with_dependencies([op], enter_v)
+ v3 = control_flow_ops.exit(v2)
+ tf.initialize_all_variables().run()
+ self.assertEqual(9, v3.eval())
+
+ def testRefSwitch(self):
+ with self.test_session():
+ v = tf.Variable(7)
+
+ p = tf.constant(True)
+ v1 = control_flow_ops._SwitchRefOrTensor(v, p)
+ v2 = tf.assign(v1[1], 9)
+ tf.initialize_all_variables().run()
+ self.assertEqual(9, v2.eval())
+
+ def testEnterExit_1(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ enter_op = control_flow_ops.enter(data, "foo_1", False)
+ exit_op = control_flow_ops.exit(enter_op)
+
+ result = exit_op.eval()
+ self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), result)
+
+ def testEnterMulExit_1(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ enter_data = control_flow_ops.enter(data, "foo_1", False)
+ five = tf.constant(5)
+ enter_five = control_flow_ops.enter(five, "foo_1", False)
+ mul_op = tf.mul(enter_data, enter_five)
+ exit_op = control_flow_ops.exit(mul_op)
+
+ result = exit_op.eval()
+ self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
+
+ def testEnterNextExit_1(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ enter_op = control_flow_ops.enter(data, "foo_1", False)
+ next_op = control_flow_ops.next_iteration(enter_op)
+ exit_op = control_flow_ops.exit(next_op)
+
+ result = exit_op.eval()
+ self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), result)
+
+ def testSwitchMergeIndexedSlices(self):
+ with self.test_session():
+ values = tf.constant([1, 2, 3, 4, 5, 6])
+ indices = tf.constant([0, 2, 4, 6, 8, 10])
+ data = tf.IndexedSlices(values, indices)
+ pred = tf.convert_to_tensor(True)
+ switch_op = control_flow_ops.switch(data, pred)
+ merge_op = control_flow_ops.merge(switch_op)[0]
+
+ val = merge_op.values.eval()
+ ind = merge_op.indices.eval()
+ self.assertAllEqual(np.arange(1, 7), val)
+ self.assertAllEqual(np.arange(0, 12, 2), ind)
+
+ def _testSwitchMerge_1(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = tf.convert_to_tensor(True, name="ports")
+ switch_op = control_flow_ops.switch(data, ports)
+ merge_op = control_flow_ops.merge(switch_op)[0]
+
+ result = merge_op.eval()
+ self.assertAllEqual(np.arange(1, 7), result)
+
+ def testSwitchMerge_1(self):
+ self._testSwitchMerge_1(use_gpu=False)
+ self._testSwitchMerge_1(use_gpu=True)
+
+ def testSwitchDeadBranch(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = tf.convert_to_tensor(True, name="ports")
+ switch_op = control_flow_ops.switch(data, ports)
+ dead_branch = tf.identity(switch_op[0])
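+      # pred is True, so the False output of switch is dead; fetching a
+      # tensor computed from it raises an error.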
+
+ with self.assertRaisesWithPredicateMatch(
+          StatusNotOK, lambda e: "The tensor returned for" in str(e)):
+ dead_branch.eval()
+
+ def testSwitchMergeIdentity_1(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = tf.convert_to_tensor(True, name="ports")
+ switch_op = control_flow_ops.switch(data, ports)
+ merge_op = control_flow_ops.merge(switch_op)[0]
+ id_op = tf.identity(merge_op)
+
+ result = id_op.eval()
+ self.assertAllEqual(np.arange(1, 7), result)
+
+ def testSwitchMergeLess_0(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ zero = tf.constant(0)
+ one = tf.constant(1)
+ less_op = tf.less(zero, one)
+ switch_op = control_flow_ops.switch(data, less_op)
+ merge_op = control_flow_ops.merge(switch_op)[0]
+
+ result = merge_op.eval()
+ self.assertAllEqual(np.arange(1, 7), result)
+
+ def testSwitchMergeLess_1(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ zero = tf.convert_to_tensor(0)
+ one = tf.convert_to_tensor(1)
+ less_op = tf.less(zero, one)
+ switch_op = control_flow_ops.switch(data, less_op)
+ merge_op = control_flow_ops.merge(switch_op)[0]
+
+ result = merge_op.eval()
+ self.assertAllEqual(np.arange(1, 7), result)
+
+ def testSwitchMergeAddIdentity_0(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = tf.convert_to_tensor(False, name="ports")
+ switch_op = control_flow_ops.switch(data, ports)
+ one = tf.constant(1)
+ add_op = tf.add(switch_op[0], one)
+ id_op = tf.identity(switch_op[1])
+ merge_op = control_flow_ops.merge([add_op, id_op])[0]
+
+ result = merge_op.eval()
+ self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
+
+ def testSwitchMergeAddIdentity_1(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = tf.convert_to_tensor(True, name="ports")
+ switch_op = control_flow_ops.switch(data, ports)
+ one = tf.constant(1)
+ add_op = tf.add(switch_op[0], one)
+ id_op = tf.identity(switch_op[1])
+ merge_op = control_flow_ops.merge([add_op, id_op])[0]
+
+ result = merge_op.eval()
+ self.assertAllEqual(np.arange(1, 7), result)
+
+ def testSwitchMergeAddMul_0(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = tf.convert_to_tensor(False, name="ports")
+ switch_op = control_flow_ops.switch(data, ports)
+ one = tf.constant(1)
+ add_op = tf.add(switch_op[0], one)
+ five = tf.constant(5)
+ mul_op = tf.mul(switch_op[1], five)
+ merge_op = control_flow_ops.merge([add_op, mul_op])[0]
+
+ result = merge_op.eval()
+ self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
+
+ def testSwitchMergeAddMul_1(self):
+ with self.test_session():
+ data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ ports = tf.convert_to_tensor(True, name="ports")
+ switch_op = control_flow_ops.switch(data, ports)
+ one = tf.constant(1)
+ add_op = tf.add(switch_op[0], one)
+ five = tf.constant(5)
+ mul_op = tf.mul(switch_op[1], five)
+ merge_op = control_flow_ops.merge([add_op, mul_op])[0]
+
+ result = merge_op.eval()
+ self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
+
+ def testLoop_false(self):
+ with self.test_session():
+ false = tf.convert_to_tensor(False)
+ n = tf.constant(10)
+
+ enter_false = control_flow_ops.enter(false, "foo_1", False)
+ enter_n = control_flow_ops.enter(n, "foo_1", False)
+
+ merge_n = control_flow_ops.merge([enter_n], name="merge_n")[0]
+ switch_n = control_flow_ops.switch(merge_n, enter_false)
+ exit_n = control_flow_ops.exit(switch_n[0])
+
+ result = exit_n.eval()
+ self.assertAllEqual(10, result)
+
+ def testLoop_false_1(self):
+ with self.test_session():
+ false = tf.convert_to_tensor(False)
+ n = tf.constant(10)
+
+ enter_false = control_flow_ops.enter(false, "foo_1", False)
+ enter_n = control_flow_ops.enter(n, "foo_1", False)
+
+ merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
+ switch_n = control_flow_ops.switch(merge_n, enter_false)
+ exit_n = control_flow_ops.exit(switch_n[0])
+ next_n = control_flow_ops.next_iteration(switch_n[0])
+ merge_n.op._update_input(1, next_n)
+
+ result = exit_n.eval()
+ self.assertAllEqual(10, result)
+
+ def testLoop_1(self):
+ with self.test_session():
+ zero = tf.convert_to_tensor(0)
+ one = tf.convert_to_tensor(1)
+ n = tf.constant(10)
+
+ enter_zero = control_flow_ops.enter(zero, "foo_1", False)
+ enter_one = control_flow_ops.enter(one, "foo_1", False)
+ enter_n = control_flow_ops.enter(n, "foo_1", False)
+ merge_zero = control_flow_ops.merge([enter_zero, enter_zero],
+ name="merge_zero")[0]
+ merge_one = control_flow_ops.merge([enter_one, enter_one],
+ name="merge_one")[0]
+ merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
+ less_op = tf.less(merge_n, merge_n)
+ cond_op = control_flow_ops.loop_cond(less_op)
+ switch_zero = control_flow_ops.switch(merge_zero, cond_op)
+ switch_one = control_flow_ops.switch(merge_one, cond_op)
+ switch_n = control_flow_ops.switch(merge_n, cond_op)
+ next_zero = control_flow_ops.next_iteration(switch_zero[1])
+ next_one = control_flow_ops.next_iteration(switch_one[1])
+ next_n = control_flow_ops.next_iteration(switch_n[1])
+ merge_zero.op._update_input(1, next_zero)
+ merge_one.op._update_input(1, next_one)
+ merge_n.op._update_input(1, next_n)
+ exit_n = control_flow_ops.exit(switch_n[0])
+
+ result = exit_n.eval()
+ self.assertAllEqual(10, result)
+
+ def testCondIndexedSlices(self):
+ with self.test_session():
+ values = tf.constant(10)
+ indices = tf.constant(0)
+ x = tf.IndexedSlices(values, indices)
+ pred = tf.less(1, 2)
+ fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), indices)
+ fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), indices)
+ r = control_flow_ops.cond(pred, fn1, fn2)
+
+ val = r.values.eval()
+ ind = r.indices.eval()
+ self.assertTrue(check_op_order(x.values.graph))
+ self.assertAllEqual(11, val)
+ self.assertAllEqual(0, ind)
+
+ def testCondIndexedSlicesDifferentTypes(self):
+ with self.test_session():
+ values = tf.constant(10)
+ i_32 = tf.convert_to_tensor(0, name="one", dtype=tf.int32)
+ i_64 = tf.convert_to_tensor(0, name="one", dtype=tf.int64)
+ x = tf.IndexedSlices(values, i_32)
+ pred = tf.less(1, 2)
+ fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), i_32)
+ fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), i_64)
+ r = control_flow_ops.cond(pred, fn1, fn2)
+
+ val = r.values.eval()
+ ind = r.indices.eval()
+ self.assertTrue(check_op_order(x.values.graph))
+ self.assertAllEqual(11, val)
+ self.assertAllEqual(0, ind)
+      self.assertEqual(np.int64, ind.dtype)
+
+ def _testCond_1(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ x = tf.constant(10)
+ pred = tf.less(1, 2)
+ fn1 = lambda: tf.add(x, 1)
+ fn2 = lambda: tf.sub(x, 1)
+ r = control_flow_ops.cond(pred, fn1, fn2)
+
+ result = r.eval()
+ self.assertTrue(check_op_order(x.graph))
+ self.assertAllEqual(11, result)
+
+ def testCond_1(self):
+ self._testCond_1(use_gpu=False)
+ self._testCond_1(use_gpu=True)
+
+ def testCond_2(self):
+ with self.test_session():
+ x = tf.constant(10)
+ r = control_flow_ops.cond(tf.less(1, 0), lambda: tf.add(x, 1),
+ lambda: tf.sub(x, 1))
+ result = r.eval()
+ self.assertTrue(check_op_order(x.graph))
+ self.assertAllEqual(9, result)
+
+ def testCond_3(self):
+ with self.test_session():
+ x = tf.constant(10)
+ pred = tf.less(1, 2)
+ fn1 = lambda: tf.add(x, 1)
+ fn2 = lambda: tf.sub(x, 1)
+ fn3 = lambda: tf.add(control_flow_ops.cond(pred, fn1, fn2), 1)
+ r = control_flow_ops.cond(pred, fn3, fn2)
+
+ result = r.eval()
+ self.assertTrue(check_op_order(x.graph))
+ self.assertAllEqual(12, result)
+
+ def testCond_4(self):
+ with self.test_session():
+ v1 = tf.Variable(7)
+ v2 = tf.Variable(7)
+ v3 = tf.Variable(7)
+
+ age = tf.constant(3)
+ max_age = tf.constant(2)
+ pred = tf.greater(age, max_age)
+ fn1 = lambda: [tf.assign(v1, 1).op, tf.assign(v2, 2).op]
+ fn2 = lambda: [tf.assign(v3, 3).op, tf.constant(10).op]
+ r = control_flow_ops.cond(pred, fn1, fn2)
+
+ tf.initialize_all_variables().run()
+ self.assertEqual(len(r), 2)
+ result = r[1].eval()
+ self.assertTrue(check_op_order(age.graph))
+ self.assertAllEqual(True, result)
+ self.assertAllEqual(7, v1.eval())
+ self.assertAllEqual(2, v2.eval())
+ self.assertAllEqual(7, v3.eval())
+
+ def testCond_5(self):
+ with self.test_session():
+ alive = tf.constant(True, name="alive")
+ count = tf.constant(0, name="count")
+
+ def body(i):
+ return control_flow_ops.cond(
+ alive, lambda: [tf.less(i, 3), tf.add(count, 1)],
+ lambda: [alive, count])
+
+ for i in range(10):
+ alive, count = body(i)
+ self.assertAllEqual(4, count.eval())
+
+ def testCond_6(self):
+ with self.test_session():
+ v1 = tf.Variable([7])
+
+ age = tf.constant(3)
+ pred = tf.greater(age, 4)
+ fn1 = lambda: age
+ fn2 = lambda: v1
+ r = control_flow_ops.cond(pred, fn1, fn2)
+
+ tf.initialize_all_variables().run()
+ result = r.eval()
+ self.assertAllEqual(np.array([7]), result)
+
+ def testCondGrad_1(self):
+ with self.test_session():
+ x = tf.constant(10.0, name="x")
+ pred = tf.less(1, 2)
+ fn1 = lambda: tf.identity(x)
+ fn2 = lambda: tf.identity(x)
+ r = control_flow_ops.cond(pred, fn1, fn2)
+
+ grad = tf.gradients(r, [x])[0]
+ result = grad.eval()
+ self.assertAllEqual(1.0, result)
+
+ def testCondGrad_2(self):
+ with self.test_session():
+ c = tf.placeholder(tf.int32, shape=[])
+ x = tf.constant(10.0)
+ pred = tf.less(c, 2)
+ fn1 = lambda: tf.mul(x, 42.0)
+ fn2 = lambda: tf.mul(x, 3.0)
+ r = control_flow_ops.cond(pred, fn1, fn2)
+
+ grad = tf.gradients(r, [x])[0]
+ self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
+ self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
+
+ def testCondGrad_Gather(self):
+ with self.test_session() as sess:
+ v1 = tf.Variable([1.0, 42.0])
+ c = tf.placeholder(tf.int32, shape=[])
+ pred = tf.less(c, 2)
+ fn1 = lambda: tf.identity(v1)
+ fn2 = lambda: tf.gather(v1, [1, 1])
+ r = control_flow_ops.cond(pred, fn1, fn2)
+ grad = tf.gradients(r, [v1])[0]
+ tf.initialize_all_variables().run()
+ # Should just be [1, 1], but possibly a sparse representation
+ gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
+      dense_gv = [sum(y for (x, y) in zip(gi, gv) if x == i)
+                  for i in range(2)]
+ self.assertAllEqual(dense_gv, [1.0, 1.0])
+ # Should be [0, 2], as the else forwards v1[1] twice
+ gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
+      dense_gv = [sum(y for (x, y) in zip(gi, gv) if x == i)
+                  for i in range(2)]
+ self.assertAllEqual(dense_gv, [0.0, 2.0])
+
+ def testWhileGrad_1(self):
+ with self.test_session():
+ v = tf.constant(2.0, name="v")
+ c = lambda v: tf.less(v, 100.0)
+ b = tf.square
+ r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
+
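+      # Three iterations square v, so r = v**8 and dr/dv = 8 * v**7 = 1024.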
+ r = tf.gradients(r, v)
+ result = r[0].eval()
+ self.assertEqual(1024.0, result)
+
+ def testWhileGrad_2(self):
+ with self.test_session():
+ a = tf.constant(3.0, name="a")
+ v = tf.constant(2.0, name="v")
+ c = lambda v: tf.less(v, 100.0)
+ b = lambda v: tf.mul(v, a)
+ r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
+
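+      # Four iterations give r = v * a**4, so dr/da = 4 * v * a**3 = 216.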
+ r = tf.gradients(r, a)
+ result = r[0].eval()
+ self.assertEqual(216.0, result)
+
+ def testWhileGrad_3(self):
+ with self.test_session():
+ a = tf.constant(3.0, name="a")
+ v = tf.constant(2.0, name="v")
+ c = lambda v: tf.less(v, 100.0)
+ b = lambda v: tf.mul(v, a)
+ r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
+
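+      # r = v * a**4 as above, so dr/dv = a**4 = 81.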
+ r = tf.gradients(r, v)
+ result = r[0].eval()
+ self.assertEqual(81.0, result)
+
+ def testWhileGrad_4(self):
+ with self.test_session():
+ a = tf.Variable(3.0)
+ v = tf.constant(2.0, name="v")
+ c = lambda v: tf.less(v, 100.0)
+ b = lambda v: tf.mul(v, a)
+ r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
+
+ r = tf.gradients(r, a)
+ tf.initialize_all_variables().run()
+ result = r[0].eval()
+ self.assertEqual(216.0, result)
+
+ def testWhileGrad_5(self):
+ with self.test_session():
+ x = tf.constant(3.0, name="x")
+ y = tf.constant(2.0, name="y")
+ c = lambda x, y: tf.less(x, 100.0)
+
+ def b(x, y):
+ y1 = tf.add(x, y)
+ x1 = tf.mul(x, y1)
+ return x1, y1
+
+ r = control_flow_ops.While(c, b, [x, y], parallel_iterations=1)
+
+      # Must use the complete r: gradients are summed over both loop outputs.
+ r = tf.gradients(r, x)
+ result = r[0].eval()
+ self.assertEqual(304.0, result)
+
+ def testWhileGrad_6(self):
+ with self.test_session():
+ i = tf.constant(0, name="i")
+ x = tf.constant(2.0, name="x")
+ c = lambda i, x: tf.less(i, 10)
+
+ def b(i, x):
+ x = tf.mul(x, 2.0)
+ i = tf.add(i, 1)
+ return i, x
+
+ r = control_flow_ops.While(c, b, [i, x], parallel_iterations=1)
+
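+      # x is doubled ten times, so r[1] = 1024 * x and dr/dx = 2**10 = 1024.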
+ # Must use the complete r.
+ r = tf.gradients(r, x)
+ r = r[0].eval()
+ self.assertEqual(1024.0, r)
+
+ def testWhileGrad_7(self):
+ with self.test_session():
+ v = tf.constant(2.0, name="v")
+ c = lambda v: tf.less(v, 100.0)
+ b = tf.square
+ r = control_flow_ops.While(c, b, [v], parallel_iterations=1,
+ back_prop=False)
+ r = tf.add(r, v)
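+      # back_prop=False stops gradients through the loop; only the direct
+      # tf.add path contributes, so dr/dv = 1.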
+ r = tf.gradients(r, v)
+ result = r[0].eval()
+ self.assertEqual(1.0, result)
+
+ # Microbenchmark: 10,000 iterations took 0.21s.
+ def testWhile_1(self):
+ with self.test_session():
+ n = tf.constant(0)
+ c = lambda x: tf.less(x, 10000)
+ b = lambda x: tf.add(x, 1)
+ r = control_flow_ops.While(c, b, [n], parallel_iterations=20)
+
+ result = r.eval()
+ self.assertTrue(check_op_order(n.graph))
+ self.assertEqual(10000, result)
+
+ def testWhile_2(self):
+ with self.test_session():
+ s = tf.constant(0)
+ r = isum(s)
+
+ result = r.eval()
+ self.assertTrue(check_op_order(s.graph))
+ self.assertAllEqual(45, result)
+
+  # Have more than 10 loop iterations in flight and hence exercise the
+  # parallel-iteration limit ("k-bound") most of the time.
+ def testWhile_3(self):
+ with self.test_session():
+
+ def compute(i, m, c, o):
+ m, c = [tf.add(m, 1), tf.add(c, 1)]
+ o = tf.add(o, m)
+ o = tf.add(o, c)
+ i = tf.add(i, 1)
+ return [i, m, c, o]
+
+ i = tf.convert_to_tensor(0)
+ m = tf.convert_to_tensor(0)
+ c = tf.convert_to_tensor(0)
+ o = tf.convert_to_tensor(0)
+ d = tf.convert_to_tensor(100)
+ r = control_flow_ops.While(
+ lambda i, m, c, o: tf.less(i, d), compute, [i, m, c, o])
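+      # Iteration k (1-based) adds m + c = 2 * k to o, so after 100
+      # iterations o = 2 * (1 + 2 + ... + 100) = 10100.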
+ result = r[3].eval()
+ self.assertTrue(check_op_order(i.graph))
+ self.assertAllEqual(10100, result)
+
+ def testWhile_4(self):
+ with self.test_session():
+
+ def compute(i, m, c, o):
+ m, c = [tf.gather(x, i), tf.gather(x, i)]
+ o = tf.add(o, m)
+ o = tf.add(o, c)
+ i = tf.add(i, 1)
+ return [i, m, c, o]
+
+ i = tf.convert_to_tensor(0)
+ m = tf.convert_to_tensor(0)
+ c = tf.convert_to_tensor(0)
+ o = tf.convert_to_tensor(0)
+ x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
+ s = tf.size(x)
+ r = control_flow_ops.While(
+ lambda i, m, c, o: tf.less(i, s), compute, [i, m, c, o])
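+      # Each iteration adds 2 * x[i] to o, so o = 2 * sum(x) = 42.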
+ result = r[3].eval()
+ self.assertTrue(check_op_order(i.graph))
+ self.assertAllEqual(42, result)
+
+ def testWhile_5(self):
+ with self.test_session():
+
+ def compute(i, c, o):
+ c = tf.slice(x, tf.expand_dims(i, 0), [1])
+ o = tf.concat(0, [o, c])
+ i = tf.add(i, 1)
+ return [i, c, o]
+
+ i = tf.convert_to_tensor(0)
+ c = tf.convert_to_tensor(0)
+ o = tf.convert_to_tensor([0])
+ x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
+ s = tf.size(x)
+ r = control_flow_ops.While(
+ lambda i, c, o: tf.less(i, s), compute, [i, c, o])
+ result = r[2].eval()
+ self.assertTrue(check_op_order(i.graph))
+ self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
+
+ def _testWhile_Gpu_1(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ n = tf.constant(1.0)
+ c = lambda x: tf.less(x, 10.0)
+ b = lambda x: tf.add(x, 1.0)
+ r = control_flow_ops.While(c, b, [n])
+
+ result = r.eval()
+ self.assertEqual(10.0, result)
+
+ def testWhile_Gpu_1(self):
+ self._testWhile_Gpu_1(use_gpu=False)
+ self._testWhile_Gpu_1(use_gpu=True)
+
+ def _testWhile_Gpu_2(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ n = tf.constant(1.0)
+ c = lambda x: tf.less(x, 10.0)
+ def b(x):
+ with tf.device("/cpu:0"):
+ return tf.add(x, 1.0)
+ r = control_flow_ops.While(c, b, [n])
+
+ result = r.eval()
+ self.assertEqual(10.0, result)
+
+ def testWhile_Gpu_2(self):
+    self._testWhile_Gpu_2(use_gpu=False)
+    self._testWhile_Gpu_2(use_gpu=True)
+
+ def testWhileWithControl_1(self):
+ with self.test_session():
+ n = tf.constant(0)
+ r = tf.constant(0)
+ condition = lambda n_, r_: tf.less(n_, 10)
+
+ def body(n_, r_):
+ n_ = tf.add(n_, 1)
+ with r_.graph.control_dependencies([r_]):
+ r_ = tf.constant(12)
+ return [n_, r_]
+
+ res = control_flow_ops.While(condition,
+ body,
+ [n, r],
+ parallel_iterations=1)
+ result = res[1].eval()
+ self.assertTrue(check_op_order(n.graph))
+ self.assertAllEqual(12, result)
+
+ def testWhileWithControl_2(self):
+ with self.test_session():
+ r = tf.constant(0)
+ condition = lambda r_: tf.less(r_, 10)
+
+ def body(r_):
+ with r_.graph.control_dependencies([r_]):
+ r_ = tf.constant(12)
+ return [r_]
+
+ res = control_flow_ops.While(condition, body, [r], parallel_iterations=1)
+ result = res.eval()
+ self.assertTrue(check_op_order(r.graph))
+ self.assertAllEqual(12, result)
+
+ def testCondWhile_1(self):
+ with self.test_session():
+ n = tf.convert_to_tensor(0, name="n")
+ c = lambda x: tf.less(x, 10)
+ b = lambda x: tf.add(x, 1)
+ r = control_flow_ops.cond(tf.less(0, 1),
+ lambda: control_flow_ops.While(c, b, [n]),
+ lambda: n)
+
+ result = r.eval()
+ self.assertTrue(check_op_order(n.graph))
+ self.assertAllEqual(10, result)
+
+ def testCondWhile_2(self):
+ with self.test_session():
+ n = tf.convert_to_tensor(0)
+ c = lambda x: tf.less(x, 10)
+ b = lambda x: tf.add(x, 1)
+ r = control_flow_ops.cond(tf.less(1, 0), lambda: tf.add(n, 1),
+ lambda: control_flow_ops.While(c, b, [n]))
+
+ result = r.eval()
+ self.assertTrue(check_op_order(n.graph))
+ self.assertAllEqual(10, result)
+
+ def testWhileCond_1(self):
+ with self.test_session():
+ i = tf.convert_to_tensor(0, name="i")
+ n = tf.convert_to_tensor(10, name="n")
+ one = tf.convert_to_tensor(1, name="one")
+ c = lambda x: tf.less(x, n)
+ b = lambda x: control_flow_ops.cond(tf.constant(True),
+ lambda: tf.add(x, one),
+ lambda: tf.sub(x, one))
+ r = control_flow_ops.While(c, b, [i])
+
+ result = r.eval()
+ self.assertTrue(check_op_order(n.graph))
+ self.assertAllEqual(10, result)
+
+ def testWhileCond_2(self):
+ with self.test_session():
+ n = tf.convert_to_tensor(0, name="n")
+ c = lambda x: tf.less(x, 10)
+ b = lambda x: control_flow_ops.cond(tf.constant(True),
+ lambda: tf.add(x, 1),
+ lambda: n)
+ r = control_flow_ops.While(c, b, [n])
+
+ result = r.eval()
+ self.assertTrue(check_op_order(n.graph))
+ self.assertAllEqual(10, result)
+
+ def testWhileCond_3(self):
+ with self.test_session():
+ n = tf.convert_to_tensor(0)
+ c = lambda x: tf.less(x, 10)
+ b = lambda x: control_flow_ops.cond(tf.less(0, 1),
+ lambda: tf.add(x, 1),
+ lambda: tf.sub(x, 1))
+ r = control_flow_ops.While(c, b, [n])
+
+ result = r.eval()
+ self.assertTrue(check_op_order(n.graph))
+ self.assertAllEqual(10, result)
+
+ # NOTE: It is ok to have parallel_iterations > 1
+ def testWhileUpdateVariable_1(self):
+ with self.test_session():
+ select = tf.Variable([3.0, 4.0, 5.0])
+ n = tf.constant(0)
+
+ def loop_iterator(j):
+ return tf.less(j, 3)
+
+ def loop_body(j):
+ ns = tf.scatter_update(select, j, 10.0)
+ nj = tf.add(j, 1)
+ op = control_flow_ops.group(ns)
+ nj = control_flow_ops.with_dependencies([op], nj)
+ return [nj]
+
+ r = control_flow_ops.While(loop_iterator,
+ loop_body,
+ [n],
+ parallel_iterations=1)
+ self.assertTrue(check_op_order(n.graph))
+ tf.initialize_all_variables().run()
+ self.assertEqual(3, r.eval())
+ result = select.eval()
+ self.assertAllEqual(np.array([10.0, 10.0, 10.0]), result)
+
+ def testWhileUpdateVariable_2(self):
+ with self.test_session():
+ select1 = tf.Variable([3.0, 4.0, 5.0])
+ select2 = tf.Variable([3.0, 4.0, 5.0])
+ n = tf.constant(0)
+
+ def loop_iterator(j):
+ return tf.less(j, 3)
+
+ def loop_body(j):
+ ns1 = tf.scatter_update(select1, j, 10.0)
+ ns2 = tf.scatter_update(select2, j, 10.0)
+ nj = tf.add(j, 1)
+ op = control_flow_ops.group(ns1, ns2)
+ nj = control_flow_ops.with_dependencies([op], nj)
+ return [nj]
+
+ r = control_flow_ops.While(loop_iterator,
+ loop_body,
+ [n],
+ parallel_iterations=1)
+ self.assertTrue(check_op_order(n.graph))
+ tf.initialize_all_variables().run()
+ self.assertEqual(3, r.eval())
+ result1 = select1.eval()
+ self.assertAllEqual(np.array([10.0, 10.0, 10.0]), result1)
+ result2 = select2.eval()
+ self.assertAllEqual(np.array([10.0, 10.0, 10.0]), result2)
+
+ def testWhileUpdateVariable_3(self):
+ with self.test_session():
+ select = tf.Variable([3.0, 4.0, 5.0])
+ n = tf.constant(0)
+
+ def loop_iterator(j, _):
+ return tf.less(j, 3)
+
+ def loop_body(j, _):
+ ns = tf.scatter_update(select, j, 10.0)
+ nj = tf.add(j, 1)
+ return [nj, ns]
+
+ r = control_flow_ops.While(loop_iterator,
+ loop_body,
+ [n, tf.identity(select)],
+ parallel_iterations=1)
+ tf.initialize_all_variables().run()
+ result = r[1].eval()
+ self.assertTrue(check_op_order(n.graph))
+ self.assertAllEqual(np.array([10.0, 10.0, 10.0]), result)
+
+ # b/24814703
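+  # The loop body uses an assign op (asn1) created outside the loop.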
+ def testWhileUpdateVariable_4(self):
+ with self.test_session():
+ var_a = tf.Variable(0, name="a")
+ var_b = tf.Variable(0, name="b")
+ tf.initialize_all_variables().run()
+
+ c = tf.constant(0, name="c")
+ asn1 = tf.assign_add(var_a, 1, name="a_add")
+ # Loop condition
+ def pred(i):
+ return tf.less(i, 10)
+ # Loop body
+ def loop_body(i):
+ asn2 = tf.assign_add(var_b, asn1, name="b_add")
+ with tf.control_dependencies([asn2]):
+ ni = tf.add(i, 1, name="i_add")
+ return ni
+
+ lpa = control_flow_ops.While(pred, loop_body, [c],
+ parallel_iterations=1)
+
+ self.assertEqual(0, var_b.eval())
+ lpa.eval() # Run the loop
+ self.assertEqual(10, var_b.eval())
+
+ # b/24736492
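+  # The loop condition reads a variable that the loop body updates.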
+ def testWhileUpdateVariable_5(self):
+ with self.test_session():
+ # Create some variables.
+ var_a = tf.Variable(0, name="a")
+ var_b = tf.Variable(0, name="b")
+ tf.initialize_all_variables().run()
+
+ # Change condition to check var_b
+ def pred(i):
+ return tf.less(var_b, 10)
+
+ # Change body to increment var_b
+ def loop_body(i):
+ asn1 = tf.assign_add(var_a, tf.constant(1), name="a_add")
+ asn2 = tf.assign_add(var_b, tf.constant(1), name="b_add")
+ with tf.control_dependencies([asn1, asn2]):
+ inc_b = tf.identity(var_b)
+ return inc_b
+
+ lpa = control_flow_ops.While(pred, loop_body, [var_b], 1, name="loop")
+
+ self.assertEqual(0, var_b.eval())
+ lpa.eval() # Run the loop
+ self.assertEqual(10, var_a.eval())
+ self.assertEqual(10, var_b.eval())
+
+ def testWhileQueue_1(self):
+ with self.test_session():
+ q = tf.FIFOQueue(-1, tf.int32)
+ i = tf.constant(0)
+
+ def c(i):
+ return tf.less(i, 10)
+
+ def b(i):
+ ni = tf.add(i, 1)
+ ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
+ return ni
+
+ r = control_flow_ops.While(c, b, [i], parallel_iterations=1)
+ self.assertEqual([10], r.eval())
+ for i in xrange(10):
+ self.assertEqual([i], q.dequeue().eval())
+
+ def testFold_1(self):
+ with self.test_session():
+ elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ r = control_flow_ops.fold(
+ lambda a, x: tf.mul(tf.add(a, x), 2), elems, [1])
+ result = r.eval()
+ self.assertTrue(check_op_order(elems.graph))
+ self.assertAllEqual(np.array([208]), result)
+
+ def testFold_2(self):
+ with self.test_session():
+ elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
+ ten = tf.convert_to_tensor(10)
+
+ def compute(a, x):
+ r = tf.mul(x, ten)
+ return tf.add(a, r)
+
+ r = control_flow_ops.fold(compute, elems, [1])
+ result = r.eval()
+ self.assertTrue(check_op_order(elems.graph))
+ self.assertAllEqual([201], result)
+
+ def testOneValueCond(self):
+ with self.test_session():
+ c = tf.placeholder(tf.int32, shape=[])
+ one = tf.convert_to_tensor(1, name="one")
+ two = tf.convert_to_tensor(2, name="two")
+ p = tf.greater_equal(c, 1)
+ i = control_flow_ops.cond(p, lambda: one, lambda: two)
+ self.assertTrue(isinstance(i, tf.Tensor))
+
+ # True case: c = 2 is >= 1
+ self.assertEqual([1], i.eval(feed_dict={c: 2}))
+
+ # False case: c = 0 is not >= 1
+ self.assertEqual([2], i.eval(feed_dict={c: 0}))
+
+ def testExampleCond(self):
+ with self.test_session():
+ x = tf.convert_to_tensor([-2.0, 2.0], name="x")
+ d = tf.placeholder(tf.int32, shape=[])
+
+ def l2():
+ return tf.sqrt(tf.reduce_sum(tf.square(x)))
+
+ def l1():
+ return tf.reduce_sum(tf.abs(x))
+
+ i = control_flow_ops.cond(tf.equal(d, 2), l2, l1)
+ self.assertEqual(4.0, i.eval(feed_dict={d: 1}))
+ self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
+
+ def testOneOpCond(self):
+ with self.test_session():
+ v = tf.Variable(0)
+ c = tf.convert_to_tensor(0)
+ one = tf.convert_to_tensor(1)
+ two = tf.convert_to_tensor(2)
+ p = tf.greater_equal(c, 1)
+
+ def a():
+ return tf.assign(v, one)
+
+ def b():
+ return tf.assign(v, two)
+
+ i = control_flow_ops.cond(p, a, b)
+ self.assertTrue(isinstance(i, tf.Tensor))
+ tf.initialize_all_variables().run()
+
+ self.assertEqual(0, v.eval())
+
+ # True case: c = 2 is >= 1, v is set to 1.
+ self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
+ self.assertEqual(1, v.eval())
+
+ # False case: c = 0 is not >= 1, v is set to 2.
+ self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
+ self.assertEqual(2, v.eval())
+
+ def testWithOpsDependencies(self):
+ with self.test_session() as sess:
+ v = tf.Variable(0.0)
+ c = tf.constant(10)
+
+ # Fetching v directly will result in an uninitialized error
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ sess.run([c, v])
+
+ # Use a control dependency to ensure init_variable is run
+ # while asking for c
+ real_v = control_flow_ops.with_dependencies(name="real_tensor",
+ output_tensor=v,
+ dependencies=[v.initializer])
+ c_val, real_v_val = sess.run([c, real_v])
+
+      # Ensure the fetched value of 'c' is unchanged
+ self.assertAllEqual(10, c_val)
+
+ # Ensure that 'v' is initialized
+ self.assertAllClose(0.0, real_v_val)
+
+ def testWithTensorDependencies(self):
+ with self.test_session():
+ v = tf.Variable(0.0)
+ c1 = tf.constant(10)
+ c2 = tf.constant(20)
+
+ # c1_with_init_v depends on the init op for v
+ c1_with_init_v = control_flow_ops.with_dependencies(
+ name="c1_with_init_v",
+ output_tensor=c1,
+ dependencies=[v.initializer])
+      # c2_with_c1_dep depends on the value of c1_with_init_v
+ c2_with_c1_dep = control_flow_ops.with_dependencies(
+ name="c2_with_c1_dep",
+ output_tensor=c2,
+ dependencies=[c1_with_init_v])
+
+ # Fetching v directly will result in an uninitialized error
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ v.eval()
+
+ # Get the value of 'c2_with_c1_dep', which should cause 'v'
+ # to be initialized.
+ self.assertAllEqual(20, c2_with_c1_dep.eval())
+
+ # Ensure that 'v' is initialized
+ self.assertAllClose(0.0, v.eval())
+
+ def testWithIndexedSlicesDependencies(self):
+ with self.test_session():
+ v = tf.Variable(
+ np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
+ v_at_1 = tf.IndexedSlices(v, tf.constant([1]))
+ gather_v_at_1 = tf.gather(v_at_1.values, v_at_1.indices)
+ v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
+ v_at_1)
+ gather_v_at_1_after_init = tf.gather(
+ v_at_1_after_init.values, v_at_1_after_init.indices)
+
+ # Fetching gather_v_at_1 will result in an uninitialized error
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ gather_v_at_1.eval()
+
+ # Getting gather_v_at_1_after_init will work, and initialize v.
+ self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
+
+ # Double check that 'v' is initialized
+ self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
+
+ def testDependenciesDevice(self):
+ with tf.Graph().as_default():
+ # device set on tensor => same device on dep.
+ with tf.device("/job:ps"):
+ vd = tf.Variable([0.0])
+ with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
+ self.assertTrue("/job:ps" in with_vd_dep.device)
+
+ # No device set on tensor => no device on dep.
+ vnod = tf.Variable([0.0])
+ with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
+ vnod)
+      self.assertEqual(None, with_vnod_dep.device)
+
+ # device set on tensor, default device on graph => default device on dep.
+ vdef = tf.Variable([0.0])
+ with tf.device("/job:worker/gpu:1"):
+ with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
+ vdef)
+ self.assertEquals("/job:worker/gpu:1", with_vdef_dep.device)
+
+ def testGroup(self):
+ with self.test_session() as sess:
+ v1 = tf.Variable([0.0])
+ v2 = tf.Variable([1.0])
+
+ # Group init1 and init2 and run.
+ init = control_flow_ops.group(v1.initializer, v2.initializer)
+ # Fetching v1 directly will result in an uninitialized error
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ v1.eval()
+
+ # Runs "init" before fetching v1 and v2.
+ init.run()
+ v1_val, v2_val = sess.run([v1, v2])
+
+ # Ensure that v1 and v2 are initialized
+ self.assertAllClose([0.0], v1_val)
+ self.assertAllClose([1.0], v2_val)
+
+ def testMergeShapes(self):
+ # All inputs unknown.
+ p1 = tf.placeholder(tf.float32)
+ p2 = tf.placeholder(tf.float32)
+ p3 = tf.placeholder(tf.float32)
+ m, index = control_flow_ops.merge([p1, p2, p3])
+ self.assertIs(None, m.get_shape().ndims)
+ self.assertEqual([], index.get_shape())
+
+ # All inputs known but different.
+ p1 = tf.placeholder(tf.float32, shape=[1, 2])
+ p2 = tf.placeholder(tf.float32, shape=[2, 1])
+ m, index = control_flow_ops.merge([p1, p2])
+ self.assertIs(None, m.get_shape().ndims)
+ self.assertEqual([], index.get_shape())
+
+ # All inputs known but same.
+ p1 = tf.placeholder(tf.float32, shape=[1, 2])
+ p2 = tf.placeholder(tf.float32, shape=[1, 2])
+ m, index = control_flow_ops.merge([p1, p2])
+ self.assertEqual([1, 2], m.get_shape())
+ self.assertEqual([], index.get_shape())
+
+ # Possibly the same but not guaranteed.
+ p1 = tf.placeholder(tf.float32, shape=[1, 2])
+ p2 = tf.placeholder(tf.float32)
+ p2.set_shape([None, 2])
+ m, index = control_flow_ops.merge([p1, p2])
+ self.assertIs(None, m.get_shape().ndims)
+ self.assertEqual([], index.get_shape())
+
+ def testRefSelect(self):
+ index = tf.placeholder(tf.int32)
+
+ # All inputs unknown.
+ p1 = tf.placeholder(tf.float32_ref)
+ p2 = tf.placeholder(tf.float32_ref)
+ p3 = tf.placeholder(tf.float32_ref)
+ s = control_flow_ops.ref_select(index, [p1, p2, p3])
+ self.assertIs(None, s.get_shape().ndims)
+
+ # All inputs known but different.
+ p1 = tf.placeholder(tf.float32_ref, shape=[1, 2])
+ p2 = tf.placeholder(tf.float32_ref, shape=[2, 1])
+ s = control_flow_ops.ref_select(index, [p1, p2])
+ self.assertIs(None, s.get_shape().ndims)
+
+ # All inputs known but same.
+ p1 = tf.placeholder(tf.float32_ref, shape=[1, 2])
+ p2 = tf.placeholder(tf.float32_ref, shape=[1, 2])
+ s = control_flow_ops.ref_select(index, [p1, p2])
+ self.assertEqual([1, 2], s.get_shape())
+
+ # Possibly the same but not guaranteed.
+ p1 = tf.placeholder(tf.float32_ref, shape=[1, 2])
+ p2 = tf.placeholder(tf.float32_ref)
+ p2.set_shape([None, 2])
+ s = control_flow_ops.ref_select(index, [p1, p2])
+ self.assertEqual(None, s.get_shape())
+
+
+class TupleTest(tf.test.TestCase):
+
+ def testTensors(self):
+ for v1_first in [True, False]:
+ with self.test_session():
+ v1 = tf.Variable([1.0])
+ add1 = tf.add(
+ control_flow_ops.with_dependencies([v1.initializer], v1),
+ 2.0)
+ v2 = tf.Variable([10.0])
+        add2 = tf.add(
+            control_flow_ops.with_dependencies([v2.initializer], v2),
+            20.0)
+ t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
+
+ # v1 is not initialized.
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ v1.eval()
+
+ # v2 is not initialized.
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ v2.eval()
+
+ if v1_first:
+ # Getting t1 initializes v2.
+ self.assertAllClose([3.0], t1.eval())
+ self.assertAllClose([10.0], v2.eval())
+ else:
+ # Getting t2 initializes v1.
+ self.assertAllClose([30.0], t2.eval())
+ self.assertAllClose([1.0], v1.eval())
+
+ def testIndexedSlices(self):
+ for v1_first in [True, False]:
+ with self.test_session():
+ v1 = tf.Variable(
+ np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
+ np.float32))
+ v1_at_1 = tf.IndexedSlices(
+ control_flow_ops.with_dependencies([v1.initializer], v1),
+ tf.constant([1]))
+
+ v2 = tf.Variable(
+ np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
+ np.float32))
+ v2_at_1 = tf.IndexedSlices(
+ control_flow_ops.with_dependencies([v2.initializer], v2),
+ tf.constant([1]))
+
+ st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
+ g1 = tf.gather(st1.values, st1.indices)
+ g2 = tf.gather(st2.values, st2.indices)
+
+ # v1 is not initialized.
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ v1.eval()
+
+ # v2 is not initialized.
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ v2.eval()
+
+ if v1_first:
+ # Getting g1 initializes v2.
+ self.assertAllClose([[10.0, 11.0]], g1.eval())
+ self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
+ v2.eval())
+ else:
+ # Getting g2 initializes v1.
+ self.assertAllClose([[10.1, 11.1]], g2.eval())
+ self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
+ v1.eval())
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py
new file mode 100644
index 0000000000..7f5d419c98
--- /dev/null
+++ b/tensorflow/python/kernel_tests/conv_ops_test.py
@@ -0,0 +1,1009 @@
+"""Functional tests for convolutional operations."""
+import math
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+def GetInceptionShapes():
+ """Iterator for the convolution shapes used in the Inception 2015 model.
+
+ Yields:
+ Tuple (input_size, filter_size, out_size, stride, padding), the convolution
+ parameters of Inception layers.
+ """
+ input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
+ [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
+ [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
+ [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
+ [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
+ [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
+ [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
+ [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
+ [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
+ [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
+ [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
+ [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
+ [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
+ [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
+ [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
+ [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
+ [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
+ [4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
+ [4, 147, 147, 24]]
+ filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
+ [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
+ [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
+ [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
+ [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
+ [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
+ [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
+ [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
+ [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
+ [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
+ [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
+ [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
+ [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
+ [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
+ [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
+ [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
+ [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
+ [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
+ [1, 1, 24, 64]]
+ out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
+ [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
+ [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
+ [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
+ [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
+ [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
+ [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
+ [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
+ [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
+ [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
+ [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
+ [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
+ [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
+ [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
+ [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
+ [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
+ [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
+ [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
+ [4, 147, 147, 64]]
+ strides = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ # pylint: disable=invalid-name
+ VALID = "VALID"
+ SAME = "SAME"
+ # pylint: enable=invalid-name
+ paddings = [SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
+ SAME, SAME, SAME, SAME, VALID, SAME, SAME, VALID,
+ SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
+ SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
+ SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
+ SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
+ SAME, SAME, SAME, SAME, VALID, VALID, VALID]
+ for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes,
+ strides, paddings):
+ yield i, f, o, s, p
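+
+# Illustrative use of the generator above (the __main__ block at the bottom
+# of this file consumes it the same way):
+#   for in_size, filt_size, out_size, stride, pad in GetInceptionShapes():
+#     ...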
+
+
+class Conv2DTest(tf.test.TestCase):
+
+ def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
+ padding, use_gpu):
+ """Verifies the output values of the convolution function.
+
+ Args:
+ tensor_in_sizes: Input tensor dimensions in
+ [batch, input_rows, input_cols, input_depth].
+ filter_in_sizes: Filter tensor dimensions in
+ [kernel_rows, kernel_cols, input_depth, output_depth].
+ stride: Stride.
+ padding: Padding type.
+ use_gpu: True if the operations should be run on GPU
+ Returns:
+ Symbolic tensor value that can be used to execute the computation
+ """
+ total_size_1 = 1
+ total_size_2 = 1
+ for s in tensor_in_sizes:
+ total_size_1 *= s
+ for s in filter_in_sizes:
+ total_size_2 *= s
+    # Initializes the input and filter tensors with arrays of incrementing
+    # numbers starting from 1.
+ x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
+ x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
+    with self.test_session(use_gpu=use_gpu):
+ t1 = tf.constant(x1, shape=tensor_in_sizes)
+ t2 = tf.constant(x2, shape=filter_in_sizes)
+ conv = tf.nn.conv2d(t1, t2,
+ strides=[1, stride, stride, 1],
+ padding=padding)
+ return conv
+
+ def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes,
+ stride, padding):
+ """Verifies that CPU and GPU produce the same values.
+
+ Args:
+ tensor_in_sizes: Input tensor dimensions in
+ [batch, input_rows, input_cols, input_depth].
+ filter_in_sizes: Filter tensor dimensions in
+ [kernel_rows, kernel_cols, input_depth, output_depth].
+ stride: Stride.
+ padding: Padding type.
+ """
+ x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
+ x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
+ def _SetupVal(use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ t1 = tf.constant(x1, shape=tensor_in_sizes)
+ t2 = tf.constant(x2, shape=filter_in_sizes)
+ conv = tf.nn.conv2d(t1, t2, strides=[1, stride, stride, 1],
+ padding=padding)
+ return conv
+ gpu_tensor = _SetupVal(use_gpu=True)
+ cpu_tensor = _SetupVal(use_gpu=False)
+ with self.test_session() as sess:
+ (gpu_value, cpu_value) = sess.run([gpu_tensor, cpu_tensor])
+ self.assertAllClose(cpu_value, gpu_value, rtol=1e-5, atol=1e-5)
+
+ def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride,
+ padding, expected):
+ tensor_cpu = self._SetupValuesForDevice(tensor_in_sizes, filter_in_sizes,
+ stride, padding, use_gpu=False)
+ tensor_gpu = self._SetupValuesForDevice(tensor_in_sizes, filter_in_sizes,
+ stride, padding, use_gpu=True)
+ with self.test_session() as sess:
+ tensors = [tensor_cpu, tensor_gpu]
+ (value_cpu, value_gpu) = sess.run(tensors)
+ values = [value_cpu, value_gpu]
+ for i in range(len(tensors)):
+ conv = tensors[i]
+ value = values[i]
+ print "expected = ", expected
+ print "actual = ", value
+ self.assertArrayNear(expected, np.ravel(value), 1e-5)
+ self.assertShapeEqual(value, conv)
+
+ def testConv2D1x1Filter(self):
+ expected_output = [30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
+ 138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0,
+ 312.0]
+ self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[1, 1, 3, 3],
+ stride=1, padding="VALID",
+ expected=expected_output)
+
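+  # Hand check of testConv2D1x1Filter above: the input holds the values
+  # 1..18, so the pixel at (0, 0) has depth values (1, 2, 3); the 1x1 filter
+  # holds 1..9 laid out [in_depth, out_depth] row-major, i.e.
+  # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. Output depth 0 at (0, 0) is then
+  # 1*1 + 2*4 + 3*7 = 30 and depth 1 is 1*2 + 2*5 + 3*8 = 36, matching the
+  # first entries of expected_output.
+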
+ def testConv2D2x2Filter(self):
+ # The outputs are computed using third_party/py/IPython/notebook.
+ expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
+ self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[2, 2, 3, 3],
+ stride=1, padding="VALID",
+ expected=expected_output)
+
+ def testConv2D1x2Filter(self):
+ # The outputs are computed using third_party/py/IPython/notebook.
+ expected_output = [231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0,
+ 765.0, 840.0, 843.0, 936.0, 1029.0]
+ self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[1, 2, 3, 3],
+ stride=1, padding="VALID",
+ expected=expected_output)
+
+ def testConv2D2x2FilterStride2(self):
+ expected_output = [2271.0, 2367.0, 2463.0]
+ self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[2, 2, 3, 3],
+ stride=2, padding="VALID",
+ expected=expected_output)
+
+ def testConv2D2x2FilterStride2Same(self):
+ expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
+ self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
+ filter_in_sizes=[2, 2, 3, 3],
+ stride=2, padding="SAME",
+ expected=expected_output)
+
+ # Testing for backprops
+ def _RunAndVerifyBackpropInput(self, input_sizes, filter_sizes, output_sizes,
+ stride, padding, expected, use_gpu):
+ total_output_size = 1
+ total_filter_size = 1
+ for s in output_sizes:
+ total_output_size *= s
+ for s in filter_sizes:
+ total_filter_size *= s
+    # Initializes the filter and output-gradient tensors with arrays of
+    # incrementing numbers starting from 1.
+ x1 = [f * 1.0 for f in range(1, total_filter_size + 1)]
+ x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
+ with self.test_session(use_gpu=use_gpu) as sess:
+ t0 = tf.constant(input_sizes, shape=[len(input_sizes)])
+ t1 = tf.constant(x1, shape=filter_sizes)
+ t2 = tf.constant(x2, shape=output_sizes)
+ conv = tf.nn.conv2d_backprop_input(t0, t1, t2,
+ strides=[1, stride, stride, 1],
+ padding=padding)
+ # "values" consists of two tensors for two backprops
+ value = sess.run(conv)
+ self.assertShapeEqual(value, conv)
+ print "expected = ", expected
+ print "actual = ", value
+ self.assertArrayNear(expected, value.flatten(), 1e-5)
+
+ def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
+ stride, padding):
+ x1 = np.random.rand(*filter_sizes).astype(np.float32)
+ x2 = np.random.rand(*output_sizes).astype(np.float32)
+ def _GetVal(use_gpu):
+ with self.test_session(use_gpu=use_gpu) as sess:
+ t0 = tf.constant(input_sizes, shape=[len(input_sizes)])
+ t1 = tf.constant(x1, shape=filter_sizes)
+ t2 = tf.constant(x2, shape=output_sizes)
+ conv = tf.nn.conv2d_backprop_input(t0, t1, t2,
+ strides=[1, stride, stride, 1],
+ padding=padding)
+ ret = conv.eval()
+ self.assertShapeEqual(ret, conv)
+ return ret
+ gpu_value = _GetVal(use_gpu=True)
+ cpu_value = _GetVal(use_gpu=False)
+ self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
+
+ def testConv2D2x2Depth1ValidBackpropInput(self):
+ expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
+ self._RunAndVerifyBackpropInput(input_sizes=[1, 2, 3, 1],
+ filter_sizes=[2, 2, 1, 1],
+ output_sizes=[1, 1, 2, 1],
+ stride=1, padding="VALID",
+ expected=expected_output, use_gpu=False)
+ self._RunAndVerifyBackpropInput(input_sizes=[1, 2, 3, 1],
+ filter_sizes=[2, 2, 1, 1],
+ output_sizes=[1, 1, 2, 1],
+ stride=1, padding="VALID",
+ expected=expected_output, use_gpu=True)
+
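+  # Hand check of the depth-1 VALID case above: the filter is [[1, 2], [3, 4]]
+  # and the two output gradients are 1 (column 0) and 2 (column 1). Each
+  # input cell sums grad * weight over the output windows that cover it, e.g.
+  # input (0, 1): 1*2 + 2*1 = 4, and input (1, 1): 1*4 + 2*3 = 10, matching
+  # expected_output.
+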
+ def testConv2D2x2Depth3ValidBackpropInput(self):
+ expected_output = [14.0, 32.0, 50.0,
+ 100.0, 163.0, 226.0,
+ 167.0, 212.0, 257.0,
+ 122.0, 140.0, 158.0,
+ 478.0, 541.0, 604.0,
+ 437.0, 482.0, 527.0]
+ self._RunAndVerifyBackpropInput(input_sizes=[1, 2, 3, 3],
+ filter_sizes=[2, 2, 3, 3],
+ output_sizes=[1, 1, 2, 3],
+ stride=1, padding="VALID",
+ expected=expected_output, use_gpu=False)
+ self._RunAndVerifyBackpropInput(input_sizes=[1, 2, 3, 3],
+ filter_sizes=[2, 2, 3, 3],
+ output_sizes=[1, 1, 2, 3],
+ stride=1, padding="VALID",
+ expected=expected_output, use_gpu=True)
+
+ # Testing for backprops
+ def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
+ stride, padding, expected, use_gpu):
+ total_input_size = 1
+ total_output_size = 1
+ for s in input_sizes:
+ total_input_size *= s
+ for s in output_sizes:
+ total_output_size *= s
+    # Initializes the input and output-gradient tensors with arrays of
+    # incrementing numbers starting from 1.
+ x0 = [f * 1.0 for f in range(1, total_input_size + 1)]
+ x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
+ with self.test_session(use_gpu=use_gpu) as sess:
+ t0 = tf.constant(x0, shape=input_sizes)
+ t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
+ t2 = tf.constant(x2, shape=output_sizes)
+ conv = tf.nn.conv2d_backprop_filter(t0, t1, t2,
+ strides=[1, stride, stride, 1],
+ padding=padding)
+ value = sess.run(conv)
+ self.assertShapeEqual(value, conv)
+ print "expected = ", expected
+ print "actual = ", value
+ self.assertArrayNear(expected, value.flatten(), 1e-5)
+
+ def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
+ stride, padding):
+ x0 = np.random.rand(*input_sizes).astype(np.float32)
+ x2 = np.random.rand(*output_sizes).astype(np.float32)
+ def _GetVal(use_gpu):
+ with self.test_session(use_gpu=use_gpu) as sess:
+ t0 = tf.constant(x0, shape=input_sizes)
+ t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
+ t2 = tf.constant(x2, shape=output_sizes)
+ conv = tf.nn.conv2d_backprop_filter(t0, t1, t2,
+ strides=[1, stride, stride, 1],
+ padding=padding)
+ ret = conv.eval()
+ self.assertShapeEqual(ret, conv)
+ return ret
+ gpu_value = _GetVal(use_gpu=True)
+ cpu_value = _GetVal(use_gpu=False)
+ self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
+
+ def testConv2D2x2Depth1ValidBackpropFilter(self):
+ expected = [5.0, 8.0, 14.0, 17.0]
+ self._RunAndVerifyBackpropFilter(input_sizes=[1, 2, 3, 1],
+ filter_sizes=[2, 2, 1, 1],
+ output_sizes=[1, 1, 2, 1],
+ stride=1, padding="VALID",
+ expected=expected, use_gpu=False)
+ self._RunAndVerifyBackpropFilter(input_sizes=[1, 2, 3, 1],
+ filter_sizes=[2, 2, 1, 1],
+ output_sizes=[1, 1, 2, 1],
+ stride=1, padding="VALID",
+ expected=expected, use_gpu=True)
+
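+  # Hand check of the depth-1 VALID case above: the input is
+  # [[1, 2, 3], [4, 5, 6]] and the output gradients are 1 and 2. Each filter
+  # weight sums grad * input over the two patches, e.g. filter (0, 0):
+  # 1*1 + 2*2 = 5, and filter (1, 1): 1*5 + 2*6 = 17, matching expected.
+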
+ def testConv2D2x2Depth3ValidBackpropFilter(self):
+ expected = [17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0,
+ 32.0, 43.0, 54.0, 37.0, 50.0, 63.0, 42.0, 57.0, 72.0,
+ 62.0, 85.0, 108.0, 67.0, 92.0, 117.0, 72.0, 99.0, 126.0,
+ 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0, 120.0, 153.0]
+ self._RunAndVerifyBackpropFilter(input_sizes=[1, 2, 3, 3],
+ filter_sizes=[2, 2, 3, 3],
+ output_sizes=[1, 1, 2, 3],
+ stride=1, padding="VALID",
+ expected=expected, use_gpu=False)
+ self._RunAndVerifyBackpropFilter(input_sizes=[1, 2, 3, 3],
+ filter_sizes=[2, 2, 3, 3],
+ output_sizes=[1, 1, 2, 3],
+ stride=1, padding="VALID",
+ expected=expected, use_gpu=True)
+
+ # Gradient checkers
+ def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows,
+ filter_cols, in_depth, out_depth, stride,
+ padding, test_input, use_gpu):
+ input_shape = [batch, input_rows, input_cols, in_depth]
+ filter_shape = [filter_rows, filter_cols, in_depth, out_depth]
+ # TODO(yangke): re-factor the computation of output shape.
+ if padding == "VALID":
+ output_rows = int(math.ceil((input_rows - filter_rows + 1.0) / stride))
+ output_cols = int(math.ceil((input_cols - filter_cols + 1.0) / stride))
+ else:
+ output_rows = int(math.ceil(float(input_rows) / stride))
+ output_cols = int(math.ceil(float(input_cols) / stride))
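+    # For example, with VALID padding, input_rows=5, filter_rows=3, stride=2:
+    # ceil((5 - 3 + 1) / 2.0) = 2 output rows; with SAME padding,
+    # input_rows=5, stride=2: ceil(5 / 2.0) = 3.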
+ output_shape = [batch, output_rows, output_cols, out_depth]
+ input_size = 1
+ for x in input_shape:
+ input_size *= x
+ filter_size = 1
+ for x in filter_shape:
+ filter_size *= x
+ input_data = [x * 1.0 / input_size for x in range(0, input_size)]
+ filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
+ with self.test_session(use_gpu=use_gpu):
+ # Conv2DGrad functions are not compiled for double due to
+ # a problem in the way Eigen's Conv2DGrad works for double.
+ # So we disable the DOUBLE path. We should re-enable this
+ # when double support returns for CPU and/or GPU.
+ # data_type = tf.float64
+ # tolerance = 1e-8
+
+ data_type = tf.float32
+ tolerance = 0.002
+
+ input_tensor = tf.constant(input_data, shape=input_shape,
+ dtype=data_type, name="input")
+ filter_tensor = tf.constant(filter_data, shape=filter_shape,
+ dtype=data_type, name="filter")
+ conv = tf.nn.conv2d(input_tensor, filter_tensor,
+ [1, stride, stride, 1], padding,
+ name="conv")
+ self.assertEqual(output_shape, conv.get_shape())
+ if test_input:
+ err = gc.ComputeGradientError(input_tensor, input_shape,
+ conv, output_shape)
+ else:
+ err = gc.ComputeGradientError(filter_tensor, filter_shape,
+ conv, output_shape)
+ print "conv_2d gradient error = ", err
+ self.assertLess(err, tolerance)
+
+ def testInputGradientValidPaddingStrideOne(self):
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=5,
+ input_cols=4,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="VALID",
+ test_input=True,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=5,
+ input_cols=4,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="VALID",
+ test_input=True,
+ use_gpu=True)
+
+ def testFilterGradientValidPaddingStrideOne(self):
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="VALID",
+ test_input=False,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="VALID",
+ test_input=False,
+ use_gpu=True)
+
+ def testInputGradientValidPaddingStrideTwo(self):
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=4,
+ input_cols=5,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="VALID",
+ test_input=True,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=4,
+ input_cols=5,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="VALID",
+ test_input=True,
+ use_gpu=True)
+
+ def testFilterGradientValidPaddingStrideTwo(self):
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="VALID",
+ test_input=False,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="VALID",
+ test_input=False,
+ use_gpu=True)
+
+ def testInputGradientValidPaddingStrideThree(self):
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=7,
+ input_cols=6,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=4,
+ out_depth=5,
+ stride=3,
+ padding="VALID",
+ test_input=True,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=7,
+ input_cols=6,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=4,
+ out_depth=5,
+ stride=3,
+ padding="VALID",
+ test_input=True,
+ use_gpu=True)
+
+ def testFilterGradientValidPaddingStrideThree(self):
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=8,
+ input_cols=7,
+ filter_rows=4,
+ filter_cols=4,
+ in_depth=2,
+ out_depth=3,
+ stride=3,
+ padding="VALID",
+ test_input=False,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=8,
+ input_cols=7,
+ filter_rows=4,
+ filter_cols=4,
+ in_depth=2,
+ out_depth=3,
+ stride=3,
+ padding="VALID",
+ test_input=False,
+ use_gpu=True)
+
+ def testInputGradientSamePaddingStrideOne(self):
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=7,
+ input_cols=6,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="SAME",
+ test_input=True,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=7,
+ input_cols=6,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="SAME",
+ test_input=True,
+ use_gpu=True)
+
+ def testFilterGradientSamePaddingStrideOne(self):
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="SAME",
+ test_input=False,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=1,
+ padding="SAME",
+ test_input=False,
+ use_gpu=True)
+
+ def testInputGradientSamePaddingStrideTwo(self):
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=5,
+ input_cols=4,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=3,
+ out_depth=3,
+ stride=2,
+ padding="SAME",
+ test_input=True,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=5,
+ input_cols=4,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=3,
+ out_depth=3,
+ stride=2,
+ padding="SAME",
+ test_input=True,
+ use_gpu=True)
+
+ def testFilterGradientSamePaddingStrideTwo(self):
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="SAME",
+ test_input=False,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=4,
+ input_rows=6,
+ input_cols=5,
+ filter_rows=2,
+ filter_cols=2,
+ in_depth=2,
+ out_depth=3,
+ stride=2,
+ padding="SAME",
+ test_input=False,
+ use_gpu=True)
+
+ def testInputGradientSamePaddingStrideThree(self):
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=7,
+ input_cols=6,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=4,
+ out_depth=5,
+ stride=3,
+ padding="SAME",
+ test_input=True,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=7,
+ input_cols=6,
+ filter_rows=3,
+ filter_cols=3,
+ in_depth=4,
+ out_depth=5,
+ stride=3,
+ padding="SAME",
+ test_input=True,
+ use_gpu=True)
+
+ def testFilterGradientSamePaddingStrideThree(self):
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=8,
+ input_cols=7,
+ filter_rows=4,
+ filter_cols=4,
+ in_depth=2,
+ out_depth=3,
+ stride=3,
+ padding="SAME",
+ test_input=False,
+ use_gpu=False)
+ self.ConstructAndTestGradient(
+ batch=2,
+ input_rows=8,
+ input_cols=7,
+ filter_rows=4,
+ filter_cols=4,
+ in_depth=2,
+ out_depth=3,
+ stride=3,
+ padding="SAME",
+ test_input=False,
+ use_gpu=True)
+
+ def testShapeFunctionEdgeCases(self):
+ # All shapes unknown.
+ c1 = tf.nn.conv2d(tf.placeholder(tf.float32),
+ tf.placeholder(tf.float32),
+ strides=[1, 1, 1, 1], padding="SAME")
+ self.assertEqual([None, None, None, None], c1.get_shape().as_list())
+
+ # Incorrect input shape.
+ with self.assertRaises(ValueError):
+ tf.nn.conv2d(tf.placeholder(tf.float32, shape=[1, 3]),
+ tf.placeholder(tf.float32),
+ strides=[1, 1, 1, 1], padding="SAME")
+
+ # Incorrect filter shape.
+ with self.assertRaises(ValueError):
+ tf.nn.conv2d(tf.placeholder(tf.float32),
+ tf.placeholder(tf.float32, shape=[1, 3]),
+ strides=[1, 1, 1, 1], padding="SAME")
+
+ # Depth mismatch.
+ with self.assertRaises(ValueError):
+ tf.nn.conv2d(tf.placeholder(tf.float32,
+ shape=[32, 20, 20, 3]),
+ tf.placeholder(tf.float32,
+ shape=[4, 4, 2, 2]),
+ strides=[1, 1, 1, 1], padding="SAME")
+
+ # Illegal strides.
+ with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"):
+ tf.nn.conv2d(tf.placeholder(tf.float32),
+ tf.placeholder(tf.float32),
+ strides=[2, 1, 1, 1], padding="SAME")
+ with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"):
+ tf.nn.conv2d(tf.placeholder(tf.float32),
+ tf.placeholder(tf.float32),
+ strides=[1, 1, 1, 2], padding="SAME")
+
+ # Filter larger than input.
+ with self.assertRaisesRegexp(ValueError,
+ "filter must not be larger than the input"):
+ tf.nn.conv2d(tf.placeholder(tf.float32,
+ shape=[32, 20, 20, 3]),
+ tf.placeholder(tf.float32,
+ shape=[20, 21, 3, 2]),
+ strides=[1, 1, 1, 1], padding="SAME")
+ with self.assertRaisesRegexp(ValueError,
+ "filter must not be larger than the input"):
+ tf.nn.conv2d(tf.placeholder(tf.float32,
+ shape=[32, 20, 20, 3]),
+ tf.placeholder(tf.float32,
+ shape=[21, 20, 3, 2]),
+ strides=[1, 1, 1, 1], padding="SAME")
+
+ # Stride larger than filter.
+ with self.assertRaisesRegexp(ValueError,
+ "stride must be less than or equal to filter"):
+ tf.nn.conv2d(tf.placeholder(tf.float32,
+ shape=[32, 20, 20, 3]),
+ tf.placeholder(tf.float32,
+ shape=[4, 5, 3, 2]),
+ strides=[1, 5, 5, 1], padding="SAME")
+ with self.assertRaisesRegexp(ValueError,
+ "stride must be less than or equal to filter"):
+ tf.nn.conv2d(tf.placeholder(tf.float32,
+ shape=[32, 20, 20, 3]),
+ tf.placeholder(tf.float32,
+ shape=[5, 4, 3, 2]),
+ strides=[1, 5, 5, 1], padding="SAME")
+
+ # Invalid rectangular stride.
+ with self.assertRaisesRegexp(ValueError,
+ "equal length strides in the row and column"):
+ tf.nn.conv2d(tf.placeholder(tf.float32),
+ tf.placeholder(tf.float32),
+ strides=[1, 3, 7, 1], padding="SAME")
+
+
+# This is only a very simple test. More comprehensive tests live in
+# //learning/dist_belief/experimental/brain_compatibility/conv_nn_test.py
+# where we compare the numeric results of the depthwise conv op with the
+# depthwise weighted sum transformer in dist_belief.
+class DepthwiseConv2DTest(tf.test.TestCase):
+
+ def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride,
+ padding, expected):
+ """Verifies the output values of the convolution function.
+
+ Args:
+ tensor_in_sizes: Input tensor dimensions in
+ [batch, input_rows, input_cols, input_depth].
+ filter_in_sizes: Filter tensor dimensions in
+ [filter_rows, filter_cols, input_depth, depth_multiplier].
+ stride: Stride.
+ padding: Padding type.
+ expected: An array containing the expected operation outputs.
+ """
+ total_size_1 = 1
+ total_size_2 = 1
+ for s in tensor_in_sizes:
+ total_size_1 *= s
+ for s in filter_in_sizes:
+ total_size_2 *= s
+    # Initializes the input and filter tensors with arrays of incrementing
+    # numbers starting from 1.
+ x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
+ x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
+ with self.test_session() as sess:
+ t1 = tf.constant(x1, shape=tensor_in_sizes)
+ t1.set_shape(tensor_in_sizes)
+ t2 = tf.constant(x2, shape=filter_in_sizes)
+ conv = tf.nn.depthwise_conv2d(t1, t2, strides=[1, stride, stride, 1],
+ padding=padding)
+ value = sess.run(conv)
+ print "value = ", value
+ self.assertArrayNear(expected, np.ravel(value), 1e-5)
+ self.assertShapeEqual(value, conv)
+
+ def testConv2D2x2Filter(self):
+    # The inputs look like this (it's a 2 x 3 matrix, each entry of depth 2):
+ #
+ # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
+ # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
+ # We can view this as two inputs
+ #
+ # input depth 0:
+ #
+ # [ 1.0, 3.0, 5.0 ]
+ # [ 7.0, 9.0, 11.0 ]
+ #
+ # input depth 1:
+ #
+ # [ 2.0, 4.0, 6.0 ]
+ # [ 8.0, 10.0, 12.0 ]
+ #
+ # The filter looks like this (it has two 2 x 2 patches, each generating 2
+ # depths):
+ #
+ # filter #0:
+ #
+ # [ (1.0, 3.0), ( 5.0, 7.0)]
+ # [ (9.0, 11.0), (13.0, 15.0)]
+ #
+ # filter #1:
+ #
+ # [ ( 2.0, 4.0), ( 6.0, 8.0)]
+ # [ (10.0, 12.0), (14.0, 16.0)]
+ #
+ # So the outputs are:
+ #
+ # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
+ # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
+ # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
+ # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
+ # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
+ # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
+ # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
+ # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
+ #
+ # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
+ # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
+ # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
+ # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
+ # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
+ # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
+ # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
+ # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
+ expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
+ self._VerifyValues(tensor_in_sizes=[1, 2, 3, 2],
+ filter_in_sizes=[2, 2, 2, 2],
+ stride=1, padding="VALID",
+ expected=expected_output)
+
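+
+# A minimal NumPy cross-check sketch for the depthwise case above (a
+# hypothetical helper, assuming VALID padding and stride 1; it is not used by
+# the test): filter[:, :, d, m] is applied to input channel d and written to
+# output channel d * depth_multiplier + m.
+def _ReferenceDepthwiseConv2DValid(x, f):
+  """x: [rows, cols, in_depth]; f: [f_rows, f_cols, in_depth, multiplier]."""
+  f_rows, f_cols, in_depth, multiplier = f.shape
+  out_rows = x.shape[0] - f_rows + 1
+  out_cols = x.shape[1] - f_cols + 1
+  out = np.zeros((out_rows, out_cols, in_depth * multiplier))
+  for r in range(out_rows):
+    for c in range(out_cols):
+      # One [f_rows, f_cols, in_depth] window of the input.
+      patch = x[r:r + f_rows, c:c + f_cols, :]
+      for d in range(in_depth):
+        for m in range(multiplier):
+          # Correlate one input channel with one 2-D filter slice.
+          out[r, c, d * multiplier + m] = np.sum(
+              patch[:, :, d] * f[:, :, d, m])
+  return out
+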
+
+class SeparableConv2DTest(tf.test.TestCase):
+
+ def _InitValues(self, sizes):
+ """Initializes values for input tensors.
+
+ Args:
+ sizes: Tensor dimensions.
+
+ Returns:
+ Tensor initialized to values.
+ """
+ total_size = 1
+ for s in sizes:
+ total_size *= s
+ x = [f * 0.5 for f in range(1, total_size + 1)]
+ return tf.constant(x, shape=sizes)
+
+ def _VerifyValues(self, tensor_in_sizes, depthwise_filter_in_sizes,
+ pointwise_filter_in_sizes, stride, padding, expected):
+ """Verifies the output values of the separable convolution function.
+
+ Args:
+ tensor_in_sizes: Input tensor dimensions.
+ depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
+ pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
+ stride: Stride.
+ padding: Padding type.
+ expected: An array containing the expected operation outputs.
+ """
+ with self.test_session() as sess:
+ t1 = self._InitValues(tensor_in_sizes)
+ f1 = self._InitValues(depthwise_filter_in_sizes)
+ f1.set_shape(depthwise_filter_in_sizes)
+ f2 = self._InitValues(pointwise_filter_in_sizes)
+ conv = tf.nn.separable_conv2d(t1, f1, f2, strides=[1, stride, stride, 1],
+ padding=padding)
+ value = sess.run(conv)
+ print "value = ", value
+ self.assertArrayNear(expected, np.ravel(value), 1e-5)
+ self.assertShapeEqual(value, conv)
+
+ def testSeparableConv2D(self):
+    # The output is the result of two convolutions:
+    # First with tensor_in[1, 4, 4, 2] * depthwise filter[2, 2, 2, 3].
+    # Second with intermediate_out[1, 4, 4, 6] * pointwise filter[1, 1, 6, 7].
+    # Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).
+ expected_output = [
+ 6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
+ 8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
+ 11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
+ 4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
+ 15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
+ 18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
+ 6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
+ 19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
+ 22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
+ 24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
+ 10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
+ 7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
+ 7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
+ 2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75]
+
+ self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
+ depthwise_filter_in_sizes=[2, 2, 2, 3],
+ pointwise_filter_in_sizes=[1, 1, 6, 7],
+ stride=1, padding="SAME",
+ expected=expected_output)
+
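+  # A minimal cross-check sketch (hypothetical helper, not used by the test
+  # above): separable_conv2d should agree with a depthwise convolution
+  # followed by a 1x1 pointwise convolution.
+  def _ReferenceSeparable(self, t, depthwise_filter, pointwise_filter,
+                          stride, padding):
+    depthwise = tf.nn.depthwise_conv2d(t, depthwise_filter,
+                                       strides=[1, stride, stride, 1],
+                                       padding=padding)
+    # For a 1x1 pointwise filter, SAME and VALID padding coincide.
+    return tf.nn.conv2d(depthwise, pointwise_filter,
+                        strides=[1, 1, 1, 1], padding="SAME")
+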
+
+def GetInceptionFwdTest(input_size, filter_size, stride, padding):
+ def Test(self):
+ tf.logging.info("Testing InceptionFwd %s", (input_size, filter_size,
+ stride, padding))
+ self._CompareFwdValues(input_size, filter_size, stride, padding)
+ return Test
+
+
+def GetInceptionBackInputTest(input_size, filter_size, output_size,
+ stride, padding):
+ def Test(self):
+ tf.logging.info("Testing InceptionBackInput %s",
+ (input_size, filter_size, output_size, stride, padding))
+ self._CompareBackpropInput(input_size, filter_size, output_size,
+ stride, padding)
+ return Test
+
+
+def GetInceptionBackFilterTest(input_size, filter_size, output_size,
+ stride, padding):
+ def Test(self):
+ tf.logging.info("Testing InceptionBackFilter %s",
+ (input_size, filter_size, output_size, stride, padding))
+ self._CompareBackFilter(input_size, filter_size, output_size,
+ stride, padding)
+ return Test
+
+
+if __name__ == "__main__":
+ for index, (input_size_, filter_size_, output_size_, stride_,
+ padding_) in enumerate(GetInceptionShapes()):
+ setattr(Conv2DTest, "testInceptionFwd_" + str(index),
+ GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))
+ setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
+ GetInceptionBackInputTest(input_size_, filter_size_, output_size_,
+ stride_, padding_))
+ setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
+ GetInceptionBackFilterTest(input_size_, filter_size_, output_size_,
+ stride_, padding_))
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/cwise_ops_test.py b/tensorflow/python/kernel_tests/cwise_ops_test.py
new file mode 100644
index 0000000000..22491f231a
--- /dev/null
+++ b/tensorflow/python/kernel_tests/cwise_ops_test.py
@@ -0,0 +1,1187 @@
+"""Functional tests for coefficient-wise operations.
+"""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+_ADD = lambda x, y: x + y
+_SUB = lambda x, y: x - y
+_MUL = lambda x, y: x * y
+_DIV = lambda x, y: x / y
+_MOD = lambda x, y: x % y
+_NEG = lambda x: -x
+_ABS = abs
+
+_LT = lambda x, y: x < y
+_LE = lambda x, y: x <= y
+_GT = lambda x, y: x > y
+_GE = lambda x, y: x >= y
+
+_AND = lambda x, y: x & y
+_OR = lambda x, y: x | y
+_XOR = lambda x, y: x ^ y
+_INV = lambda x: ~x
+
+
+class UnaryOpTest(tf.test.TestCase):
+
+ def _compareCpu(self, x, np_func, tf_func):
+ np_ans = np_func(x)
+ with self.test_session(use_gpu=False):
+ inx = tf.convert_to_tensor(x)
+ y = tf_func(inx)
+ tf_cpu = y.eval()
+ self.assertShapeEqual(np_ans, y)
+ self.assertAllClose(np_ans, tf_cpu)
+ if x.dtype == np.float32:
+ s = list(np.shape(x))
+ jacob_t, jacob_n = gc.ComputeGradient(inx, s, y, s, x_init_value=x)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+ elif x.dtype == np.float64:
+ s = list(np.shape(x))
+ jacob_t, jacob_n = gc.ComputeGradient(inx, s, y, s, x_init_value=x)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
+
+ def _compareGpu(self, x, np_func, tf_func):
+ np_ans = np_func(x)
+ with self.test_session(use_gpu=True):
+ result = tf_func(tf.convert_to_tensor(x))
+ tf_gpu = result.eval()
+ self.assertShapeEqual(np_ans, result)
+ self.assertAllClose(np_ans, tf_gpu)
+ # TODO(zhifengc/ke): make gradient checker work on GPU.
+
+ def _compareBoth(self, x, np_func, tf_func):
+ self._compareCpu(x, np_func, tf_func)
+ self._compareGpu(x, np_func, tf_func)
+
+ def _inv(self, x):
+ return 1.0 / x
+
+ def _rsqrt(self, x):
+ return self._inv(np.sqrt(x))
+
+ def _sigmoid(self, x):
+ return 1.0 / (1.0 + np.exp(-x))
+
+ def testFloatBasic(self):
+ x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
+ y = (x + .5).astype(np.float32) # no zero
+ z = (x + 15.5).astype(np.float32) # all positive
+ self._compareBoth(x, np.abs, tf.abs)
+ self._compareBoth(x, np.abs, _ABS)
+ self._compareBoth(x, np.negative, tf.neg)
+ self._compareBoth(x, np.negative, _NEG)
+ self._compareBoth(y, self._inv, tf.inv)
+ self._compareBoth(x, np.square, tf.square)
+ self._compareBoth(z, np.sqrt, tf.sqrt)
+ self._compareBoth(z, self._rsqrt, tf.rsqrt)
+ self._compareBoth(x, np.exp, tf.exp)
+ self._compareBoth(z, np.log, tf.log)
+ self._compareBoth(x, np.tanh, tf.tanh)
+ self._compareBoth(x, self._sigmoid, tf.sigmoid)
+ self._compareBoth(y, np.sign, tf.sign)
+ self._compareBoth(x, np.sin, tf.sin)
+ self._compareBoth(x, np.cos, tf.cos)
+
+ def testFloatTanhEdge(self):
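+    # tanh saturates to +/-1 long before |x| reaches 40 in float32, so these
+    # inputs exercise the saturated range rather than the linear region.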
+ x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)
+ self._compareBoth(x, np.tanh, tf.tanh)
+ x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)
+ self._compareBoth(x, np.tanh, tf.tanh)
+
+ def testFloatEmpty(self):
+ x = np.empty((2, 0, 5), dtype=np.float32)
+ self._compareBoth(x, np.abs, tf.abs)
+ self._compareBoth(x, np.abs, _ABS)
+ self._compareBoth(x, np.negative, tf.neg)
+ self._compareBoth(x, np.negative, _NEG)
+ self._compareBoth(x, self._inv, tf.inv)
+ self._compareBoth(x, np.square, tf.square)
+ self._compareBoth(x, np.sqrt, tf.sqrt)
+ self._compareBoth(x, self._rsqrt, tf.rsqrt)
+ self._compareBoth(x, np.exp, tf.exp)
+ self._compareBoth(x, np.log, tf.log)
+ self._compareBoth(x, np.tanh, tf.tanh)
+ self._compareBoth(x, self._sigmoid, tf.sigmoid)
+ self._compareBoth(x, np.sign, tf.sign)
+ self._compareBoth(x, np.sin, tf.sin)
+ self._compareBoth(x, np.cos, tf.cos)
+
+ def testDoubleBasic(self):
+ x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
+ y = (x + .5).astype(np.float64) # no zero
+ z = (x + 15.5).astype(np.float64) # all positive
+ self._compareBoth(x, np.abs, tf.abs)
+ self._compareBoth(x, np.abs, _ABS)
+ self._compareBoth(x, np.negative, tf.neg)
+ self._compareBoth(x, np.negative, _NEG)
+ self._compareBoth(y, self._inv, tf.inv)
+ self._compareBoth(x, np.square, tf.square)
+ self._compareBoth(z, np.sqrt, tf.sqrt)
+ self._compareBoth(z, self._rsqrt, tf.rsqrt)
+ self._compareBoth(x, np.exp, tf.exp)
+ self._compareBoth(z, np.log, tf.log)
+ self._compareBoth(x, np.tanh, tf.tanh)
+ self._compareBoth(x, self._sigmoid, tf.sigmoid)
+ self._compareBoth(y, np.sign, tf.sign)
+ self._compareBoth(x, np.sin, tf.sin)
+ self._compareBoth(x, np.cos, tf.cos)
+
+ def testInt32Basic(self):
+ x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)
+ self._compareCpu(x, np.abs, tf.abs)
+ self._compareCpu(x, np.abs, _ABS)
+ self._compareCpu(x, np.negative, tf.neg)
+ self._compareCpu(x, np.negative, _NEG)
+ self._compareCpu(x, np.square, tf.square)
+ self._compareCpu(x, np.sign, tf.sign)
+
+ def testInt64Basic(self):
+ x = np.arange(
+ -6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
+ self._compareCpu(x, np.abs, tf.abs)
+ self._compareCpu(x, np.abs, _ABS)
+ self._compareCpu(x, np.negative, tf.neg)
+ self._compareCpu(x, np.negative, _NEG)
+ self._compareCpu(x, np.square, tf.square)
+ self._compareCpu(x, np.sign, tf.sign)
+
+ def testComplex64Basic(self):
+ x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
+ np.complex64)
+ y = x + 0.5 # no zeros
+ self._compareCpu(x, np.abs, tf.abs)
+ self._compareCpu(x, np.abs, _ABS)
+ self._compareCpu(x, np.negative, tf.neg)
+ self._compareCpu(x, np.negative, _NEG)
+ self._compareCpu(y, self._inv, tf.inv)
+ self._compareCpu(x, np.square, tf.square)
+ self._compareCpu(x, np.sqrt, tf.sqrt)
+ self._compareCpu(y, self._rsqrt, tf.rsqrt)
+ self._compareCpu(x, np.exp, tf.exp)
+ self._compareCpu(y, np.log, tf.log)
+ self._compareCpu(x, np.tanh, tf.tanh)
+ self._compareCpu(x, self._sigmoid, tf.sigmoid)
+ self._compareCpu(x, np.sin, tf.sin)
+ self._compareCpu(x, np.cos, tf.cos)
+
+
+class BinaryOpTest(tf.test.TestCase):
+
+ def _compareCpu(self, x, y, np_func, tf_func):
+ np_ans = np_func(x, y)
+ with self.test_session(use_gpu=False):
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ out = tf_func(inx, iny)
+ tf_cpu = out.eval()
+ # Test that the op takes precedence over numpy operators.
+ np_left = tf_func(x, iny).eval()
+ np_right = tf_func(inx, y).eval()
+
+ self.assertAllClose(np_ans, tf_cpu)
+ self.assertAllClose(np_ans, np_left)
+ self.assertAllClose(np_ans, np_right)
+ self.assertShapeEqual(np_ans, out)
+
+ def _compareGradientX(self, x, y, np_func, tf_func):
+ z = np_func(x, y)
+ zs = list(z.shape)
+ with self.test_session():
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ out = tf_func(inx, iny)
+ xs = list(x.shape)
+ jacob_t, jacob_n = gc.ComputeGradient(inx, xs, out, zs, x_init_value=x)
+ if x.dtype == np.float32:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+ elif x.dtype == np.float64:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
+
+ def _compareGradientY(self, x, y, np_func, tf_func):
+ z = np_func(x, y)
+ zs = list(z.shape)
+ with self.test_session():
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ out = tf_func(inx, iny)
+ ys = list(np.shape(y))
+ jacob_t, jacob_n = gc.ComputeGradient(iny, ys, out, zs, x_init_value=y)
+ if x.dtype == np.float32:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+ elif x.dtype == np.float64:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
+
+ def _compareGpu(self, x, y, np_func, tf_func):
+ np_ans = np_func(x, y)
+ with self.test_session(use_gpu=True):
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ out = tf_func(inx, iny)
+ tf_gpu = out.eval()
+ self.assertAllClose(np_ans, tf_gpu)
+ self.assertShapeEqual(np_ans, out)
+ # TODO(zhifengc/ke): make gradient checker work on GPU.
+
+ def _compareBoth(self, x, y, np_func, tf_func):
+ self._compareCpu(x, y, np_func, tf_func)
+ if x.dtype == np.float32 or x.dtype == np.float64:
+ self._compareGradientX(x, y, np_func, tf_func)
+ self._compareGradientY(x, y, np_func, tf_func)
+ self._compareGpu(x, y, np_func, tf_func)
+
+ def testFloatBasic(self):
+ x = np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(np.float32)
+ y = np.linspace(20, -20, 6).reshape(1, 3, 2).astype(np.float32)
+ self._compareBoth(x, y, np.add, tf.add)
+ self._compareBoth(x, y, np.subtract, tf.sub)
+ self._compareBoth(x, y, np.multiply, tf.mul)
+ self._compareBoth(x, y + 0.1, np.divide, tf.div)
+ self._compareBoth(x, y, np.add, _ADD)
+ self._compareBoth(x, y, np.subtract, _SUB)
+ self._compareBoth(x, y, np.multiply, _MUL)
+ self._compareBoth(x, y + 0.1, np.divide, _DIV)
+
+ def testFloatDifferentShapes(self):
+ x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
+ y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
+ with self.test_session() as sess:
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ s = tf.reduce_sum(inx * iny)
+ gx, gy = sess.run(tf.gradients(s, [inx, iny]))
+      # gx is simply y broadcast up to x's shape.
+      self.assertAllEqual(gx, np.array([1, 1, 2, 2])
+                          .reshape(2, 2).astype(np.float32))
+      # gy is x summed across its columns (row sums): [1 + 2, 3 + 4] = [3, 7].
+      self.assertAllEqual(gy, np.array([3, 7])
+                          .reshape(2, 1).astype(np.float32))
+
+ def testDoubleBasic(self):
+ x = np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(np.float64)
+ y = np.linspace(20, -20, 6).reshape(1, 3, 2).astype(np.float64)
+ self._compareBoth(x, y, np.add, tf.add)
+ self._compareBoth(x, y, np.subtract, tf.sub)
+ self._compareBoth(x, y, np.multiply, tf.mul)
+ self._compareBoth(x, y + 0.1, np.divide, tf.div)
+ self._compareBoth(x, y, np.add, _ADD)
+ self._compareBoth(x, y, np.subtract, _SUB)
+ self._compareBoth(x, y, np.multiply, _MUL)
+ self._compareBoth(x, y + 0.1, np.divide, _DIV)
+
+ def testInt8Basic(self):
+ x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
+ y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
+ self._compareBoth(x, y, np.multiply, tf.mul)
+ self._compareBoth(x, y, np.multiply, _MUL)
+
+ def testInt16Basic(self):
+ x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
+ y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
+ self._compareBoth(x, y, np.multiply, tf.mul)
+ self._compareBoth(x, y, np.multiply, _MUL)
+
+ def testInt32Basic(self):
+ x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
+ y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
+ self._compareBoth(x, y, np.add, tf.add)
+ self._compareBoth(x, y, np.subtract, tf.sub)
+ self._compareBoth(x, y, np.multiply, tf.mul)
+ # NOTE: int32 division is ill-defined.
+ self._compareBoth(x, y, np.divide, tf.div)
+ self._compareBoth(x, y, np.mod, tf.mod)
+ self._compareBoth(x, y, np.add, _ADD)
+ self._compareBoth(x, y, np.subtract, _SUB)
+ self._compareBoth(x, y, np.multiply, _MUL)
+ # NOTE: int32 division is ill-defined.
+ self._compareBoth(x, y, np.divide, _DIV)
+ self._compareBoth(x, y, np.mod, _MOD)
+
+ def testInt64Basic(self):
+ x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
+ y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
+ self._compareBoth(x, y, np.subtract, tf.sub)
+ self._compareBoth(x, y, np.multiply, tf.mul)
+ # NOTE: int64 division is ill-defined.
+ self._compareBoth(x, y, np.divide, tf.div)
+ self._compareBoth(x, y, np.mod, tf.mod)
+ self._compareBoth(x, y, np.subtract, _SUB)
+ self._compareBoth(x, y, np.multiply, _MUL)
+ # NOTE: int64 division is ill-defined.
+ self._compareBoth(x, y, np.divide, _DIV)
+ self._compareBoth(x, y, np.mod, _MOD)
+
+ def testComplex64Basic(self):
+ x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
+ np.complex64)
+ y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
+ np.complex64)
+ self._compareCpu(x, y, np.add, tf.add)
+ self._compareCpu(x, y, np.subtract, tf.sub)
+ self._compareCpu(x, y, np.multiply, tf.mul)
+ self._compareCpu(x, y + 0.1, np.divide, tf.div)
+ self._compareCpu(x, y, np.add, _ADD)
+ self._compareCpu(x, y, np.subtract, _SUB)
+ self._compareCpu(x, y, np.multiply, _MUL)
+ self._compareCpu(x, y + 0.1, np.divide, _DIV)
+
+ def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
+ x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
+ y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
+ self._compareCpu(x, y, np_func, tf_func)
+ if x.dtype == np.float32 or x.dtype == np.float64:
+ self._compareGradientX(x, y, np_func, tf_func)
+ self._compareGradientY(x, y, np_func, tf_func)
+ self._compareGpu(x, y, np_func, tf_func)
+
+ # TODO(josh11b,vrv): Refactor this to use parameterized tests.
+ def _testBCastByFunc(self, funcs, xs, ys):
+ dtypes = [
+ np.float32,
+ np.float64,
+ np.int32,
+ np.int64,
+ np.complex64
+ ]
+ for dtype in dtypes:
+ for (np_func, tf_func) in funcs:
+ self._compareBCast(xs, ys, dtype, np_func, tf_func)
+ self._compareBCast(ys, xs, dtype, np_func, tf_func)
+
+ def _testBCastA(self, xs, ys):
+ funcs = [
+ (np.add, tf.add),
+ (np.add, _ADD),
+ ]
+ self._testBCastByFunc(funcs, xs, ys)
+
+ def _testBCastB(self, xs, ys):
+ funcs = [
+ (np.subtract, tf.sub),
+ (np.subtract, _SUB),
+ (np.power, tf.pow),
+ ]
+ self._testBCastByFunc(funcs, xs, ys)
+
+ def _testBCastC(self, xs, ys):
+ funcs = [
+ (np.multiply, tf.mul),
+ (np.multiply, _MUL),
+ ]
+ self._testBCastByFunc(funcs, xs, ys)
+
+ def _testBCastD(self, xs, ys):
+ funcs = [
+ (np.divide, tf.div),
+ (np.divide, _DIV)
+ ]
+ self._testBCastByFunc(funcs, xs, ys)
+
+ def testBCast_0A(self):
+ self._testBCastA([1, 3, 2], [1])
+
+ def testBCast_0B(self):
+ self._testBCastB([1, 3, 2], [1])
+
+ def testBCast_0C(self):
+ self._testBCastC([1, 3, 2], [1])
+
+ def testBCast_0D(self):
+ self._testBCastD([1, 3, 2], [1])
+
+ def testBCast_1A(self):
+ self._testBCastA([1, 3, 2], [2])
+
+ def testBCast_1B(self):
+ self._testBCastB([1, 3, 2], [2])
+
+ def testBCast_1C(self):
+ self._testBCastC([1, 3, 2], [2])
+
+ def testBCast_1D(self):
+ self._testBCastD([1, 3, 2], [2])
+
+ def testBCast_2A(self):
+ self._testBCastA([1, 3, 2], [3, 2])
+
+ def testBCast_2B(self):
+ self._testBCastB([1, 3, 2], [3, 2])
+
+ def testBCast_2C(self):
+ self._testBCastC([1, 3, 2], [3, 2])
+
+ def testBCast_2D(self):
+ self._testBCastD([1, 3, 2], [3, 2])
+
+ def testBCast_3A(self):
+ self._testBCastA([1, 3, 2], [3, 1])
+
+ def testBCast_3B(self):
+ self._testBCastB([1, 3, 2], [3, 1])
+
+ def testBCast_3C(self):
+ self._testBCastC([1, 3, 2], [3, 1])
+
+ def testBCast_3D(self):
+ self._testBCastD([1, 3, 2], [3, 1])
+
+ def testBCast_4A(self):
+ self._testBCastA([1, 3, 2], [1, 3, 2])
+
+ def testBCast_4B(self):
+ self._testBCastB([1, 3, 2], [1, 3, 2])
+
+ def testBCast_4C(self):
+ self._testBCastC([1, 3, 2], [1, 3, 2])
+
+ def testBCast_4D(self):
+ self._testBCastD([1, 3, 2], [1, 3, 2])
+
+ def testBCast_5A(self):
+ self._testBCastA([1, 3, 2], [2, 3, 1])
+
+ def testBCast_5B(self):
+ self._testBCastB([1, 3, 2], [2, 3, 1])
+
+ def testBCast_5C(self):
+ self._testBCastC([1, 3, 2], [2, 3, 1])
+
+ def testBCast_5D(self):
+ self._testBCastD([1, 3, 2], [2, 3, 1])
+
+ def testBCast_6A(self):
+ self._testBCastA([1, 3, 2], [2, 1, 1])
+
+ def testBCast_6B(self):
+ self._testBCastB([1, 3, 2], [2, 1, 1])
+
+ def testBCast_6C(self):
+ self._testBCastC([1, 3, 2], [2, 1, 1])
+
+ def testBCast_6D(self):
+ self._testBCastD([1, 3, 2], [2, 1, 1])
+
+ def testBCast_7A(self):
+ self._testBCastA([1, 3, 2], [1, 3, 1])
+
+ def testBCast_7B(self):
+ self._testBCastB([1, 3, 2], [1, 3, 1])
+
+ def testBCast_7C(self):
+ self._testBCastC([1, 3, 2], [1, 3, 1])
+
+ def testBCast_7D(self):
+ self._testBCastD([1, 3, 2], [1, 3, 1])
+
+ def testBCast_8A(self):
+ self._testBCastA([2, 1, 5], [2, 3, 1])
+
+ def testBCast_8B(self):
+ self._testBCastB([2, 1, 5], [2, 3, 1])
+
+ def testBCast_8C(self):
+ self._testBCastC([2, 1, 5], [2, 3, 1])
+
+ def testBCast_8D(self):
+ self._testBCastD([2, 1, 5], [2, 3, 1])
+
+ def testBCast_9A(self):
+ self._testBCastA([2, 0, 5], [2, 0, 1])
+
+ def testBCast_9B(self):
+ self._testBCastB([2, 0, 5], [2, 0, 1])
+
+ def testBCast_9C(self):
+ self._testBCastC([2, 0, 5], [2, 0, 1])
+
+ def testBCast_9D(self):
+ self._testBCastD([2, 0, 5], [2, 0, 1])
+
+ def testBCast_10A(self):
+ self._testBCastA([2, 3, 0], [2, 3, 1])
+
+ def testBCast_10B(self):
+ self._testBCastB([2, 3, 0], [2, 3, 1])
+
+ def testBCast_10C(self):
+ self._testBCastC([2, 3, 0], [2, 3, 1])
+
+ def testBCast_10D(self):
+ self._testBCastD([2, 3, 0], [2, 3, 1])
+
+ def testBCast_11A(self):
+ self._testBCastA([1, 3, 2], [1, 3, 2])
+
+ def testBCast_11B(self):
+ self._testBCastB([1, 3, 2], [1, 3, 2])
+
+ def testBCast_11C(self):
+ self._testBCastC([1, 3, 2], [1, 3, 2])
+
+ def testBCast_11D(self):
+ self._testBCastD([1, 3, 2], [1, 3, 2])
+
+ def testBCast_12A(self):
+ self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
+
+ def testBCast_12B(self):
+ self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
+
+ def testBCast_12C(self):
+ self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
+
+ def testBCast_12D(self):
+ self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
+
+ def testBCast_13A(self):
+ self._testBCastA([1, 3, 2, 1, 1], [1])
+
+ def testBCast_13B(self):
+ self._testBCastB([1, 3, 2, 1, 1], [1])
+
+ def testBCast_13C(self):
+ self._testBCastC([1, 3, 2, 1, 1], [1])
+
+ def testBCast_13D(self):
+ self._testBCastD([1, 3, 2, 1, 1], [1])
+
+ def testBCast_14A(self):
+ self._testBCastA([2, 3, 1, 1, 5], [1])
+
+ def testBCast_14B(self):
+ self._testBCastB([2, 3, 1, 1, 5], [1])
+
+ def testBCast_14C(self):
+ self._testBCastC([2, 3, 1, 1, 5], [1])
+
+ def testBCast_14D(self):
+ self._testBCastD([2, 3, 1, 1, 5], [1])
+
+ def testBCast_15A(self):
+ self._testBCastA([10, 3, 1, 2], [3, 1, 2])
+
+ def testBCast_15B(self):
+ self._testBCastB([10, 3, 1, 2], [3, 1, 2])
+
+ def testBCast_15C(self):
+ self._testBCastC([10, 3, 1, 2], [3, 1, 2])
+
+ def testBCast_15D(self):
+ self._testBCastD([10, 3, 1, 2], [3, 1, 2])
+
+ def testMismatchedDimensions(self):
+ for func in [tf.add, tf.sub, tf.mul, tf.div,
+ _ADD, _SUB, _MUL, _DIV]:
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: "Incompatible shapes" in e.message):
+ func(tf.convert_to_tensor([10.0, 20.0, 30.0]),
+ tf.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
+
+
+class ComparisonOpTest(tf.test.TestCase):
+
+ def _compare(self, func, x, y, dtype):
+ with self.test_session(use_gpu=False):
+ out = func(tf.convert_to_tensor(np.array([x]).astype(dtype)),
+ tf.convert_to_tensor(np.array([y]).astype(dtype)))
+ ret = out.eval()
+ return ret[0]
+
+ def testScalarCompareScalar(self):
+ dtypes = [np.float32, np.float64, np.int32, np.int64]
+ data = [-1, 0, 1]
+ for t in dtypes:
+ for x in data:
+ for y in data:
+ self.assertEqual(self._compare(tf.less, x, y, t),
+ x < y)
+ self.assertEqual(self._compare(tf.less_equal, x, y, t),
+ x <= y)
+ self.assertEqual(self._compare(tf.greater, x, y, t),
+ x > y)
+ self.assertEqual(self._compare(tf.greater_equal, x, y, t),
+ x >= y)
+ self.assertEqual(self._compare(tf.equal, x, y, t),
+ x == y)
+ self.assertEqual(self._compare(tf.not_equal, x, y, t),
+ x != y)
+
+ def _compareCpu(self, x, y, np_func, tf_func):
+ np_ans = np_func(x, y)
+ with self.test_session(use_gpu=False):
+ out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
+ tf_cpu = out.eval()
+ self.assertAllEqual(np_ans, tf_cpu)
+
+ def _compareGpu(self, x, y, np_func, tf_func):
+ np_ans = np_func(x, y)
+ with self.test_session(use_gpu=True):
+ out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
+ tf_gpu = out.eval()
+ self.assertAllEqual(np_ans, tf_gpu)
+
+ def _compareBoth(self, x, y, np_func, tf_func):
+ self._compareCpu(x, y, np_func, tf_func)
+ if x.dtype == np.float32 or x.dtype == np.float64:
+ self._compareGpu(x, y, np_func, tf_func)
+
+ def testTensorCompareTensor(self):
+ x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
+ y = np.linspace(20, -10, 6).reshape(1, 3, 2)
+ for t in [np.float32, np.float64, np.int32, np.int64]:
+ xt = x.astype(t)
+ yt = y.astype(t)
+ self._compareBoth(xt, yt, np.less, tf.less)
+ self._compareBoth(xt, yt, np.less_equal, tf.less_equal)
+ self._compareBoth(xt, yt, np.greater, tf.greater)
+ self._compareBoth(xt, yt, np.greater_equal, tf.greater_equal)
+ self._compareBoth(xt, yt, np.equal, tf.equal)
+ self._compareBoth(xt, yt, np.not_equal, tf.not_equal)
+ # TODO(zhifengc): complex64 doesn't work on GPU yet.
+ self._compareCpu(x.astype(np.complex64), y.astype(np.complex64),
+ np.equal, tf.equal)
+ self._compareCpu(x.astype(np.complex64), y.astype(np.complex64),
+ np.not_equal, tf.not_equal)
+
+ def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
+ x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
+ y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
+ self._compareCpu(x, y, np_func, tf_func)
+ self._compareCpu(y, x, np_func, tf_func)
+ if x.dtype == np.float32 or x.dtype == np.float64:
+ self._compareGpu(x, y, np_func, tf_func)
+ self._compareGpu(y, x, np_func, tf_func)
+
+ def _testBCastByFunc(self, np_func, tf_func):
+ shapes = [
+ ([1, 3, 2], [1]),
+ ([1, 3, 2], [2]),
+ ([1, 3, 2], [3, 2]),
+ ([1, 3, 2], [3, 1]),
+ ([1, 3, 2], [1, 3, 2]),
+ ([1, 3, 2], [2, 3, 1]),
+ ([1, 3, 2], [2, 1, 1]),
+ ([1, 3, 2], [1, 3, 1]),
+ ([2, 1, 5], [2, 3, 1]),
+ ([2, 0, 5], [2, 0, 1]),
+ ([2, 3, 0], [2, 3, 1]),
+ ]
+ dtypes = [
+ np.float32,
+ np.float64,
+ np.int32,
+ np.int64,
+ ]
+ for (xs, ys) in shapes:
+ for dtype in dtypes:
+ self._compareBCast(xs, ys, dtype, np_func, tf_func)
+
+ def testBCastLess(self):
+ self._testBCastByFunc(np.less, tf.less)
+
+ def testBCastLessEqual(self):
+ self._testBCastByFunc(np.less_equal, tf.less_equal)
+
+ def testBCastGreater(self):
+ self._testBCastByFunc(np.greater, tf.greater)
+
+ def testBCastGreaterEqual(self):
+ self._testBCastByFunc(np.greater_equal, tf.greater_equal)
+
+ def testBCastEqual(self):
+ self._testBCastByFunc(np.equal, tf.equal)
+
+ def testBCastNotEqual(self):
+ self._testBCastByFunc(np.not_equal, tf.not_equal)
+
+ def testShapeMismatch(self):
+ dtypes = [np.float32, np.float64, np.int32, np.int64]
+ funcs = [tf.less, tf.less_equal, tf.greater,
+ tf.greater_equal, tf.equal, tf.not_equal]
+ x = np.arange(0, 10).reshape([2, 5])
+ y = np.arange(0, 10).reshape([5, 2])
+ for t in dtypes:
+ for f in funcs:
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: "Incompatible shapes" in e.message):
+ f(x.astype(t), y.astype(t))
+
+
+class LogicalOpTest(tf.test.TestCase):
+
+ def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
+ np_ans = np_func(x, y)
+ with self.test_session(use_gpu=use_gpu):
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ out = tf_func(inx, iny)
+ tf_val = out.eval()
+ self.assertEqual(out.dtype, tf.bool)
+ self.assertAllEqual(np_ans, tf_val)
+ self.assertShapeEqual(np_ans, out)
+
+ def _not(self, x, use_gpu=False):
+ np_ans = np.logical_not(x)
+ with self.test_session(use_gpu=use_gpu):
+ out = tf.logical_not(tf.convert_to_tensor(x))
+ tf_val = out.eval()
+ self.assertEqual(out.dtype, tf.bool)
+ self.assertAllEqual(np_ans, tf_val)
+ self.assertShapeEqual(np_ans, out)
+
+ def testScalar(self):
+ data = [np.array([True]), np.array([False])]
+ for use_gpu in [True, False]:
+ for x in data:
+ self._not(x, use_gpu)
+ for x in data:
+ for y in data:
+ self._compareBinary(
+ x, y, np.logical_and, tf.logical_and, use_gpu)
+ self._compareBinary(
+ x, y, np.logical_or, tf.logical_or, use_gpu)
+ self._compareBinary(
+ x, y, np.logical_xor, tf.logical_xor, use_gpu)
+
+ def testTensor(self):
+ x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
+ y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
+ for use_gpu in [True, False]:
+ self._not(x, use_gpu)
+ self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
+ self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
+ self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
+
+ def testBCast(self):
+ shapes = [
+ ([1, 3, 2], [1]),
+ ([1, 3, 2], [2]),
+ ([1, 3, 2], [3, 2]),
+ ([1, 3, 2], [3, 1]),
+ ([1, 3, 2], [1, 3, 2]),
+ ([1, 3, 2], [2, 3, 1]),
+ ([1, 3, 2], [2, 1, 1]),
+ ([1, 3, 2], [1, 3, 1]),
+ ([2, 1, 5], [2, 3, 1]),
+ ([2, 0, 5], [2, 0, 1]),
+ ([2, 3, 0], [2, 3, 1]),
+ ]
+ for (xs, ys) in shapes:
+ x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
+ y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
+ for use_gpu in [True, False]:
+ self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
+ self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
+ self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
+
+ def testShapeMismatch(self):
+ x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
+ y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
+ for f in [tf.logical_and, tf.logical_or, tf.logical_xor]:
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: "Incompatible shapes" in e.message):
+ f(x, y)
+
+
+class SelectOpTest(tf.test.TestCase):
+
+ def _compare(self, c, x, y, use_gpu):
+ np_ans = np.where(c, x, y)
+ with self.test_session(use_gpu=use_gpu):
+ out = tf.select(c, x, y)
+ tf_ans = out.eval()
+ self.assertAllEqual(np_ans, tf_ans)
+ self.assertShapeEqual(np_ans, out)
+
+ def _compareGradientX(self, c, x, y):
+ with self.test_session():
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ out = tf.select(c, inx, iny)
+ s = list(np.shape(c))
+ jacob_t, jacob_n = gc.ComputeGradient(inx, s, out, s, x_init_value=x)
+ if x.dtype == np.float32:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+ elif x.dtype == np.float64:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
+
+ def _compareGradientY(self, c, x, y):
+ with self.test_session():
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ out = tf.select(c, inx, iny)
+ s = list(np.shape(c))
+ jacob_t, jacob_n = gc.ComputeGradient(iny, s, out, s, x_init_value=y)
+ if x.dtype == np.float32:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+ elif x.dtype == np.float64:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
+
+ def testBasic(self):
+ c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
+ x = np.random.rand(1, 3, 2) * 100
+ y = np.random.rand(1, 3, 2) * 100
+ for t in [np.float32, np.float64, np.int32, np.int64, np.complex64]:
+ xt = x.astype(t)
+ yt = y.astype(t)
+ self._compare(c, xt, yt, use_gpu=False)
+ if t in [np.float32, np.float64]:
+ self._compare(c, xt, yt, use_gpu=True)
+
+ def testGradients(self):
+ c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
+ x = np.random.rand(1, 3, 2) * 100
+ y = np.random.rand(1, 3, 2) * 100
+ for t in [np.float32, np.float64]:
+ xt = x.astype(t)
+ yt = y.astype(t)
+ self._compareGradientX(c, xt, yt)
+ self._compareGradientY(c, xt, yt)
+
+ def testShapeMismatch(self):
+ c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
+ x = np.random.rand(1, 3, 2) * 100
+ y = np.random.rand(2, 5, 3) * 100
+ for t in [np.float32, np.float64, np.int32, np.int64, np.complex64]:
+ xt = x.astype(t)
+ yt = y.astype(t)
+ with self.assertRaises(ValueError):
+ tf.select(c, xt, yt)
+
+
+class MinMaxOpTest(tf.test.TestCase):
+
+ def _compare(self, x, y, use_gpu):
+ np_min, np_max = np.minimum(x, y), np.maximum(x, y)
+ with self.test_session(use_gpu=use_gpu) as sess:
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ omin, omax = tf.minimum(inx, iny), tf.maximum(inx, iny)
+ tf_min, tf_max = sess.run([omin, omax])
+ self.assertAllEqual(np_min, tf_min)
+ self.assertAllEqual(np_max, tf_max)
+
+ def testBasic(self):
+ x = np.random.rand(1, 3, 2) * 100.
+ y = np.random.rand(1, 3, 2) * 100.
+ for t in [np.float32, np.float64, np.int32, np.int64]:
+ self._compare(x.astype(t), y.astype(t), use_gpu=False)
+ self._compare(x.astype(t), y.astype(t), use_gpu=True)
+
+ def testDifferentShapes(self):
+ x = np.random.rand(1, 3, 2) * 100.
+ y = np.random.rand(2) * 100. # should broadcast
+ for t in [np.float32, np.float64, np.int32, np.int64]:
+ self._compare(x.astype(t), y.astype(t), use_gpu=False)
+ self._compare(x.astype(t), y.astype(t), use_gpu=True)
+
+ def testScalar(self):
+ x = np.random.rand(1, 3, 2) * 100.
+ y = np.asscalar(np.random.rand(1) * 100.) # should broadcast
+ # dropped np.float64, int64 because TF automatically converts to 32 bit
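+    # (A bare Python scalar presumably becomes a 32-bit tensor here, so
+    # pairing it with a float64/int64 tensor would raise a dtype mismatch.)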
+ for t in [np.float32, np.int32]:
+ self._compare(x.astype(t), t(y), use_gpu=False)
+ self._compare(x.astype(t), t(y), use_gpu=True)
+
+ def _compareGradientX(self, func, x, y):
+ with self.test_session():
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ out = func(inx, iny)
+ s = list(np.shape(x))
+ jacob_t, jacob_n = gc.ComputeGradient(inx, s, out, s, x_init_value=x)
+ if x.dtype == np.float32:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+ elif x.dtype == np.float64:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
+
+ def _compareGradientY(self, func, x, y):
+ with self.test_session():
+ inx = tf.convert_to_tensor(x)
+ iny = tf.convert_to_tensor(y)
+ out = func(inx, iny)
+ s = list(np.shape(x))
+ jacob_t, jacob_n = gc.ComputeGradient(iny, s, out, s, x_init_value=y)
+ if x.dtype == np.float32:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+ elif x.dtype == np.float64:
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
+
+ def testGradients(self):
+ x = np.random.rand(1, 3, 2) * 100.
+ # ensure x != y
+ y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1
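+    # (At a tie x == y the min/max gradient is ill-defined, so the numeric
+    # and analytic Jacobians could legitimately disagree.)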
+ self._compareGradientX(tf.maximum, x, y)
+ self._compareGradientY(tf.maximum, x, y)
+ self._compareGradientX(tf.minimum, x, y)
+ self._compareGradientY(tf.minimum, x, y)
+
+
+class MathOpsOverloadTest(tf.test.TestCase):
+
+ def _computeTensorAndLiteral(self, x, y, dtype, func):
+ with self.test_session(use_gpu=False):
+ inx = tf.convert_to_tensor(x, dtype=dtype)
+ z = func(inx, y) # Should use __add__, __sub__, etc.
+ return z.eval()
+
+ def _computeLiteralAndTensor(self, x, y, dtype, func):
+ with self.test_session(use_gpu=False):
+ iny = tf.convert_to_tensor(y, dtype=dtype)
+ z = func(x, iny) # Should use __radd__, __rsub__, etc.
+ return z.eval()
+
+ def _compareBinary(self, x, y, dtype, np_func, tf_func):
+ np_ans = np_func(x, y)
+ self.assertAllClose(np_ans, self._computeTensorAndLiteral(
+ x, y, dtype, tf_func))
+ self.assertAllClose(np_ans, self._computeLiteralAndTensor(
+ x, y, dtype, tf_func))
+
+ def _compareUnary(self, x, dtype, np_func, tf_func):
+ np_ans = np_func(x)
+ with self.test_session(use_gpu=False):
+      self.assertAllClose(
+          np_ans, tf_func(tf.convert_to_tensor(x, dtype=dtype)).eval())
+
+ def testOverload(self):
+ dtypes = [
+ tf.float32,
+ tf.float64,
+ tf.int32,
+ tf.int64,
+ tf.complex64,
+ ]
+ funcs = [
+ (np.add, _ADD),
+ (np.subtract, _SUB),
+ (np.multiply, _MUL),
+ (np.divide, _DIV)
+ ]
+ for dtype in dtypes:
+ for np_func, tf_func in funcs:
+ self._compareBinary(10, 5, dtype, np_func, tf_func)
+ # Mod only works for int32 and int64.
+ for dtype in [tf.int32, tf.int64]:
+ self._compareBinary(10, 3, dtype, np.mod, _MOD)
+
+ def testOverloadComparisons(self):
+ dtypes = [
+ tf.float32,
+ tf.float64,
+ tf.int32,
+ tf.int64,
+ ]
+ funcs = [
+ (np.less, _LT),
+ (np.less_equal, _LE),
+ (np.greater, _GT),
+ (np.greater_equal, _GE),
+ ]
+ for dtype in dtypes:
+ for np_func, tf_func in funcs:
+ self._compareBinary(10, 5, dtype, np_func, tf_func)
+ logical_funcs = [
+ (np.logical_and, _AND),
+ (np.logical_or, _OR),
+ (np.logical_xor, _XOR),
+ ]
+ for np_func, tf_func in logical_funcs:
+ self._compareBinary(True, False, tf.bool, np_func, tf_func)
+ self._compareBinary(True, True, tf.bool, np_func, tf_func)
+ self._compareBinary(False, False, tf.bool, np_func, tf_func)
+ self._compareBinary(False, True, tf.bool, np_func, tf_func)
+ self._compareBinary([True, True, False, False],
+ [True, False, True, False],
+ tf.bool, np_func, tf_func)
+ self._compareUnary(True, tf.bool, np.logical_not, _INV)
+ self._compareUnary(False, tf.bool, np.logical_not, _INV)
+ self._compareUnary([True, False], tf.bool, np.logical_not, _INV)
+
+
+class IsFiniteInfNanTest(tf.test.TestCase):
+
+ def _compare(self, x, use_gpu):
+ np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
+ with self.test_session(use_gpu=use_gpu) as sess:
+ inx = tf.convert_to_tensor(x)
+      ofinite = tf.is_finite(inx)
+      oinf = tf.is_inf(inx)
+      onan = tf.is_nan(inx)
+ tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])
+ self.assertAllEqual(np_inf, tf_inf)
+ self.assertAllEqual(np_nan, tf_nan)
+ self.assertAllEqual(np_finite, tf_finite)
+ self.assertShapeEqual(np_inf, oinf)
+ self.assertShapeEqual(np_nan, onan)
+ self.assertShapeEqual(np_finite, ofinite)
+
+ def _testDtype(self, dtype):
+ fi = np.finfo(dtype)
+ data = np.array([0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max,
+ -np.inf, np.inf, np.nan]).astype(dtype)
+ self._compare(data, use_gpu=False)
+ self._compare(data, use_gpu=True)
+
+ def testFloat(self):
+ self._testDtype(np.float32)
+
+ def testDouble(self):
+ self._testDtype(np.float64)
+
+
+class RoundingTest(tf.test.TestCase):
+
+ def _compare(self, x, use_gpu):
+ np_floor, np_ceil = np.floor(x), np.ceil(x)
+ with self.test_session(use_gpu=use_gpu) as sess:
+ inx = tf.convert_to_tensor(x)
+ ofloor, oceil = tf.floor(inx), tf.ceil(inx)
+ tf_floor, tf_ceil = sess.run([ofloor, oceil])
+ self.assertAllEqual(np_floor, tf_floor)
+ self.assertAllEqual(np_ceil, tf_ceil)
+ self.assertShapeEqual(np_floor, ofloor)
+ self.assertShapeEqual(np_ceil, oceil)
+
+ def _testDtype(self, dtype):
+ data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
+    self._compare(data, use_gpu=False)
+    self._compare(data, use_gpu=True)
+
+ def testTypes(self):
+ for dtype in [np.float32, np.float64]:
+ self._testDtype(dtype)
+
+
+class ComplexMakeRealImagTest(tf.test.TestCase):
+
+ def _compareMake(self, real, imag, use_gpu):
+ np_ans = real + (1j) * imag
+ with self.test_session(use_gpu=use_gpu):
+ real = tf.convert_to_tensor(real)
+ imag = tf.convert_to_tensor(imag)
+ tf_ans = tf.complex(real, imag)
+ out = tf_ans.eval()
+ self.assertAllEqual(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def testMake(self):
+ real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
+ imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
+ for use_gpu in [False, True]:
+ self._compareMake(real, imag, use_gpu)
+ self._compareMake(real, 12.0, use_gpu)
+ self._compareMake(23.0, imag, use_gpu)
+
+ def _compareRealImag(self, cplx, use_gpu):
+ np_real, np_imag = np.real(cplx), np.imag(cplx)
+ with self.test_session(use_gpu=use_gpu) as sess:
+ inx = tf.convert_to_tensor(cplx)
+ tf_real = tf.real(inx)
+ tf_imag = tf.imag(inx)
+ tf_real_val, tf_imag_val = sess.run([tf_real, tf_imag])
+ self.assertAllEqual(np_real, tf_real_val)
+ self.assertAllEqual(np_imag, tf_imag_val)
+ self.assertShapeEqual(np_real, tf_real)
+ self.assertShapeEqual(np_imag, tf_imag)
+
+ def testRealImag(self):
+ real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
+ imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
+ cplx = real + (1j) * imag
+ self._compareRealImag(cplx, use_gpu=False)
+ self._compareRealImag(cplx, use_gpu=True)
+
+ def _compareConj(self, cplx, use_gpu):
+ np_ans = np.conj(cplx)
+ with self.test_session(use_gpu=use_gpu):
+ inx = tf.convert_to_tensor(cplx)
+ tf_conj = tf.conj(inx)
+ tf_ans = tf_conj.eval()
+ self.assertAllEqual(np_ans, tf_ans)
+ self.assertShapeEqual(np_ans, tf_conj)
+
+ def testConj(self):
+ real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
+ imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
+ cplx = real + (1j) * imag
+ self._compareConj(cplx, use_gpu=False)
+ self._compareConj(cplx, use_gpu=True)
+
+ def _compareGradient(self, x):
+    # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
+    # complex numbers, then extract the real and imag parts and compute
+    # the squared sum. This is obviously the same as sum(real * real) +
+    # sum(imag * imag). We just want to make sure the gradient function
+    # is checked.
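+    # As a NumPy sketch (illustrative only), the loss computed below is
+    #   loss = np.sum(x[:, 0] ** 2) + np.sum(x[:, 1] ** 2)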
+ with self.test_session():
+ inx = tf.convert_to_tensor(x)
+ real, imag = tf.split(1, 2, inx)
+ real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
+ cplx = tf.complex(real, imag)
+ cplx = tf.conj(cplx)
+ loss = tf.reduce_sum(
+ tf.square(tf.real(cplx))) + tf.reduce_sum(
+ tf.square(tf.imag(cplx)))
+ epsilon = 1e-3
+ jacob_t, jacob_n = gc.ComputeGradient(inx, list(x.shape), loss, [1],
+ x_init_value=x, delta=epsilon)
+ self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
+
+ def testGradient(self):
+ data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
+ self._compareGradient(data)
+
+ def _compareMulGradient(self, data):
+ # data is a float matrix of shape [n, 4]. data[:, 0], data[:, 1],
+ # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
+ # x, real parts of y and imaginary parts of y.
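+    # In NumPy terms (illustrative only): with xc = data[:, 0] + 1j * data[:, 1]
+    # and yc = data[:, 2] + 1j * data[:, 3], the loss below equals
+    #   np.sum(np.real(xc * yc) + np.imag(xc * yc))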
+ with self.test_session():
+ inp = tf.convert_to_tensor(data)
+ xr, xi, yr, yi = tf.split(1, 4, inp)
+
+ def vec(x): # Reshape to a vector
+ return tf.reshape(x, [-1])
+ xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)
+
+ def cplx(r, i): # Combine to a complex vector
+ return tf.complex(r, i)
+ x, y = cplx(xr, xi), cplx(yr, yi)
+ # z is x times y in complex plane.
+ z = x * y
+ # Defines the loss function as the sum of all coefficients of z.
+ loss = tf.reduce_sum(tf.real(z) + tf.imag(z))
+ epsilon = 0.005
+ jacob_t, jacob_n = gc.ComputeGradient(inp, list(data.shape), loss, [1],
+ x_init_value=data, delta=epsilon)
+ self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
+
+ def testMulGradient(self):
+ data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
+ self._compareMulGradient(data)
+
+
+class AccumulateTest(tf.test.TestCase):
+
+ def testSimple(self):
+ with self.test_session():
+ random_arrays = [np.random.rand(16, 16, 16, 16).astype(np.float32)
+ for _ in range(20)]
+ random_tensors = [tf.convert_to_tensor(x, dtype=tf.float32)
+ for x in random_arrays]
+ tf_val = tf.accumulate_n(random_tensors)
+ np_val = random_arrays[0]
+ for random_array in random_arrays[1:]:
+ np_val += random_array
+ self.assertAllClose(np_val, tf_val.eval())
+
+ def testZeroArgs(self):
+ with self.test_session():
+ with self.assertRaises(ValueError):
+ tf_val = tf.accumulate_n([])
+ tf_val.eval()
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/decode_csv_op_test.py b/tensorflow/python/kernel_tests/decode_csv_op_test.py
new file mode 100644
index 0000000000..ae0917f8c4
--- /dev/null
+++ b/tensorflow/python/kernel_tests/decode_csv_op_test.py
@@ -0,0 +1,148 @@
+"""Tests for DecodeCSV op from parsing_ops."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class DecodeCSVOpTest(tf.test.TestCase):
+
+ def _test(self, args, expected_out=None, expected_err_re=None):
+ with self.test_session() as sess:
+ decode = tf.decode_csv(**args)
+
+ if expected_err_re is None:
+ out = sess.run(decode)
+
+ for i, field in enumerate(out):
+ if field.dtype == np.float32:
+ self.assertAllClose(field, expected_out[i])
+ else:
+ self.assertAllEqual(field, expected_out[i])
+
+ else:
+ with self.assertRaisesOpError(expected_err_re):
+ sess.run(decode)
+
+ def testSimple(self):
+ args = {"records": ["1", "2", '"3"'], "record_defaults": [[1]],}
+
+ expected_out = [[1, 2, 3]]
+
+ self._test(args, expected_out)
+
+ def testScalar(self):
+ args = {"records": '1,""', "record_defaults": [[3], [4]]}
+
+ expected_out = [1, 4]
+
+ self._test(args, expected_out)
+
+ def test2D(self):
+ args = {"records": [["1", "2"], ['""', "4"]], "record_defaults": [[5]]}
+ expected_out = [[[1, 2], [5, 4]]]
+
+ self._test(args, expected_out)
+
+ def testInt64(self):
+ args = {
+ "records": ["1", "2", '"2147483648"'],
+ "record_defaults": [np.array([],
+ dtype=np.int64)],
+ }
+
+ expected_out = [[1, 2, 2147483648]]
+
+ self._test(args, expected_out)
+
+ def testComplexString(self):
+ args = {
+ "records": ['"1.0"', '"ab , c"', '"a\nbc"', '"ab""c"', " abc "],
+ "record_defaults": [["1"]]
+ }
+
+ expected_out = [["1.0", "ab , c", "a\nbc", 'ab"c', " abc "]]
+
+ self._test(args, expected_out)
+
+ def testMultiRecords(self):
+ args = {
+ "records": ["1.0,4,aa", "0.2,5,bb", "3,6,cc"],
+ "record_defaults": [[1.0], [1], ["aa"]]
+ }
+
+ expected_out = [[1.0, 0.2, 3], [4, 5, 6], ["aa", "bb", "cc"]]
+
+ self._test(args, expected_out)
+
+ def testWithDefaults(self):
+ args = {
+ "records": [",1,", "0.2,3,bcd", "3.0,,"],
+ "record_defaults": [[1.0], [0], ["a"]]
+ }
+
+ expected_out = [[1.0, 0.2, 3.0], [1, 3, 0], ["a", "bcd", "a"]]
+
+ self._test(args, expected_out)
+
+ def testWithTabDelim(self):
+ args = {
+ "records": ["1\t1", "0.2\t3", "3.0\t"],
+ "record_defaults": [[1.0], [0]],
+ "field_delim": "\t"
+ }
+
+ expected_out = [[1.0, 0.2, 3.0], [1, 3, 0]]
+
+ self._test(args, expected_out)
+
+ def testWithoutDefaultsError(self):
+ args = {
+ "records": [",1", "0.2,3", "3.0,"],
+ "record_defaults": [[1.0], np.array([],
+ dtype=np.int32)]
+ }
+
+ self._test(args,
+ expected_err_re="Field 1 is required but missing in record 2!")
+
+ def testWrongFieldIntError(self):
+ args = {
+ "records": [",1", "0.2,234a", "3.0,2"],
+ "record_defaults": [[1.0], np.array([],
+ dtype=np.int32)]
+ }
+
+ self._test(args,
+ expected_err_re="Field 1 in record 1 is not a valid int32: 234a")
+
+ def testOutOfRangeError(self):
+ args = {
+ "records": ["1", "9999999999999999999999999", "3"],
+ "record_defaults": [[1]]
+ }
+
+ self._test(args,
+ expected_err_re="Field 0 in record 1 is not a valid int32: ")
+
+ def testWrongFieldFloatError(self):
+ args = {
+ "records": [",1", "0.2,2", "3.0adf,3"],
+ "record_defaults": [[1.0], np.array([],
+ dtype=np.int32)]
+ }
+
+ self._test(args,
+ expected_err_re="Field 0 in record 2 is not a valid float: ")
+
+ def testWrongFieldStringError(self):
+ args = {"records": ['"1,a,"', "0.22", 'a"bc'], "record_defaults": [["a"]]}
+
+ self._test(
+ args,
+ expected_err_re="Unquoted fields cannot have quotes/CRLFs inside")
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/decode_raw_op_test.py b/tensorflow/python/kernel_tests/decode_raw_op_test.py
new file mode 100644
index 0000000000..abd50a7527
--- /dev/null
+++ b/tensorflow/python/kernel_tests/decode_raw_op_test.py
@@ -0,0 +1,44 @@
+"""Tests for DecodeRaw op from parsing_ops."""
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+
+class DecodeRawOpTest(tf.test.TestCase):
+
+ def testToUint8(self):
+ with self.test_session():
+ in_bytes = tf.placeholder(tf.string, shape=[2])
+ decode = tf.decode_raw(in_bytes, out_type=tf.uint8)
+ self.assertEqual([2, None], decode.get_shape().as_list())
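+      # The second dimension is unknown statically: it is the common byte
+      # length of the input strings, known only once they are fed.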
+
+ result = decode.eval(feed_dict={in_bytes: ["A", "a"]})
+ self.assertAllEqual([[ord("A")], [ord("a")]], result)
+
+ result = decode.eval(feed_dict={in_bytes: ["wer", "XYZ"]})
+ self.assertAllEqual([[ord("w"), ord("e"), ord("r")],
+ [ord("X"), ord("Y"), ord("Z")]], result)
+
+ with self.assertRaisesOpError(
+ "DecodeRaw requires input strings to all be the same size, but "
+ "element 1 has size 5 != 6"):
+ decode.eval(feed_dict={in_bytes: ["short", "longer"]})
+
+ def testToInt16(self):
+ with self.test_session():
+ in_bytes = tf.placeholder(tf.string, shape=[None])
+ decode = tf.decode_raw(in_bytes, out_type=tf.int16)
+ self.assertEqual([None, None], decode.get_shape().as_list())
+
+ result = decode.eval(feed_dict={in_bytes: ["AaBC"]})
+ self.assertAllEqual([[ord("A") + ord("a") * 256,
+ ord("B") + ord("C") * 256]], result)
+
+ with self.assertRaisesOpError(
+ "Input to DecodeRaw has length 3 that is not a multiple of 2, the "
+ "size of int16"):
+ decode.eval(feed_dict={in_bytes: ["123", "456"]})
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py b/tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py
new file mode 100644
index 0000000000..ad0724931e
--- /dev/null
+++ b/tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py
@@ -0,0 +1,60 @@
+"""Tests for state updating ops that may have benign race conditions."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class AssignOpTest(tf.test.TestCase):
+
+  # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
+ # contain benign and deliberate data races when multiple threads update
+ # the same parameters without a lock.
+ def testParallelUpdateWithoutLocking(self):
+ with self.test_session() as sess:
+ ones_t = tf.fill([1024, 1024], 1.0)
+ p = tf.Variable(tf.zeros([1024, 1024]))
+ adds = [tf.assign_add(p, ones_t, use_locking=False)
+ for _ in range(20)]
+ tf.initialize_all_variables().run()
+
+ def run_add(add_op):
+ sess.run(add_op)
+ threads = [self.checkedThread(target=run_add, args=(add_op,))
+ for add_op in adds]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ vals = p.eval()
+ ones = np.ones((1024, 1024)).astype(np.float32)
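+      # Without locking some increments may be lost, so each element ends
+      # up anywhere between 1 and 20 rather than exactly 20.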
+ self.assertTrue((vals >= ones).all())
+ self.assertTrue((vals <= ones * 20).all())
+
+ def testParallelAssignWithoutLocking(self):
+ with self.test_session() as sess:
+ ones_t = tf.fill([1024, 1024], float(1))
+ p = tf.Variable(tf.zeros([1024, 1024]))
+      assigns = [tf.assign(p, tf.mul(ones_t, float(i)),
+                           use_locking=False)
+                 for i in range(1, 21)]
+ tf.initialize_all_variables().run()
+
+ def run_assign(assign_op):
+ sess.run(assign_op)
+ threads = [self.checkedThread(target=run_assign, args=(assign_op,))
+ for assign_op in assigns]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ vals = p.eval()
+
+ # Assert every element is taken from one of the assignments.
+ self.assertTrue((vals > 0).all())
+ self.assertTrue((vals <= 20).all())
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/dense_update_ops_test.py b/tensorflow/python/kernel_tests/dense_update_ops_test.py
new file mode 100644
index 0000000000..2e1ea468c3
--- /dev/null
+++ b/tensorflow/python/kernel_tests/dense_update_ops_test.py
@@ -0,0 +1,151 @@
+"""Tests for tensorflow.ops.tf.Assign*."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class AssignOpTest(tf.test.TestCase):
+
+ def _initAssignFetch(self, x, y, use_gpu=False):
+ """Initialize a param to init and update it with y."""
+ super(AssignOpTest, self).setUp()
+ with self.test_session(use_gpu=use_gpu):
+ p = tf.Variable(x)
+ assign = tf.assign(p, y)
+ p.initializer.run()
+ new_value = assign.eval()
+ return p.eval(), new_value
+
+ def _initAssignAddFetch(self, x, y, use_gpu=False):
+ """Initialize a param to init, and compute param += y."""
+ with self.test_session(use_gpu=use_gpu):
+ p = tf.Variable(x)
+ add = tf.assign_add(p, y)
+ p.initializer.run()
+ new_value = add.eval()
+ return p.eval(), new_value
+
+ def _initAssignSubFetch(self, x, y, use_gpu=False):
+ """Initialize a param to init, and compute param -= y."""
+ with self.test_session(use_gpu=use_gpu):
+ p = tf.Variable(x)
+ sub = tf.assign_sub(p, y)
+ p.initializer.run()
+ new_value = sub.eval()
+ return p.eval(), new_value
+
+ def _testTypes(self, vals):
+ for dtype in [np.float32, np.float64, np.int32, np.int64]:
+ x = np.zeros(vals.shape).astype(dtype)
+ y = vals.astype(dtype)
+ var_value, op_value = self._initAssignFetch(x, y, use_gpu=False)
+ self.assertAllEqual(y, var_value)
+ self.assertAllEqual(y, op_value)
+ var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=False)
+ self.assertAllEqual(x + y, var_value)
+ self.assertAllEqual(x + y, op_value)
+ var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
+ self.assertAllEqual(x - y, var_value)
+ self.assertAllEqual(x - y, op_value)
+ if tf.test.IsBuiltWithCuda() and dtype in [np.float32, np.float64]:
+ var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
+ self.assertAllEqual(y, var_value)
+ self.assertAllEqual(y, op_value)
+ var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=True)
+ self.assertAllEqual(x + y, var_value)
+ self.assertAllEqual(x + y, op_value)
+        var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=True)
+ self.assertAllEqual(x - y, var_value)
+ self.assertAllEqual(x - y, op_value)
+
+ def testBasic(self):
+ self._testTypes(np.arange(0, 20).reshape([4, 5]))
+
+ def testAssignNonStrictShapeChecking(self):
+ with self.test_session():
+ data = tf.fill([1024, 1024], 0)
+ p = tf.Variable([1])
+ a = tf.assign(p, data, validate_shape=False)
+ a.op.run()
+ self.assertAllEqual(p.eval(), data.eval())
+
+ # Assign to yet another shape
+ data2 = tf.fill([10, 10], 1)
+ a2 = tf.assign(p, data2, validate_shape=False)
+ a2.op.run()
+ self.assertAllEqual(p.eval(), data2.eval())
+
+ def testInitRequiredAssignAdd(self):
+ with self.test_session():
+      p = tf.Variable(tf.fill([1024, 1024], 1))
+ a = tf.assign_add(p, tf.fill([1024, 1024], 0))
+ with self.assertRaisesOpError("use uninitialized"):
+ a.op.run()
+
+ def testInitRequiredAssignSub(self):
+ with self.test_session():
+      p = tf.Variable(tf.fill([1024, 1024], 1))
+ a = tf.assign_sub(p, tf.fill([1024, 1024], 0))
+ with self.assertRaisesOpError("use uninitialized"):
+ a.op.run()
+
+ # NOTE(mrry): See also
+ # dense_update_ops_no_tsan_test.AssignOpTest, which contains a benign
+ # data race and must run without TSAN.
+ def testParallelUpdateWithLocking(self):
+ with self.test_session() as sess:
+ zeros_t = tf.fill([1024, 1024], 0.0)
+ ones_t = tf.fill([1024, 1024], 1.0)
+ p = tf.Variable(zeros_t)
+ adds = [tf.assign_add(p, ones_t, use_locking=True)
+ for _ in range(20)]
+ p.initializer.run()
+
+ def run_add(add_op):
+ sess.run(add_op)
+ threads = [
+ self.checkedThread(target=run_add, args=(add_op,)) for add_op in adds]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ vals = p.eval()
+ ones = np.ones((1024, 1024)).astype(np.float32)
+ self.assertAllEqual(vals, ones * 20)
+
+ # NOTE(mrry): See also
+ # dense_update_ops_no_tsan_test.[...].testParallelAssignWithoutLocking,
+ # which contains a benign data race and must run without TSAN.
+ def testParallelAssignWithLocking(self):
+ with self.test_session() as sess:
+ zeros_t = tf.fill([1024, 1024], 0.0)
+ ones_t = tf.fill([1024, 1024], 1.0)
+ p = tf.Variable(zeros_t)
+ assigns = [tf.assign(p, tf.mul(ones_t, float(i)),
+ use_locking=True)
+ for i in range(1, 21)]
+ p.initializer.run()
+
+ def run_assign(assign_op):
+ sess.run(assign_op)
+ threads = [self.checkedThread(target=run_assign, args=(assign_op,))
+ for assign_op in assigns]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ vals = p.eval()
+
+ # Assert every element is the same, and taken from one of the assignments.
+ self.assertTrue(vals[0, 0] > 0)
+ self.assertTrue(vals[0, 0] <= 20)
+ self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/determinant_op_test.py b/tensorflow/python/kernel_tests/determinant_op_test.py
new file mode 100644
index 0000000000..d4e2b88339
--- /dev/null
+++ b/tensorflow/python/kernel_tests/determinant_op_test.py
@@ -0,0 +1,72 @@
+"""Tests for tensorflow.ops.tf.MatrixDeterminant."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class DeterminantOpTest(tf.test.TestCase):
+
+ def _compareDeterminant(self, matrix_x):
+ with self.test_session():
+ if matrix_x.ndim == 2:
+ tf_ans = tf.matrix_determinant(matrix_x)
+ else:
+ tf_ans = tf.batch_matrix_determinant(matrix_x)
+ out = tf_ans.eval()
+ shape = matrix_x.shape
+ if shape[-1] == 0 and shape[-2] == 0:
+ np_ans = np.ones(shape[:-2]).astype(matrix_x.dtype)
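+        # (The determinant of a 0x0 matrix is 1 by convention: the empty
+        # product.)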
+ else:
+ np_ans = np.array(np.linalg.det(matrix_x)).astype(matrix_x.dtype)
+ self.assertAllClose(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def testBasic(self):
+ # 2x2 matrices
+ self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float32))
+ self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float32))
+ # 5x5 matrices (Eigen forces LU decomposition)
+ self._compareDeterminant(np.array(
+ [[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [2., 5., 8., 3., 8.],
+ [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float32))
+ # A multidimensional batch of 2x2 matrices
+ self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float32))
+
+ def testBasicDouble(self):
+ # 2x2 matrices
+ self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float64))
+ self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float64))
+ # 5x5 matrices (Eigen forces LU decomposition)
+ self._compareDeterminant(np.array(
+ [[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [2., 5., 8., 3., 8.],
+ [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float64))
+ # A multidimensional batch of 2x2 matrices
+ self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float64))
+
+ def testOverflow(self):
+ max_double = np.finfo("d").max
+ huge_matrix = np.array([[max_double, 0.0], [0.0, max_double]])
+ with self.assertRaisesOpError("not finite"):
+ self._compareDeterminant(huge_matrix)
+
+ def testNonSquareMatrix(self):
+    # Computing the determinant of a non-square matrix should raise an
+    # error.
+ with self.assertRaises(ValueError):
+ tf.matrix_determinant(
+ np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))
+
+ def testWrongDimensions(self):
+ # The input to the determinant should be a 2-dimensional tensor.
+ tensor1 = tf.constant([1., 2.])
+ with self.assertRaises(ValueError):
+ tf.matrix_determinant(tensor1)
+
+ def testEmpty(self):
+ self._compareDeterminant(np.empty([0, 2, 2]))
+ self._compareDeterminant(np.empty([2, 0, 0]))
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/diag_op_test.py b/tensorflow/python/kernel_tests/diag_op_test.py
new file mode 100644
index 0000000000..7b53ee26fa
--- /dev/null
+++ b/tensorflow/python/kernel_tests/diag_op_test.py
@@ -0,0 +1,80 @@
+import tensorflow.python.platform
+
+import numpy
+import tensorflow as tf
+
+
+class GenerateIdentityTensorTest(tf.test.TestCase):
+
+ def _testDiagOp(self, diag, dtype, expected_ans, use_gpu=False,
+ expected_err_re=None):
+ with self.test_session(use_gpu=use_gpu):
+ tf_ans = tf.diag(tf.convert_to_tensor(diag.astype(dtype)))
+ out = tf_ans.eval()
+ self.assertAllClose(out, expected_ans)
+ self.assertShapeEqual(expected_ans, tf_ans)
+
+ def testEmptyTensor(self):
+ x = numpy.array([])
+ expected_ans = numpy.empty([0, 0])
+ self._testDiagOp(x, numpy.int32, expected_ans)
+
+ def testRankOneIntTensor(self):
+ x = numpy.array([1, 2, 3])
+ expected_ans = numpy.array(
+ [[1, 0, 0],
+ [0, 2, 0],
+ [0, 0, 3]])
+ self._testDiagOp(x, numpy.int32, expected_ans)
+ self._testDiagOp(x, numpy.int64, expected_ans)
+
+ def testRankOneFloatTensor(self):
+ x = numpy.array([1.1, 2.2, 3.3])
+ expected_ans = numpy.array(
+ [[1.1, 0, 0],
+ [0, 2.2, 0],
+ [0, 0, 3.3]])
+ self._testDiagOp(x, numpy.float32, expected_ans)
+ self._testDiagOp(x, numpy.float64, expected_ans)
+
+ def testRankTwoIntTensor(self):
+ x = numpy.array([[1, 2, 3], [4, 5, 6]])
+ expected_ans = numpy.array(
+ [[[[1, 0, 0], [0, 0, 0]],
+ [[0, 2, 0], [0, 0, 0]],
+ [[0, 0, 3], [0, 0, 0]]],
+ [[[0, 0, 0], [4, 0, 0]],
+ [[0, 0, 0], [0, 5, 0]],
+ [[0, 0, 0], [0, 0, 6]]]])
+ self._testDiagOp(x, numpy.int32, expected_ans)
+ self._testDiagOp(x, numpy.int64, expected_ans)
+
+ def testRankTwoFloatTensor(self):
+ x = numpy.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]])
+ expected_ans = numpy.array(
+ [[[[1.1, 0, 0], [0, 0, 0]],
+ [[0, 2.2, 0], [0, 0, 0]],
+ [[0, 0, 3.3], [0, 0, 0]]],
+ [[[0, 0, 0], [4.4, 0, 0]],
+ [[0, 0, 0], [0, 5.5, 0]],
+ [[0, 0, 0], [0, 0, 6.6]]]])
+ self._testDiagOp(x, numpy.float32, expected_ans)
+ self._testDiagOp(x, numpy.float64, expected_ans)
+
+ def testRankThreeFloatTensor(self):
+ x = numpy.array([[[1.1, 2.2], [3.3, 4.4]],
+ [[5.5, 6.6], [7.7, 8.8]]])
+ expected_ans = numpy.array(
+ [[[[[[1.1, 0], [0, 0]], [[0, 0], [0, 0]]],
+ [[[0, 2.2], [0, 0]], [[0, 0], [0, 0]]]],
+ [[[[0, 0], [3.3, 0]], [[0, 0], [0, 0]]],
+ [[[0, 0], [0, 4.4]], [[0, 0], [0, 0]]]]],
+ [[[[[0, 0], [0, 0]], [[5.5, 0], [0, 0]]],
+ [[[0, 0], [0, 0]], [[0, 6.6], [0, 0]]]],
+ [[[[0, 0], [0, 0]], [[0, 0], [7.7, 0]]],
+ [[[0, 0], [0, 0]], [[0, 0], [0, 8.8]]]]]])
+ self._testDiagOp(x, numpy.float32, expected_ans)
+ self._testDiagOp(x, numpy.float64, expected_ans)
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/dynamic_partition_op_test.py b/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
new file mode 100644
index 0000000000..a7a276893d
--- /dev/null
+++ b/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
@@ -0,0 +1,99 @@
+"""Tests for the DynamicPartition op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class DynamicPartitionTest(tf.test.TestCase):
+
+ def testSimpleOneDimensional(self):
+ with self.test_session() as sess:
+ data = tf.constant([0, 13, 2, 39, 4, 17])
+ indices = tf.constant([0, 0, 2, 3, 2, 1])
+ partitions = tf.dynamic_partition(data, indices, num_partitions=4)
+ partition_vals = sess.run(partitions)
+
+ self.assertAllEqual([0, 13], partition_vals[0])
+ self.assertAllEqual([17], partition_vals[1])
+ self.assertAllEqual([2, 4], partition_vals[2])
+ self.assertAllEqual([39], partition_vals[3])
+ # Vector data input to DynamicPartition results in
+ # `num_partitions` vectors of unknown length.
+ self.assertEqual([None], partitions[0].get_shape().as_list())
+ self.assertEqual([None], partitions[1].get_shape().as_list())
+ self.assertEqual([None], partitions[2].get_shape().as_list())
+ self.assertEqual([None], partitions[3].get_shape().as_list())
+
+ def testSimpleTwoDimensional(self):
+ with self.test_session() as sess:
+ data = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
+ [9, 10, 11], [12, 13, 14], [15, 16, 17]])
+ indices = tf.constant([0, 0, 2, 3, 2, 1])
+ partitions = tf.dynamic_partition(data, indices, num_partitions=4)
+ partition_vals = sess.run(partitions)
+
+ self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
+ self.assertAllEqual([[15, 16, 17]], partition_vals[1])
+ self.assertAllEqual([[6, 7, 8], [12, 13, 14]], partition_vals[2])
+ self.assertAllEqual([[9, 10, 11]], partition_vals[3])
+      # Matrix data input to DynamicPartition results in `num_partitions`
+      # matrices with an unknown number of rows and 3 columns.
+ self.assertEqual([None, 3], partitions[0].get_shape().as_list())
+ self.assertEqual([None, 3], partitions[1].get_shape().as_list())
+ self.assertEqual([None, 3], partitions[2].get_shape().as_list())
+ self.assertEqual([None, 3], partitions[3].get_shape().as_list())
+
+ def testHigherRank(self):
+ np.random.seed(7)
+ with self.test_session() as sess:
+ for n in 2, 3:
+ for shape in (4,), (4, 5), (4, 5, 2):
+ partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
+ for extra_shape in (), (6,), (6, 7):
+ data = np.random.randn(*(shape + extra_shape))
+ outputs = tf.dynamic_partition(data, partitions, num_partitions=n)
+ self.assertEqual(n, len(outputs))
+ for i, output in enumerate(sess.run(outputs)):
+ self.assertAllEqual(output, data[partitions == i])
+
+ def testErrorIndexOutOfRange(self):
+ with self.test_session() as sess:
+ data = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
+ [9, 10, 11], [12, 13, 14]])
+ indices = tf.constant([0, 2, 99, 2, 2])
+ partitions = tf.dynamic_partition(data, indices, num_partitions=4)
+ with self.assertRaisesOpError(r"partitions\[2\] = 99 is not in \[0, 4\)"):
+ sess.run(partitions)
+
+ def testScalarIndexOutOfRange(self):
+ with self.test_session() as sess:
+ bad = 17
+ data = np.zeros(5)
+ partitions = tf.dynamic_partition(data, bad, num_partitions=7)
+ with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
+ sess.run(partitions)
+
+ def testHigherRankIndexOutOfRange(self):
+ with self.test_session() as sess:
+ shape = (2, 3)
+ indices = tf.placeholder(shape=shape, dtype=np.int32)
+ data = np.zeros(shape + (5,))
+ partitions = tf.dynamic_partition(data, indices, num_partitions=7)
+ for i in xrange(2):
+ for j in xrange(3):
+ bad = np.zeros(shape, dtype=np.int32)
+ bad[i, j] = 17
+ with self.assertRaisesOpError(
+ r"partitions\[%d,%d\] = 17 is not in \[0, 7\)" % (i, j)):
+ sess.run(partitions, feed_dict={indices: bad})
+
+ def testErrorWrongDimsIndices(self):
+ data = tf.constant([[0], [1], [2]])
+ indices = tf.constant([[0], [0]])
+ with self.assertRaises(ValueError):
+ tf.dynamic_partition(data, indices, num_partitions=4)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py b/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py
new file mode 100644
index 0000000000..9ac49390b9
--- /dev/null
+++ b/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py
@@ -0,0 +1,107 @@
+"""Tests for tensorflow.ops.data_flow_ops.dynamic_stitch."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class DynamicStitchTest(tf.test.TestCase):
+
+ def testScalar(self):
+ with self.test_session():
+ indices = [tf.constant(0), tf.constant(1)]
+ data = [tf.constant(40), tf.constant(60)]
+ for step in -1, 1:
+ stitched_t = tf.dynamic_stitch(indices[::step], data)
+ stitched_val = stitched_t.eval()
+ self.assertAllEqual([40, 60][::step], stitched_val)
+ # Dimension 0 is determined by the max index in indices, so we
+ # can only infer that the output is a vector of some unknown
+ # length.
+ self.assertEqual([None], stitched_t.get_shape().as_list())
+
+ def testSimpleOneDimensional(self):
+ with self.test_session():
+ indices = [tf.constant([0, 4, 7]),
+ tf.constant([1, 6, 2, 3, 5])]
+ data = [tf.constant([0, 40, 70]),
+ tf.constant([10, 60, 20, 30, 50])]
+ stitched_t = tf.dynamic_stitch(indices, data)
+ stitched_val = stitched_t.eval()
+ self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
+ # Dimension 0 is determined by the max index in indices, so we
+ # can only infer that the output is a vector of some unknown
+ # length.
+ self.assertEqual([None], stitched_t.get_shape().as_list())
+
+ def testSimpleTwoDimensional(self):
+ with self.test_session():
+ indices = [tf.constant([0, 4, 7]),
+ tf.constant([1, 6]),
+ tf.constant([2, 3, 5])]
+ data = [tf.constant([[0, 1], [40, 41], [70, 71]]),
+ tf.constant([[10, 11], [60, 61]]),
+ tf.constant([[20, 21], [30, 31], [50, 51]])]
+ stitched_t = tf.dynamic_stitch(indices, data)
+ stitched_val = stitched_t.eval()
+ self.assertAllEqual(
+ [[0, 1], [10, 11], [20, 21], [30, 31],
+ [40, 41], [50, 51], [60, 61], [70, 71]], stitched_val)
+ # Dimension 0 is determined by the max index in indices, so we
+ # can only infer that the output is a matrix with 2 columns and
+ # some unknown number of rows.
+ self.assertEqual([None, 2], stitched_t.get_shape().as_list())
+
+ def testHigherRank(self):
+ with self.test_session() as sess:
+ indices = [tf.constant(6), tf.constant([4, 1]),
+ tf.constant([[5, 2], [0, 3]])]
+ data = [tf.constant([61, 62]), tf.constant([[41, 42], [11, 12]]),
+ tf.constant([[[51, 52], [21, 22]], [[1, 2], [31, 32]]])]
+ stitched_t = tf.dynamic_stitch(indices, data)
+ stitched_val = stitched_t.eval()
+ correct = 10 * np.arange(7)[:, None] + [1, 2]
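+      # correct is [[1, 2], [11, 12], ..., [61, 62]]: row i is
+      # [10*i + 1, 10*i + 2], matching the stitched indices above.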
+ self.assertAllEqual(correct, stitched_val)
+ self.assertEqual([None, 2], stitched_t.get_shape().as_list())
+ # Test gradients
+ stitched_grad = 7 * stitched_val
+ grads = tf.gradients(stitched_t, indices + data, stitched_grad)
+ self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
+ for datum, grad in zip(data, sess.run(grads[3:])):
+ self.assertAllEqual(7 * datum.eval(), grad)
+
+ def testErrorIndicesMultiDimensional(self):
+ indices = [tf.constant([0, 4, 7]),
+ tf.constant([[1, 6, 2, 3, 5]])]
+ data = [tf.constant([[0, 40, 70]]),
+ tf.constant([10, 60, 20, 30, 50])]
+ with self.assertRaises(ValueError):
+ tf.dynamic_stitch(indices, data)
+
+ def testErrorDataNumDimsMismatch(self):
+ indices = [tf.constant([0, 4, 7]),
+ tf.constant([1, 6, 2, 3, 5])]
+ data = [tf.constant([0, 40, 70]),
+ tf.constant([[10, 60, 20, 30, 50]])]
+ with self.assertRaises(ValueError):
+ tf.dynamic_stitch(indices, data)
+
+ def testErrorDataDimSizeMismatch(self):
+ indices = [tf.constant([0, 4, 5]),
+ tf.constant([1, 6, 2, 3])]
+ data = [tf.constant([[0], [40], [70]]),
+ tf.constant([[10, 11], [60, 61], [20, 21], [30, 31]])]
+ with self.assertRaises(ValueError):
+ tf.dynamic_stitch(indices, data)
+
+ def testErrorDataAndIndicesSizeMismatch(self):
+ indices = [tf.constant([0, 4, 7]),
+ tf.constant([1, 6, 2, 3, 5])]
+ data = [tf.constant([0, 40, 70]),
+ tf.constant([10, 60, 20, 30])]
+ with self.assertRaises(ValueError):
+ tf.dynamic_stitch(indices, data)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/edit_distance_op_test.py b/tensorflow/python/kernel_tests/edit_distance_op_test.py
new file mode 100644
index 0000000000..5919adcfaf
--- /dev/null
+++ b/tensorflow/python/kernel_tests/edit_distance_op_test.py
@@ -0,0 +1,153 @@
+"""Tests for tensorflow.kernels.edit_distance_op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+def ConstantOf(x):
+ x = np.asarray(x)
+ # Convert to int64 if it's not a string
+ if x.dtype.char != "S": x = np.asarray(x, dtype=np.int64)
+ return tf.constant(x)
+
+
+class EditDistanceTest(tf.test.TestCase):
+
+ def _testEditDistance(self, hypothesis, truth, normalize,
+ expected_output, expected_err_re=None):
+ # hypothesis and truth are (index, value, shape) tuples
+ hypothesis_st = tf.SparseTensor(*[ConstantOf(x) for x in hypothesis])
+ truth_st = tf.SparseTensor(*[ConstantOf(x) for x in truth])
+ edit_distance = tf.edit_distance(
+ hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
+
+ with self.test_session():
+ if expected_err_re is None:
+ # Shape inference figures out the shape from the shape variables
+ expected_shape = [
+ max(h, t) for h, t in zip(hypothesis[2], truth[2])[:-1]]
+ self.assertEqual(edit_distance.get_shape(), expected_shape)
+ output = edit_distance.eval()
+ self.assertAllClose(output, expected_output)
+ else:
+ with self.assertRaisesOpError(expected_err_re):
+ edit_distance.eval()
+
+ def testEditDistanceNormalized(self):
+ hypothesis_indices = [[0, 0], [0, 1],
+ [1, 0], [1, 1]]
+ hypothesis_values = [0, 1,
+ 1, -1]
+ hypothesis_shape = [2, 2]
+ truth_indices = [[0, 0],
+ [1, 0], [1, 1]]
+ truth_values = [0,
+ 1, 1]
+ truth_shape = [2, 2]
+ expected_output = [1.0, 0.5]
+
+ self._testEditDistance(
+ hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
+ truth=(truth_indices, truth_values, truth_shape),
+ normalize=True,
+ expected_output=expected_output)
+
+ def testEditDistanceUnnormalized(self):
+ hypothesis_indices = [[0, 0],
+ [1, 0], [1, 1]]
+ hypothesis_values = [10,
+ 10, 11]
+ hypothesis_shape = [2, 2]
+ truth_indices = [[0, 0], [0, 1],
+ [1, 0], [1, 1]]
+ truth_values = [1, 2,
+ 1, -1]
+ truth_shape = [2, 3]
+ expected_output = [2.0, 2.0]
+
+ self._testEditDistance(
+ hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
+ truth=(truth_indices, truth_values, truth_shape),
+ normalize=False,
+ expected_output=expected_output)
+
+ def testEditDistanceProperDistance(self):
+ # In this case, the values are individual characters stored in the
+ # SparseTensor (type DT_STRING)
+ hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
+ [[1, i] for i, _ in enumerate("altruistic")])
+ hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
+ hypothesis_shape = [2, 11]
+ truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
+ [[1, i] for i, _ in enumerate("algorithm")])
+ truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
+ truth_shape = [2, 11]
+ expected_unnormalized = [6.0, 6.0]
+ expected_normalized = [6.0/len("altruistic"),
+ 6.0/len("algorithm")]
+
+ self._testEditDistance(
+ hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
+ truth=(truth_indices, truth_values, truth_shape),
+ normalize=False,
+ expected_output=expected_unnormalized)
+
+ self._testEditDistance(
+ hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
+ truth=(truth_indices, truth_values, truth_shape),
+ normalize=True,
+ expected_output=expected_normalized)
+
+ def testEditDistance3D(self):
+ hypothesis_indices = [[0, 0, 0],
+ [1, 0, 0]]
+ hypothesis_values = [0, 1]
+ hypothesis_shape = [2, 1, 1]
+ truth_indices = [[0, 1, 0],
+ [1, 0, 0],
+ [1, 1, 0]]
+ truth_values = [0, 1, 1]
+ truth_shape = [2, 2, 1]
+ expected_output = [[np.inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
+ [0.0, 1.0]] # (1,0): match, (1,1): no hypothesis
+
+ self._testEditDistance(
+ hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
+ truth=(truth_indices, truth_values, truth_shape),
+ normalize=True,
+ expected_output=expected_output)
+
+ def testEditDistanceMissingHypothesis(self):
+ hypothesis_indices = np.empty((0, 2), dtype=np.int64)
+ hypothesis_values = []
+ hypothesis_shape = [1, 0]
+ truth_indices = [[0, 0]]
+ truth_values = [0]
+ truth_shape = [1, 1]
+ expected_output = [1.0]
+
+ self._testEditDistance(
+ hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
+ truth=(truth_indices, truth_values, truth_shape),
+ normalize=True,
+ expected_output=expected_output)
+
+ def testEditDistanceMissingTruth(self):
+ hypothesis_indices = [[0, 0]]
+ hypothesis_values = [0]
+ hypothesis_shape = [1, 1]
+ truth_indices = np.empty((0, 2), dtype=np.int64)
+ truth_values = []
+ truth_shape = [1, 0]
+ expected_output = [np.inf] # Normalized, divide by zero
+
+ self._testEditDistance(
+ hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
+ truth=(truth_indices, truth_values, truth_shape),
+ normalize=True,
+ expected_output=expected_output)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/embedding_ops_test.py b/tensorflow/python/kernel_tests/embedding_ops_test.py
new file mode 100644
index 0000000000..99aa2453dc
--- /dev/null
+++ b/tensorflow/python/kernel_tests/embedding_ops_test.py
@@ -0,0 +1,422 @@
+"""Functional tests for ops used with embeddings."""
+import itertools
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+def _AsLong(array):
+ """Casts arrays elements to long type. Used to convert from numpy tf."""
+ return [long(x) for x in array]
+
+
+class ScatterAddSubTest(tf.test.TestCase):
+
+ def _TestCase(self, shape, indices, scatter_op=tf.scatter_add):
+ """Run a random test case with the given shape and indices.
+
+ Args:
+ shape: Shape of the parameters array.
+      indices: One-dimensional array of ints, the indices of the first
+        dimension of the parameters to update.
+ scatter_op: ScatterAdd or ScatterSub.
+ """
+ with self.test_session(use_gpu=False):
+ # Create a random parameter array of given shape
+ p_init = np.random.rand(*shape).astype("f")
+      # Create the shape of the update array. All dimensions except the
+      # first match the parameter array; the first dimension equals the
+      # number of indices.
+ vals_shape = [len(indices)] + shape[1:]
+ vals_init = np.random.rand(*vals_shape).astype("f")
+ v_i = [float(x) for x in vals_init.ravel()]
+ p = tf.Variable(p_init)
+ vals = tf.constant(v_i, shape=vals_shape, name="vals")
+ ind = tf.constant(indices, dtype=tf.int32)
+ p2 = scatter_op(p, ind, vals, name="updated_p")
+ # p = init
+ tf.initialize_all_variables().run()
+ # p += vals
+ result = p2.eval()
+ # Compute the expected 'p' using numpy operations.
+ for i, ind in enumerate(indices):
+ if scatter_op == tf.scatter_add:
+ p_init.reshape(shape[0], -1)[ind, :] += (
+ vals_init.reshape(vals_shape[0], -1)[i, :])
+ else:
+ p_init.reshape(shape[0], -1)[ind, :] -= (
+ vals_init.reshape(vals_shape[0], -1)[i, :])
+ self.assertTrue(all((p_init == result).ravel()))
+
+ def testNoRepetitions(self):
+ self._TestCase([2, 2], [1])
+ self._TestCase([4, 4, 4], [2, 0])
+ self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])
+
+ def testWithRepetitions(self):
+ self._TestCase([2, 2], [1, 1])
+ self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])
+ self._TestCase([32, 4, 4], [31] * 8)
+
+ def testRandom(self):
+ # Random shapes of rank 4, random indices
+ for _ in range(5):
+ shape = np.random.randint(1, 20, size=4)
+ indices = np.random.randint(shape[0], size=2 * shape[0])
+ self._TestCase(_AsLong(list(shape)), list(indices))
+
+ def testSubRandom(self):
+ # Random shapes of rank 4, random indices
+ for _ in range(5):
+ shape = np.random.randint(1, 20, size=4)
+ indices = np.random.randint(shape[0], size=2 * shape[0])
+ self._TestCase(_AsLong(list(shape)), list(indices),
+ tf.scatter_sub)
+
+ def testWrongShape(self):
+ # Indices and values mismatch.
+ var = tf.Variable(tf.zeros(shape=[1024, 64, 64], dtype=tf.float32))
+ indices = tf.placeholder(tf.int32, shape=[32])
+ values = tf.placeholder(tf.float32, shape=[33, 64, 64])
+ with self.assertRaises(ValueError):
+ tf.scatter_add(var, indices, values)
+
+ # Var and values mismatch.
+ values = tf.placeholder(tf.float32, shape=[32, 64, 63])
+ with self.assertRaises(ValueError):
+ tf.scatter_add(var, indices, values)
+
+
+def _PName(param_id):
+ return "p" + str(param_id)
+
+
+def _EmbeddingParams(num_shards, vocab_size,
+ dtype=tf.float32,
+ shape=None):
+ p = []
+ params = {}
+ feed_dict = {}
+ if not shape: shape = [10]
+ assert not vocab_size % num_shards
+ shape = [vocab_size / num_shards] + shape
+ for i in range(num_shards):
+ param_name = _PName(i)
+ constant_t = tf.constant(1.0, shape=shape, dtype=dtype,
+ name=param_name)
+ p.append(constant_t)
+ np_type = "f" if dtype == tf.float32 else "d"
+ val = (np.random.rand(*shape).astype(np_type)) + 1
+ params[param_name + ":0"] = val
+ feed_dict[constant_t.name] = val
+ return p, params, feed_dict
+
+
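+# A sketch of what _EmbeddingResult computes: for each batch entry it gathers
+# row id / num_shards from shard id % num_shards, scales it by the entry's
+# weight, and accumulates both the values and the weights.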
+def _EmbeddingResult(params, id_vals, num_shards, weight_vals=None):
+ if weight_vals is None:
+ weight_vals = np.copy(id_vals)
+ weight_vals.fill(1)
+ values = []
+ weights = []
+ for ids, wts in zip(id_vals, weight_vals):
+ val_aggr = None
+ wt_aggr = None
+ if isinstance(ids, int):
+ ids = [ids]
+ wts = [wts]
+ for i, wt_val in zip(ids, wts):
+ val = np.copy(params[_PName(i % num_shards) + ":0"]
+ [i / num_shards, :]) * wt_val
+ if val_aggr is None:
+ assert wt_aggr is None
+ val_aggr = val
+ wt_aggr = wt_val
+ else:
+ assert wt_aggr is not None
+ val_aggr += val
+ wt_aggr += wt_val
+ values.append(val_aggr)
+ weights.append(wt_aggr)
+ values = np.array(values).astype(np.float32)
+ weights = np.array(weights).astype(np.float32)
+ return values, weights
+
+
+class EmbeddingLookupTest(tf.test.TestCase):
+
+ # This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
+  # both ids are in the first shard, one of the resulting lookup vectors
+  # is going to be empty. The subsequent DivOp fails because of that.
+ # TODO(keveman): Disabling the test until the underlying problem is fixed.
+  def _testSimpleSharded(self):
+ with self.test_session():
+ num_shards = 2
+ vocab_size = 4
+ p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
+
+ id_vals = np.array([0, 0])
+ ids = tf.constant(list(id_vals), dtype=tf.int32)
+ print "Construct ids", ids.get_shape()
+ embedding = tf.nn.embedding_lookup(p, ids)
+
+ tf_result = embedding.eval(feed_dict=feed_dict)
+ np_result, _ = _EmbeddingResult(params, id_vals, num_shards)
+ self.assertAllEqual(np_result, tf_result)
+ self.assertShapeEqual(np_result, embedding)
+
+ def testSharded(self):
+ with self.test_session():
+ num_shards = 5
+ vocab_size = 25
+      # The embedding dimension is 10. The vocab_size x 10 embedding
+      # parameters are spread across num_shards matrices, so each matrix
+      # is (vocab_size / num_shards) x 10.
+ p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
+
+ num_vals = 30
+ # Fetch num_vals embeddings for random word ids. Since
+ # num_vals > vocab_size, this ought to have repetitions, so
+ # will test that aspect.
+ id_vals = np.random.randint(vocab_size, size=num_vals)
+ ids = tf.constant(list(id_vals), dtype=tf.int32)
+
+ embedding = tf.nn.embedding_lookup(p, ids)
+ tf_result = embedding.eval(feed_dict=feed_dict)
+ np_result, _ = _EmbeddingResult(params, id_vals, num_shards)
+ self.assertAllEqual(np_result, tf_result)
+ self.assertShapeEqual(np_result, embedding)
+
+ def testGradientsEmbeddingLookup(self):
+ vocab_size = 9
+ num_ids = 5
+ id_vals = list(np.random.randint(vocab_size, size=num_ids))
+ tf.logging.vlog(1, id_vals)
+ for num_shards in [1, 3]:
+ with self.test_session():
+ ids = tf.constant(id_vals, dtype=tf.int32)
+ x, params, _ = _EmbeddingParams(
+ num_shards, vocab_size, shape=[2])
+ y = tf.nn.embedding_lookup(x, ids)
+ y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
+ x_name = [_PName(i) for i in range(num_shards)]
+ x_init_value = [params[x_n + ":0"] for x_n in x_name]
+ x_shape = [i.shape for i in x_init_value]
+ err = gc.ComputeGradientError(x, x_shape, y, y_shape,
+ x_init_value=x_init_value)
+ self.assertLess(err, 1e-4)
+
+ def testGradientsEmbeddingLookupWithComputedParams(self):
+ vocab_size = 9
+ num_ids = 5
+ id_vals = list(np.random.randint(vocab_size, size=num_ids))
+ tf.logging.vlog(1, id_vals)
+ for num_shards in [1, 3]:
+ with self.test_session():
+ ids = tf.constant(id_vals, dtype=tf.int32)
+ x, params, _ = _EmbeddingParams(
+ num_shards, vocab_size, shape=[2])
+ # This will force a conversion from IndexedSlices to Tensor.
+ x_squared = [tf.square(elem) for elem in x]
+ y = tf.nn.embedding_lookup(x_squared, ids)
+ y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
+ x_name = [_PName(i) for i in range(num_shards)]
+ x_init_value = [params[x_n + ":0"] for x_n in x_name]
+ x_shape = [i.shape for i in x_init_value]
+ err = gc.ComputeGradientError(x, x_shape, y, y_shape,
+ x_init_value=x_init_value)
+ self.assertLess(err, 1e-3)
+
+ def testConstructionNonSharded(self):
+ with tf.Graph().as_default():
+ p = tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))
+ ids = tf.constant([0, 1, 1, 7], dtype=tf.int32)
+ tf.nn.embedding_lookup([p], ids)
+
+ def testConstructionSharded(self):
+ with tf.Graph().as_default():
+ p = []
+ for _ in range(2):
+ p += [tf.Variable(tf.zeros(shape=[100, 100], dtype=tf.float32))]
+ ids = tf.constant([0, 1, 1, 17], dtype=tf.int32)
+ tf.nn.embedding_lookup(p, ids)
+
+ def testHigherRank(self):
+ np.random.seed(8)
+ with self.test_session():
+ for params_shape in (12,), (6, 3):
+ params = np.random.randn(*params_shape)
+ for ids_shape in (3, 2), (4, 3):
+ ids = np.random.randint(params.shape[0],
+ size=np.prod(ids_shape)).reshape(ids_shape)
+ # Compare nonsharded to gather
+ simple = tf.nn.embedding_lookup(params, ids).eval()
+ self.assertAllEqual(simple, tf.gather(params, ids).eval())
+ # Run a few random sharded versions
+ for procs in 1, 2, 3:
+ stride = procs * tf.range(0, params.shape[0] / procs)
+ split_params = [tf.gather(params, stride + p)
+ for p in xrange(procs)]
+ sharded = tf.nn.embedding_lookup(split_params, ids).eval()
+ self.assertAllEqual(simple, sharded)
+
+
+class EmbeddingLookupSparseTest(tf.test.TestCase):
+
+ def _RandomIdsAndWeights(self, batch_size, vocab_size):
+ max_val_per_entry = 6
+ vals_per_batch_entry = np.random.randint(
+ 1, max_val_per_entry, size=batch_size)
+ num_vals = np.sum(vals_per_batch_entry)
+
+ ids = np.random.randint(vocab_size, size=num_vals)
+ weights = 1 + np.random.rand(num_vals)
+
+ indices = []
+ for batch_entry, num_val in enumerate(vals_per_batch_entry):
+ for val_index in range(num_val):
+ indices.append([batch_entry, val_index])
+
+ shape = [batch_size, max_val_per_entry]
+
+ sp_ids = tf.SparseTensor(
+ tf.constant(indices, tf.int64),
+ tf.constant(ids, tf.int32),
+ tf.constant(shape, tf.int64))
+ sp_weights = tf.SparseTensor(
+ tf.constant(indices, tf.int64),
+ tf.constant(weights, tf.float32),
+ tf.constant(shape, tf.int64))
+
+ return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
+
+ def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
+ grouped_vals = []
+ index = 0
+ for num_val in vals_per_batch_entry:
+ grouped_vals.append(list(vals[index: (index + num_val)]))
+ index += num_val
+ return grouped_vals
+
+ def testEmbeddingLookupSparse(self):
+ vocab_size = 25
+ batch_size = 10
+ param_shape = [2, 5]
+
+ sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
+ self._RandomIdsAndWeights(batch_size, vocab_size))
+
+ grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
+ grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
+ grouped_ignored_weights = self._GroupByBatchEntry(
+ np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
+
+ for num_shards, combiner, dtype, ignore_weights in itertools.product(
+ [1, 5],
+ ["sum", "mean"],
+ [tf.float32, tf.float64],
+ [True, False]):
+
+ with self.test_session():
+ p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size,
+ shape=param_shape,
+ dtype=dtype)
+ embedding_sum = tf.nn.embedding_lookup_sparse(
+ p, sp_ids, None if ignore_weights else sp_weights,
+ combiner=combiner)
+ tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
+
+ np_embedding_sum, np_weight_sum = _EmbeddingResult(
+ params, grouped_ids, num_shards,
+ weight_vals=grouped_ignored_weights
+ if ignore_weights else grouped_weights)
+ if combiner == "mean":
+ np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
+ self.assertAllClose(np_embedding_sum, tf_embedding_sum)
+
+ def testGradientsEmbeddingLookupSparse(self):
+ vocab_size = 12
+ batch_size = 4
+ param_shape = [2, 3]
+ sp_ids, sp_weights, _, _, _ = (
+ self._RandomIdsAndWeights(batch_size, vocab_size))
+
+ for num_shards, combiner, dtype, ignore_weights in itertools.product(
+ [1, 3],
+ ["sum", "mean"],
+ [tf.float32, tf.float64],
+ [True, False]):
+ with self.test_session():
+ x, params, _ = _EmbeddingParams(num_shards, vocab_size,
+ shape=param_shape,
+ dtype=dtype)
+
+ y = tf.nn.embedding_lookup_sparse(
+ x, sp_ids, None if ignore_weights else sp_weights,
+ combiner=combiner)
+ x_name = [_PName(i) for i in range(num_shards)]
+ x_init_value = [params[x_n + ":0"] for x_n in x_name]
+ x_shape = [i.shape for i in x_init_value]
+ y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
+ err = gc.ComputeGradientError(x, x_shape, y, y_shape,
+ x_init_value=x_init_value)
+ self.assertLess(err, 1e-5 if dtype == tf.float64 else 2e-3)
+
+
+class DynamicStitchOpTest(tf.test.TestCase):
+
+ def testCint32Cpu(self):
+ with self.test_session(use_gpu=False):
+ indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
+ values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
+ self.assertAllEqual(
+ tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+
+ def testCint32Gpu(self):
+ with self.test_session(use_gpu=True):
+ indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
+ values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
+ self.assertAllEqual(
+ tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+
+ def testInt32Cpu(self):
+ with self.test_session(use_gpu=False):
+ indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
+ values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
+ self.assertAllEqual(
+ tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+
+ def testInt32Gpu(self):
+ with self.test_session(use_gpu=True):
+ indices = [tf.convert_to_tensor([0, 1, 2]), tf.convert_to_tensor([2, 3])]
+ values = [tf.convert_to_tensor([12, 23, 34]), tf.convert_to_tensor([1, 2])]
+ self.assertAllEqual(
+ tf.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+
+ def testSumGradArgs(self):
+ with self.test_session(use_gpu=False):
+ indices = [tf.convert_to_tensor([0, 1, 2, 3]),
+ tf.convert_to_tensor([2, 3])]
+ values = [tf.convert_to_tensor([2, 3, 5, 7]), tf.convert_to_tensor([1, 1])]
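+      # Both inputs provide values for indices 2 and 3; values are merged in
+      # order, so the later tensor's values (1, 1) win.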
+ self.assertAllEqual(
+ tf.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])
+
+  # We expect that the values are merged in order, i.e. for a repeated index
+  # the value from the last input tensor wins.
+ def testStitchOrder(self):
+ with self.test_session():
+ indices = []
+ np_values = []
+ values = []
+ for _ in range(10):
+ indices.extend([tf.convert_to_tensor(np.arange(100).astype(np.int32))])
+ np_values.extend([np.random.uniform(size=100)])
+ values.extend([tf.convert_to_tensor(np_values[-1])])
+ stitched = tf.dynamic_stitch(indices, values).eval()
+ self.assertAllEqual(np_values[-1], stitched)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/fifo_queue_test.py b/tensorflow/python/kernel_tests/fifo_queue_test.py
new file mode 100644
index 0000000000..57448db433
--- /dev/null
+++ b/tensorflow/python/kernel_tests/fifo_queue_test.py
@@ -0,0 +1,1043 @@
+"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
+import random
+import re
+import time
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class FIFOQueueTest(tf.test.TestCase):
+
+ def testConstructor(self):
+ with tf.Graph().as_default():
+ q = tf.FIFOQueue(10, tf.float32, name="Q")
+ self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ self.assertEquals(tf.string_ref, q.queue_ref.dtype)
+ self.assertProtoEquals("""
+ name:'Q' op:'FIFOQueue'
+ attr { key: 'component_types' value { list { type: DT_FLOAT } } }
+ attr { key: 'shapes' value { list {} } }
+ attr { key: 'capacity' value { i: 10 } }
+ attr { key: 'container' value { s: '' } }
+ attr { key: 'shared_name' value { s: '' } }
+ """, q.queue_ref.op.node_def)
+
+ def testMultiQueueConstructor(self):
+ with tf.Graph().as_default():
+ q = tf.FIFOQueue(5, (tf.int32, tf.float32),
+ shared_name="foo", name="Q")
+ self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ self.assertEquals(tf.string_ref, q.queue_ref.dtype)
+ self.assertProtoEquals("""
+ name:'Q' op:'FIFOQueue'
+ attr { key: 'component_types' value { list {
+        type: DT_INT32 type: DT_FLOAT
+ } } }
+ attr { key: 'shapes' value { list {} } }
+ attr { key: 'capacity' value { i: 5 } }
+ attr { key: 'container' value { s: '' } }
+ attr { key: 'shared_name' value { s: 'foo' } }
+ """, q.queue_ref.op.node_def)
+
+ def testConstructorWithShapes(self):
+ with tf.Graph().as_default():
+ q = tf.FIFOQueue(5, (tf.int32, tf.float32),
+ shapes=(tf.TensorShape([1, 1, 2, 3]),
+ tf.TensorShape([5, 8])), name="Q")
+ self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
+ self.assertEquals(tf.string_ref, q.queue_ref.dtype)
+ self.assertProtoEquals("""
+ name:'Q' op:'FIFOQueue'
+ attr { key: 'component_types' value { list {
+        type: DT_INT32 type: DT_FLOAT
+ } } }
+ attr { key: 'shapes' value { list {
+ shape { dim { size: 1 }
+ dim { size: 1 }
+ dim { size: 2 }
+ dim { size: 3 } }
+ shape { dim { size: 5 }
+ dim { size: 8 } }
+ } } }
+ attr { key: 'capacity' value { i: 5 } }
+ attr { key: 'container' value { s: '' } }
+ attr { key: 'shared_name' value { s: '' } }
+ """, q.queue_ref.op.node_def)
+
+ def testEnqueue(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ enqueue_op = q.enqueue((10.0,))
+ enqueue_op.run()
+
+ def testEnqueueWithShape(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32, shapes=(3, 2))
+ enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
+ enqueue_correct_op.run()
+ with self.assertRaises(ValueError):
+ q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
+ self.assertEqual(1, q.size().eval())
+
+ def testEnqueueManyWithShape(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, [tf.int32, tf.int32],
+ shapes=[(), (2,)])
+ q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
+ self.assertEqual(4, q.size().eval())
+
+ def testParallelEnqueue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ # Run one producer thread for each element in elems.
+ def enqueue(enqueue_op):
+ sess.run(enqueue_op)
+ threads = [self.checkedThread(target=enqueue, args=(e,))
+ for e in enqueue_ops]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+
+ # Dequeue every element using a single thread.
+ results = []
+ for _ in xrange(len(elems)):
+ results.append(dequeued_t.eval())
+ self.assertItemsEqual(elems, results)
+
+ def testParallelDequeue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ # Enqueue every element using a single thread.
+ for enqueue_op in enqueue_ops:
+ enqueue_op.run()
+
+ # Run one consumer thread for each element in elems.
+ results = []
+
+ def dequeue():
+ results.append(sess.run(dequeued_t))
+ threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+ self.assertItemsEqual(elems, results)
+
+ def testDequeue(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ elems = [10.0, 20.0, 30.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ for enqueue_op in enqueue_ops:
+ enqueue_op.run()
+
+ for i in xrange(len(elems)):
+ vals = dequeued_t.eval()
+ self.assertEqual([elems[i]], vals)
+
+ def testEnqueueAndBlockingDequeue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(3, tf.float32)
+ elems = [10.0, 20.0, 30.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ def enqueue():
+ # The enqueue_ops should run after the dequeue op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ for enqueue_op in enqueue_ops:
+ sess.run(enqueue_op)
+
+ results = []
+
+ def dequeue():
+ for _ in xrange(len(elems)):
+ results.append(sess.run(dequeued_t))
+
+ enqueue_thread = self.checkedThread(target=enqueue)
+ dequeue_thread = self.checkedThread(target=dequeue)
+ enqueue_thread.start()
+ dequeue_thread.start()
+ enqueue_thread.join()
+ dequeue_thread.join()
+
+ for elem, result in zip(elems, results):
+ self.assertEqual([elem], result)
+
+ def testMultiEnqueueAndDequeue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, (tf.int32, tf.float32))
+ elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
+ enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
+ dequeued_t = q.dequeue()
+
+ for enqueue_op in enqueue_ops:
+ enqueue_op.run()
+
+ for i in xrange(len(elems)):
+ x_val, y_val = sess.run(dequeued_t)
+ x, y = elems[i]
+ self.assertEqual([x], x_val)
+ self.assertEqual([y], y_val)
+
+ def testQueueSizeEmpty(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ self.assertEqual([0], q.size().eval())
+
+ def testQueueSizeAfterEnqueueAndDequeue(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ enqueue_op = q.enqueue((10.0,))
+ dequeued_t = q.dequeue()
+ size = q.size()
+ self.assertEqual([], size.get_shape())
+
+ enqueue_op.run()
+ self.assertEqual(1, size.eval())
+ dequeued_t.op.run()
+ self.assertEqual(0, size.eval())
+
+ def testEnqueueMany(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue()
+ enqueue_op.run()
+ enqueue_op.run()
+
+ for i in range(8):
+ vals = dequeued_t.eval()
+ self.assertEqual([elems[i % 4]], vals)
+
+ def testEmptyEnqueueMany(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ empty_t = tf.constant([], dtype=tf.float32,
+ shape=[0, 2, 3])
+ enqueue_op = q.enqueue_many((empty_t,))
+ size_t = q.size()
+
+ self.assertEqual([0], size_t.eval())
+ enqueue_op.run()
+ self.assertEqual([0], size_t.eval())
+
+ def testEmptyDequeueMany(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32, shapes=())
+ enqueue_op = q.enqueue((10.0,))
+ dequeued_t = q.dequeue_many(0)
+
+ self.assertEqual([], dequeued_t.eval().tolist())
+ enqueue_op.run()
+ self.assertEqual([], dequeued_t.eval().tolist())
+
+ def testEmptyDequeueManyWithNoShape(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ # Expect the operation to fail due to the shape not being constrained.
+ with self.assertRaisesOpError("specified shapes"):
+ q.dequeue_many(0).eval()
+
+ def testMultiEnqueueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, (tf.float32, tf.int32))
+ float_elems = [10.0, 20.0, 30.0, 40.0]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
+ enqueue_op = q.enqueue_many((float_elems, int_elems))
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+ enqueue_op.run()
+
+ for i in range(8):
+ float_val, int_val = sess.run(dequeued_t)
+ self.assertEqual(float_elems[i % 4], float_val)
+ self.assertAllEqual(int_elems[i % 4], int_val)
+
+ def testDequeueMany(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32, ())
+ elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(4)
+
+ enqueue_op.run()
+
+ self.assertAllEqual(elems[0:4], dequeued_t.eval())
+ self.assertAllEqual(elems[4:8], dequeued_t.eval())
+
+ def testMultiDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, (tf.float32, tf.int32),
+ shapes=((), (2,)))
+ float_elems = [
+ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
+ [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
+ enqueue_op = q.enqueue_many((float_elems, int_elems))
+ dequeued_t = q.dequeue_many(4)
+ dequeued_single_t = q.dequeue()
+
+ enqueue_op.run()
+
+ float_val, int_val = sess.run(dequeued_t)
+ self.assertAllEqual(float_elems[0:4], float_val)
+ self.assertAllEqual(int_elems[0:4], int_val)
+ self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
+ self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
+
+ float_val, int_val = sess.run(dequeued_t)
+ self.assertAllEqual(float_elems[4:8], float_val)
+ self.assertAllEqual(int_elems[4:8], int_val)
+
+ float_val, int_val = sess.run(dequeued_single_t)
+ self.assertAllEqual(float_elems[8], float_val)
+ self.assertAllEqual(int_elems[8], int_val)
+ self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
+ self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
+
+ def testHighDimension(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.int32, (4, 4, 4, 4))
+ elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(10)
+
+ enqueue_op.run()
+ self.assertAllEqual(dequeued_t.eval(), elems)
+
+ def testParallelEnqueueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(1000, tf.float32, shapes=())
+ elems = [10.0 * x for x in range(100)]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(1000)
+
+ # Enqueue 100 items in parallel on 10 threads.
+ def enqueue():
+ sess.run(enqueue_op)
+ threads = [self.checkedThread(target=enqueue) for _ in range(10)]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+
+ self.assertItemsEqual(dequeued_t.eval(), elems * 10)
+
+ def testParallelDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(1000, tf.float32, shapes=())
+ elems = [10.0 * x for x in range(1000)]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(100)
+
+ enqueue_op.run()
+
+ # Dequeue 100 items in parallel on 10 threads.
+ dequeued_elems = []
+
+ def dequeue():
+ dequeued_elems.extend(sess.run(dequeued_t))
+ threads = [self.checkedThread(target=dequeue) for _ in range(10)]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+ self.assertItemsEqual(elems, dequeued_elems)
+
+ def testParallelEnqueueAndDequeue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(50, tf.float32, shapes=())
+ initial_elements = [10.0] * 49
+ q.enqueue_many((initial_elements,)).run()
+
+ enqueue_op = q.enqueue((20.0,))
+ dequeued_t = q.dequeue()
+
+ def enqueue():
+ for _ in xrange(100):
+ sess.run(enqueue_op)
+ def dequeue():
+ for _ in xrange(100):
+ self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
+
+ enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
+ dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
+ for enqueue_thread in enqueue_threads:
+ enqueue_thread.start()
+ for dequeue_thread in dequeue_threads:
+ dequeue_thread.start()
+ for enqueue_thread in enqueue_threads:
+ enqueue_thread.join()
+ for dequeue_thread in dequeue_threads:
+ dequeue_thread.join()
+
+ # Dequeue the initial count of elements to clean up.
+ cleanup_elems = q.dequeue_many(49).eval()
+ for elem in cleanup_elems:
+ self.assertTrue(elem in (10.0, 20.0))
+
+ def testMixtureOfEnqueueAndEnqueueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, tf.int32, shapes=())
+ enqueue_placeholder = tf.placeholder(tf.int32, shape=())
+ enqueue_op = q.enqueue((enqueue_placeholder,))
+ enqueuemany_placeholder = tf.placeholder(
+ tf.int32, shape=(None,))
+ enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
+
+ dequeued_t = q.dequeue()
+ close_op = q.close()
+
+ def dequeue():
+ for i in xrange(250):
+ self.assertEqual(i, sess.run(dequeued_t))
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+
+ elements_enqueued = 0
+ while elements_enqueued < 250:
+        # With equal probability, run the enqueue or enqueue_many op.
+ if random.random() > 0.5:
+ enqueue_op.run({enqueue_placeholder: elements_enqueued})
+ elements_enqueued += 1
+ else:
+ count = random.randint(0, min(20, 250 - elements_enqueued))
+ range_to_enqueue = range(elements_enqueued, elements_enqueued + count)
+ enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
+ elements_enqueued += count
+
+ close_op.run()
+ dequeue_thread.join()
+ self.assertEqual(0, q.size().eval())
+
+ def testMixtureOfDequeueAndDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, tf.int32, shapes=())
+ enqueue_op = q.enqueue_many((range(250),))
+ dequeued_t = q.dequeue()
+ count_placeholder = tf.placeholder(tf.int32, shape=())
+ dequeuemany_t = q.dequeue_many(count_placeholder)
+
+ def enqueue():
+ sess.run(enqueue_op)
+ enqueue_thread = self.checkedThread(target=enqueue)
+ enqueue_thread.start()
+
+ elements_dequeued = 0
+ while elements_dequeued < 250:
+        # With equal probability, run the dequeue or dequeue_many op.
+ if random.random() > 0.5:
+ self.assertEqual(elements_dequeued, dequeued_t.eval())
+ elements_dequeued += 1
+ else:
+ count = random.randint(0, min(20, 250 - elements_dequeued))
+ expected_range = range(elements_dequeued, elements_dequeued + count)
+ self.assertAllEqual(
+ expected_range, dequeuemany_t.eval({count_placeholder: count}))
+ elements_dequeued += count
+
+ q.close().run()
+ enqueue_thread.join()
+ self.assertEqual(0, q.size().eval())
+
+ def testBlockingDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, tf.float32, ())
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(4)
+
+ dequeued_elems = []
+
+ def enqueue():
+ # The enqueue_op should run after the dequeue op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ sess.run(enqueue_op)
+
+ def dequeue():
+ dequeued_elems.extend(sess.run(dequeued_t).tolist())
+
+ enqueue_thread = self.checkedThread(target=enqueue)
+ dequeue_thread = self.checkedThread(target=dequeue)
+ enqueue_thread.start()
+ dequeue_thread.start()
+ enqueue_thread.join()
+ dequeue_thread.join()
+
+ self.assertAllEqual(elems, dequeued_elems)
+
+ def testDequeueManyWithTensorParameter(self):
+ with self.test_session():
+ # Define a first queue that contains integer counts.
+ dequeue_counts = [random.randint(1, 10) for _ in range(100)]
+ count_q = tf.FIFOQueue(100, tf.int32, ())
+ enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
+ total_count = sum(dequeue_counts)
+
+ # Define a second queue that contains total_count elements.
+ elems = [random.randint(0, 100) for _ in range(total_count)]
+ q = tf.FIFOQueue(total_count, tf.int32, ())
+ enqueue_elems_op = q.enqueue_many((elems,))
+
+      # Define a subgraph that first dequeues a count, then uses dequeue_many
+      # to dequeue that many elements.
+ dequeued_t = q.dequeue_many(count_q.dequeue())
+
+ enqueue_counts_op.run()
+ enqueue_elems_op.run()
+
+ dequeued_elems = []
+ for _ in dequeue_counts:
+ dequeued_elems.extend(dequeued_t.eval())
+ self.assertEqual(elems, dequeued_elems)
+
+ def testDequeueFromClosedQueue(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+ close_op.run()
+ for elem in elems:
+ self.assertEqual([elem], dequeued_t.eval())
+
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ dequeued_t.eval()
+
+ def testBlockingDequeueFromClosedQueue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+
+ def dequeue():
+ for elem in elems:
+ self.assertEqual([elem], sess.run(dequeued_t))
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ sess.run(dequeued_t)
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ close_op.run()
+ dequeue_thread.join()
+
+ def testBlockingDequeueFromClosedEmptyQueue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, tf.float32)
+ close_op = q.close()
+ dequeued_t = q.dequeue()
+
+ def dequeue():
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ sess.run(dequeued_t)
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ close_op.run()
+ dequeue_thread.join()
+
+ def testBlockingDequeueManyFromClosedQueue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, tf.float32, ())
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+ dequeued_t = q.dequeue_many(4)
+
+ enqueue_op.run()
+
+ def dequeue():
+ self.assertAllEqual(elems, sess.run(dequeued_t))
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ sess.run(dequeued_t)
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ close_op.run()
+ dequeue_thread.join()
+
+ def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(4, tf.float32, ())
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+ dequeued_t = q.dequeue_many(3)
+ cleanup_dequeue_t = q.dequeue()
+
+ def enqueue():
+ sess.run(enqueue_op)
+
+ def dequeue():
+ self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
+ with self.assertRaises(tf.errors.OutOfRangeError):
+ sess.run(dequeued_t)
+ self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
+
+ def close():
+ sess.run(close_op)
+
+ enqueue_thread = self.checkedThread(target=enqueue)
+ enqueue_thread.start()
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+
+ close_thread = self.checkedThread(target=close)
+ close_thread.start()
+
+ enqueue_thread.join()
+ dequeue_thread.join()
+ close_thread.join()
+
+ def testClosedBlockingDequeueManyRestoresPartialBatch(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(4, (tf.float32, tf.float32), ((), ()))
+ elems_a = [1.0, 2.0, 3.0]
+ elems_b = [10.0, 20.0, 30.0]
+ enqueue_op = q.enqueue_many((elems_a, elems_b))
+ dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
+ cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
+ close_op = q.close()
+
+ enqueue_op.run()
+
+ def dequeue():
+ with self.assertRaises(tf.errors.OutOfRangeError):
+ sess.run([dequeued_a_t, dequeued_b_t])
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+
+ close_op.run()
+ dequeue_thread.join()
+ # Test that the elements in the partially-dequeued batch are
+ # restored in the correct order.
+ for elem_a, elem_b in zip(elems_a, elems_b):
+ val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
+ self.assertEqual(elem_a, val_a)
+ self.assertEqual(elem_b, val_b)
+ self.assertEqual(0, q.size().eval())
+
+ def testBlockingDequeueManyFromClosedEmptyQueue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(10, tf.float32, ())
+ close_op = q.close()
+ dequeued_t = q.dequeue_many(4)
+
+ def dequeue():
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ sess.run(dequeued_t)
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ close_op.run()
+ dequeue_thread.join()
+
+ def testEnqueueToClosedQueue(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ enqueue_op = q.enqueue((10.0,))
+ close_op = q.close()
+
+ enqueue_op.run()
+ close_op.run()
+
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
+ enqueue_op.run()
+
+ def testEnqueueManyToClosedQueue(self):
+ with self.test_session():
+ q = tf.FIFOQueue(10, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+
+ enqueue_op.run()
+ close_op.run()
+
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
+ enqueue_op.run()
+
+ def testBlockingEnqueueToFullQueue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(4, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ blocking_enqueue_op = q.enqueue((50.0,))
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+
+ def blocking_enqueue():
+ sess.run(blocking_enqueue_op)
+ thread = self.checkedThread(target=blocking_enqueue)
+ thread.start()
+ # The dequeue ops should run after the blocking_enqueue_op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ for elem in elems:
+ self.assertEqual([elem], dequeued_t.eval())
+ self.assertEqual([50.0], dequeued_t.eval())
+ thread.join()
+
+ def testBlockingEnqueueManyToFullQueue(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(4, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+
+ def blocking_enqueue():
+ sess.run(blocking_enqueue_op)
+ thread = self.checkedThread(target=blocking_enqueue)
+ thread.start()
+ # The dequeue ops should run after the blocking_enqueue_op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ for elem in elems:
+ self.assertEqual([elem], dequeued_t.eval())
+ time.sleep(0.01)
+ self.assertEqual([50.0], dequeued_t.eval())
+      self.assertEqual([60.0], dequeued_t.eval())
+      # Join the enqueue thread so that errors raised on it are reported.
+      thread.join()
+
+ def testBlockingEnqueueBeforeClose(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(4, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ blocking_enqueue_op = q.enqueue((50.0,))
+ close_op = q.close()
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+
+ def blocking_enqueue():
+ # Expect the operation to succeed once the dequeue op runs.
+ sess.run(blocking_enqueue_op)
+ enqueue_thread = self.checkedThread(target=blocking_enqueue)
+ enqueue_thread.start()
+
+ # The close_op should run after the blocking_enqueue_op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+
+ def close():
+ sess.run(close_op)
+ close_thread = self.checkedThread(target=close)
+ close_thread.start()
+
+ # The dequeue will unblock both threads.
+ self.assertEqual(10.0, dequeued_t.eval())
+ enqueue_thread.join()
+ close_thread.join()
+
+ for elem in [20.0, 30.0, 40.0, 50.0]:
+ self.assertEqual(elem, dequeued_t.eval())
+ self.assertEqual(0, q.size().eval())
+
+ def testBlockingEnqueueManyBeforeClose(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(4, tf.float32)
+ elems = [10.0, 20.0, 30.0]
+ enqueue_op = q.enqueue_many((elems,))
+ blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
+ close_op = q.close()
+ dequeued_t = q.dequeue()
+ enqueue_op.run()
+
+ def blocking_enqueue():
+ sess.run(blocking_enqueue_op)
+ enqueue_thread = self.checkedThread(target=blocking_enqueue)
+ enqueue_thread.start()
+
+ # The close_op should run after the blocking_enqueue_op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+
+ def close():
+ sess.run(close_op)
+ close_thread = self.checkedThread(target=close)
+ close_thread.start()
+
+ # The dequeue will unblock both threads.
+ self.assertEqual(10.0, dequeued_t.eval())
+ enqueue_thread.join()
+ close_thread.join()
+ for elem in [20.0, 30.0, 50.0, 60.0]:
+ self.assertEqual(elem, dequeued_t.eval())
+
+ def testDoesNotLoseValue(self):
+ with self.test_session():
+ q = tf.FIFOQueue(1, tf.float32)
+ enqueue_op = q.enqueue((10.0,))
+ size_t = q.size()
+
+ enqueue_op.run()
+ for _ in range(500):
+ self.assertEqual(size_t.eval(), [1])
+
+ def testSharedQueueSameSession(self):
+ with self.test_session():
+ q1 = tf.FIFOQueue(
+ 1, tf.float32, shared_name="shared_queue")
+ q1.enqueue((10.0,)).run()
+
+ q2 = tf.FIFOQueue(
+ 1, tf.float32, shared_name="shared_queue")
+
+ q1_size_t = q1.size()
+ q2_size_t = q2.size()
+
+ self.assertEqual(q1_size_t.eval(), [1])
+ self.assertEqual(q2_size_t.eval(), [1])
+
+ self.assertEqual(q2.dequeue().eval(), [10.0])
+
+ self.assertEqual(q1_size_t.eval(), [0])
+ self.assertEqual(q2_size_t.eval(), [0])
+
+ q2.enqueue((20.0,)).run()
+
+ self.assertEqual(q1_size_t.eval(), [1])
+ self.assertEqual(q2_size_t.eval(), [1])
+
+ self.assertEqual(q1.dequeue().eval(), [20.0])
+
+ self.assertEqual(q1_size_t.eval(), [0])
+ self.assertEqual(q2_size_t.eval(), [0])
+
+ def testIncompatibleSharedQueueErrors(self):
+ with self.test_session():
+ q_a_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_a")
+ q_a_2 = tf.FIFOQueue(15, tf.float32, shared_name="q_a")
+ q_a_1.queue_ref.eval()
+ with self.assertRaisesOpError("capacity"):
+ q_a_2.queue_ref.eval()
+
+ q_b_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_b")
+ q_b_2 = tf.FIFOQueue(10, tf.int32, shared_name="q_b")
+ q_b_1.queue_ref.eval()
+ with self.assertRaisesOpError("component types"):
+ q_b_2.queue_ref.eval()
+
+ q_c_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_c")
+ q_c_2 = tf.FIFOQueue(
+ 10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
+ q_c_1.queue_ref.eval()
+ with self.assertRaisesOpError("component shapes"):
+ q_c_2.queue_ref.eval()
+
+ q_d_1 = tf.FIFOQueue(
+ 10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
+ q_d_2 = tf.FIFOQueue(10, tf.float32, shared_name="q_d")
+ q_d_1.queue_ref.eval()
+ with self.assertRaisesOpError("component shapes"):
+ q_d_2.queue_ref.eval()
+
+ q_e_1 = tf.FIFOQueue(
+ 10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
+ q_e_2 = tf.FIFOQueue(
+ 10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
+ q_e_1.queue_ref.eval()
+ with self.assertRaisesOpError("component shapes"):
+ q_e_2.queue_ref.eval()
+
+ q_f_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_f")
+ q_f_2 = tf.FIFOQueue(
+ 10, (tf.float32, tf.int32), shared_name="q_f")
+ q_f_1.queue_ref.eval()
+ with self.assertRaisesOpError("component types"):
+ q_f_2.queue_ref.eval()
+
+ def testSelectQueue(self):
+ with self.test_session():
+ num_queues = 10
+ qlist = list()
+ for _ in xrange(num_queues):
+ qlist.append(tf.FIFOQueue(10, tf.float32))
+ # Enqueue/Dequeue into a dynamically selected queue
+ for _ in xrange(20):
+ index = np.random.randint(num_queues)
+ q = tf.FIFOQueue.from_list(index, qlist)
+ q.enqueue((10.,)).run()
+ self.assertEqual(q.dequeue().eval(), 10.0)
+
+ def testSelectQueueOutOfRange(self):
+ with self.test_session():
+ q1 = tf.FIFOQueue(10, tf.float32)
+ q2 = tf.FIFOQueue(15, tf.float32)
+ enq_q = tf.FIFOQueue.from_list(3, [q1, q2])
+ with self.assertRaisesOpError("Index must be in the range"):
+ enq_q.dequeue().eval()
+
+ def _blockingDequeue(self, sess, dequeue_op):
+ with self.assertRaisesOpError("Dequeue operation was cancelled"):
+ sess.run(dequeue_op)
+
+ def _blockingDequeueMany(self, sess, dequeue_many_op):
+ with self.assertRaisesOpError("Dequeue operation was cancelled"):
+ sess.run(dequeue_many_op)
+
+ def _blockingEnqueue(self, sess, enqueue_op):
+ with self.assertRaisesOpError("Enqueue operation was cancelled"):
+ sess.run(enqueue_op)
+
+ def _blockingEnqueueMany(self, sess, enqueue_many_op):
+ with self.assertRaisesOpError("Enqueue operation was cancelled"):
+ sess.run(enqueue_many_op)
+
+ def testResetOfBlockingOperation(self):
+ with self.test_session() as sess:
+ q_empty = tf.FIFOQueue(5, tf.float32, ())
+ dequeue_op = q_empty.dequeue()
+ dequeue_many_op = q_empty.dequeue_many(1)
+
+ q_full = tf.FIFOQueue(5, tf.float32)
+ sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
+ enqueue_op = q_full.enqueue((6.0,))
+ enqueue_many_op = q_full.enqueue_many(([6.0],))
+
+ threads = [
+ self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
+ self.checkedThread(self._blockingDequeueMany, args=(sess,
+ dequeue_many_op)),
+ self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
+ self.checkedThread(self._blockingEnqueueMany, args=(sess,
+ enqueue_many_op))]
+ for t in threads:
+ t.start()
+ time.sleep(0.1)
+ sess.close() # Will cancel the blocked operations.
+ for t in threads:
+ t.join()
+
+ def testBigEnqueueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(5, tf.int32, ((),))
+ elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ enq = q.enqueue_many((elem,))
+ deq = q.dequeue()
+ size_op = q.size()
+
+ enq_done = []
+ def blocking_enqueue():
+ enq_done.append(False)
+ # This will fill the queue and then block until enough dequeues happen.
+ sess.run(enq)
+ enq_done.append(True)
+ thread = self.checkedThread(target=blocking_enqueue)
+ thread.start()
+
+ # The enqueue should start and then block.
+ results = []
+ results.append(deq.eval()) # Will only complete after the enqueue starts.
+ self.assertEqual(len(enq_done), 1)
+ self.assertEqual(sess.run(size_op), 5)
+
+ for _ in range(3):
+ results.append(deq.eval())
+
+ time.sleep(0.1)
+ self.assertEqual(len(enq_done), 1)
+ self.assertEqual(sess.run(size_op), 5)
+
+ # This dequeue will unblock the thread.
+ results.append(deq.eval())
+ time.sleep(0.1)
+ self.assertEqual(len(enq_done), 2)
+ thread.join()
+
+ for i in range(5):
+ self.assertEqual(size_op.eval(), 5 - i)
+ results.append(deq.eval())
+ self.assertEqual(size_op.eval(), 5 - i - 1)
+
+ self.assertAllEqual(elem, results)
+
+ def testBigDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.FIFOQueue(2, tf.int32, ((),))
+ elem = range(4)
+ enq_list = [q.enqueue((e,)) for e in elem]
+ deq = q.dequeue_many(4)
+
+ results = []
+ def blocking_dequeue():
+ # Will only complete after 4 enqueues complete.
+ results.extend(sess.run(deq))
+ thread = self.checkedThread(target=blocking_dequeue)
+ thread.start()
+ # The dequeue should start and then block.
+ for enq in enq_list:
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ self.assertEqual(len(results), 0)
+ sess.run(enq)
+
+      # Enough elements have been enqueued to unblock the dequeue.
+ thread.join()
+ self.assertAllEqual(elem, results)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/gather_op_test.py b/tensorflow/python/kernel_tests/gather_op_test.py
new file mode 100644
index 0000000000..39e97531d2
--- /dev/null
+++ b/tensorflow/python/kernel_tests/gather_op_test.py
@@ -0,0 +1,71 @@
+"""Tests for tensorflow.ops.tf.gather."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class GatherTest(tf.test.TestCase):
+
+ def testScalar1D(self):
+ with self.test_session():
+ params = tf.constant([0, 1, 2, 3, 7, 5])
+ indices = tf.constant(4)
+ gather_t = tf.gather(params, indices)
+ gather_val = gather_t.eval()
+ self.assertAllEqual(7, gather_val)
+ self.assertEqual([], gather_t.get_shape())
+
+ def testScalar2D(self):
+ with self.test_session():
+ params = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
+ [9, 10, 11], [12, 13, 14]])
+ indices = tf.constant(2)
+ gather_t = tf.gather(params, indices)
+ gather_val = gather_t.eval()
+ self.assertAllEqual([6, 7, 8], gather_val)
+ self.assertEqual([3], gather_t.get_shape())
+
+ def testSimpleTwoD32(self):
+ with self.test_session():
+ params = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
+ [9, 10, 11], [12, 13, 14]])
+ indices = tf.constant([0, 4, 0, 2])
+ gather_t = tf.gather(params, indices)
+ gather_val = gather_t.eval()
+ self.assertAllEqual([[0, 1, 2], [12, 13, 14], [0, 1, 2], [6, 7, 8]],
+ gather_val)
+ self.assertEqual([4, 3], gather_t.get_shape())
+
+ def testHigherRank(self):
+ np.random.seed(1)
+ shape = (4, 3, 2)
+ params = np.random.randn(*shape)
+ indices = np.random.randint(shape[0], size=15).reshape(3, 5)
+ with self.test_session():
+ tf_params = tf.constant(params)
+ tf_indices = tf.constant(indices)
+ gather = tf.gather(tf_params, tf_indices)
+ self.assertAllEqual(params[indices], gather.eval())
+ self.assertEqual(indices.shape + params.shape[1:], gather.get_shape())
+ # Test gradients
+ gather_grad = np.random.randn(*gather.get_shape().as_list())
+ params_grad, indices_grad = tf.gradients(gather, [tf_params, tf_indices],
+ gather_grad)
+ self.assertEqual(indices_grad, None)
+ self.assertEqual(type(params_grad), tf.IndexedSlices)
+ params_grad = tf.convert_to_tensor(params_grad)
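+      # The gradient of gather is a scatter-add: each slice of gather_grad is
+      # added into the params row it was gathered from, so rows gathered more
+      # than once accumulate their gradients.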
+ correct_params_grad = np.zeros(shape)
+ for i, g in zip(indices.ravel(), gather_grad.reshape((15,) + shape[1:])):
+ correct_params_grad[i] += g
+ self.assertAllEqual(correct_params_grad, params_grad.eval())
+
+ def testUnknownIndices(self):
+ params = tf.constant([[0, 1, 2]])
+ indices = tf.placeholder(tf.int32)
+ gather_t = tf.gather(params, indices)
+ self.assertEqual(None, gather_t.get_shape())
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/gradient_checker.py b/tensorflow/python/kernel_tests/gradient_checker.py
new file mode 100644
index 0000000000..fe74768986
--- /dev/null
+++ b/tensorflow/python/kernel_tests/gradient_checker.py
@@ -0,0 +1,251 @@
+"""Gradient checker for any ops, graphs.
+
+The gradient checker verifies numerically that an op/graph properly
+computes the gradients
+"""
+import tensorflow.python.platform
+
+import numpy as np
+
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import types
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import constant_op
+from tensorflow.python.ops import gradients
+from tensorflow.python.platform import logging
+
+
+def _Product(t):
+ if isinstance(t, int):
+ return t
+ else:
+ y = 1
+ for x in t:
+ y *= x
+ return y
+
+
+def _ComputeTheoreticalJacobian(x, x_shape, x_data, dy, dy_shape, dx):
+ """Computes the theoretical Jacobian for dy/dx.
+
+ Computes the theoretical Jacobian using the ops generated by
+ ComputeGradient().
+
+ Args:
+ x: the tensor "x".
+ x_shape: the dimensions of x as a tuple or an array of ints.
+    x_data: a numpy array as the input data for x.
+ dy: the tensor "dy".
+ dy_shape: the dimensions of dy as a tuple or an array of ints.
+ dx: Tensor or IndexedSlices representing dx
+
+ Returns:
+ A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
+ and "dy_size" columns where "x_size" is the number of elements in x and
+ "dy_size" is the number of elements in dy.
+ """
+  # To compute the Jacobian, we treat x and y as one-dimensional vectors.
+ x_size = _Product(x_shape)
+ x_val_size = _Product(x_shape[1:]) # This is used for sparse gradients
+ dy_size = _Product(dy_shape)
+
+ jacobian = np.zeros((x_size, dy_size), dtype=x_data.dtype)
+  # For each entry of dy, we set it to 1 and everything else to 0, then
+  # compute the backprop -- this gives us one column of the Jacobian matrix.
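+  # As a small illustration (not executed here): for an elementwise op such
+  # as y = 2 * x with x of shape [3], feeding dy = e_col makes backprop
+  # return 2 * e_col, so the assembled Jacobian equals 2 * np.eye(3).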
+ for col in range(0, dy_size):
+ dy_data = np.zeros(dy_shape, dtype=x_data.dtype)
+ dy_data.flat[col] = 1
+ sess = ops.get_default_session()
+ if isinstance(dx, ops.IndexedSlices):
+ backprop_indices, backprop_values = sess.run(
+ [dx.indices, dx.values], feed_dict={x: x_data, dy: dy_data})
+ for i, v in zip(backprop_indices, backprop_values):
+ r_begin = i * x_val_size
+ r_end = r_begin + x_val_size
+ jacobian[r_begin:r_end, col] += v.flat
+ else:
+ assert isinstance(dx, ops.Tensor), "dx = " + str(dx)
+ backprop = sess.run(dx, feed_dict={x: x_data, dy: dy_data})
+ jacobian[:, col] = backprop.reshape(x_size)
+
+ logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian)
+ return jacobian
+
+
+def _ComputeNumericJacobian(x, x_shape, x_data, y, y_shape, delta):
+ """Computes the numeric Jacobian for dy/dx.
+
+  Computes the numeric Jacobian by slightly perturbing the inputs and
+ measuring the differences on the output.
+
+ Args:
+ x: the tensor "x".
+ x_shape: the dimensions of x as a tuple or an array of ints.
+ x_data: a numpy array as the input data for x
+ y: the tensor "y".
+ y_shape: the dimensions of y as a tuple or an array of ints.
+ delta: the amount of perturbation we give to the input
+
+ Returns:
+ A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
+ and "y_size" columns where "x_size" is the number of elements in x and
+ "y_size" is the number of elements in y.
+ """
+
+  # To compute the Jacobian, we treat x and y as one-dimensional vectors.
+ x_size = _Product(x_shape)
+ y_size = _Product(y_shape)
+
+ jacobian = np.zeros((x_size, y_size), dtype=x_data.dtype)
+  # For each entry of x, we slightly perturb it by adding and subtracting
+  # delta, then compute the difference between the outputs. This gives us
+  # one row of the Jacobian matrix.
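+  # Each row is the central difference
+  #   J[row, :] = (f(x + delta * e_row) - f(x - delta * e_row)) / (2 * delta),
+  # whose truncation error is O(delta**2).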
+ for row in range(0, x_size):
+ x_pos = x_data.copy()
+ x_pos.flat[row] += delta
+ y_pos = y.eval(feed_dict={x: x_pos})
+ x_neg = x_data.copy()
+ x_neg.flat[row] -= delta
+ y_neg = y.eval(feed_dict={x: x_neg})
+ diff = (y_pos - y_neg) / (2 * delta)
+ jacobian[row, :] = diff.reshape(y_size)
+
+ logging.vlog(1, "Numeric Jacobian =\n%s", jacobian)
+ return jacobian
+
+
+def _ComputeDxAndDy(x, y, y_shape):
+ """Returns a node to compute gradient of x wrt y."""
+ # We make up a dy so that we can compute the gradients. We don't really use
+ # the value of dy -- we will always feed it. We need to add an identity node
+ # so that we can always feed it properly. Otherwise, for the Add operation,
+ # dx is the same as dy and we cannot fetch the tensor that we are feeding.
+ with x.graph.as_default():
+ dy_orig = constant_op.constant(1.0, shape=y_shape, dtype=y.dtype)
+ dy = array_ops.identity(dy_orig)
+    # We compute the gradient of y with respect to x.
+ grads = gradients.gradients(y, x, dy)
+ assert len(grads) == 1
+ return grads[0], dy_orig
+
+
+def _ComputeGradient(x, x_shape, dx, y, y_shape, dy,
+ x_init_value=None, delta=1e-3):
+ """Computes the theoretical and numerical jacobian."""
+ t = types.as_dtype(x.dtype)
+ allowed_types = [types.float32, types.float64]
+ assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
+ t2 = types.as_dtype(y.dtype)
+ assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name
+
+ if x_init_value is not None:
+ i_shape = list(x_init_value.shape)
+ assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
+ x_shape, i_shape)
+ x_data = x_init_value
+ else:
+ if t == types.float32:
+ dtype = np.float32
+ else:
+ dtype = np.float64
+ x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)
+
+  jacob_t = _ComputeTheoreticalJacobian(x, x_shape, x_data, dy, y_shape, dx)
+ jacob_n = _ComputeNumericJacobian(x, x_shape, x_data, y, y_shape, delta)
+ return jacob_t, jacob_n
+
+
+def _ComputeGradientList(
+ x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None):
+ """Compute gradients for a list of x values."""
+ assert isinstance(x, list)
+ dx, dy = zip(*[_ComputeDxAndDy(xi, y, y_shape) for xi in x])
+
+ if init_targets is not None:
+ assert isinstance(init_targets, (list, tuple))
+ for init in init_targets:
+ init.run()
+ if x_init_value is None:
+ x_init_value = [None] * len(x)
+ ret = [_ComputeGradient(xi, x_shapei, dxi, y, y_shape, dyi,
+ x_init_valuei, delta)
+ for xi, x_shapei, dxi, dyi, x_init_valuei in
+ zip(x, x_shape, dx, dy, x_init_value)]
+ return ret
+
+
+def ComputeGradient(
+ x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None):
+ """Computes and returns the theoretical and numerical Jacobian.
+
+ Args:
+ x: a tensor or list of tensors
+ x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
+ then this is the list of shapes.
+ y: a tensor
+ y_shape: the dimensions of y as a tuple or an array of ints.
+ x_init_value: (optional) a numpy array of the same shape as "x"
+ representing the initial value of x. If x is a list, this should be a list
+      of numpy arrays. If this is None, the function will pick a random tensor
+ as the initial value.
+ delta: (optional) the amount of perturbation.
+ init_targets: list of targets to run to initialize model params.
+ TODO(mrry): remove this argument.
+
+ Returns:
+ Two 2-d numpy arrays representing the theoretical and numerical
+ Jacobian for dy/dx. Each has "x_size" rows and "y_size" columns
+ where "x_size" is the number of elements in x and "y_size" is the
+ number of elements in y. If x is a list, returns a list of two numpy arrays.
+ """
+ if isinstance(x, list):
+ return _ComputeGradientList(x, x_shape, y, y_shape, x_init_value,
+ delta, init_targets)
+ else:
+ if init_targets is not None:
+ assert isinstance(init_targets, (list, tuple))
+ for init in init_targets:
+ init.run()
+ dx, dy = _ComputeDxAndDy(x, y, y_shape)
+ ret = _ComputeGradient(x, x_shape, dx, y, y_shape, dy, x_init_value, delta)
+ return ret
+
+
+def ComputeGradientError(
+ x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None):
+ """Computes the gradient error.
+
+ Computes the maximum error for dy/dx between the computed Jacobian and the
+ numerically estimated Jacobian.
+
+  This function will modify the tensors passed in, as it adds more operations
+  and hence changes the consumers of the operations of the input tensors.
+
+  This function adds operations to the current graph. To compute the error
+ using a particular device, such as a GPU, use the standard methods for
+ setting a device (e.g. using with sess.graph.device() or setting a device
+ function in the session constructor).
+
+ Args:
+ x: a tensor or list of tensors
+ x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
+ then this is the list of shapes.
+ y: a tensor
+ y_shape: the dimensions of y as a tuple or an array of ints.
+ x_init_value: (optional) a numpy array of the same shape as "x"
+ representing the initial value of x. If x is a list, this should be a list
+      of numpy arrays. If this is None, the function will pick a random tensor
+ as the initial value.
+ delta: (optional) the amount of perturbation.
+ init_targets: list of targets to run to initialize model params.
+ TODO(mrry): Remove this argument.
+
+ Returns:
+    The maximum error between the two Jacobians.
+ """
+ grad = ComputeGradient(x, x_shape, y, y_shape, x_init_value,
+ delta, init_targets)
+ if isinstance(grad, tuple):
+ grad = [grad]
+ return max(np.fabs(j_t - j_n).max() for j_t, j_n in grad)
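+
+
+# A minimal usage sketch (illustrative only; the tanh op and the shapes below
+# are examples, not part of this module, and a client would have tensorflow
+# imported as tf):
+#
+#   with tf.Session():
+#     x = tf.constant(1.5, shape=[2, 2], dtype=tf.float64)
+#     y = tf.tanh(x)
+#     err = ComputeGradientError(x, [2, 2], y, [2, 2])
+#     assert err < 1e-4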
diff --git a/tensorflow/python/kernel_tests/gradient_checker_test.py b/tensorflow/python/kernel_tests/gradient_checker_test.py
new file mode 100644
index 0000000000..a844b7c637
--- /dev/null
+++ b/tensorflow/python/kernel_tests/gradient_checker_test.py
@@ -0,0 +1,178 @@
+"""Tests for tensorflow.kernels.gradient_checker."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests.gradient_checker import ComputeGradientError
+
+
+class GradientCheckerTest(tf.test.TestCase):
+
+ def testAddSimple(self):
+ with self.test_session(use_gpu=False):
+ # a test case for Add operation
+ size = (2, 3)
+ x1 = tf.constant(2.0, shape=size, name="x1")
+ x2 = tf.constant(3.0, shape=size, name="x2")
+ y = tf.add(x1, x2, name="y")
+
+ # checking gradients for x1
+ error = ComputeGradientError(x1, size, y, size)
+ tf.logging.info("x1 error = %f", error)
+ assert error < 1e-4
+
+ def testAddSimpleGPU(self):
+ with self.test_session(use_gpu=True):
+ # a test case for Add operation
+ size = (2, 3)
+ x1 = tf.constant(2.0, shape=size, name="x1")
+ x2 = tf.constant(3.0, shape=size, name="x2")
+ y = tf.add(x1, x2, name="y")
+
+ # checking gradients for x1
+ error = ComputeGradientError(x1, size, y, size)
+ tf.logging.info("x1 error = %f", error)
+ assert error < 1e-4
+
+ def testAddCustomized(self):
+ with self.test_session():
+ # a test case for Add operation
+ size = (2, 3)
+ x1 = tf.constant(2.0, shape=size, dtype=tf.float64,
+ name="x1")
+ x2 = tf.constant(3.0, shape=size, dtype=tf.float64,
+ name="x2")
+ y = tf.add(x1, x2, name="y")
+
+      # checking gradients for x2 using a special init_value and delta
+ x_init_value = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
+ error = ComputeGradientError(x2, size, y, size, x_init_value=x_init_value,
+ delta=1e-2)
+ tf.logging.info("x2 error = %f", error)
+ assert error < 1e-10
+
+ def testGather(self):
+ with self.test_session():
+ p_shape = (4, 2)
+ p_size = 8
+ index_values = [1, 3]
+ y_shape = [2, 2]
+ params = tf.constant(np.arange(p_size).astype(np.float),
+ shape=p_shape, name="p")
+ indices = tf.constant(index_values, name="i")
+ y = tf.gather(params, indices, name="y")
+
+ error = ComputeGradientError(params, p_shape, y, y_shape)
+ tf.logging.info("gather error = %f", error)
+ assert error < 1e-4
+
+ def testNestedGather(self):
+ with self.test_session():
+ p_shape = (8, 2)
+ p_size = 16
+ index_values = [1, 3, 5, 6]
+ index_values2 = [0, 2]
+ y2_shape = [2, 2]
+
+ params = tf.constant(np.arange(p_size).astype(np.float),
+ shape=p_shape, name="p")
+ indices = tf.constant(index_values, name="i")
+ y = tf.gather(params, indices, name="y")
+ indices2 = tf.constant(index_values2, name="i2")
+ y2 = tf.gather(y, indices2, name="y2")
+
+ error = ComputeGradientError(params, p_shape, y2, y2_shape)
+ tf.logging.info("nested gather error = %f", error)
+ assert error < 1e-4
+
+
+# Gradient checker for a mini MNIST network.
+def BuildAndTestMiniMNIST(param_index, tag):
+ # Hyperparameters
+ batch = 3
+ inputs = 16
+ features = 32
+ classes = 10
+
+ # Define the parameters
+ inp_data = np.random.random_sample(inputs * batch)
+ hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
+ hidden_bias_data = np.random.random_sample(features)
+ sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
+ sm_bias_data = np.random.random_sample(classes)
+
+ # special care for labels since they need to be normalized per batch
+ label_data = np.random.random(batch * classes).reshape((batch, classes))
+ s = label_data.sum(axis=1)
+ label_data /= s[:, None]
+
+ with tf.Session():
+ # We treat the inputs as "parameters" here
+ inp = tf.constant(inp_data.tolist(), shape=[batch, inputs],
+ dtype=tf.float64, name="inp")
+ hidden_weight = tf.constant(hidden_weight_data.tolist(),
+ shape=[inputs, features],
+ dtype=tf.float64,
+ name="hidden_weight")
+ hidden_bias = tf.constant(hidden_bias_data.tolist(),
+ shape=[features],
+ dtype=tf.float64,
+ name="hidden_bias")
+ softmax_weight = tf.constant(sm_weight_data.tolist(),
+ shape=[features, classes],
+ dtype=tf.float64,
+ name="softmax_weight")
+ softmax_bias = tf.constant(sm_bias_data.tolist(), shape=[classes],
+ dtype=tf.float64,
+ name="softmax_bias")
+
+    # List all the parameters so that we can test them one at a time.
+ all_params = [inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias]
+ param_sizes = [[batch, inputs], # inp
+ [inputs, features], # hidden_weight,
+ [features], # hidden_bias
+ [features, classes], # softmax_weight,
+ [classes]] # softmax_bias
+
+    # Now, build the mini MNIST network.
+ features = tf.nn.relu(tf.nn.xw_plus_b(inp, hidden_weight, hidden_bias),
+ name="features")
+ logits = tf.nn.xw_plus_b(features, softmax_weight, softmax_bias,
+ name="logits")
+ labels = tf.constant(label_data.tolist(),
+ shape=[batch, classes],
+ dtype=tf.float64,
+ name="labels")
+ cost = tf.nn.softmax_cross_entropy_with_logits(logits, labels, name="cost")
+
+ # Test the gradients.
+ err = ComputeGradientError(all_params[param_index],
+ param_sizes[param_index],
+ cost, [batch], delta=1e-5)
+
+ tf.logging.info("Mini MNIST: %s gradient error = %g", tag, err)
+ return err
+
+
+class MiniMNISTTest(tf.test.TestCase):
+
+ def testInputGradient(self):
+ self.assertLess(BuildAndTestMiniMNIST(0, "input"), 1e-8)
+
+ def testHiddenWeightGradient(self):
+ self.assertLess(BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8)
+
+ def testHiddenBiasGradient(self):
+ self.assertLess(BuildAndTestMiniMNIST(2, "hidden_bias"), 1e-8)
+
+ def testSoftmaxWeightGradient(self):
+ self.assertLess(BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8)
+
+ def testSoftmaxBiasGradient(self):
+ self.assertLess(BuildAndTestMiniMNIST(4, "softmax_bias"), 1e-8)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/identity_op_py_test.py b/tensorflow/python/kernel_tests/identity_op_py_test.py
new file mode 100644
index 0000000000..2209cf08ad
--- /dev/null
+++ b/tensorflow/python/kernel_tests/identity_op_py_test.py
@@ -0,0 +1,47 @@
+"""Tests for IdentityOp."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.ops import gen_array_ops
+
+
+class IdentityOpTest(tf.test.TestCase):
+
+ def testInt32_6(self):
+ with self.test_session():
+ value = tf.identity([1, 2, 3, 4, 5, 6]).eval()
+ self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value)
+
+ def testInt32_2_3(self):
+ with self.test_session():
+ inp = tf.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
+ value = tf.identity(inp).eval()
+ self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value)
+
+ def testString(self):
+ with self.test_session():
+ value = tf.identity(["A", "b", "C", "d", "E", "f"]).eval()
+ self.assertAllEqual(["A", "b", "C", "d", "E", "f"], value)
+
+ def testIdentityShape(self):
+ with self.test_session():
+ shape = [2, 3]
+ array_2x3 = [[1, 2, 3], [6, 5, 4]]
+ tensor = tf.constant(array_2x3)
+ self.assertEquals(shape, tensor.get_shape())
+ self.assertEquals(shape, tf.identity(tensor).get_shape())
+ self.assertEquals(shape, tf.identity(array_2x3).get_shape())
+ self.assertEquals(shape, tf.identity(np.array(array_2x3)).get_shape())
+
+ def testRefIdentityShape(self):
+ with self.test_session():
+ shape = [2, 3]
+ tensor = tf.Variable(tf.constant([[1, 2, 3], [6, 5, 4]], dtype=tf.int32))
+ self.assertEquals(shape, tensor.get_shape())
+ self.assertEquals(shape, gen_array_ops._ref_identity(tensor).get_shape())
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/in_topk_op_test.py b/tensorflow/python/kernel_tests/in_topk_op_test.py
new file mode 100644
index 0000000000..d2a51788c4
--- /dev/null
+++ b/tensorflow/python/kernel_tests/in_topk_op_test.py
@@ -0,0 +1,36 @@
+"""Tests for PrecisionOp."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class InTopKTest(tf.test.TestCase):
+
+ def _validateInTopK(self, predictions, target, k, expected):
+ np_ans = np.array(expected)
+ with self.test_session():
+ precision = tf.nn.in_top_k(predictions, target, k)
+ out = precision.eval()
+ self.assertAllClose(np_ans, out)
+ self.assertShapeEqual(np_ans, precision)
+
+ def testInTop1(self):
+ predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
+ target = [3, 1]
+ self._validateInTopK(predictions, target, 1, [True, False])
+
+ def testInTop2(self):
+ predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
+ target = [0, 2]
+ self._validateInTopK(predictions, target, 2, [False, True])
+
+ def testInTop2Tie(self):
+ # Class 2 and 3 tie for 2nd, so both are considered in top 2.
+ predictions = [[0.1, 0.3, 0.2, 0.2], [0.1, 0.3, 0.2, 0.2]]
+ target = [2, 3]
+ self._validateInTopK(predictions, target, 2, [True, True])
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/init_ops_test.py b/tensorflow/python/kernel_tests/init_ops_test.py
new file mode 100644
index 0000000000..4ce6081b7b
--- /dev/null
+++ b/tensorflow/python/kernel_tests/init_ops_test.py
@@ -0,0 +1,252 @@
+"""Tests for tensorflow.ops.ops."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.framework import random_seed
+from tensorflow.python.ops import init_ops
+
+
+# Returns True iff the two initializers produce the same tensor to
+# within a tiny tolerance.
+def identicaltest(tc, init1, init2, use_gpu):
+ """Tests if two initializations are identical to within tiny tolerances.
+
+ Args:
+ tc: An instance of TensorFlowTestCase.
+ init1: An Initializer that generates a tensor of a given shape
+ init2: An Initializer that generates a tensor of a given shape
+ use_gpu: Use gpu if true.
+ Returns:
+ True or False as determined by test.
+ """
+ num = 100
+ with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
+ t1 = init1([num]).eval()
+ with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
+ t2 = init2([num]).eval()
+ return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
+
+
+def duplicated_initializer(tc, init, use_gpu, graph_seed):
+ """Tests duplicated random initializer within the same graph.
+
+  This test generates two random kernels from the same initializer in the same
+  graph, and checks whether the results are close enough. Even given the same
+  global seed, two different instances of random kernels should generate
+  different results.
+
+ Args:
+ tc: An instance of TensorFlowTestCase.
+ init: An Initializer that generates a tensor of a given shape
+ use_gpu: Use gpu if true.
+ graph_seed: A graph-level seed to use.
+ Returns:
+ True or False as determined by test.
+ """
+ num = 100
+ with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
+ random_seed.set_random_seed(graph_seed)
+ t1 = init([num]).eval()
+ t2 = init([num]).eval()
+ return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
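+
+
+# Sketch of the seeding contract exercised by duplicated_initializer (per the
+# documented tf.set_random_seed behavior; not asserted here): with only a
+# graph-level seed set, each random op derives its own distinct op-level seed,
+# e.g.
+#   tf.set_random_seed(1)
+#   a = tf.random_normal([1])  # one sequence
+#   b = tf.random_normal([1])  # a different sequence, so a != b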
+
+
+def _init_sampler(tc, init, num, use_gpu):
+ """Returns a func to generate a random tensor of shape [num].
+
+ Args:
+ tc: An instance of TensorFlowTestCase.
+ init: An Initializer that generates a tensor of a given shape
+ num: Size of 1D tensor to create.
+ use_gpu: Use gpu if true.
+ Returns:
+ Function to generate a random tensor.
+ """
+ def func():
+ with tc.test_session(use_gpu=use_gpu):
+ return init([num]).eval()
+ return func
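+
+
+# Note: the function returned by _init_sampler closes over (tc, init, num,
+# use_gpu) and builds and evaluates a fresh init op on each call; whether
+# successive calls agree depends on how init is seeded.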
+
+
+class RandomNormalInitializationTest(tf.test.TestCase):
+
+ def testInitializerIdentical(self):
+ for use_gpu in [False, True]:
+ init1 = tf.random_normal_initializer(0.0, 1.0, seed=1)
+ init2 = tf.random_normal_initializer(0.0, 1.0, seed=1)
+ self.assertTrue(identicaltest(self, init1, init2, use_gpu))
+
+ def testInitializerDifferent(self):
+ for use_gpu in [False, True]:
+ init1 = tf.random_normal_initializer(0.0, 1.0, seed=1)
+ init2 = tf.random_normal_initializer(0.0, 1.0, seed=2)
+ self.assertFalse(identicaltest(self, init1, init2, use_gpu=use_gpu))
+
+ def testDuplicatedInitializer(self):
+ for use_gpu in [False, True]:
+ init = tf.random_normal_initializer(0.0, 1.0)
+ self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
+
+
+class TruncatedNormalInitializationTest(tf.test.TestCase):
+
+ def testInitializerIdentical(self):
+ for use_gpu in [False, True]:
+ init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1)
+ init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=1)
+ self.assertTrue(identicaltest(self, init1, init2, use_gpu))
+
+ def testInitializerDifferent(self):
+ for use_gpu in [False, True]:
+ init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1)
+ init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=2)
+ self.assertFalse(identicaltest(self, init1, init2, use_gpu=use_gpu))
+
+ def testDuplicatedInitializer(self):
+ for use_gpu in [False, True]:
+ init = tf.truncated_normal_initializer(0.0, 1.0)
+ self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
+
+
+class RandomUniformInitializationTest(tf.test.TestCase):
+
+ def testInitializerIdentical(self):
+ for use_gpu in [False, True]:
+ init1 = tf.random_uniform_initializer(0.0, 1.0, seed=1)
+ init2 = tf.random_uniform_initializer(0.0, 1.0, seed=1)
+ self.assertTrue(identicaltest(self, init1, init2, use_gpu))
+
+ def testInitializerDifferent(self):
+ for use_gpu in [False, True]:
+ init1 = tf.random_uniform_initializer(0.0, 1.0, seed=1)
+ init2 = tf.random_uniform_initializer(0.0, 1.0, seed=2)
+ self.assertFalse(identicaltest(self, init1, init2, use_gpu))
+
+ def testDuplicatedInitializer(self):
+ for use_gpu in [False, True]:
+ init = tf.random_uniform_initializer(0.0, 1.0)
+ self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
+
+
+class UniformUnitScalingInitializationTest(tf.test.TestCase):
+
+ def testInitializerIdentical(self):
+ for use_gpu in [False, True]:
+ init1 = tf.uniform_unit_scaling_initializer(seed=1)
+ init2 = tf.uniform_unit_scaling_initializer(seed=1)
+ self.assertTrue(identicaltest(self, init1, init2, use_gpu))
+ init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
+ init4 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
+ self.assertTrue(identicaltest(self, init3, init4, use_gpu))
+
+ def testInitializerDifferent(self):
+ for use_gpu in [False, True]:
+ init1 = tf.uniform_unit_scaling_initializer(seed=1)
+ init2 = tf.uniform_unit_scaling_initializer(seed=2)
+ init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
+ self.assertFalse(identicaltest(self, init1, init2, use_gpu))
+ self.assertFalse(identicaltest(self, init1, init3, use_gpu))
+ self.assertFalse(identicaltest(self, init2, init3, use_gpu))
+
+ def testDuplicatedInitializer(self):
+ for use_gpu in [False, True]:
+ init = tf.uniform_unit_scaling_initializer()
+ self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
+
+
+class RandomWalkShapeTest(tf.test.TestCase):
+
+ def testRandomWalk(self):
+ # Fully known shape.
+ rnd1 = init_ops._random_walk([1, 2], tf.nn.relu)
+ self.assertEqual([1, 2], rnd1.get_shape())
+
+
+# TODO(vrv): move to sequence_ops_test?
+class RangeTest(tf.test.TestCase):
+
+ def _Range(self, start, limit, delta):
+ with self.test_session():
+ tf_ans = tf.range(start, limit, delta, name="range")
+ self.assertEqual([len(range(start, limit, delta))], tf_ans.get_shape())
+ return tf_ans.eval()
+
+ def testBasic(self):
+ self.assertTrue(np.array_equal(
+ self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4])))
+ self.assertTrue(np.array_equal(
+ self._Range(0, 5, 2), np.array([0, 2, 4])))
+ self.assertTrue(np.array_equal(
+ self._Range(0, 6, 2), np.array([0, 2, 4])))
+ self.assertTrue(np.array_equal(
+ self._Range(13, 32, 7), np.array([13, 20, 27])))
+ self.assertTrue(np.array_equal(
+ self._Range(100, 500, 100), np.array([100, 200, 300, 400])))
+ self.assertEqual(tf.range(0, 5, 1).dtype, tf.int32)
+
+ def testEmpty(self):
+ for start in 0, 5:
+ self.assertTrue(np.array_equal(self._Range(start, start, 1), []))
+
+
+# TODO(vrv): move to sequence_ops_test?
+class LinSpaceTest(tf.test.TestCase):
+
+ def _LinSpace(self, start, stop, num):
+ with self.test_session():
+ tf_ans = tf.linspace(start, stop, num, name="linspace")
+ self.assertEqual([num], tf_ans.get_shape())
+ return tf_ans.eval()
+
+ def testPositive(self):
+ self.assertArrayNear(self._LinSpace(1., 5., 1), np.array([1.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(1., 5., 2), np.array([1., 5.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(1., 5., 3),
+ np.array([1., 3., 5.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(1., 5., 4),
+ np.array([1., 7. / 3., 11. / 3., 5.]), 1e-5)
+
+ def testNegative(self):
+ self.assertArrayNear(self._LinSpace(-1., -5., 1), np.array([-1.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(-1., -5., 2),
+ np.array([-1., -5.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(-1., -5., 3),
+ np.array([-1., -3., -5.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(-1., -5., 4),
+ np.array([-1., -7. / 3., -11. / 3., -5.]), 1e-5)
+
+ def testNegativeToPositive(self):
+ self.assertArrayNear(self._LinSpace(-1., 5., 1), np.array([-1.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(-1., 5., 2), np.array([-1., 5.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(-1., 5., 3),
+ np.array([-1., 2., 5.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(-1., 5., 4),
+ np.array([-1., 1., 3., 5.]), 1e-5)
+
+ def testPoint(self):
+ self.assertArrayNear(self._LinSpace(5., 5., 1), np.array([5.]), 1e-5)
+ self.assertArrayNear(self._LinSpace(5., 5., 2), np.array([5.] * 2), 1e-5)
+ self.assertArrayNear(self._LinSpace(5., 5., 3), np.array([5.] * 3), 1e-5)
+ self.assertArrayNear(self._LinSpace(5., 5., 4), np.array([5.] * 4), 1e-5)
+
+
+class DeviceTest(tf.test.TestCase):
+
+ def testNoDevice(self):
+ with tf.Graph().as_default():
+ var = tf.Variable([[1.0, 1.0]])
+ self.assertEqual(None, var.device)
+ self.assertEqual(None, var.initializer.device)
+
+ def testDevice(self):
+ with tf.Graph().as_default():
+ with tf.device("/job:ps"):
+ var = tf.Variable([[1.0, 1.0]])
+ self.assertEqual("/job:ps", var.device)
+ self.assertEqual("/job:ps", var.initializer.device)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/io_ops_test.py b/tensorflow/python/kernel_tests/io_ops_test.py
new file mode 100644
index 0000000000..2eb8bdd26f
--- /dev/null
+++ b/tensorflow/python/kernel_tests/io_ops_test.py
@@ -0,0 +1,53 @@
+"""Tests for tensorflow.python.ops.io_ops."""
+# -*- coding: utf-8 -*-
+
+import tempfile
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+
+class IoOpsTest(tf.test.TestCase):
+
+ def testReadFile(self):
+ cases = ['', 'Some contents', 'Неки садржаји на српском']
+ for contents in cases:
+ temp = tempfile.NamedTemporaryFile(prefix='ReadFileTest')
+      with open(temp.name, 'wb') as f:
+        f.write(contents)
+ with self.test_session():
+ read = tf.read_file(temp.name)
+ self.assertEqual([], read.get_shape())
+ self.assertEqual(read.eval(), contents)
+
+ def _subset(self, files, indices):
+ return set([files[i].name for i in range(len(files)) if i in indices])
+
+ def testMatchingFiles(self):
+ cases = ['ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH',
+ 'AB4DEF.GH', 'ABDEF.GH', 'XYZ']
+ files = [tempfile.NamedTemporaryFile(prefix=c) for c in cases]
+
+ with self.test_session():
+ # Test exact match without wildcards.
+ for f in files:
+ self.assertEqual(tf.matching_files(f.name).eval(), f.name)
+
+ # We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
+ pos = files[0].name.find(cases[0])
+ pattern = files[0].name[:pos] + 'AB%sDEF.GH*'
+
+ self.assertEqual(set(tf.matching_files(pattern % 'z').eval()),
+ self._subset(files, [1]))
+ self.assertEqual(set(tf.matching_files(pattern % '?').eval()),
+ self._subset(files, [0, 1, 3, 4]))
+ self.assertEqual(set(tf.matching_files(pattern % '*').eval()),
+ self._subset(files, [0, 1, 2, 3, 4, 5]))
+ self.assertEqual(set(tf.matching_files(pattern % '[cxz]').eval()),
+ self._subset(files, [0, 1]))
+ self.assertEqual(set(tf.matching_files(pattern % '[0-9]').eval()),
+ self._subset(files, [3, 4]))
+
+
+if __name__ == '__main__':
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/linalg_grad_test.py b/tensorflow/python/kernel_tests/linalg_grad_test.py
new file mode 100644
index 0000000000..50e5328c3e
--- /dev/null
+++ b/tensorflow/python/kernel_tests/linalg_grad_test.py
@@ -0,0 +1,49 @@
+"""Tests for tensorflow.ops.linalg_grad."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class MatrixInverseGradientTest(tf.test.TestCase):
+  pass  # Filled in below
+
+
+def _GetMatrixInverseGradientTest(dtype, shape):
+ def Test(self):
+ with self.test_session():
+ np.random.seed(1)
+ m = np.random.uniform(low=1.0, high=100.0, size=np.prod(shape)).reshape(
+ shape).astype(dtype)
+ a = tf.constant(m)
+ epsilon = np.finfo(dtype).eps
+ # Optimal stepsize for central difference is O(epsilon^{1/3}).
+ delta = epsilon ** (1.0 / 3.0)
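+      # Why epsilon ** (1/3) (a sketch): the central difference has truncation
+      # error O(delta ** 2) and floating-point cancellation error
+      # O(epsilon / delta); balancing the two gives delta ~ epsilon ** (1/3).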
+ tol = 1e-3
+
+ if len(shape) == 2:
+ ainv = tf.matrix_inverse(a)
+ else:
+ ainv = tf.batch_matrix_inverse(a)
+
+ theoretical, numerical = gc.ComputeGradient(a, shape, ainv, shape,
+ delta=delta)
+ self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
+ return Test
+
+
+if __name__ == "__main__":
+ # TODO(rmlarsen,irving): Reenable float32 once tolerances are fixed
+ # The test used to loop over (np.float, np.double), both of which are float64.
+ for dtype in np.float64,:
+ for size in 2, 3, 5, 10:
+ # We skip the rank 4, size 10 case: it is slow and conceptually covered
+ # by the other cases.
+ for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
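+        # (size < 10) is a bool coerced to 0 or 1, so the rank-4 (3, 2) batch
+        # shape is appended only when size < 10.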
+ shape = extra + (size, size)
+ name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
+ setattr(MatrixInverseGradientTest, 'testMatrixInverseGradient_' + name,
+ _GetMatrixInverseGradientTest(dtype, shape))
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/listdiff_op_test.py b/tensorflow/python/kernel_tests/listdiff_op_test.py
new file mode 100644
index 0000000000..b4607be1fb
--- /dev/null
+++ b/tensorflow/python/kernel_tests/listdiff_op_test.py
@@ -0,0 +1,117 @@
+"""Tests for tensorflow.kernels.listdiff_op."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class ListDiffTest(tf.test.TestCase):
+
+ def _testListDiff(self, x, y, out, idx, dtype=np.int32):
+ x = np.array(x, dtype=dtype)
+ y = np.array(y, dtype=dtype)
+ out = np.array(out, dtype=dtype)
+ idx = np.array(idx, dtype=dtype)
+
+ with self.test_session() as sess:
+ x_tensor = tf.convert_to_tensor(x)
+ y_tensor = tf.convert_to_tensor(y)
+ out_tensor, idx_tensor = tf.listdiff(x_tensor, y_tensor)
+ tf_out, tf_idx = sess.run([out_tensor, idx_tensor])
+
+ self.assertAllEqual(tf_out, out)
+ self.assertAllEqual(tf_idx, idx)
+ self.assertEqual(1, out_tensor.get_shape().ndims)
+ self.assertEqual(1, idx_tensor.get_shape().ndims)
+
+ def testBasic1(self):
+ x = [1, 2, 3, 4]
+ y = [1, 2]
+ out = [3, 4]
+ idx = [2, 3]
+ for t in [np.int32, np.int64, np.float, np.double]:
+ self._testListDiff(x, y, out, idx, dtype=t)
+
+ def testBasic2(self):
+ x = [1, 2, 3, 4]
+ y = [2]
+ out = [1, 3, 4]
+ idx = [0, 2, 3]
+ for t in [np.int32, np.int64, np.float, np.double]:
+ self._testListDiff(x, y, out, idx, dtype=t)
+
+ def testBasic3(self):
+ x = [1, 4, 3, 2]
+ y = [4, 2]
+ out = [1, 3]
+ idx = [0, 2]
+ for t in [np.int32, np.int64, np.float, np.double]:
+ self._testListDiff(x, y, out, idx, dtype=t)
+
+ def testDuplicates(self):
+ x = [1, 2, 4, 3, 2, 3, 3, 1]
+ y = [4, 2]
+ out = [1, 3, 3, 3, 1]
+ idx = [0, 3, 5, 6, 7]
+ for t in [np.int32, np.int64, np.float, np.double]:
+ self._testListDiff(x, y, out, idx, dtype=t)
+
+ def testRandom(self):
+ num_random_tests = 10
+ int_low = -7
+ int_high = 8
+ max_size = 50
+ for _ in xrange(num_random_tests):
+ x_size = np.random.randint(max_size + 1)
+ x = np.random.randint(int_low, int_high, size=x_size)
+ y_size = np.random.randint(max_size + 1)
+ y = np.random.randint(int_low, int_high, size=y_size)
+ out_idx = [(entry, pos) for pos, entry in enumerate(x) if entry not in y]
+ if out_idx:
+ out_idx = map(list, zip(*out_idx))
+ out = out_idx[0]
+ idx = out_idx[1]
+ else:
+ out = []
+ idx = []
+ for t in [np.int32, np.int64, np.float, np.double]:
+ self._testListDiff(x, y, out, idx, dtype=t)
+
+ def testInt32FullyOverlapping(self):
+ x = [1, 2, 3, 4]
+ y = [1, 2, 3, 4]
+ out = []
+ idx = []
+ self._testListDiff(x, y, out, idx)
+
+ def testInt32NonOverlapping(self):
+ x = [1, 2, 3, 4]
+ y = [5, 6]
+ out = x
+ idx = range(len(x))
+ self._testListDiff(x, y, out, idx)
+
+ def testInt32EmptyX(self):
+ x = []
+ y = [1, 2]
+ out = []
+ idx = []
+ self._testListDiff(x, y, out, idx)
+
+ def testInt32EmptyY(self):
+ x = [1, 2, 3, 4]
+ y = []
+ out = x
+ idx = range(len(x))
+ self._testListDiff(x, y, out, idx)
+
+ def testInt32EmptyXY(self):
+ x = []
+ y = []
+ out = []
+ idx = []
+ self._testListDiff(x, y, out, idx)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/logging_ops_test.py b/tensorflow/python/kernel_tests/logging_ops_test.py
new file mode 100644
index 0000000000..18ca441b23
--- /dev/null
+++ b/tensorflow/python/kernel_tests/logging_ops_test.py
@@ -0,0 +1,50 @@
+"""Tests for tensorflow.kernels.logging_ops."""
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+
+class LoggingOpsTest(tf.test.TestCase):
+
+ def testAssertDivideByZero(self):
+ with self.test_session() as sess:
+ epsilon = tf.convert_to_tensor(1e-20)
+ x = tf.convert_to_tensor(0.0)
+ y = tf.convert_to_tensor(1.0)
+ z = tf.convert_to_tensor(2.0)
+ # assert(epsilon < y)
+ # z / y
+ with sess.graph.control_dependencies(
+ [tf.Assert(tf.less(epsilon, y), ["Divide-by-zero"])]):
+ out = tf.div(z, y)
+ self.assertAllEqual(2.0, out.eval())
+ # assert(epsilon < x)
+ # z / x
+ #
+ # This tests printing out multiple tensors
+ with sess.graph.control_dependencies(
+ [tf.Assert(tf.less(epsilon, x),
+ ["Divide-by-zero", "less than x"])]):
+ out = tf.div(z, x)
+ with self.assertRaisesOpError("less than x"):
+ out.eval()
+
+
+class PrintGradientTest(tf.test.TestCase):
+
+ def testPrintGradient(self):
+ with self.test_session():
+ inp = tf.constant(2.0, shape=[100, 32], name="in")
+ w = tf.constant(4.0, shape=[10, 100], name="w")
+ wx = tf.matmul(w, inp, name="wx")
+ wx_print = tf.Print(wx, [w, w, w])
+ wx_grad = tf.gradients(wx, w)[0]
+ wx_print_grad = tf.gradients(wx_print, w)[0]
+ wxg = wx_grad.eval()
+ wxpg = wx_print_grad.eval()
+ self.assertAllEqual(wxg, wxpg)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/lookup_table_op_test.py b/tensorflow/python/kernel_tests/lookup_table_op_test.py
new file mode 100644
index 0000000000..cd170876e6
--- /dev/null
+++ b/tensorflow/python/kernel_tests/lookup_table_op_test.py
@@ -0,0 +1,195 @@
+"""Tests for lookup table ops from tf."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class HashTableOpTest(tf.test.TestCase):
+
+ def testHashTable(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = -1
+ table = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+
+ # Initialize with keys and values tensors.
+ keys = tf.constant(['brain', 'salad', 'surgery'])
+ values = tf.constant([0, 1, 2], tf.int64)
+ init = table.initialize_from(keys, values)
+ init.run()
+ self.assertAllEqual(3, table.size().eval())
+
+ input_string = tf.constant(['brain', 'salad', 'tank'])
+ output = table.lookup(input_string)
+
+ result = output.eval()
+ self.assertAllEqual([0, 1, -1], result)
+
+ def testHashTableInitWithPythonArrays(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = -1
+ table = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+ # Empty table.
+ self.assertAllEqual(0, table.size().eval())
+
+ # Initialize with keys and values tensors.
+ keys = ['brain', 'salad', 'surgery']
+ values = [0, 1, 2]
+ init = table.initialize_from(keys, values)
+ init.run()
+ self.assertAllEqual(3, table.size().eval())
+
+ input_string = tf.constant(['brain', 'salad', 'tank'])
+ output = table.lookup(input_string)
+
+ result = output.eval()
+ self.assertAllEqual([0, 1, -1], result)
+
+ def testHashTableInitWithNumPyArrays(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = -1
+ table = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+
+ # Initialize with keys and values tensors.
+ keys = np.array(['brain', 'salad', 'surgery'], dtype=np.str)
+ values = np.array([0, 1, 2], dtype=np.int64)
+ init = table.initialize_from(keys, values)
+ init.run()
+ self.assertAllEqual(3, table.size().eval())
+
+ input_string = tf.constant(['brain', 'salad', 'tank'])
+ output = table.lookup(input_string)
+
+ result = output.eval()
+ self.assertAllEqual([0, 1, -1], result)
+
+ def testMultipleHashTables(self):
+ with self.test_session() as sess:
+ shared_name = ''
+ default_val = -1
+ table1 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+ table2 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+ table3 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+
+ keys = tf.constant(['brain', 'salad', 'surgery'])
+ values = tf.constant([0, 1, 2], tf.int64)
+ table1.initialize_from(keys, values)
+ table2.initialize_from(keys, values)
+ table3.initialize_from(keys, values)
+
+ tf.initialize_all_tables().run()
+ self.assertAllEqual(3, table1.size().eval())
+ self.assertAllEqual(3, table2.size().eval())
+ self.assertAllEqual(3, table3.size().eval())
+
+ input_string = tf.constant(['brain', 'salad', 'tank'])
+ output1 = table1.lookup(input_string)
+ output2 = table2.lookup(input_string)
+ output3 = table3.lookup(input_string)
+
+ out1, out2, out3 = sess.run([output1, output2, output3])
+ self.assertAllEqual([0, 1, -1], out1)
+ self.assertAllEqual([0, 1, -1], out2)
+ self.assertAllEqual([0, 1, -1], out3)
+
+ def testHashTableWithTensorDefault(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = tf.constant(-1, tf.int64)
+ table = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+
+ # Initialize with keys and values tensors.
+ keys = tf.constant(['brain', 'salad', 'surgery'])
+ values = tf.constant([0, 1, 2], tf.int64)
+ init = table.initialize_from(keys, values)
+ init.run()
+
+ input_string = tf.constant(['brain', 'salad', 'tank'])
+ output = table.lookup(input_string)
+
+ result = output.eval()
+ self.assertAllEqual([0, 1, -1], result)
+
+ def testSignatureMismatch(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = -1
+ table = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+
+ # Initialize with keys and values tensors.
+ keys = tf.constant(['brain', 'salad', 'surgery'])
+ values = tf.constant([0, 1, 2], tf.int64)
+ init = table.initialize_from(keys, values)
+ init.run()
+
+ input_string = tf.constant([1, 2, 3], tf.int64)
+ with self.assertRaises(TypeError):
+ table.lookup(input_string)
+
+ with self.assertRaises(TypeError):
+ tf.HashTable(tf.string, tf.int64, 'UNK', shared_name)
+
+ def testDTypes(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = -1
+ with self.assertRaises(TypeError):
+ tf.HashTable([tf.string], tf.string, default_val, shared_name)
+
+ def testNotInitialized(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = -1
+ table = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+
+ input_string = tf.constant(['brain', 'salad', 'surgery'])
+ output = table.lookup(input_string)
+
+ with self.assertRaisesOpError('Table not initialized'):
+ output.eval()
+
+ def testInitializeTwice(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = -1
+ table = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+
+ # Initialize with keys and values tensors.
+ keys = tf.constant(['brain', 'salad', 'surgery'])
+ values = tf.constant([0, 1, 2], tf.int64)
+ init = table.initialize_from(keys, values)
+ init.run()
+
+ with self.assertRaisesOpError('Table already initialized'):
+ init.run()
+
+ def testInitializationWithInvalidDimensions(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = -1
+ table = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+
+ # Initialize with keys and values tensors.
+ keys = tf.constant(['brain', 'salad', 'surgery'])
+ values = tf.constant([0, 1, 2, 3, 4], tf.int64)
+ with self.assertRaises(ValueError):
+ table.initialize_from(keys, values)
+
+ def testInitializationWithInvalidDataTypes(self):
+ with self.test_session():
+ shared_name = ''
+ default_val = -1
+ table = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
+
+ # Initialize with keys and values tensors.
+ keys = [0, 1, 2]
+ values = ['brain', 'salad', 'surgery']
+ with self.assertRaises(TypeError):
+ table.initialize_from(keys, values)
+
+
+if __name__ == '__main__':
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/lrn_op_test.py b/tensorflow/python/kernel_tests/lrn_op_test.py
new file mode 100644
index 0000000000..7a3bb67938
--- /dev/null
+++ b/tensorflow/python/kernel_tests/lrn_op_test.py
@@ -0,0 +1,101 @@
+"""Tests for local response normalization."""
+import copy
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests.gradient_checker import ComputeGradientError
+
+
+class LRNOpTest(tf.test.TestCase):
+
+ def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0,
+ alpha=1.0, beta=0.5):
+ """Compute expected result."""
+ output = copy.deepcopy(input_image)
+ batch_size = input_image.shape[0]
+ rows = input_image.shape[1]
+ cols = input_image.shape[2]
+ depth = input_image.shape[3]
+ for b in range(batch_size):
+ for r in range(rows):
+ for c in range(cols):
+ for d in range(depth):
+ begin = max(0, d - lrn_depth_radius)
+ end = min(depth, d + lrn_depth_radius + 1)
+ patch = input_image[b, r, c, begin:end]
+ output[b, r, c, d] /= (
+ np.power(bias + alpha * np.sum(patch * patch), beta))
+ return output
+
+ def _RunAndVerify(self):
+ with self.test_session():
+ # random shape
+ shape = np.random.randint(1, 16, size=4)
+ # Make depth at least 2 to make it meaningful
+ shape[3] += 1
+ p = tf.placeholder(tf.float32, shape=shape)
+ # random depth_radius, bias, alpha, beta
+ lrn_depth_radius = np.random.randint(1, shape[3])
+ bias = 1.0 + np.random.rand()
+ alpha = 2.0 * np.random.rand()
+ beta = 2.0 * np.random.rand()
+ lrn_t = tf.nn.local_response_normalization(
+ p, name="lrn", depth_radius=lrn_depth_radius, bias=bias,
+ alpha=alpha, beta=beta)
+ params = {p: np.random.rand(*shape).astype("f")}
+ result = lrn_t.eval(feed_dict=params)
+ expected = self._LRN(
+ params[p], lrn_depth_radius=lrn_depth_radius, bias=bias, alpha=alpha,
+ beta=beta)
+ self.assertTrue(np.amax(np.abs(result - expected)) < 1e-4)
+ self.assertShapeEqual(expected, lrn_t)
+
+ def testCompute(self):
+ for _ in range(2):
+ self._RunAndVerify()
+
+ def testGradientsZeroInput(self):
+ with self.test_session():
+ shape = [4, 4, 4, 4]
+ p = tf.placeholder(tf.float32, shape=shape)
+ inp_array = np.zeros(shape).astype("f")
+ lrn_op = tf.nn.local_response_normalization(p, 2, 1.0, 0.0,
+ 1.0, name="lrn")
+ grad = tf.gradients([lrn_op], [p])[0]
+ params = {p: inp_array}
+ r = grad.eval(feed_dict=params)
+ expected = np.ones(shape).astype("f")
+ self.assertAllClose(r, expected)
+ self.assertShapeEqual(expected, grad)
+
+ def _RunAndVerifyGradients(self):
+ with self.test_session():
+ # random shape
+ shape = np.random.randint(1, 5, size=4)
+ # Make depth at least 2 to make it meaningful
+ shape[3] += 1
+ # random depth_radius, bias, alpha, beta
+ lrn_depth_radius = np.random.randint(1, shape[3])
+ bias = 1.0 + np.random.rand()
+ alpha = 1.0 * np.random.rand()
+ beta = 1.0 * np.random.rand()
+ inp_array = np.random.rand(*shape).astype("f")
+ inp = tf.constant(list(inp_array.ravel(order="C")), shape=shape)
+ lrn_op = tf.nn.local_response_normalization(
+ inp, name="lrn", depth_radius=lrn_depth_radius, bias=bias,
+ alpha=alpha, beta=beta)
+ err = ComputeGradientError(inp, shape, lrn_op, shape)
+ print "LRN Gradient error ", err
+ self.assertLess(err, 1e-4)
+
+ def testGradients(self):
+ for _ in range(2):
+ self._RunAndVerifyGradients()
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/matmul_op_test.py b/tensorflow/python/kernel_tests/matmul_op_test.py
new file mode 100644
index 0000000000..5aeb736b9b
--- /dev/null
+++ b/tensorflow/python/kernel_tests/matmul_op_test.py
@@ -0,0 +1,206 @@
+"""Tests for tensorflow.ops.math_ops.matmul."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class MatMulTest(tf.test.TestCase):
+
+ def _testCpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
+ x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
+ y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
+ np_ans = x_mat * y_mat
+ with self.test_session(use_gpu=False):
+ tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
+ self.assertAllClose(np_ans, tf_ans)
+ self.assertAllEqual(np_ans.shape, tf_ans.shape)
+
+ def _testGpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
+ x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
+ y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
+ np_ans = x_mat * y_mat
+ with self.test_session(use_gpu=True):
+ tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
+ self.assertAllClose(np_ans, tf_ans)
+ self.assertAllEqual(np_ans.shape, tf_ans.shape)
+
+ def _randMatrix(self, rows, cols, dtype):
+ if dtype is np.complex64:
+ real = self._randMatrix(rows, cols, np.float32)
+ imag = self._randMatrix(rows, cols, np.float32)
+ return real + np.complex(0, 1) * imag
+ else:
+ return np.random.uniform(low=1.0, high=100.0, size=rows * cols).reshape(
+ [rows, cols]).astype(dtype)
+
+  # Basic test: a [4, 1] column times a [1, 2] row:
+  #   [[1],
+  #    [2],
+  #    [3],
+  #    [4]]  *  [[1, 2]]
+ def testFloatBasic(self):
+ x = np.arange(1., 5.).reshape([4, 1]).astype(np.float32)
+ y = np.arange(1., 3.).reshape([1, 2]).astype(np.float32)
+ self._testCpuMatmul(x, y)
+ self._testGpuMatmul(x, y)
+
+ def testDoubleBasic(self):
+ x = np.arange(1., 5.).reshape([4, 1]).astype(np.float64)
+ y = np.arange(1., 3.).reshape([1, 2]).astype(np.float64)
+ self._testCpuMatmul(x, y)
+
+ def testInt32Basic(self):
+ x = np.arange(1., 5.).reshape([4, 1]).astype(np.int32)
+ y = np.arange(1., 3.).reshape([1, 2]).astype(np.int32)
+ self._testCpuMatmul(x, y)
+
+ def testSComplexBasic(self):
+ x = np.arange(1., 5.).reshape([4, 1]).astype(np.complex64)
+ y = np.arange(1., 3.).reshape([1, 2]).astype(np.complex64)
+ self._testCpuMatmul(x, y)
+
+  # Tests with randomly sized matrices.
+ def testFloatRandom(self):
+ for _ in range(10):
+ n, k, m = np.random.randint(1, 100, size=3)
+ x = self._randMatrix(n, k, np.float32)
+ y = self._randMatrix(k, m, np.float32)
+ self._testCpuMatmul(x, y)
+ self._testGpuMatmul(x, y)
+
+ def testDoubleRandom(self):
+ for _ in range(10):
+ n, k, m = np.random.randint(1, 100, size=3)
+ x = self._randMatrix(n, k, np.float64)
+ y = self._randMatrix(k, m, np.float64)
+ self._testCpuMatmul(x, y)
+
+ def testInt32Random(self):
+ for _ in range(10):
+ n, k, m = np.random.randint(1, 100, size=3)
+ x = self._randMatrix(n, k, np.int32)
+ y = self._randMatrix(k, m, np.int32)
+ self._testCpuMatmul(x, y)
+
+ def testSComplexRandom(self):
+ for _ in range(10):
+ n, k, m = np.random.randint(1, 100, size=3)
+ x = self._randMatrix(n, k, np.complex64)
+ y = self._randMatrix(k, m, np.complex64)
+ self._testCpuMatmul(x, y)
+
+ # Test the cases that transpose the matrices before multiplying.
+ # NOTE(keveman): The cases where only one of the inputs is
+ # transposed are covered by tf.matmul's gradient function.
+ def testFloatRandomTransposeBoth(self):
+ for _ in range(10):
+ n, k, m = np.random.randint(1, 100, size=3)
+ x = self._randMatrix(k, n, np.float32)
+ y = self._randMatrix(m, k, np.float32)
+ self._testCpuMatmul(x, y, True, True)
+ self._testGpuMatmul(x, y, True, True)
+
+  def testDoubleRandomTransposeBoth(self):
+ for _ in range(10):
+ n, k, m = np.random.randint(1, 100, size=3)
+ x = self._randMatrix(k, n, np.float64)
+ y = self._randMatrix(m, k, np.float64)
+ self._testCpuMatmul(x, y, True, True)
+
+ def testMatMul_OutEmpty_A(self):
+ n, k, m = 0, 8, 3
+ x = self._randMatrix(n, k, np.float32)
+ y = self._randMatrix(k, m, np.float32)
+ self._testCpuMatmul(x, y)
+ self._testGpuMatmul(x, y)
+
+ def testMatMul_OutEmpty_B(self):
+ n, k, m = 3, 8, 0
+ x = self._randMatrix(n, k, np.float32)
+ y = self._randMatrix(k, m, np.float32)
+ self._testCpuMatmul(x, y)
+ self._testGpuMatmul(x, y)
+
+ def testMatMul_Inputs_Empty(self):
+ n, k, m = 3, 0, 4
+ x = self._randMatrix(n, k, np.float32)
+ y = self._randMatrix(k, m, np.float32)
+ self._testCpuMatmul(x, y)
+ self._testGpuMatmul(x, y)
+
+
+# TODO(zhifengc): Figures out how to test matmul gradients on GPU.
+class MatMulGradientTest(tf.test.TestCase):
+
+ def testGradientInput0(self):
+ with self.test_session(use_gpu=False):
+ x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
+ dtype=tf.float64, name="x")
+ y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
+ shape=[2, 4], dtype=tf.float64, name="y")
+ m = tf.matmul(x, y, name="matmul")
+ err = gc.ComputeGradientError(x, [3, 2], m, [3, 4])
+ print "matmul input0 gradient err = ", err
+ self.assertLess(err, 1e-10)
+
+ def testGradientInput1(self):
+ with self.test_session(use_gpu=False):
+ x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
+ dtype=tf.float64, name="x")
+ y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
+ shape=[2, 4], dtype=tf.float64, name="y")
+ m = tf.matmul(x, y, name="matmul")
+ err = gc.ComputeGradientError(y, [2, 4], m, [3, 4])
+ print "matmul input1 gradient err = ", err
+ self.assertLess(err, 1e-10)
+
+ def _VerifyInput0(self, transpose_a, transpose_b):
+ shape_x = [3, 2]
+ shape_y = [2, 4]
+ if transpose_a:
+ shape_x = list(reversed(shape_x))
+ if transpose_b:
+ shape_y = list(reversed(shape_y))
+ with self.test_session(use_gpu=False):
+ x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
+ dtype=tf.float64, name="x")
+ y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
+ shape=shape_y, dtype=tf.float64, name="y")
+ m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
+ err = gc.ComputeGradientError(x, shape_x, m, [3, 4])
+ print "matmul input0 gradient err = ", err
+ self.assertLess(err, 1e-10)
+
+ def testGradientInput0WithTranspose(self):
+ self._VerifyInput0(transpose_a=True, transpose_b=False)
+ self._VerifyInput0(transpose_a=False, transpose_b=True)
+ self._VerifyInput0(transpose_a=True, transpose_b=True)
+
+ def _VerifyInput1(self, transpose_a, transpose_b):
+ shape_x = [3, 2]
+ shape_y = [2, 4]
+ if transpose_a:
+ shape_x = list(reversed(shape_x))
+ if transpose_b:
+ shape_y = list(reversed(shape_y))
+ with self.test_session(use_gpu=False):
+ x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
+ dtype=tf.float64, name="x")
+ y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
+ shape=shape_y, dtype=tf.float64, name="y")
+ m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
+ err = gc.ComputeGradientError(y, shape_y, m, [3, 4])
+ print "matmul input1 gradient err = ", err
+ self.assertLess(err, 1e-10)
+
+ def testGradientInput1WithTranspose(self):
+ self._VerifyInput1(transpose_a=True, transpose_b=False)
+ self._VerifyInput1(transpose_a=False, transpose_b=True)
+ self._VerifyInput1(transpose_a=True, transpose_b=True)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/matrix_inverse_op_test.py b/tensorflow/python/kernel_tests/matrix_inverse_op_test.py
new file mode 100644
index 0000000000..541a937185
--- /dev/null
+++ b/tensorflow/python/kernel_tests/matrix_inverse_op_test.py
@@ -0,0 +1,79 @@
+"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class InverseOpTest(tf.test.TestCase):
+
+ def _verifyInverse(self, x):
+ for np_type in [np.float32, np.float64]:
+ y = x.astype(np_type)
+ with self.test_session():
+ # Verify that x^{-1} * x == Identity matrix.
+ if x.ndim == 2:
+ inv = tf.matrix_inverse(y)
+ tf_ans = tf.matmul(inv, y)
+ np_ans = np.identity(y.shape[-1])
+ else:
+ inv = tf.batch_matrix_inverse(y)
+ tf_ans = tf.batch_matmul(inv, y)
+ tiling = list(y.shape)
+ tiling[-2:] = [1, 1]
+ np_ans = np.tile(np.identity(y.shape[-1]), tiling)
+ out = tf_ans.eval()
+ self.assertAllClose(np_ans, out)
+ self.assertShapeEqual(y, tf_ans)
+
+ def testBasic(self):
+ # 2x2 matrices
+ matrix1 = np.array([[1., 2.], [3., 4.]])
+ matrix2 = np.array([[1., 3.], [3., 5.]])
+ self._verifyInverse(matrix1)
+ self._verifyInverse(matrix2)
+ # A multidimensional batch of 2x2 matrices
+ matrix_batch = np.concatenate([np.expand_dims(matrix1, 0),
+ np.expand_dims(matrix2, 0)])
+ matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
+ self._verifyInverse(matrix_batch)
+
+ def testNonSquareMatrix(self):
+    # Attempting to invert a non-square matrix should raise an error.
+ with self.assertRaises(ValueError):
+ tf.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))
+
+ def testWrongDimensions(self):
+ # The input to the inverse should be at least a 2-dimensional tensor.
+ tensor3 = tf.constant([1., 2.])
+ with self.assertRaises(ValueError):
+ tf.matrix_inverse(tensor3)
+
+ def testNotInvertible(self):
+ # The input should be invertible.
+ with self.test_session():
+ with self.assertRaisesOpError("Input is not invertible."):
+ # All rows of the matrix below add to zero
+ tensor3 = tf.constant([[1., 0., -1.], [-1., 1., 0.], [0., -1., 1.]])
+ tf.matrix_inverse(tensor3).eval()
+
+ with self.test_session():
+ with self.assertRaisesOpError("Input is not invertible."):
+ # Determinant of the matrix below is zero
+ tensor3 = tf.constant([[1., 1.], [1., 1.]])
+ tf.matrix_inverse(tensor3).eval()
+
+ with self.test_session():
+ with self.assertRaisesOpError("Input is not invertible."):
+ # Determinant of the matrix below is zero
+ tensor3 = tf.constant([[np.inf, 1.], [1., 1.]])
+ tf.matrix_inverse(tensor3).eval()
+
+ def testEmpty(self):
+ self._verifyInverse(np.empty([0, 2, 2]))
+ self._verifyInverse(np.empty([2, 0, 0]))
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/numerics_test.py b/tensorflow/python/kernel_tests/numerics_test.py
new file mode 100644
index 0000000000..8cb2fe2f8b
--- /dev/null
+++ b/tensorflow/python/kernel_tests/numerics_test.py
@@ -0,0 +1,91 @@
+"""Tests for tensorflow.ops.numerics."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.ops import control_flow_ops
+
+
+class VerifyTensorAllFiniteTest(tf.test.TestCase):
+
+ def testVerifyTensorAllFiniteSucceeds(self):
+ x_shape = [5, 4]
+ x = np.random.random_sample(x_shape).astype(np.float32)
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ t = tf.constant(x, shape=x_shape, dtype=tf.float32)
+ t_verified = tf.verify_tensor_all_finite(t, "Input is not a number.")
+ self.assertAllClose(x, t_verified.eval())
+
+ def testVerifyTensorAllFiniteFails(self):
+ x_shape = [5, 4]
+ x = np.random.random_sample(x_shape).astype(np.float32)
+ my_msg = "Input is not a number."
+
+ # Test NaN.
+ x[0] = np.nan
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ with self.assertRaisesOpError(my_msg):
+ t = tf.constant(x, shape=x_shape, dtype=tf.float32)
+ t_verified = tf.verify_tensor_all_finite(t, my_msg)
+ t_verified.eval()
+
+ # Test Inf.
+ x[0] = np.inf
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ with self.assertRaisesOpError(my_msg):
+ t = tf.constant(x, shape=x_shape, dtype=tf.float32)
+ t_verified = tf.verify_tensor_all_finite(t, my_msg)
+ t_verified.eval()
+
+
+class NumericsTest(tf.test.TestCase):
+
+ def testInf(self):
+ for use_gpu in [True, False]:
+ with self.test_session(use_gpu=use_gpu, graph=tf.Graph()):
+ t1 = tf.constant(1.0)
+ t2 = tf.constant(0.0)
+ a = tf.div(t1, t2)
+ check = tf.add_check_numerics_ops()
+ a = control_flow_ops.with_dependencies([check], a)
+ with self.assertRaisesOpError("Inf"):
+ a.eval()
+
+ def testNaN(self):
+ for use_gpu in [True, False]:
+ with self.test_session(use_gpu=use_gpu, graph=tf.Graph()):
+ t1 = tf.constant(0.0)
+ t2 = tf.constant(0.0)
+ a = tf.div(t1, t2)
+ check = tf.add_check_numerics_ops()
+ a = control_flow_ops.with_dependencies([check], a)
+ with self.assertRaisesOpError("NaN"):
+ a.eval()
+
+ def testBoth(self):
+ for use_gpu in [True, False]:
+ with self.test_session(use_gpu=use_gpu, graph=tf.Graph()):
+ t1 = tf.constant([1.0, 0.0])
+ t2 = tf.constant([0.0, 0.0])
+ a = tf.div(t1, t2)
+ check = tf.add_check_numerics_ops()
+ a = control_flow_ops.with_dependencies([check], a)
+ with self.assertRaisesOpError("Inf and NaN"):
+ a.eval()
+
+ def testPassThrough(self):
+ for use_gpu in [True, False]:
+ with self.test_session(use_gpu=use_gpu, graph=tf.Graph()):
+ t1 = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
+ checked = tf.check_numerics(t1, message="pass through test")
+ value = checked.eval()
+ self.assertAllEqual(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), value)
+ self.assertEqual([2, 3], checked.get_shape())
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/pack_op_test.py b/tensorflow/python/kernel_tests/pack_op_test.py
new file mode 100644
index 0000000000..5f3b1823c0
--- /dev/null
+++ b/tensorflow/python/kernel_tests/pack_op_test.py
@@ -0,0 +1,47 @@
+"""Functional tests for Pack Op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker
+
+
+class PackOpTest(tf.test.TestCase):
+
+ def testSimple(self):
+ np.random.seed(7)
+ for use_gpu in False, True:
+ with self.test_session(use_gpu=use_gpu):
+ for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+ data = np.random.randn(*shape)
+ # Convert [data[0], data[1], ...] separately to tensorflow
+ xs = map(tf.constant, data)
+ # Pack back into a single tensorflow tensor
+ c = tf.pack(xs)
+ self.assertAllEqual(c.eval(), data)
+
+ def testGradients(self):
+ np.random.seed(7)
+ for use_gpu in False, True:
+ for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+ data = np.random.randn(*shape)
+ shapes = [shape[1:]] * shape[0]
+ with self.test_session(use_gpu=use_gpu):
+ xs = map(tf.constant, data)
+ c = tf.pack(xs)
+ err = gradient_checker.ComputeGradientError(xs, shapes, c, shape)
+ self.assertLess(err, 1e-6)
+
+ def testZeroSize(self):
+ # Verify that pack doesn't crash for zero size inputs
+ for use_gpu in False, True:
+ with self.test_session(use_gpu=use_gpu):
+        for shape in (0,), (3, 0), (0, 3):
+ x = np.zeros((2,) + shape)
+ p = tf.pack(list(x)).eval()
+ self.assertAllEqual(p, x)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/pad_op_test.py b/tensorflow/python/kernel_tests/pad_op_test.py
new file mode 100644
index 0000000000..113aeb1ccf
--- /dev/null
+++ b/tensorflow/python/kernel_tests/pad_op_test.py
@@ -0,0 +1,140 @@
+"""Tests for tensorflow.ops.nn_ops.Pad."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class PadOpTest(tf.test.TestCase):
+
+ def _npPad(self, inp, paddings):
+ return np.pad(inp, paddings, mode="constant")
+
+ def testNpPad(self):
+ self.assertAllClose(
+ np.array([[0, 0, 0, 0, 0, 0],
+ [0, 3, 3, 0, 0, 0],
+ [0, 4, 4, 0, 0, 0],
+ [0, 5, 5, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]]),
+ self._npPad(np.array([[3, 3], [4, 4], [5, 5]]), [[1, 2], [1, 3]]))
+
+ def _testPad(self, np_inputs, paddings, use_gpu=False):
+ np_val = self._npPad(np_inputs, paddings)
+ with self.test_session(use_gpu=use_gpu):
+ tf_val = tf.pad(np_inputs, paddings)
+ out = tf_val.eval()
+ self.assertAllClose(np_val, out)
+ self.assertShapeEqual(np_val, tf_val)
+
+ def _testGradient(self, x, a):
+ with self.test_session():
+ inx = tf.convert_to_tensor(x)
+ xs = list(x.shape)
+ ina = tf.convert_to_tensor(a)
+ y = tf.pad(inx, ina)
+      # The expected shape of y:
+ ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
+ jacob_t, jacob_n = gc.ComputeGradient(inx, xs, y, ys, x_init_value=x)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
+
+ def _testAll(self, np_inputs, paddings):
+ self._testPad(np_inputs, paddings, use_gpu=False)
+ self._testPad(np_inputs, paddings, use_gpu=True)
+ if np_inputs.dtype == np.float32:
+ self._testGradient(np_inputs, paddings)
+
+ def testInputDims(self):
+ with self.test_session():
+ with self.assertRaises(ValueError):
+ tf.pad(
+ tf.reshape([1, 2], shape=[1, 2, 1, 1, 1, 1]),
+ tf.reshape([1, 2], shape=[1, 2]))
+
+ def testPaddingsDim(self):
+ with self.test_session():
+ with self.assertRaises(ValueError):
+ tf.pad(
+ tf.reshape([1, 2], shape=[1, 2]),
+ tf.reshape([1, 2], shape=[2]))
+
+ def testPaddingsDim2(self):
+ with self.test_session():
+ with self.assertRaises(ValueError):
+ tf.pad(
+ tf.reshape([1, 2], shape=[1, 2]),
+ tf.reshape([1, 2], shape=[2, 1]))
+
+ def testPaddingsDim3(self):
+ with self.test_session():
+ with self.assertRaises(ValueError):
+ tf.pad(
+ tf.reshape([1, 2], shape=[1, 2]),
+ tf.reshape([1, 2], shape=[1, 2]))
+
+ def testPaddingsDim4(self):
+ with self.test_session():
+ with self.assertRaises(ValueError):
+ tf.pad(
+ tf.reshape([1, 2], shape=[1, 2]),
+ tf.reshape([1, 2, 3, 4, 5, 6], shape=[3, 2]))
+
+ def testPaddingsNonNegative(self):
+ with self.test_session():
+ with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+ tf.pad(
+ tf.constant([1], shape=[1]),
+ tf.constant([-1, 0], shape=[1, 2]))
+
+ def testPaddingsNonNegative2(self):
+ with self.test_session():
+ with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+ tf.pad(
+ tf.constant([1], shape=[1]),
+ tf.constant([-1, 0], shape=[1, 2]))
+
+ def testIntTypes(self):
+ # TODO(mdevin): Figure out why the padding tests do not work on GPU
+ # for int types and rank > 2.
+ for t in [np.int32, np.int64]:
+ self._testPad((np.random.rand(4, 3, 3) * 100).astype(t),
+ [[1, 0], [2, 3], [0, 2]])
+
+ def testFloatTypes(self):
+ for t in [np.float32, np.float64]:
+ self._testAll(np.random.rand(2, 5).astype(t),
+ [[1, 0], [2, 0]])
+
+ def testShapeFunctionEdgeCases(self):
+ # Unknown paddings shape.
+ inp = tf.constant(0.0, shape=[4, 4, 4, 4])
+ padded = tf.pad(inp, tf.placeholder(tf.int32))
+ self.assertEqual([None, None, None, None], padded.get_shape().as_list())
+
+ # Unknown input shape.
+ inp = tf.placeholder(tf.float32)
+ padded = tf.pad(inp, [[2, 2], [2, 2]])
+ self.assertEqual([None, None], padded.get_shape().as_list())
+
+ # Unknown input and paddings shape.
+ inp = tf.placeholder(tf.float32)
+ padded = tf.pad(inp, tf.placeholder(tf.int32))
+ self.assertAllEqual(None, padded.get_shape().ndims)
+
+ def testScalars(self):
+ paddings = np.zeros((0, 2), dtype=np.int32)
+ inp = np.asarray(7)
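+    # A scalar input has rank 0, so paddings has shape [0, 2] and tf.pad is
+    # expected to return the scalar unchanged.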
+ for use_gpu in False, True:
+ with self.test_session(use_gpu=use_gpu):
+ tf_val = tf.pad(inp, paddings)
+ out = tf_val.eval()
+ self.assertAllClose(inp, out)
+ self.assertShapeEqual(inp, tf_val)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/parsing_ops_test.py b/tensorflow/python/kernel_tests/parsing_ops_test.py
new file mode 100644
index 0000000000..fba7c705fb
--- /dev/null
+++ b/tensorflow/python/kernel_tests/parsing_ops_test.py
@@ -0,0 +1,414 @@
+"""Tests for tensorflow.ops.parsing_ops."""
+
+import itertools
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+# Helpers for creating Example objects
+example = tf.train.Example
+feature = tf.train.Feature
+features = lambda d: tf.train.Features(feature=d)
+bytes_feature = lambda v: feature(bytes_list=tf.train.BytesList(value=v))
+int64_feature = lambda v: feature(int64_list=tf.train.Int64List(value=v))
+float_feature = lambda v: feature(float_list=tf.train.FloatList(value=v))
+
+
+def flatten(list_of_lists):
+ """Flatten one level of nesting."""
+ return itertools.chain.from_iterable(list_of_lists)
+
+
+def flatten_values_tensors_or_sparse(tensors_list):
+ """Flatten each SparseTensor object into 3 Tensors for session.run()."""
+ return list(flatten([[v.indices, v.values, v.shape]
+ if isinstance(v, tf.SparseTensor) else [v]
+ for v in tensors_list]))
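+
+
+# For example (with hypothetical tensors sp and t), [sp, t] flattens to
+# [sp.indices, sp.values, sp.shape, t], so session.run() receives only plain
+# Tensors.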
+
+
+def _compare_output_to_expected(
+ tester, dict_tensors, expected_tensors, flat_output):
+ tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
+
+ i = 0 # Index into the flattened output of session.run()
+ for k, v in dict_tensors.iteritems():
+ expected_v = expected_tensors[k]
+ tf.logging.info("Comparing key: %s", k)
+ if isinstance(v, tf.SparseTensor):
+ # Three outputs for SparseTensor : indices, values, shape.
+ tester.assertEqual([k, 3], [k, len(expected_v)])
+ tester.assertAllEqual(flat_output[i], expected_v[0])
+ tester.assertAllEqual(flat_output[i + 1], expected_v[1])
+ tester.assertAllEqual(flat_output[i + 2], expected_v[2])
+ i += 3
+ else:
+ # One output for standard Tensor.
+ tester.assertAllEqual(flat_output[i], expected_v)
+ i += 1
+
+
+class ParseExampleTest(tf.test.TestCase):
+
+ def _test(self, kwargs, expected_values=None, expected_err_re=None):
+ with self.test_session() as sess:
+ # Pull out some keys to check shape inference
+ serialized = kwargs["serialized"]
+ dense_keys = kwargs["dense_keys"] if "dense_keys" in kwargs else []
+ sparse_keys = kwargs["sparse_keys"] if "sparse_keys" in kwargs else []
+ dense_shapes = kwargs["dense_shapes"] if "dense_shapes" in kwargs else []
+
+ # Returns dict w/ Tensors and SparseTensors
+ out = tf.parse_example(**kwargs)
+
+ # Check shapes; if serialized is a Tensor we need its size to
+ # properly check.
+ batch_size = (
+ serialized.eval().size if isinstance(serialized, tf.Tensor)
+ else np.asarray(serialized).size)
+ self.assertEqual(len(dense_keys), len(dense_shapes))
+ for (k, s) in zip(dense_keys, dense_shapes):
+ self.assertEqual(tuple(out[k].get_shape().as_list()), (batch_size,) + s)
+ for k in sparse_keys:
+ self.assertEqual(tuple(out[k].indices.get_shape().as_list()), (None, 2))
+ self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
+ self.assertEqual(tuple(out[k].shape.get_shape().as_list()), (2,))
+
+ # Check values
+ result = flatten_values_tensors_or_sparse(out.values()) # flatten values
+ if expected_err_re is None:
+ tf_result = sess.run(result)
+ _compare_output_to_expected(self, out, expected_values, tf_result)
+ else:
+ with self.assertRaisesOpError(expected_err_re):
+ sess.run(result)
+
+ def testEmptySerializedWithAllDefaults(self):
+ dense_keys = ["a", "b", "c"]
+ dense_shapes = [(1, 3), (3, 3), (2,)]
+ dense_types = [tf.int64, tf.string, tf.float32]
+ dense_defaults = {
+ "a": [0, 42, 0],
+ "b": np.random.rand(3, 3).astype(np.str),
+ "c": np.random.rand(2).astype(np.float32),
+ }
+
+ expected_st_a = ( # indices, values, shape
+ np.empty((0, 2), dtype=np.int64), # indices
+ np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
+ np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
+
+ expected_output = {
+ "st_a": expected_st_a,
+ "a": np.array(2 * [[dense_defaults["a"]]]),
+ "b": np.array(2 * [dense_defaults["b"]]),
+ "c": np.array(2 * [dense_defaults["c"]]),
+ }
+
+ self._test(
+ {
+ "names": np.empty((0,), dtype=np.str),
+ # empty serialized input Examples
+ "serialized": tf.convert_to_tensor(["", ""]),
+ "dense_defaults": dense_defaults,
+ "sparse_keys": ["st_a"],
+ "sparse_types": [tf.int64],
+ "dense_keys": dense_keys,
+ "dense_types": dense_types,
+ "dense_shapes": dense_shapes
+ }, expected_output)
+
+ def testEmptySerializedWithoutDefaultsShouldFail(self):
+ dense_shapes = [(1, 3), (3, 3), (2,)]
+ dense_defaults = {
+ "a": [0, 42, 0],
+ "b": np.random.rand(3, 3).astype(np.str),
+ # Feature "c" is missing, since there's gaps it will cause failure.
+ }
+ self._test(
+ {
+ "serialized": ["", ""], # empty serialized input Examples
+ "names": ["in1", "in2"],
+ "dense_defaults": dense_defaults,
+ "sparse_keys": ["st_a"],
+ "sparse_types": [tf.int64],
+ "dense_keys": ["a", "b", "c"],
+ "dense_types": [tf.int64, tf.string, tf.float32],
+ "dense_shapes": dense_shapes
+ },
+ expected_err_re="Name: in1, Feature: c is required")
+
+ def testDenseNotMatchingShapeShouldFail(self):
+ dense_shapes = [(1, 3)]
+ dense_defaults = {
+ # no default!
+ }
+
+ original = [
+ example(features=features({
+ "a": float_feature([1, 1, 3]),
+ })),
+ example(features=features({
+ "a": float_feature([-1, -1]),
+ }))
+ ]
+
+ names = ["passing", "failing"]
+ serialized = [m.SerializeToString() for m in original]
+
+ self._test(
+ {
+ "serialized": tf.convert_to_tensor(serialized),
+ "names": names,
+ "dense_defaults": dense_defaults,
+ "dense_keys": ["a"],
+ "dense_types": [tf.float32],
+ "dense_shapes": dense_shapes,
+ },
+ expected_err_re="Name: failing, Key: a. Number of float values")
+
+ def testSerializedContainingSparse(self):
+ original = [
+ example(features=features({
+ "st_c": float_feature([3, 4])
+ })),
+ example(features=features({
+ "st_c": float_feature([]), # empty float list
+ })),
+ example(features=features({
+ "st_d": feature(), # feature with nothing in it
+ })),
+ example(features=features({
+ "st_c": float_feature([1, 2, -1]),
+ "st_d": bytes_feature(["hi"])
+ }))
+ ]
+
+ serialized = [m.SerializeToString() for m in original]
+
+ expected_st_c = ( # indices, values, shape
+ np.array([[0, 0], [0, 1], [3, 0], [3, 1], [3, 2]], dtype=np.int64),
+ np.array([3.0, 4.0, 1.0, 2.0, -1.0], dtype=np.float32),
+        np.array([4, 3], dtype=np.int64)) # batch == 4, max_elems = 3
+
+ expected_st_d = ( # indices, values, shape
+ np.array([[3, 0]], dtype=np.int64),
+ np.array(["hi"], dtype=np.str),
+        np.array([4, 1], dtype=np.int64)) # batch == 4, max_elems = 1
+
+ expected_output = {
+ "st_c": expected_st_c,
+ "st_d": expected_st_d,
+ }
+
+ self._test(
+ {
+ "serialized": tf.convert_to_tensor(serialized),
+ "sparse_keys": ["st_c", "st_d"],
+ "sparse_types": [tf.float32, tf.string],
+ }, expected_output)
+
+ def testSerializedContainingDense(self):
+ original = [
+ example(features=features({
+ "a": float_feature([1, 1]),
+ "b": bytes_feature(["b0_str"]),
+ })),
+ example(features=features({
+ "a": float_feature([-1, -1]),
+ "b": bytes_feature(["b1"]),
+ }))
+ ]
+
+ serialized = [m.SerializeToString() for m in original]
+
+ dense_shapes = [(1, 2, 1), (1, 1, 1, 1)]
+
+ expected_output = {
+ "a": np.array([[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
+ "b": np.array(["b0_str", "b1"], dtype=np.str).reshape(2, 1, 1, 1, 1),
+ }
+
+ # No defaults, values required
+ self._test(
+ {
+ "serialized": tf.convert_to_tensor(serialized),
+ "dense_keys": ["a", "b"],
+ "dense_types": [tf.float32, tf.string],
+ "dense_shapes": dense_shapes,
+ }, expected_output)
+
+ def testSerializedContainingDenseScalar(self):
+ original = [
+ example(features=features({
+ "a": float_feature([1]),
+ })),
+ example(features=features({}))
+ ]
+
+ serialized = [m.SerializeToString() for m in original]
+
+ expected_output = {
+ "a": np.array([[1], [-1]], dtype=np.float32) # 2x1 (column vector)
+ }
+
+ self._test(
+ {
+ "serialized": tf.convert_to_tensor(serialized),
+ "dense_defaults": {"a": -1},
+ "dense_shapes": [(1,)],
+ "dense_keys": ["a"],
+ "dense_types": [tf.float32],
+ }, expected_output)
+
+ def testSerializedContainingDenseWithDefaults(self):
+ original = [
+ example(features=features({
+ "a": float_feature([1, 1]),
+ })),
+ example(features=features({
+ "b": bytes_feature(["b1"]),
+ }))
+ ]
+
+ serialized = [m.SerializeToString() for m in original]
+
+ dense_shapes = [(1, 2, 1), (1, 1, 1, 1)]
+ dense_types = [tf.float32, tf.string]
+ dense_defaults = {
+ "a": [3.0, -3.0],
+ "b": "tmp_str",
+ }
+
+ expected_output = {
+ "a": np.array([[1, 1], [3, -3]], dtype=np.float32).reshape(2, 1, 2, 1),
+ "b": np.array(["tmp_str", "b1"], dtype=np.str).reshape(2, 1, 1, 1, 1),
+ }
+
+ self._test(
+ {
+ "serialized": tf.convert_to_tensor(serialized),
+ "dense_defaults": dense_defaults,
+ "dense_keys": ["a", "b"],
+ "dense_types": dense_types,
+ "dense_shapes": dense_shapes,
+ }, expected_output)
+
+ def testSerializedContainingSparseAndDenseWithNoDefault(self):
+ dense_defaults = {
+ "a": [1, 2, 3],
+ "b": np.random.rand(3, 3).astype(np.str),
+ # Feature "c" must be provided
+ }
+ dense_shapes = [(1, 3), (3, 3), (2,)]
+
+ expected_st_a = ( # indices, values, shape
+ np.empty((0, 2), dtype=np.int64), # indices
+ np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
+ np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
+
+ original = [
+ example(features=features({
+ "c": float_feature([3, 4])
+ })),
+ example(features=features({
+ "c": float_feature([1, 2])
+ }))
+ ]
+
+ names = ["in1", "in2"]
+ serialized = [m.SerializeToString() for m in original]
+
+ expected_output = {
+ "st_a": expected_st_a,
+ "a": np.array(2 * [[dense_defaults["a"]]]),
+ "b": np.array(2 * [dense_defaults["b"]]),
+ "c": np.array([[3, 4], [1, 2]], dtype=np.float32),
+ }
+
+ self._test(
+ {
+ "names": names,
+ "serialized": tf.convert_to_tensor(serialized),
+ "dense_defaults": dense_defaults,
+ "sparse_keys": ["st_a"],
+ "sparse_types": [tf.int64],
+ "dense_keys": ["a", "b", "c"],
+ "dense_types": [tf.int64, tf.string, tf.float32],
+ "dense_shapes": dense_shapes
+ }, expected_output)
+
+
+class ParseSingleExampleTest(tf.test.TestCase):
+
+ def _test(self, kwargs, expected_values=None, expected_err_re=None):
+ with self.test_session() as sess:
+ # Pull out some keys to check shape inference
+ dense_keys = kwargs["dense_keys"] if "dense_keys" in kwargs else []
+ sparse_keys = kwargs["sparse_keys"] if "sparse_keys" in kwargs else []
+ dense_shapes = kwargs["dense_shapes"] if "dense_shapes" in kwargs else []
+
+ # Returns dict w/ Tensors and SparseTensors
+ out = tf.parse_single_example(**kwargs)
+
+ # Check shapes
+ self.assertEqual(len(dense_keys), len(dense_shapes))
+ for (k, s) in zip(dense_keys, dense_shapes):
+ self.assertEqual(tuple(out[k].get_shape()), s)
+ for k in sparse_keys:
+ self.assertEqual(tuple(out[k].indices.get_shape().as_list()), (None, 1))
+ self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
+ self.assertEqual(tuple(out[k].shape.get_shape().as_list()), (1,))
+
+ # Check values
+ result = flatten_values_tensors_or_sparse(out.values()) # flatten values
+ if expected_err_re is None:
+ tf_result = sess.run(result)
+ _compare_output_to_expected(self, out, expected_values, tf_result)
+ else:
+ with self.assertRaisesOpError(expected_err_re):
+ sess.run(result)
+
+ def testSingleExampleWithSparseAndDense(self):
+ dense_types = [tf.int64, tf.string, tf.float32]
+ dense_shapes = [(1, 3), (3, 3), (2,)]
+ dense_defaults = {
+ "a": [1, 2, 3],
+ "b": np.random.rand(3, 3).astype(np.str),
+ # Feature "c" must be provided
+ }
+
+ original = example(features=features(
+ {"c": float_feature([3, 4]),
+ "st_a": float_feature([3.0, 4.0])}))
+
+ serialized = original.SerializeToString()
+
+ expected_st_a = (
+ np.array([[0], [1]], dtype=np.int64), # indices
+ np.array([3.0, 4.0], dtype=np.float32), # values
+ np.array([2], dtype=np.int64)) # shape: max_values = 2
+
+ expected_output = {
+ "st_a": expected_st_a,
+ "a": [dense_defaults["a"]],
+ "b": dense_defaults["b"],
+ "c": np.array([3, 4], dtype=np.float32),
+ }
+
+ self._test(
+ {
+ "names": "in1",
+ "serialized": tf.convert_to_tensor(serialized),
+ "dense_defaults": dense_defaults,
+ "dense_types": dense_types,
+ "sparse_keys": ["st_a"],
+ "sparse_types": [tf.float32],
+ "dense_keys": ["a", "b", "c"],
+ "dense_shapes": dense_shapes
+ }, expected_output)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/pooling_ops_test.py b/tensorflow/python/kernel_tests/pooling_ops_test.py
new file mode 100644
index 0000000000..b9a65726ee
--- /dev/null
+++ b/tensorflow/python/kernel_tests/pooling_ops_test.py
@@ -0,0 +1,819 @@
+"""Functional tests for pooling operations."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+from tensorflow.python.ops import gen_nn_ops
+
+
+def GetInceptionMaxPoolShapes():
+ """Iterator for some of the max pool ops in the Inception 2015 model.
+
+ Yields:
+ Tuple (name, input_size, filter_size, out_size, strides, padding)
+ """
+ names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"]
+ input_sizes = [[32, 71, 71, 192],
+ [32, 35, 35, 288], [32, 17, 17, 1248], [32, 8, 8, 2048]]
+ filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1],
+ [1, 3, 3, 1], [1, 3, 3, 1]]
+ output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288],
+ [32, 8, 8, 1248], [32, 8, 8, 2048]]
+ strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1],
+ [1, 1, 1, 1]]
+ paddings = ["VALID", "VALID", "VALID", "SAME"]
+ for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes,
+ strides, paddings):
+ yield n, i, f, o, s, p
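+
+
+# The output sizes above follow the usual pooling shape arithmetic. A
+# minimal illustrative sketch (not used by the tests; the helper name is
+# ours, not TensorFlow's):
+def ExpectedPoolOutputDim(in_dim, filter_dim, stride, padding):
+  """Spatial output size of pooling along a single dimension."""
+  if padding == "VALID":
+    # Count only windows that fit entirely inside the input.
+    return (in_dim - filter_dim) // stride + 1
+  else:  # "SAME": the input is padded so every position starts a window.
+    return (in_dim + stride - 1) // stride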
+
+
+class PoolingTest(tf.test.TestCase):
+
+ def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
+ expected, use_gpu):
+ """Verifies the output values of the pooling function.
+
+ Args:
+      pool_func: Function to be called, e.g. tf.nn.max_pool or
+        tf.nn.avg_pool.
+      input_sizes: Input tensor dimensions.
+      ksize: The kernel size dimensions.
+      strides: The stride dimensions.
+ padding: Padding type.
+ expected: An array containing the expected operation outputs.
+ use_gpu: Whether we are running on GPU.
+ """
+ total_size = 1
+ for s in input_sizes:
+ total_size *= s
+ # Initializes the input tensor with array containing incrementing
+ # numbers from 1.
+ x = [f * 1.0 for f in range(1, total_size + 1)]
+ with self.test_session(use_gpu=use_gpu) as sess:
+ t = tf.constant(x, shape=input_sizes)
+ t = pool_func(t, ksize=ksize, strides=strides, padding=padding)
+ actual = t.eval()
+ self.assertAllClose(expected, actual.flatten())
+ self.assertShapeEqual(actual, t)
+
+ def _testAvgPoolValidPadding(self, use_gpu):
+ expected_output = [7.0, 8.0, 9.0]
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
+ ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
+ padding="VALID",
+ expected=expected_output, use_gpu=use_gpu)
+
+ def _testAvgPoolSamePadding(self, use_gpu):
+ expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5]
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 4, 3],
+ ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output, use_gpu=use_gpu)
+
+ def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu):
+    # input is:
+    # [1.0, 2.0,
+    #  3.0, 4.0]
+    #
+    # Window of [x, x] should do:
+    #  [avg(1.0, 2.0), avg(2.0, padded0),
+    #   avg(3.0, 4.0), avg(4.0, padded0)]
+    # where padded cells do not count toward the average (see the NumPy
+    # cross-check after this method).
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
+ ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[1.5, 2.0, 3.5, 4.0], use_gpu=use_gpu)
+
+ # Window of [x,
+ # x] should do:
+ # [avg(1.0, 3.0), avg(2.0, 4.0)
+ # avg(3.0, padded0), avg(4.0, padded0)]
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
+ ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[2.0, 3.0, 3.0, 4.0], use_gpu=use_gpu)
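+
+  # A NumPy cross-check of the expected values above (an illustrative
+  # sketch, not used by the tests; assumes only numpy and a 2-D float
+  # input). SAME-padded average pooling divides by the number of valid,
+  # i.e. non-padded, cells, which the clipped slice below reproduces.
+  def _NumpyAvgPool1x2Same(self, x):
+    out = np.zeros_like(x)
+    for r in range(x.shape[0]):
+      for c in range(x.shape[1]):
+        # Edge windows are clipped, so mean() divides by the valid count.
+        out[r, c] = x[r, c:c + 2].mean()
+    return out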
+
+ def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu):
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
+ ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[2.0, 3.0, 3.0, 4.0,
+ 6.0, 7.0, 7.0, 8.0,
+ 10.0, 11.0, 11.0, 12.0,
+ 14.0, 15.0, 15.0, 16.0],
+ use_gpu=use_gpu)
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
+ ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[3.0, 4.0, 5.0, 6.0,
+ 5.0, 6.0, 7.0, 8.0,
+ 11.0, 12.0, 13.0, 14.0,
+ 13.0, 14.0, 15.0, 16.0],
+ use_gpu=use_gpu)
+
+ def _testAvgPoolValidPaddingUnevenStride(self, use_gpu):
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
+ ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
+ padding="VALID",
+ expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
+ use_gpu=use_gpu)
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
+ ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
+ padding="VALID",
+ expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
+ use_gpu=use_gpu)
+
+ def _testAvgPoolSamePadding4(self, use_gpu):
+ expected_output = [11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0,
+ 44.0, 45.0, 46.0, 51.0, 52.0, 53.0, 54.0]
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
+ ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output, use_gpu=use_gpu)
+
+ def _testAvgPoolSamePaddingPacket4(self, use_gpu):
+ expected_output = [21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0,
+ 45.0, 46.0, 47.0, 48.0, 51.0, 52.0, 53.0, 54.0]
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
+ ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output, use_gpu=use_gpu)
+
+ def _testAvgPoolSamePaddingPacket8(self, use_gpu):
+ expected_output = [73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 89.0,
+ 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 105.0, 106.0,
+ 107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 117.0, 118.0,
+ 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 201.0, 202.0,
+ 203.0, 204.0, 205.0, 206.0, 207.0, 208.0, 217.0, 218.0,
+ 219.0, 220.0, 221.0, 222.0, 223.0, 224.0, 233.0, 234.0,
+ 235.0, 236.0, 237.0, 238.0, 239.0, 240.0, 245.0, 246.0,
+ 247.0, 248.0, 249.0, 250.0, 251.0, 252.0, 329.0, 330.0,
+ 331.0, 332.0, 333.0, 334.0, 335.0, 336.0, 345.0, 346.0,
+ 347.0, 348.0, 349.0, 350.0, 351.0, 352.0, 361.0, 362.0,
+ 363.0, 364.0, 365.0, 366.0, 367.0, 368.0, 373.0, 374.0,
+ 375.0, 376.0, 377.0, 378.0, 379.0, 380.0, 425.0, 426.0,
+ 427.0, 428.0, 429.0, 430.0, 431.0, 432.0, 441.0, 442.0,
+ 443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 457.0, 458.0,
+ 459.0, 460.0, 461.0, 462.0, 463.0, 464.0, 469.0, 470.0,
+ 471.0, 472.0, 473.0, 474.0, 475.0, 476.0]
+ self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 8, 8, 8],
+ ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output, use_gpu=use_gpu)
+
+ def testAvgPooling(self):
+ for use_gpu in True, False:
+ self._testAvgPoolValidPadding(use_gpu)
+ self._testAvgPoolSamePadding(use_gpu)
+ self._testAvgPoolSamePaddingNonSquareWindow(use_gpu)
+ self._testAvgPoolSamePaddingNonSquareWindowMultiBatch(use_gpu)
+ self._testAvgPoolValidPaddingUnevenStride(use_gpu)
+ self._testAvgPoolSamePadding4(use_gpu)
+ self._testAvgPoolSamePaddingPacket4(use_gpu)
+ self._testAvgPoolSamePaddingPacket8(use_gpu)
+
+ def _testMaxPoolValidPadding(self, use_gpu):
+ expected_output = [13.0, 14.0, 15.0]
+ self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 3, 3, 3],
+ ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
+ padding="VALID",
+ expected=expected_output, use_gpu=use_gpu)
+
+ def _testMaxPoolSamePadding(self, use_gpu):
+ expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
+ self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 3, 3],
+ ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output, use_gpu=use_gpu)
+
+ def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu):
+ # input is:
+    # [1.0, 2.0,
+    #  3.0, 4.0]
+ #
+ # Window of [x, x] should do:
+ #
+ # [max(1.0, 2.0), max(2.0, padded0),
+ # max(3.0, 4.0), max(4.0, padded0)]
+ self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 1],
+ ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
+ padding="SAME",
+ expected=[2.0, 2.0, 4.0, 4.0], use_gpu=use_gpu)
+
+ def _testMaxPoolValidPaddingUnevenStride(self, use_gpu):
+ self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
+ ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
+ padding="VALID",
+ expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
+ use_gpu=use_gpu)
+ self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
+ ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
+ padding="VALID",
+ expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
+ use_gpu=use_gpu)
+
+ def _testMaxPoolSamePaddingPacket4(self, use_gpu):
+ expected_output = [21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0,
+ 54.0, 55.0, 56.0, 61.0, 62.0, 63.0, 64.0]
+ self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 4],
+ ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output, use_gpu=use_gpu)
+
+ def _testMaxPoolSamePaddingPacket8(self, use_gpu):
+ expected_output = [145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0,
+ 161.0, 162.0, 163.0, 164.0, 165.0, 166.0, 167.0, 168.0,
+ 177.0, 178.0, 179.0, 180.0, 181.0, 182.0, 183.0, 184.0,
+ 185.0, 186.0, 187.0, 188.0, 189.0, 190.0, 191.0, 192.0,
+ 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
+ 289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0,
+ 305.0, 306.0, 307.0, 308.0, 309.0, 310.0, 311.0, 312.0,
+ 313.0, 314.0, 315.0, 316.0, 317.0, 318.0, 319.0, 320.0,
+ 401.0, 402.0, 403.0, 404.0, 405.0, 406.0, 407.0, 408.0,
+ 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
+ 433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0,
+ 441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0,
+ 465.0, 466.0, 467.0, 468.0, 469.0, 470.0, 471.0, 472.0,
+ 481.0, 482.0, 483.0, 484.0, 485.0, 486.0, 487.0, 488.0,
+ 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
+ 505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0]
+ self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 8, 8, 8],
+ ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
+ padding="SAME",
+ expected=expected_output, use_gpu=use_gpu)
+
+ def testMaxPooling(self):
+ for use_gpu in True, False:
+ self._testMaxPoolValidPadding(use_gpu)
+ self._testMaxPoolSamePadding(use_gpu)
+ self._testMaxPoolSamePaddingNonSquareWindow(use_gpu)
+ self._testMaxPoolValidPaddingUnevenStride(use_gpu)
+ self._testMaxPoolSamePaddingPacket4(use_gpu)
+ self._testMaxPoolSamePaddingPacket8(use_gpu)
+
+ # Tests for DepthwiseMaxPooling on CPU only.
+ def testDepthwiseMaxPool1x1DepthWindow1(self):
+    # input is:
+    # [1.0, ..., 10.0] along depth.
+    #
+    # We max-pool along depth in windows of 2.
+ self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 1, 1, 10],
+ ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
+ padding="SAME",
+ expected=[2.0, 4.0, 6.0, 8.0, 10.0], use_gpu=False)
+
+ def testDepthwiseMaxPool2x2DepthWindow3(self):
+ # input is:
+ #
+    # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
+    # output. Each spatial location has contiguous channel values, so the
+    # depthwise max should be multiples of 3.0.
+ self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 6],
+ ksize=[1, 1, 1, 3], strides=[1, 1, 1, 3],
+ padding="SAME",
+ expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
+ use_gpu=False)
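+
+  # Illustrative NumPy equivalent (a sketch, not used by the tests): when
+  # the depth window equals the depth stride k, depthwise max pooling is
+  # just a reshape that splits the channel axis into groups of k followed
+  # by a max over the new trailing axis.
+  def _NumpyDepthwiseMaxPool(self, x, k):
+    b, h, w, d = x.shape
+    assert d % k == 0, "depth window must evenly divide the depth"
+    return x.reshape(b, h, w, d // k, k).max(axis=4)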
+
+ def _testDepthwiseMaxPoolInvalidConfig(self, in_size, ksize, strides,
+ error_msg, use_gpu=False):
+ t = tf.constant(1.0, shape=in_size)
+ with self.assertRaisesRegexp(ValueError, error_msg):
+ t = tf.nn.max_pool(t, ksize=ksize, strides=strides, padding="SAME")
+
+ def testDepthwiseMaxPoolInvalidConfigs(self):
+ self._testDepthwiseMaxPoolInvalidConfig(
+ [1, 2, 2, 4], [1, 2, 2, 2],
+ [1, 1, 1, 2], "exactly one of pooling across depth")
+ self._testDepthwiseMaxPoolInvalidConfig(
+ [1, 2, 2, 4], [1, 1, 1, 2],
+ [1, 1, 1, 1], "depth window to equal the depth stride")
+ self._testDepthwiseMaxPoolInvalidConfig(
+ [1, 2, 2, 4], [1, 1, 1, 3],
+ [1, 1, 1, 3], "evenly divide")
+ if tf.test.IsBuiltWithCuda():
+ with self.test_session(use_gpu=True):
+ t = tf.constant(1.0, shape=[1, 2, 2, 4])
+ with self.assertRaisesOpError("for CPU devices"):
+ tf.nn.max_pool(t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
+ padding="SAME").eval()
+
+  # The following tests verify that the CPU and GPU implementations
+  # produce the same results.
+ def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):
+ tensor_input = np.random.rand(*input_shape).astype(np.float32)
+ with self.test_session(use_gpu=True):
+ t = tf.constant(tensor_input, shape=input_shape)
+ out_op, _ = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
+ gpu_val = out_op.eval()
+ with self.test_session(use_gpu=False):
+ t = tf.constant(tensor_input, shape=input_shape)
+ out_op = tf.nn.max_pool(t, ksize, strides, padding)
+ cpu_val = out_op.eval()
+ self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
+
+ def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
+ padding):
+ # Generate numbers in a narrow range, so that there are many duplicates
+ # in the input.
+ tensor_input = np.random.random_integers(0, 3,
+ input_shape).astype(np.float32)
+ tensor_output = np.random.rand(*output_shape).astype(np.float32)
+ with self.test_session(use_gpu=True):
+ t = tf.constant(tensor_input, shape=input_shape)
+ _, argmax_op = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
+ argmax = argmax_op.eval()
+ grad_in = tf.constant(tensor_output, shape=output_shape)
+ out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
+ ksize, strides, padding)
+ gpu_val = out_op.eval()
+ self.assertShapeEqual(gpu_val, out_op)
+ with self.test_session(use_gpu=False):
+ t = tf.constant(tensor_input, shape=input_shape)
+ out_op = tf.nn.max_pool(t, ksize, strides, padding)
+ orig_out = out_op.eval()
+ grad_in = tf.constant(tensor_output, shape=output_shape)
+ out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize,
+ strides, padding)
+ cpu_val = out_op.eval()
+ self.assertShapeEqual(cpu_val, out_op)
+ self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
+
+ def testMaxPoolingWithArgmax(self):
+ # MaxPoolWithArgMax is implemented only on GPU.
+ if not tf.test.IsBuiltWithCuda():
+ return
+ tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
+ with self.test_session(use_gpu=True) as sess:
+ t = tf.constant(tensor_input, shape=[1, 3, 3, 1])
+ out_op, argmax_op = tf.nn.max_pool_with_argmax(t,
+ ksize=[1, 2, 2, 1],
+ strides=[1, 1, 1, 1],
+ Targmax=tf.int64,
+ padding="VALID")
+ out, argmax = sess.run([out_op, argmax_op])
+ self.assertShapeEqual(out, out_op)
+ self.assertShapeEqual(argmax, argmax_op)
+ self.assertAllClose(out.ravel(), [1.0, 1.0, 1.0, 1.0])
+ self.assertAllEqual(argmax.ravel(), [0, 1, 3, 5])
+
+ def testMaxPoolingGradWithArgmax(self):
+ # MaxPoolWithArgMax is implemented only on GPU.
+ if not tf.test.IsBuiltWithCuda():
+ return
+ orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
+ tensor_input = [11.0, 12.0, 13.0, 14.0]
+ tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64))
+ with self.test_session(use_gpu=True) as sess:
+ orig_in = tf.constant(orig_input, shape=[1, 3, 3, 1])
+ t = tf.constant(tensor_input, shape=[1, 2, 2, 1])
+ argmax = tf.constant(tensor_argmax, shape=[1, 2, 2, 1],
+ dtype=tf.int64)
+ out_op = gen_nn_ops._max_pool_grad_with_argmax(orig_in, t, argmax,
+ ksize=[1, 2, 2, 1],
+ strides=[1, 1, 1, 1],
+ padding="VALID")
+ out = out_op.eval().flatten()
+ self.assertAllClose(out, [11.0, 12.0, 0.0, 13.0, 0.0,
+ 14.0, 0.0, 0.0, 0.0])
+
+ def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes,
+ window_rows, window_cols, row_stride,
+ col_stride, padding, use_gpu,
+ x_init_value=None):
+ """Verifies the gradients of the avg pooling function.
+
+ Args:
+      pool_func: Function to be called, e.g. tf.nn.max_pool or
+        tf.nn.avg_pool.
+      input_sizes: Input tensor dimensions.
+      output_sizes: Output tensor dimensions.
+      window_rows: Kernel size in the row dimension.
+      window_cols: Kernel size in the column dimension.
+      row_stride: Row stride.
+      col_stride: Column stride.
+      padding: Padding type.
+      use_gpu: Whether we are running on GPU.
+ x_init_value: Values to be passed to the gradient checker.
+ """
+ total_size = 1
+ for s in input_sizes:
+ total_size *= s
+ # Initializes the input tensor with array containing incrementing
+ # numbers from 1.
+ x = [f * 1.0 for f in range(1, total_size + 1)]
+ with self.test_session(use_gpu=use_gpu):
+ input_tensor = tf.constant(x, shape=input_sizes, name="input")
+ if pool_func == tf.nn.avg_pool:
+ func_name = "avg_pool"
+ err_margin = 1e-4
+ else:
+ if x_init_value is None:
+ x_init_value = np.asfarray(
+ np.arange(1, total_size + 1),
+ dtype=np.float32).reshape(input_sizes)
+ func_name = "max_pool"
+ err_margin = 1e-3
+      t = pool_func(input_tensor, ksize=[1, window_rows, window_cols, 1],
+ strides=[1, row_stride, col_stride, 1],
+ padding=padding, name=func_name)
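+      # ComputeGradientError estimates the Jacobian of t with respect to
+      # input_tensor by finite differences (step size delta), compares it
+      # against the Jacobian derived from the registered gradient, and
+      # returns the maximum elementwise difference.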
+ err = gc.ComputeGradientError(
+ input_tensor, input_sizes, t, output_sizes,
+ x_init_value=x_init_value, delta=1e-2)
+ print "%s gradient error = " % func_name, err
+ self.assertLess(err, err_margin)
+
+ def _testMaxPoolGradValidPadding1_1(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.max_pool, input_sizes=[1, 3, 3, 1],
+ output_sizes=[1, 3, 3, 1], window_rows=1, window_cols=1, row_stride=1,
+ col_stride=1, padding="VALID", use_gpu=use_gpu)
+
+ def _testMaxPoolGradValidPadding2_1_6(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.max_pool, input_sizes=[2, 6, 6, 3],
+ output_sizes=[2, 5, 5, 3], window_rows=2, window_cols=2, row_stride=1,
+ col_stride=1, padding="VALID", use_gpu=use_gpu)
+
+ def _testMaxPoolGradValidPadding2_1_7(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.max_pool, input_sizes=[2, 7, 7, 3],
+ output_sizes=[2, 6, 6, 3], window_rows=2, window_cols=2, row_stride=1,
+ col_stride=1, padding="VALID", use_gpu=use_gpu)
+
+ def _testMaxPoolGradValidPadding2_2(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.max_pool, input_sizes=[2, 2, 2, 3],
+ output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
+ col_stride=2, padding="VALID", use_gpu=use_gpu)
+
+ def _testMaxPoolGradSamePadding1_1(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
+ col_stride=1, padding="SAME", use_gpu=use_gpu)
+
+ def _testMaxPoolGradSamePadding2_1(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
+ col_stride=1, padding="SAME", use_gpu=use_gpu)
+
+ def _testMaxPoolGradSamePadding2_2(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
+ col_stride=2, padding="SAME", use_gpu=use_gpu)
+
+ def _testMaxPoolGradSamePadding3_1(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.max_pool, input_sizes=[1, 7, 7, 1],
+ output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
+ col_stride=1, padding="SAME", use_gpu=use_gpu)
+
+ def testMaxPoolGrad(self):
+ for use_gpu in True, False:
+ self._testMaxPoolGradValidPadding1_1(use_gpu=use_gpu)
+ self._testMaxPoolGradValidPadding2_1_6(use_gpu=use_gpu)
+ self._testMaxPoolGradValidPadding2_1_7(use_gpu=use_gpu)
+ self._testMaxPoolGradValidPadding2_2(use_gpu=use_gpu)
+ self._testMaxPoolGradSamePadding1_1(use_gpu=use_gpu)
+ self._testMaxPoolGradSamePadding2_1(use_gpu=use_gpu)
+ self._testMaxPoolGradSamePadding2_2(use_gpu=use_gpu)
+ self._testMaxPoolGradSamePadding3_1(use_gpu=use_gpu)
+
+ def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
+ window_cols, row_stride, col_stride, padding):
+ """Max Pooling Gradient.
+
+ Args:
+ orig_input: A float Tensor. The original input tensor.
+ orig_output: A float Tensor. The original output tensor.
+ grad: A float Tensor.
+ The 4D (batch x rows x cols x depth) output backprop.
+ window_rows: integer. Kernel size along rows dimension.
+ window_cols: integer. Kernel size along cols dimension.
+ row_stride: integer. Stride along rows dimension
+ col_stride: integer. Stride along cols dimension
+ padding: PoolingOpDef.Padding. Padding type.
+
+ Returns:
+ A Tensor.
+ """
+ return gen_nn_ops._max_pool_grad(
+ orig_input, orig_output, grad,
+ [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1],
+ padding)
+
+ def _testMaxPoolGradDirect(self, input_data, output_backprop,
+ expected_input_backprop, input_sizes, output_sizes,
+ window_rows, window_cols, row_stride, col_stride,
+ padding, use_gpu):
+ with self.test_session(use_gpu=use_gpu) as sess:
+ input_tensor = tf.constant(input_data, shape=input_sizes)
+ output_tensor = tf.nn.max_pool(
+ input_tensor, [1, window_rows, window_cols, 1],
+ [1, row_stride, col_stride, 1], padding)
+ output_backprop_tensor = tf.constant(output_backprop,
+ shape=output_sizes)
+
+ input_backprop_tensor = self._MaxPoolGrad(
+ input_tensor, output_tensor, output_backprop_tensor,
+ window_rows, window_cols, row_stride, col_stride, padding)
+
+ actual_input_backprop = input_backprop_tensor.eval()
+ self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
+ actual_input_backprop = actual_input_backprop.flatten()
+ actual_input_backprop = self._GetNdArray(actual_input_backprop)
+
+ actual_output = output_tensor.eval().flatten()
+ actual_output = self._GetNdArray(actual_output)
+
+ self.assertAllClose(expected_input_backprop, actual_input_backprop,
+ rtol=1e-6, atol=1e-6)
+
+ def _testMaxPoolGradDirect1_1(self):
+ input_data = [
+ 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0]
+ output_backprop = [
+ 11.0, 12.0, 13.0,
+ 15.0, 16.0, 17.0,
+ 19.0, 20.0, 21.0]
+ expected_input_backprop = [
+ 11.0, 12.0, 13.0, 0.0,
+ 15.0, 16.0, 17.0, 0.0,
+ 19.0, 20.0, 21.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0]
+
+ for use_gpu in True, False:
+ self._testMaxPoolGradDirect(
+ input_data, output_backprop, expected_input_backprop,
+ input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
+ window_rows=2, window_cols=2, row_stride=1, col_stride=1,
+ padding="VALID", use_gpu=use_gpu)
+
+ def _testMaxPoolGradDirect1_2(self):
+ input_data = [
+ 1.0, 0.0, 1.0, 0.0,
+ 0.0, 1.0, 0.0, 1.0,
+ 1.0, 0.0, 1.0, 0.0,
+ 0.0, 1.0, 0.0, 1.0]
+ output_backprop = [
+ 11.0, 12.0, 13.0,
+ 15.0, 16.0, 17.0,
+ 19.0, 20.0, 21.0]
+ expected_input_backprop = [
+ 11.0, 0.0, 25.0, 0.0,
+ 0.0, 31.0, 0.0, 17.0,
+ 19.0, 0.0, 41.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0]
+
+ for use_gpu in True, False:
+ self._testMaxPoolGradDirect(
+ input_data, output_backprop, expected_input_backprop,
+ input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
+ window_rows=2, window_cols=2, row_stride=1, col_stride=1,
+ padding="VALID", use_gpu=use_gpu)
+
+ def _testMaxPoolGradDirect1_3(self):
+ input_data = [
+ 1.0, 0.0, 1.0, 0.0,
+ 0.0, 1.0, 0.0, 1.0,
+ 1.0, 0.0, 1.0, 0.0,
+ 0.0, 1.0, 0.0, 1.0,]
+ output_backprop = [
+ 11.0, 12.0, 13.0, 14.0,
+ 15.0, 16.0, 17.0, 18.0,
+ 19.0, 20.0, 21.0, 22.0,
+ 23.0, 24.0, 25.0, 26.0]
+ expected_input_backprop = [
+ 54, 0.0, 62, 0.0,
+ 0.0, 60, 0.0, 22.0,
+ 47, 0.0, 51, 0.0,
+ 0.0, 0.0, 0.0, 0.0,]
+
+ for use_gpu in True, False:
+ self._testMaxPoolGradDirect(
+ input_data, output_backprop, expected_input_backprop,
+ input_sizes=[1, 4, 4, 1], output_sizes=[1, 4, 4, 1],
+ window_rows=3, window_cols=3, row_stride=1, col_stride=1,
+ padding="SAME", use_gpu=use_gpu)
+
+ def _testMaxPoolGradDirectWithNans2_1(self):
+ input_data = [float("nan")] * 16
+ output_backprop = [
+ 11.0, 12.0, 13.0,
+ 15.0, 16.0, 17.0,
+ 19.0, 20.0, 21.0]
+ # Test the CPU implementation, which propagates diffs in case of NaN
+ expected_input_backprop_tf_cpu = [
+ 11.0, 12.0, 13.0, 0.0,
+ 15.0, 16.0, 17.0, 0.0,
+ 19.0, 20.0, 21.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0]
+ self._testMaxPoolGradDirect(
+ input_data, output_backprop, expected_input_backprop_tf_cpu,
+ input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
+ window_rows=2, window_cols=2, row_stride=1, col_stride=1,
+ padding="VALID", use_gpu=False)
+
+ if not tf.test.IsBuiltWithCuda():
+ return
+
+ # Test the GPU implementation that uses cudnn for now.
+ # It does not propagate the diff in cases of NaNs
+ expected_input_backprop_cudnn = [
+ 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0]
+ self._testMaxPoolGradDirect(
+ input_data, output_backprop, expected_input_backprop_cudnn,
+ input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
+ window_rows=2, window_cols=2, row_stride=1, col_stride=1,
+ padding="VALID", use_gpu=True)
+
+ def _testMaxPoolGradDirectWithNans2_2(self):
+ input_data = [float("nan")] * 16
+ output_backprop = [
+ float("nan"), 12.0, 13.0,
+ 15.0, float("nan"), 17.0,
+ 19.0, 20.0, float("nan")]
+ # Test the CPU implementation, which propagates diffs in case of NaN
+ expected_input_backprop_tf_cpu = [
+ float("nan"), 12.0, 13.0, 0.0,
+ 15.0, float("nan"), 17.0, 0.0,
+ 19.0, 20.0, float("nan"), 0.0,
+ 0.0, 0.0, 0.0, 0.0]
+ self._testMaxPoolGradDirect(
+ input_data, output_backprop, expected_input_backprop_tf_cpu,
+ input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
+ window_rows=2, window_cols=2, row_stride=1, col_stride=1,
+ padding="VALID", use_gpu=False)
+
+ if not tf.test.IsBuiltWithCuda():
+ return
+
+ # Test the GPU implementation that uses cudnn for now.
+ # It does not propagate the diff in cases of NaNs
+ expected_input_backprop_cudnn = [
+ 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0]
+ self._testMaxPoolGradDirect(
+ input_data, output_backprop, expected_input_backprop_cudnn,
+ input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
+ window_rows=2, window_cols=2, row_stride=1, col_stride=1,
+ padding="VALID", use_gpu=True)
+
+ def testMaxPoolGradDirect(self):
+ self._testMaxPoolGradDirect1_1()
+ self._testMaxPoolGradDirect1_2()
+ self._testMaxPoolGradDirect1_3()
+ self._testMaxPoolGradDirectWithNans2_1()
+ self._testMaxPoolGradDirectWithNans2_2()
+
+ def testAvgPoolGrad(self):
+ for use_gpu in False, True:
+ self._testAvgPoolGradValidPadding1_1(use_gpu)
+ self._testAvgPoolGradValidPadding2_1(use_gpu)
+ self._testAvgPoolGradValidPadding2_2(use_gpu)
+ self._testAvgPoolGradSamePadding1_1(use_gpu)
+ self._testAvgPoolGradSamePadding2_1(use_gpu)
+ self._testAvgPoolGradSamePadding2_2(use_gpu)
+ self._testAvgPoolGradSamePadding3_1(use_gpu)
+
+ def _testAvgPoolGradValidPadding1_1(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
+ output_sizes=[2, 3, 3, 3], window_rows=1, window_cols=1, row_stride=1,
+ col_stride=1, padding="VALID", use_gpu=use_gpu)
+
+ def _testAvgPoolGradValidPadding2_1(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
+ output_sizes=[2, 2, 2, 3], window_rows=2, window_cols=2, row_stride=1,
+ col_stride=1, padding="VALID", use_gpu=use_gpu)
+
+ def _testAvgPoolGradValidPadding2_2(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.avg_pool, input_sizes=[2, 2, 2, 3],
+ output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
+ col_stride=2, padding="VALID", use_gpu=use_gpu)
+
+ def _testAvgPoolGradSamePadding1_1(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
+ col_stride=1, padding="SAME", use_gpu=use_gpu)
+
+ def _testAvgPoolGradSamePadding2_1(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
+ col_stride=1, padding="SAME", use_gpu=use_gpu)
+
+ def _testAvgPoolGradSamePadding2_2(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
+ output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
+ col_stride=2, padding="SAME", use_gpu=use_gpu)
+
+ def _testAvgPoolGradSamePadding3_1(self, use_gpu):
+ self._ConstructAndTestGradient(
+ tf.nn.avg_pool, input_sizes=[1, 7, 7, 1],
+ output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
+ col_stride=1, padding="SAME", use_gpu=use_gpu)
+
+ def testShapeFunctionEdgeCases(self):
+ # All shapes unknown.
+ for pool_func in [tf.nn.max_pool, tf.nn.avg_pool]:
+      p = pool_func(tf.placeholder(tf.float32),
+                    ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
+                    padding="SAME")
+ self.assertEqual([None, None, None, None], p.get_shape().as_list())
+ p, am = tf.nn.max_pool_with_argmax(
+ tf.placeholder(tf.float32),
+ ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
+ padding="SAME")
+ self.assertEqual([None, None, None, None], p.get_shape().as_list())
+ self.assertEqual([None, None, None, None], am.get_shape().as_list())
+
+ # Incorrect input shape.
+ for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
+ tf.nn.max_pool_with_argmax]:
+ with self.assertRaises(ValueError):
+ pool_func(tf.placeholder(tf.float32, shape=[1, 3]),
+ ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME")
+
+ # Illegal strides.
+ for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
+ tf.nn.max_pool_with_argmax]:
+ with self.assertRaisesRegexp(ValueError, "strides in the batch"):
+ pool_func(tf.placeholder(tf.float32),
+ ksize=[1, 1, 1, 1], strides=[2, 1, 1, 1], padding="SAME")
+ with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"):
+ tf.nn.avg_pool(tf.placeholder(tf.float32),
+ ksize=[1, 1, 1, 1], strides=[1, 1, 1, 2], padding="SAME")
+
+ # Filter larger than input.
+ for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
+ tf.nn.max_pool_with_argmax]:
+ with self.assertRaisesRegexp(ValueError,
+ "filter must not be larger than the input"):
+ pool_func(tf.placeholder(tf.float32,
+ shape=[32, 20, 20, 3]),
+ ksize=[1, 20, 21, 1], strides=[1, 1, 1, 1], padding="SAME")
+ with self.assertRaisesRegexp(ValueError,
+ "filter must not be larger than the input"):
+ pool_func(tf.placeholder(tf.float32,
+ shape=[32, 20, 20, 3]),
+ ksize=[1, 21, 20, 1], strides=[1, 1, 1, 1], padding="SAME")
+
+ # Stride larger than filter.
+ for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
+ tf.nn.max_pool_with_argmax]:
+ with self.assertRaisesRegexp(
+ ValueError, "stride must be less than or equal to filter"):
+ pool_func(tf.placeholder(tf.float32,
+ shape=[32, 20, 20, 3]),
+ ksize=[1, 5, 3, 1], strides=[1, 5, 5, 1], padding="SAME")
+ with self.assertRaisesRegexp(
+ ValueError, "stride must be less than or equal to filter"):
+ pool_func(tf.placeholder(tf.float32,
+ shape=[32, 20, 20, 3]),
+ ksize=[1, 3, 5, 1], strides=[1, 5, 5, 1], padding="SAME")
+
+
+def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
+ def Test(self):
+ # MaxPoolWithArgMax is implemented only on GPU.
+ if not tf.test.IsBuiltWithCuda():
+ return
+ self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
+ return Test
+
+
+def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
+ def Test(self):
+ # MaxPoolWithArgMax is implemented only on GPU.
+ if not tf.test.IsBuiltWithCuda():
+ return
+ self._CompareMaxPoolingBk(input_size, output_size,
+ filter_size, strides, padding)
+ return Test
+
+
+if __name__ == "__main__":
+ for (name_, input_size_, filter_size_, output_size_, stride_,
+ padding_) in GetInceptionMaxPoolShapes():
+ setattr(PoolingTest, "testMaxPoolFwd_" + name_,
+ GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
+ setattr(PoolingTest, "testMaxPoolGrad_" + name_,
+ GetMaxPoolGradTest(input_size_, filter_size_, output_size_,
+ stride_, padding_))
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/random_ops_test.py b/tensorflow/python/kernel_tests/random_ops_test.py
new file mode 100644
index 0000000000..311f0e3e5e
--- /dev/null
+++ b/tensorflow/python/kernel_tests/random_ops_test.py
@@ -0,0 +1,242 @@
+"""Tests for tensorflow.ops.random_ops."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class RandomNormalTest(tf.test.TestCase):
+
+ def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
+ def func():
+ with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
+ rng = tf.random_normal(
+ [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
+ ret = np.empty([10, num])
+ for i in xrange(10):
+ ret[i, :] = sess.run(rng)
+ return ret
+ return func
+
+  # Asserts that different trials (1000 samples per trial) are unlikely
+  # to see the same sequence of values. This will catch buggy
+  # implementations that reuse the same random number seed.
+ def testDistinct(self):
+ for use_gpu in [False, True]:
+ for dt in tf.float32, tf.float64:
+ sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu)
+ x = sampler()
+ y = sampler()
+ # Number of different samples.
+ count = (x == y).sum()
+ if count >= 10:
+ print "x = ", x
+ print "y = ", y
+ print "count = ", count
+ self.assertTrue(count < 10)
+
+  # Checks that the CPU and GPU implementations return the same results,
+  # given the same random seed.
+ def testCPUGPUMatch(self):
+ for dt in tf.float32, tf.float64:
+ results = {}
+ for use_gpu in [False, True]:
+ sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
+ results[use_gpu] = sampler()
+ self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
+
+ def testSeed(self):
+ for use_gpu in [False, True]:
+ for dt in tf.float32, tf.float64:
+ sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
+ sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
+ self.assertAllEqual(sx(), sy())
+
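+  # "CSE" is common-subexpression elimination: two random ops with
+  # identical inputs must not be folded into a single graph node, so
+  # rnd1 and rnd2 below must produce different values.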
+ def testNoCSE(self):
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ shape = [2, 3, 4]
+ rnd1 = tf.random_normal(shape, 0.0, 1.0, tf.float32)
+ rnd2 = tf.random_normal(shape, 0.0, 1.0, tf.float32)
+ diff = rnd2 - rnd1
+ self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
+
+
+class TruncatedNormalTest(tf.test.TestCase):
+
+ def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
+ def func():
+ with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
+ rng = tf.truncated_normal(
+ [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
+ ret = np.empty([10, num])
+ for i in xrange(10):
+ ret[i, :] = sess.run(rng)
+ return ret
+ return func
+
+  # Asserts that different trials (1000 samples per trial) are unlikely
+  # to see the same sequence of values. This will catch buggy
+  # implementations that reuse the same random number seed.
+ def testDistinct(self):
+    # NOTE: The RandomParameters op is not supported on GPU.
+ for use_gpu in [False]:
+ for dt in tf.float32, tf.float64:
+ sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu)
+ x = sampler()
+ y = sampler()
+ # Number of different samples.
+ count = (x == y).sum()
+ if count >= 10:
+ print "x = ", x
+ print "y = ", y
+ print "count = ", count
+ self.assertTrue(count < 10)
+
+  # Checks that the CPU and GPU implementations return the same results,
+  # given the same random seed.
+ def testCPUGPUMatch(self):
+ for dt in tf.float32, tf.float64:
+ results = {}
+ for use_gpu in [False, True]:
+        # We need a particularly large number of samples to exercise
+        # multiple rounds of sampling on the GPU.
+ sampler = self._Sampler(1000000, 0.0, 1.0, dt, use_gpu=use_gpu,
+ seed=12345)
+ results[use_gpu] = sampler()
+ self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
+
+ def testSeed(self):
+ for use_gpu in [False, True]:
+ for dt in tf.float32, tf.float64:
+ sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
+ sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
+ self.assertAllEqual(sx(), sy())
+
+  # The effective standard deviation of a truncated normal (cut off at two
+  # standard deviations) is roughly 85% of the requested one; see the
+  # sketch after this method for the exact factor.
+ def testStdDev(self):
+ for use_gpu in [False, True]:
+ for dt in tf.float32, tf.float64:
+ stddev = 3.0
+ sampler = self._Sampler(100000, 0.0, stddev, dt, use_gpu=use_gpu)
+ x = sampler()
+ print "std(x)", np.std(x), abs(np.std(x) / stddev - 0.85)
+ self.assertTrue(abs(np.std(x) / stddev - 0.85) < 0.04)
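+
+  # Where the ~0.85 factor comes from (an illustrative sketch, assuming
+  # only that the samples follow a standard normal truncated at +/- 2
+  # sigma): symmetric truncation at a shrinks the variance by the factor
+  # 1 - 2 * a * phi(a) / (2 * Phi(a) - 1), with phi and Phi the standard
+  # normal pdf and cdf.
+  def _TheoreticalTruncatedStdFactor(self):
+    import math  # local import keeps this sketch self-contained
+    a = 2.0
+    pdf = math.exp(-a * a / 2.0) / math.sqrt(2.0 * math.pi)
+    cdf = 0.5 * (1.0 + math.erf(a / math.sqrt(2.0)))
+    var_factor = 1.0 - 2.0 * a * pdf / (2.0 * cdf - 1.0)
+    return math.sqrt(var_factor)  # ~0.88, within the 0.85 +/- 0.04 bound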
+
+ def testNoCSE(self):
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ shape = [2, 3, 4]
+ rnd1 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
+ rnd2 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
+ diff = rnd2 - rnd1
+ self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
+
+
+class RandomUniformTest(tf.test.TestCase):
+
+ def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):
+ def func():
+ with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
+ rng = tf.random_uniform(
+ [num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
+ ret = np.empty([10, num])
+ for i in xrange(10):
+ ret[i, :] = sess.run(rng)
+ return ret
+ return func
+
+ def testRange(self):
+ for use_gpu in [False, True]:
+ for dt in tf.float32, tf.float64:
+ sampler = self._Sampler(1000, -2., 8., dt, use_gpu=use_gpu)
+ x = sampler()
+ self.assertTrue(-2 <= np.min(x))
+ self.assertTrue(np.max(x) <= 8)
+
+  # Asserts that different trials (1000 samples per trial) are unlikely
+  # to see the same sequence of values. This will catch buggy
+  # implementations that reuse the same random number seed.
+ def testDistinct(self):
+ for use_gpu in [False, True]:
+ for dt in tf.float32, tf.float64:
+ sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu)
+ x = sampler()
+ y = sampler()
+ count = (x == y).sum()
+ if count >= 10:
+ print "x = ", x
+ print "y = ", y
+ print "count = ", count
+ self.assertTrue(count < 10)
+
+  # Checks that the CPU and GPU implementations return the same results,
+  # given the same random seed.
+ def testCPUGPUMatch(self):
+ for dt in tf.float32, tf.float64:
+ results = {}
+ for use_gpu in [False, True]:
+ sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
+ results[use_gpu] = sampler()
+ self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
+
+ def testSeed(self):
+ for use_gpu in [False, True]:
+ for dt in tf.float32, tf.float64:
+ sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
+ sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
+ self.assertAllEqual(sx(), sy())
+
+ def testNoCSE(self):
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ shape = [2, 3, 4]
+ rnd1 = tf.random_uniform(shape, 0.0, 1.0,
+ dtype=tf.float32)
+ rnd2 = tf.random_uniform(shape, 0.0, 1.0,
+ dtype=tf.float32)
+ diff = (rnd2 - rnd1).eval()
+ self.assertTrue(np.linalg.norm(diff) > 0.1)
+
+
+class RandomShapeTest(tf.test.TestCase):
+
+ def testRandomParameters(self):
+ # Fully known shape.
+ rnd1 = tf.truncated_normal([1, 2, 3])
+ self.assertEqual([1, 2, 3], rnd1.get_shape())
+ # Partially known shape.
+ rnd2 = tf.truncated_normal(tf.placeholder(tf.int32, shape=(3,)))
+ self.assertEqual([None, None, None], rnd2.get_shape().as_list())
+ # Unknown shape.
+ rnd3 = tf.truncated_normal(tf.placeholder(tf.int32))
+ self.assertIs(None, rnd3.get_shape().ndims)
+
+ def testRandomNormal(self):
+ # Fully known shape.
+ rnd1 = tf.random_normal([1, 2, 3])
+ self.assertEqual([1, 2, 3], rnd1.get_shape())
+ # Partially known shape.
+ rnd2 = tf.random_normal(tf.placeholder(tf.int32, shape=(3,)))
+ self.assertEqual([None, None, None], rnd2.get_shape().as_list())
+ # Unknown shape.
+ rnd3 = tf.random_normal(tf.placeholder(tf.int32))
+ self.assertIs(None, rnd3.get_shape().ndims)
+
+ def testRandomUniform(self):
+ # Fully known shape.
+ rnd1 = tf.random_uniform([1, 2, 3])
+ self.assertEqual([1, 2, 3], rnd1.get_shape())
+ # Partially known shape.
+ rnd2 = tf.random_uniform(
+ tf.placeholder(tf.int32, shape=(3,)))
+ self.assertEqual([None, None, None], rnd2.get_shape().as_list())
+ # Unknown shape.
+ rnd3 = tf.random_uniform(tf.placeholder(tf.int32))
+ self.assertIs(None, rnd3.get_shape().ndims)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/random_shuffle_queue_test.py b/tensorflow/python/kernel_tests/random_shuffle_queue_test.py
new file mode 100644
index 0000000000..343ffdcb76
--- /dev/null
+++ b/tensorflow/python/kernel_tests/random_shuffle_queue_test.py
@@ -0,0 +1,1054 @@
+"""Tests for tensorflow.ops.data_flow_ops.Queue."""
+import random
+import re
+import time
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class RandomShuffleQueueTest(tf.test.TestCase):
+
+ def setUp(self):
+ # Useful for debugging when a test times out.
+ super(RandomShuffleQueueTest, self).setUp()
+ tf.logging.error("Starting: %s", self._testMethodName)
+
+ def tearDown(self):
+ super(RandomShuffleQueueTest, self).tearDown()
+ tf.logging.error("Finished: %s", self._testMethodName)
+
+ def testEnqueue(self):
+ with self.test_session():
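+      # The first two positional arguments are capacity=10 and
+      # min_after_dequeue=5.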
+ q = tf.RandomShuffleQueue(10, 5, tf.float32)
+ enqueue_op = q.enqueue((10.0,))
+ self.assertAllEqual(0, q.size().eval())
+ enqueue_op.run()
+ self.assertAllEqual(1, q.size().eval())
+
+ def testEnqueueWithShape(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shapes=tf.TensorShape([3, 2]))
+ enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
+ enqueue_correct_op.run()
+ self.assertAllEqual(1, q.size().eval())
+ with self.assertRaises(ValueError):
+ q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
+
+ def testEnqueueManyWithShape(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(
+ 10, 5, [tf.int32, tf.int32],
+ shapes=[(), (2,)])
+ q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
+ self.assertAllEqual(4, q.size().eval())
+
+ q2 = tf.RandomShuffleQueue(10, 5, tf.int32, shapes=tf.TensorShape([3]))
+ q2.enqueue(([1, 2, 3],))
+ q2.enqueue_many(([[1, 2, 3]],))
+
+ def testScalarShapes(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(
+ 10, 0, [tf.int32, tf.int32],
+ shapes=[(), (1,)])
+ q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
+ q.enqueue([9, [10]]).run()
+ dequeue_t = q.dequeue()
+ results = []
+ for _ in range(2):
+ a, b = sess.run(dequeue_t)
+ results.append((a, b))
+ a, b = sess.run(q.dequeue_many(3))
+ for i in range(3):
+ results.append((a[i], b[i]))
+ self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
+ results)
+
+ def testParallelEnqueue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ # Run one producer thread for each element in elems.
+ def enqueue(enqueue_op):
+ sess.run(enqueue_op)
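+      # checkedThread wraps threading.Thread so that exceptions raised in
+      # the thread body (including failed assertions) are re-raised when
+      # join() is called.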
+ threads = [self.checkedThread(target=enqueue, args=(e,))
+ for e in enqueue_ops]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+
+ # Dequeue every element using a single thread.
+ results = []
+ for _ in xrange(len(elems)):
+ results.append(dequeued_t.eval())
+ self.assertItemsEqual(elems, results)
+
+ def testParallelDequeue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ # Enqueue every element using a single thread.
+ for enqueue_op in enqueue_ops:
+ enqueue_op.run()
+
+ # Run one consumer thread for each element in elems.
+ results = []
+
+ def dequeue():
+ results.append(sess.run(dequeued_t))
+ threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+ self.assertItemsEqual(elems, results)
+
+ def testDequeue(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ elems = [10.0, 20.0, 30.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ for enqueue_op in enqueue_ops:
+ enqueue_op.run()
+
+ vals = [dequeued_t.eval() for _ in xrange(len(elems))]
+ self.assertItemsEqual(elems, vals)
+
+ def testEnqueueAndBlockingDequeue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(3, 0, tf.float32)
+ elems = [10.0, 20.0, 30.0]
+ enqueue_ops = [q.enqueue((x,)) for x in elems]
+ dequeued_t = q.dequeue()
+
+ def enqueue():
+ # The enqueue_ops should run after the dequeue op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ for enqueue_op in enqueue_ops:
+ sess.run(enqueue_op)
+
+ results = []
+
+ def dequeue():
+ for _ in xrange(len(elems)):
+ results.append(sess.run(dequeued_t))
+
+ enqueue_thread = self.checkedThread(target=enqueue)
+ dequeue_thread = self.checkedThread(target=dequeue)
+ enqueue_thread.start()
+ dequeue_thread.start()
+ enqueue_thread.join()
+ dequeue_thread.join()
+
+ self.assertItemsEqual(elems, results)
+
+ def testMultiEnqueueAndDequeue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(
+ 10, 0, (tf.int32, tf.float32))
+ elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
+ enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
+ dequeued_t = q.dequeue()
+
+ for enqueue_op in enqueue_ops:
+ enqueue_op.run()
+
+ results = []
+ for _ in xrange(len(elems)):
+ x, y = sess.run(dequeued_t)
+ results.append((x, y))
+ self.assertItemsEqual(elems, results)
+
+ def testQueueSizeEmpty(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 5, tf.float32)
+ self.assertEqual(0, q.size().eval())
+
+ def testQueueSizeAfterEnqueueAndDequeue(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ enqueue_op = q.enqueue((10.0,))
+ dequeued_t = q.dequeue()
+ size = q.size()
+ self.assertEqual([], size.get_shape())
+
+ enqueue_op.run()
+ self.assertEqual([1], size.eval())
+ dequeued_t.op.run()
+ self.assertEqual([0], size.eval())
+
+ def testEnqueueMany(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue()
+ enqueue_op.run()
+ enqueue_op.run()
+
+ results = []
+ for _ in range(8):
+ results.append(dequeued_t.eval())
+ self.assertItemsEqual(elems + elems, results)
+
+ def testEmptyEnqueueMany(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 5, tf.float32)
+ empty_t = tf.constant([], dtype=tf.float32,
+ shape=[0, 2, 3])
+ enqueue_op = q.enqueue_many((empty_t,))
+ size_t = q.size()
+
+ self.assertEqual(0, size_t.eval())
+ enqueue_op.run()
+ self.assertEqual(0, size_t.eval())
+
+ def testEmptyDequeueMany(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 0, tf.float32, shapes=())
+ enqueue_op = q.enqueue((10.0,))
+ dequeued_t = q.dequeue_many(0)
+
+ self.assertEqual([], dequeued_t.eval().tolist())
+ enqueue_op.run()
+ self.assertEqual([], dequeued_t.eval().tolist())
+
+ def testEmptyDequeueManyWithNoShape(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ enqueue_op = q.enqueue(
+ (tf.constant([10.0, 20.0], shape=(1, 2)),))
+ dequeued_t = q.dequeue_many(0)
+
+ # Expect the operation to fail due to the shape not being constrained.
+ with self.assertRaisesOpError(
+ "requires the components to have specified shapes"):
+ dequeued_t.eval()
+
+ enqueue_op.run()
+
+      # Unlike tf.FIFOQueue, RandomShuffleQueue does not make any
+ # attempt to support DequeueMany with unspecified shapes, even if
+ # a shape could be inferred from the elements enqueued.
+ with self.assertRaisesOpError(
+ "requires the components to have specified shapes"):
+ dequeued_t.eval()
+
+ def testMultiEnqueueMany(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(
+ 10, 0, (tf.float32, tf.int32))
+ float_elems = [10.0, 20.0, 30.0, 40.0]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
+ enqueue_op = q.enqueue_many((float_elems, int_elems))
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+ enqueue_op.run()
+
+ results = []
+ for _ in range(8):
+ float_val, int_val = sess.run(dequeued_t)
+ results.append((float_val, [int_val[0], int_val[1]]))
+ expected = zip(float_elems, int_elems) + zip(float_elems, int_elems)
+ self.assertItemsEqual(expected, results)
+
+ def testDequeueMany(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(5)
+
+ enqueue_op.run()
+
+ results = dequeued_t.eval().tolist()
+ results.extend(dequeued_t.eval())
+ self.assertItemsEqual(elems, results)
+
+ def testMultiDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(
+ 10, 0, (tf.float32, tf.int32),
+ shapes=((), (2,)))
+ float_elems = [
+ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+ int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
+ [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
+ enqueue_op = q.enqueue_many((float_elems, int_elems))
+ dequeued_t = q.dequeue_many(4)
+ dequeued_single_t = q.dequeue()
+
+ enqueue_op.run()
+
+ results = []
+ float_val, int_val = sess.run(dequeued_t)
+ self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
+ self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
+ results.extend(zip(float_val, int_val.tolist()))
+
+ float_val, int_val = sess.run(dequeued_t)
+ results.extend(zip(float_val, int_val.tolist()))
+
+ float_val, int_val = sess.run(dequeued_single_t)
+ self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
+ self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
+ results.append((float_val, int_val.tolist()))
+
+ float_val, int_val = sess.run(dequeued_single_t)
+ results.append((float_val, int_val.tolist()))
+
+ self.assertItemsEqual(zip(float_elems, int_elems), results)
+
+ def testHighDimension(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(
+ 10, 0, tf.int32, ((4, 4, 4, 4)))
+ elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(10)
+
+ enqueue_op.run()
+ self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
+
+ def testParallelEnqueueMany(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(1000, 0, tf.float32, shapes=())
+ elems = [10.0 * x for x in range(100)]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(1000)
+
+ # Enqueue 100 items in parallel on 10 threads.
+ def enqueue():
+ sess.run(enqueue_op)
+ threads = [self.checkedThread(target=enqueue) for _ in range(10)]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+
+ self.assertItemsEqual(dequeued_t.eval(), elems * 10)
+
+ def testParallelDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(1000, 0, tf.float32, shapes=())
+ elems = [10.0 * x for x in range(1000)]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(100)
+
+ enqueue_op.run()
+
+ # Dequeue 100 items in parallel on 10 threads.
+ dequeued_elems = []
+
+ def dequeue():
+ dequeued_elems.extend(sess.run(dequeued_t))
+ threads = [self.checkedThread(target=dequeue) for _ in range(10)]
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
+ self.assertItemsEqual(elems, dequeued_elems)
+
+ def testBlockingDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ dequeued_t = q.dequeue_many(4)
+
+ dequeued_elems = []
+
+ def enqueue():
+ # The enqueue_op should run after the dequeue op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ sess.run(enqueue_op)
+
+ def dequeue():
+ dequeued_elems.extend(sess.run(dequeued_t).tolist())
+
+ enqueue_thread = self.checkedThread(target=enqueue)
+ dequeue_thread = self.checkedThread(target=dequeue)
+ enqueue_thread.start()
+ dequeue_thread.start()
+ enqueue_thread.join()
+ dequeue_thread.join()
+
+ self.assertItemsEqual(elems, dequeued_elems)
+
+ def testDequeueManyWithTensorParameter(self):
+ with self.test_session():
+ # Define a first queue that contains integer counts.
+ dequeue_counts = [random.randint(1, 10) for _ in range(100)]
+ count_q = tf.RandomShuffleQueue(100, 0, tf.int32)
+ enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
+ total_count = sum(dequeue_counts)
+
+ # Define a second queue that contains total_count elements.
+ elems = [random.randint(0, 100) for _ in range(total_count)]
+ q = tf.RandomShuffleQueue(
+ total_count, 0, tf.int32, ((),))
+ enqueue_elems_op = q.enqueue_many((elems,))
+
+ # Define a subgraph that first dequeues a count, then DequeuesMany
+ # that number of elements.
+ dequeued_t = q.dequeue_many(count_q.dequeue())
+
+ enqueue_counts_op.run()
+ enqueue_elems_op.run()
+
+ dequeued_elems = []
+ for _ in dequeue_counts:
+ dequeued_elems.extend(dequeued_t.eval())
+ self.assertItemsEqual(elems, dequeued_elems)
+
+ def testDequeueFromClosedQueue(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 2, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+ close_op.run()
+ results = [dequeued_t.eval() for _ in elems]
+ expected = [[elem] for elem in elems]
+ self.assertItemsEqual(expected, results)
+
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ dequeued_t.eval()
+
+ def testBlockingDequeueFromClosedQueue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(10, 2, tf.float32)
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+
+ results = []
+ def dequeue():
+ for _ in elems:
+ results.append(sess.run(dequeued_t))
+ self.assertItemsEqual(elems, results)
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ sess.run(dequeued_t)
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+      # The dequeue thread blocked when it hit the min_after_dequeue
+      # requirement.
+ self.assertEqual(len(results), 2)
+ close_op.run()
+ dequeue_thread.join()
+      # Once the queue is closed, the min_after_dequeue requirement is
+      # lifted.
+ self.assertEqual(len(results), 4)
+
+ def testBlockingDequeueFromClosedEmptyQueue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(10, 0, tf.float32)
+ close_op = q.close()
+ dequeued_t = q.dequeue()
+
+ finished = [] # Needs to be a mutable type
+ def dequeue():
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ sess.run(dequeued_t)
+ finished.append(True)
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ self.assertEqual(len(finished), 0)
+ close_op.run()
+ dequeue_thread.join()
+ self.assertEqual(len(finished), 1)
+
+ def testBlockingDequeueManyFromClosedQueue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+ dequeued_t = q.dequeue_many(4)
+
+ enqueue_op.run()
+
+ progress = [] # Must be mutable
+ def dequeue():
+ self.assertItemsEqual(elems, sess.run(dequeued_t))
+ progress.append(1)
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ sess.run(dequeued_t)
+ progress.append(2)
+
+ self.assertEqual(len(progress), 0)
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ for _ in range(100):
+ time.sleep(0.01)
+ if len(progress) == 1: break
+ self.assertEqual(len(progress), 1)
+ time.sleep(0.01)
+ close_op.run()
+ dequeue_thread.join()
+ self.assertEqual(len(progress), 2)
+
+ def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+ dequeued_t = q.dequeue_many(3)
+ cleanup_dequeue_t = q.dequeue_many(q.size())
+
+ enqueue_op.run()
+
+ results = []
+ def dequeue():
+ results.extend(sess.run(dequeued_t))
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ sess.run(dequeued_t)
+        # However, the element remaining when the queue was closed can
+        # still be dequeued, so one more result is added here.
+ results.extend(sess.run(cleanup_dequeue_t))
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ self.assertEqual(len(results), 3)
+ close_op.run()
+ dequeue_thread.join()
+ self.assertEqual(len(results), 3)
+
+ def testBlockingDequeueManyFromClosedEmptyQueue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(10, 5, tf.float32, ((),))
+ close_op = q.close()
+ dequeued_t = q.dequeue_many(4)
+
+ def dequeue():
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
+ "is closed and has insufficient"):
+ sess.run(dequeued_t)
+
+ dequeue_thread = self.checkedThread(target=dequeue)
+ dequeue_thread.start()
+ # The close_op should run after the dequeue_thread has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ close_op.run()
+ dequeue_thread.join()
+
+ def testEnqueueToClosedQueue(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 4, tf.float32)
+ enqueue_op = q.enqueue((10.0,))
+ close_op = q.close()
+
+ enqueue_op.run()
+ close_op.run()
+
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
+ enqueue_op.run()
+
+ def testEnqueueManyToClosedQueue(self):
+ with self.test_session():
+ q = tf.RandomShuffleQueue(10, 5, tf.float32, ((),))
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ close_op = q.close()
+
+ enqueue_op.run()
+ close_op.run()
+
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
+ enqueue_op.run()
+
+ def testBlockingEnqueueToFullQueue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ blocking_enqueue_op = q.enqueue((50.0,))
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+
+ def blocking_enqueue():
+ sess.run(blocking_enqueue_op)
+ thread = self.checkedThread(target=blocking_enqueue)
+ thread.start()
+ # The dequeue ops should run after the blocking_enqueue_op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ results = []
+ for _ in elems:
+ results.append(dequeued_t.eval())
+ results.append(dequeued_t.eval())
+ self.assertItemsEqual(elems + [50.0], results)
+ # There wasn't room for 50.0 in the queue when the first element was
+ # dequeued.
+ self.assertNotEqual(50.0, results[0])
+ thread.join()
+
+ def testBlockingEnqueueManyToFullQueue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
+ dequeued_t = q.dequeue()
+
+ enqueue_op.run()
+
+ def blocking_enqueue():
+ sess.run(blocking_enqueue_op)
+ thread = self.checkedThread(target=blocking_enqueue)
+ thread.start()
+ # The dequeue ops should run after the blocking_enqueue_op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+
+ results = []
+ for _ in elems:
+ time.sleep(0.01)
+ results.append(dequeued_t.eval())
+ results.append(dequeued_t.eval())
+ results.append(dequeued_t.eval())
+ self.assertItemsEqual(elems + [50.0, 60.0], results)
+ # There wasn't room for 50.0 or 60.0 in the queue when the first
+ # element was dequeued.
+ self.assertNotEqual(50.0, results[0])
+ self.assertNotEqual(60.0, results[0])
+ # Similarly for 60.0 and the second element.
+ self.assertNotEqual(60.0, results[1])
+
+ def testBlockingEnqueueToClosedQueue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
+ elems = [10.0, 20.0, 30.0, 40.0]
+ enqueue_op = q.enqueue_many((elems,))
+ blocking_enqueue_op = q.enqueue((50.0,))
+ dequeued_t = q.dequeue()
+ close_op = q.close()
+
+ enqueue_op.run()
+
+ def blocking_enqueue():
+ # Expect the operation to succeed since it will complete
+ # before the queue is closed.
+ sess.run(blocking_enqueue_op)
+
+ # Expect the operation to fail due to the queue being closed.
+ with self.assertRaisesRegexp(tf.errors.AbortedError, "closed"):
+ sess.run(blocking_enqueue_op)
+ thread1 = self.checkedThread(target=blocking_enqueue)
+ thread1.start()
+
+ # The close_op should run after the first blocking_enqueue_op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+
+ def blocking_close():
+ sess.run(close_op)
+ thread2 = self.checkedThread(target=blocking_close)
+ thread2.start()
+
+ # Wait for the close op to block before unblocking the enqueue.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+
+ results = []
+ # Dequeue to unblock the first blocking_enqueue_op, after which the
+ # close will complete.
+ results.append(dequeued_t.eval())
+ self.assertTrue(results[0] in elems)
+ thread2.join()
+ thread1.join()
+
+ def testBlockingEnqueueManyToClosedQueue(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
+ elems = [10.0, 20.0, 30.0]
+ enqueue_op = q.enqueue_many((elems,))
+ blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
+ close_op = q.close()
+ size_t = q.size()
+
+ enqueue_op.run()
+ self.assertEqual(size_t.eval(), 3)
+
+ def blocking_enqueue():
+ # This will block until the dequeue after the close.
+ sess.run(blocking_enqueue_op)
+ # At this point the close operation will become unblocked, so the
+ # next enqueue will fail.
+ with self.assertRaisesRegexp(tf.errors.AbortedError, "closed"):
+ sess.run(blocking_enqueue_op)
+ thread1 = self.checkedThread(target=blocking_enqueue)
+ thread1.start()
+ # The close_op should run after the blocking_enqueue_op has blocked.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ # The first blocking_enqueue_op in blocking_enqueue has enqueued 1 of its 2
+ # elements, and is blocked waiting for one more element to be dequeued.
+ self.assertEqual(size_t.eval(), 4)
+
+ def blocking_close():
+ sess.run(close_op)
+ thread2 = self.checkedThread(target=blocking_close)
+ thread2.start()
+
+ # The close_op should run before the second blocking_enqueue_op
+ # has started.
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+
+ # Unblock the first blocking_enqueue_op in blocking_enqueue.
+ q.dequeue().eval()
+
+ thread2.join()
+ thread1.join()
+
+ def testSharedQueueSameSession(self):
+ with self.test_session():
+ q1 = tf.RandomShuffleQueue(
+ 1, 0, tf.float32, ((),), shared_name="shared_queue")
+ q1.enqueue((10.0,)).run()
+
+ q2 = tf.RandomShuffleQueue(
+ 1, 0, tf.float32, ((),), shared_name="shared_queue")
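+ # Both handles name the same underlying queue via shared_name, so enqueues
+ # through one are visible through the other.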
+
+ q1_size_t = q1.size()
+ q2_size_t = q2.size()
+
+ self.assertEqual(q1_size_t.eval(), 1)
+ self.assertEqual(q2_size_t.eval(), 1)
+
+ self.assertEqual(q2.dequeue().eval(), 10.0)
+
+ self.assertEqual(q1_size_t.eval(), 0)
+ self.assertEqual(q2_size_t.eval(), 0)
+
+ q2.enqueue((20.0,)).run()
+
+ self.assertEqual(q1_size_t.eval(), 1)
+ self.assertEqual(q2_size_t.eval(), 1)
+
+ self.assertEqual(q1.dequeue().eval(), 20.0)
+
+ self.assertEqual(q1_size_t.eval(), 0)
+ self.assertEqual(q2_size_t.eval(), 0)
+
+ def testIncompatibleSharedQueueErrors(self):
+ with self.test_session():
+ q_a_1 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shared_name="q_a")
+ q_a_2 = tf.RandomShuffleQueue(
+ 15, 5, tf.float32, shared_name="q_a")
+ q_a_1.queue_ref.eval()
+ with self.assertRaisesOpError("capacity"):
+ q_a_2.queue_ref.eval()
+
+ q_b_1 = tf.RandomShuffleQueue(
+ 10, 0, tf.float32, shared_name="q_b")
+ q_b_2 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shared_name="q_b")
+ q_b_1.queue_ref.eval()
+ with self.assertRaisesOpError("min_after_dequeue"):
+ q_b_2.queue_ref.eval()
+
+ q_c_1 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shared_name="q_c")
+ q_c_2 = tf.RandomShuffleQueue(
+ 10, 5, tf.int32, shared_name="q_c")
+ q_c_1.queue_ref.eval()
+ with self.assertRaisesOpError("component types"):
+ q_c_2.queue_ref.eval()
+
+ q_d_1 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shared_name="q_d")
+ q_d_2 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
+ q_d_1.queue_ref.eval()
+ with self.assertRaisesOpError("component shapes"):
+ q_d_2.queue_ref.eval()
+
+ q_e_1 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
+ q_e_2 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shared_name="q_e")
+ q_e_1.queue_ref.eval()
+ with self.assertRaisesOpError("component shapes"):
+ q_e_2.queue_ref.eval()
+
+ q_f_1 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
+ q_f_2 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
+ q_f_1.queue_ref.eval()
+ with self.assertRaisesOpError("component shapes"):
+ q_f_2.queue_ref.eval()
+
+ q_g_1 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, shared_name="q_g")
+ q_g_2 = tf.RandomShuffleQueue(
+ 10, 5, (tf.float32, tf.int32), shared_name="q_g")
+ q_g_1.queue_ref.eval()
+ with self.assertRaisesOpError("component types"):
+ q_g_2.queue_ref.eval()
+
+ q_h_1 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, seed=12, shared_name="q_h")
+ q_h_2 = tf.RandomShuffleQueue(
+ 10, 5, tf.float32, seed=21, shared_name="q_h")
+ q_h_1.queue_ref.eval()
+ with self.assertRaisesOpError("random seeds"):
+ q_h_2.queue_ref.eval()
+
+ def testSelectQueue(self):
+ with self.test_session():
+ num_queues = 10
+ qlist = list()
+ for _ in xrange(num_queues):
+ qlist.append(
+ tf.RandomShuffleQueue(10, 0, tf.float32))
+ # Enqueue/Dequeue into a dynamically selected queue
+ for _ in xrange(20):
+ index = np.random.randint(num_queues)
+ q = tf.RandomShuffleQueue.from_list(index, qlist)
+ q.enqueue((10.,)).run()
+ self.assertEqual(q.dequeue().eval(), 10.0)
+
+ def testSelectQueueOutOfRange(self):
+ with self.test_session():
+ q1 = tf.RandomShuffleQueue(10, 0, tf.float32)
+ q2 = tf.RandomShuffleQueue(15, 0, tf.float32)
+ enq_q = tf.RandomShuffleQueue.from_list(3, [q1, q2])
+ with self.assertRaisesOpError("Index must be in the range"):
+ enq_q.dequeue().eval()
+
+ def _blockingDequeue(self, sess, dequeue_op):
+ with self.assertRaisesOpError("Dequeue operation was cancelled"):
+ sess.run(dequeue_op)
+
+ def _blockingDequeueMany(self, sess, dequeue_many_op):
+ with self.assertRaisesOpError("Dequeue operation was cancelled"):
+ sess.run(dequeue_many_op)
+
+ def _blockingEnqueue(self, sess, enqueue_op):
+ with self.assertRaisesOpError("Enqueue operation was cancelled"):
+ sess.run(enqueue_op)
+
+ def _blockingEnqueueMany(self, sess, enqueue_many_op):
+ with self.assertRaisesOpError("Enqueue operation was cancelled"):
+ sess.run(enqueue_many_op)
+
+ def testResetOfBlockingOperation(self):
+ with self.test_session() as sess:
+ q_empty = tf.RandomShuffleQueue(
+ 5, 0, tf.float32, ((),))
+ dequeue_op = q_empty.dequeue()
+ dequeue_many_op = q_empty.dequeue_many(1)
+
+ q_full = tf.RandomShuffleQueue(5, 0, tf.float32, ((),))
+ sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
+ enqueue_op = q_full.enqueue((6.0,))
+ enqueue_many_op = q_full.enqueue_many(([6.0],))
+
+ threads = [
+ self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
+ self.checkedThread(self._blockingDequeueMany, args=(sess,
+ dequeue_many_op)),
+ self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
+ self.checkedThread(self._blockingEnqueueMany, args=(sess,
+ enqueue_many_op))]
+ for t in threads:
+ t.start()
+ time.sleep(0.1)
+ sess.close() # Will cancel the blocked operations.
+ for t in threads:
+ t.join()
+
+ def testDequeueManyInDifferentOrders(self):
+ with self.test_session():
+ # Specify seeds to make the test deterministic
+ # (https://en.wikipedia.org/wiki/Taxicab_number).
+ q1 = tf.RandomShuffleQueue(10, 5, tf.int32,
+ ((),), seed=1729)
+ q2 = tf.RandomShuffleQueue(10, 5, tf.int32,
+ ((),), seed=87539319)
+ enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
+ enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
+ deq1 = q1.dequeue_many(5)
+ deq2 = q2.dequeue_many(5)
+
+ enq1.run()
+ enq1.run()
+ enq2.run()
+ enq2.run()
+
+ results = [[], [], [], []]
+
+ results[0].extend(deq1.eval())
+ results[1].extend(deq2.eval())
+
+ q1.close().run()
+ q2.close().run()
+
+ results[2].extend(deq1.eval())
+ results[3].extend(deq2.eval())
+
+ # No two should match
+ for i in range(1, 4):
+ for j in range(i):
+ self.assertNotEqual(results[i], results[j])
+
+ def testDequeueInDifferentOrders(self):
+ with self.test_session():
+ # Specify seeds to make the test deterministic
+ # (https://en.wikipedia.org/wiki/Taxicab_number).
+ q1 = tf.RandomShuffleQueue(10, 5, tf.int32,
+ ((),), seed=1729)
+ q2 = tf.RandomShuffleQueue(10, 5, tf.int32,
+ ((),), seed=87539319)
+ enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
+ enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
+ deq1 = q1.dequeue()
+ deq2 = q2.dequeue()
+
+ enq1.run()
+ enq1.run()
+ enq2.run()
+ enq2.run()
+
+ results = [[], [], [], []]
+
+ for _ in range(5):
+ results[0].append(deq1.eval())
+ results[1].append(deq2.eval())
+
+ q1.close().run()
+ q2.close().run()
+
+ for _ in range(5):
+ results[2].append(deq1.eval())
+ results[3].append(deq2.eval())
+
+ # No two should match
+ for i in range(1, 4):
+ for j in range(i):
+ self.assertNotEqual(results[i], results[j])
+
+ def testBigEnqueueMany(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(
+ 5, 0, tf.int32, ((),))
+ elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ enq = q.enqueue_many((elem,))
+ deq = q.dequeue()
+ size_op = q.size()
+
+ enq_done = []
+ def blocking_enqueue():
+ enq_done.append(False)
+ # This will fill the queue and then block until enough dequeues happen.
+ sess.run(enq)
+ enq_done.append(True)
+ thread = self.checkedThread(target=blocking_enqueue)
+ thread.start()
+
+ # The enqueue should start and then block.
+ results = []
+ results.append(deq.eval()) # Will only complete after the enqueue starts.
+ self.assertEqual(len(enq_done), 1)
+ self.assertEqual(sess.run(size_op), 5)
+
+ for _ in range(3):
+ results.append(deq.eval())
+
+ time.sleep(0.1)
+ self.assertEqual(len(enq_done), 1)
+ self.assertEqual(sess.run(size_op), 5)
+
+ # This dequeue will unblock the thread.
+ results.append(deq.eval())
+ time.sleep(0.1)
+ self.assertEqual(len(enq_done), 2)
+ thread.join()
+
+ for i in range(5):
+ self.assertEqual(size_op.eval(), 5 - i)
+ results.append(deq.eval())
+ self.assertEqual(size_op.eval(), 5 - i - 1)
+
+ self.assertItemsEqual(elem, results)
+
+ def testBigDequeueMany(self):
+ with self.test_session() as sess:
+ q = tf.RandomShuffleQueue(2, 0, tf.int32, ((),))
+ elem = range(4)
+ enq_list = [q.enqueue((e,)) for e in elem]
+ deq = q.dequeue_many(4)
+
+ results = []
+ def blocking_dequeue():
+ # Will only complete after 4 enqueues complete.
+ results.extend(sess.run(deq))
+ thread = self.checkedThread(target=blocking_dequeue)
+ thread.start()
+ # The dequeue should start and then block.
+ for enq in enq_list:
+ # TODO(mrry): Figure out how to do this without sleeping.
+ time.sleep(0.1)
+ self.assertEqual(len(results), 0)
+ sess.run(enq)
+
+ # Enough enqueued to unblock the dequeue
+ thread.join()
+ self.assertItemsEqual(elem, results)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/reader_ops_test.py b/tensorflow/python/kernel_tests/reader_ops_test.py
new file mode 100644
index 0000000000..484e3eca43
--- /dev/null
+++ b/tensorflow/python/kernel_tests/reader_ops_test.py
@@ -0,0 +1,362 @@
+"""Tests for Reader ops from io_ops."""
+
+import os
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+
+class IdentityReaderTest(tf.test.TestCase):
+
+ def _ExpectRead(self, sess, key, value, expected):
+ k, v = sess.run([key, value])
+ self.assertAllEqual(expected, k)
+ self.assertAllEqual(expected, v)
+
+ def testOneEpoch(self):
+ with self.test_session() as sess:
+ reader = tf.IdentityReader("test_reader")
+ work_completed = reader.num_work_units_completed()
+ produced = reader.num_records_produced()
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ queued_length = queue.size()
+ key, value = reader.read(queue)
+
+ self.assertAllEqual(0, work_completed.eval())
+ self.assertAllEqual(0, produced.eval())
+ self.assertAllEqual(0, queued_length.eval())
+
+ queue.enqueue_many([["A", "B", "C"]]).run()
+ queue.close().run()
+ self.assertAllEqual(3, queued_length.eval())
+
+ self._ExpectRead(sess, key, value, "A")
+ self.assertAllEqual(1, produced.eval())
+
+ self._ExpectRead(sess, key, value, "B")
+
+ self._ExpectRead(sess, key, value, "C")
+ self.assertAllEqual(3, produced.eval())
+ self.assertAllEqual(0, queued_length.eval())
+
+ with self.assertRaisesOpError("is closed and has insufficient elements "
+ "\\(requested 1, current size 0\\)"):
+ sess.run([key, value])
+
+ self.assertAllEqual(3, work_completed.eval())
+ self.assertAllEqual(3, produced.eval())
+ self.assertAllEqual(0, queued_length.eval())
+
+ def testMultipleEpochs(self):
+ with self.test_session() as sess:
+ reader = tf.IdentityReader("test_reader")
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ enqueue = queue.enqueue_many([["DD", "EE"]])
+ key, value = reader.read(queue)
+
+ enqueue.run()
+ self._ExpectRead(sess, key, value, "DD")
+ self._ExpectRead(sess, key, value, "EE")
+ enqueue.run()
+ self._ExpectRead(sess, key, value, "DD")
+ self._ExpectRead(sess, key, value, "EE")
+ enqueue.run()
+ self._ExpectRead(sess, key, value, "DD")
+ self._ExpectRead(sess, key, value, "EE")
+ queue.close().run()
+ with self.assertRaisesOpError("is closed and has insufficient elements "
+ "\\(requested 1, current size 0\\)"):
+ sess.run([key, value])
+
+ def testSerializeRestore(self):
+ with self.test_session() as sess:
+ reader = tf.IdentityReader("test_reader")
+ produced = reader.num_records_produced()
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ queue.enqueue_many([["X", "Y", "Z"]]).run()
+ key, value = reader.read(queue)
+
+ self._ExpectRead(sess, key, value, "X")
+ self.assertAllEqual(1, produced.eval())
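+ # Snapshot the reader state after one record; restore_state() below rewinds
+ # the reader to this point.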
+ state = reader.serialize_state().eval()
+
+ self._ExpectRead(sess, key, value, "Y")
+ self._ExpectRead(sess, key, value, "Z")
+ self.assertAllEqual(3, produced.eval())
+
+ queue.enqueue_many([["Y", "Z"]]).run()
+ queue.close().run()
+ reader.restore_state(state).run()
+ self.assertAllEqual(1, produced.eval())
+ self._ExpectRead(sess, key, value, "Y")
+ self._ExpectRead(sess, key, value, "Z")
+ with self.assertRaisesOpError("is closed and has insufficient elements "
+ "\\(requested 1, current size 0\\)"):
+ sess.run([key, value])
+ self.assertAllEqual(3, produced.eval())
+
+ self.assertEqual(str, type(state))
+
+ with self.assertRaises(ValueError):
+ reader.restore_state([])
+
+ with self.assertRaises(ValueError):
+ reader.restore_state([state, state])
+
+ with self.assertRaisesOpError(
+ "Could not parse state for IdentityReader 'test_reader'"):
+ reader.restore_state(state[1:]).run()
+
+ with self.assertRaisesOpError(
+ "Could not parse state for IdentityReader 'test_reader'"):
+ reader.restore_state(state[:-1]).run()
+
+ with self.assertRaisesOpError(
+ "Could not parse state for IdentityReader 'test_reader'"):
+ reader.restore_state(state + "ExtraJunk").run()
+
+ with self.assertRaisesOpError(
+ "Could not parse state for IdentityReader 'test_reader'"):
+ reader.restore_state("PREFIX" + state).run()
+
+ with self.assertRaisesOpError(
+ "Could not parse state for IdentityReader 'test_reader'"):
+ reader.restore_state("BOGUS" + state[5:]).run()
+
+ def testReset(self):
+ with self.test_session() as sess:
+ reader = tf.IdentityReader("test_reader")
+ work_completed = reader.num_work_units_completed()
+ produced = reader.num_records_produced()
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ queued_length = queue.size()
+ key, value = reader.read(queue)
+
+ queue.enqueue_many([["X", "Y", "Z"]]).run()
+ self._ExpectRead(sess, key, value, "X")
+ self.assertLess(0, queued_length.eval())
+ self.assertAllEqual(1, produced.eval())
+
+ self._ExpectRead(sess, key, value, "Y")
+ self.assertLess(0, work_completed.eval())
+ self.assertAllEqual(2, produced.eval())
+
+ reader.reset().run()
+ self.assertAllEqual(0, work_completed.eval())
+ self.assertAllEqual(0, produced.eval())
+ self.assertAllEqual(1, queued_length.eval())
+ self._ExpectRead(sess, key, value, "Z")
+
+ queue.enqueue_many([["K", "L"]]).run()
+ self._ExpectRead(sess, key, value, "K")
+
+
+class WholeFileReaderTest(tf.test.TestCase):
+
+ def setUp(self):
+ super(WholeFileReaderTest, self).setUp()
+ self._filenames = [os.path.join(self.get_temp_dir(), "whole_file.%d.txt" % i)
+ for i in range(3)]
+ self._content = ["One\na\nb\n", "Two\nC\nD", "Three x, y, z"]
+ for fn, c in zip(self._filenames, self._content):
+ open(fn, "w").write(c)
+
+ def tearDown(self):
+ super(WholeFileReaderTest, self).tearDown()
+ for fn in self._filenames:
+ os.remove(fn)
+
+ def _ExpectRead(self, sess, key, value, index):
+ k, v = sess.run([key, value])
+ self.assertAllEqual(self._filenames[index], k)
+ self.assertAllEqual(self._content[index], v)
+
+ def testOneEpoch(self):
+ with self.test_session() as sess:
+ reader = tf.WholeFileReader("test_reader")
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ queue.enqueue_many([self._filenames]).run()
+ queue.close().run()
+ key, value = reader.read(queue)
+
+ self._ExpectRead(sess, key, value, 0)
+ self._ExpectRead(sess, key, value, 1)
+ self._ExpectRead(sess, key, value, 2)
+
+ with self.assertRaisesOpError("is closed and has insufficient elements "
+ "\\(requested 1, current size 0\\)"):
+ sess.run([key, value])
+
+ def testInfiniteEpochs(self):
+ with self.test_session() as sess:
+ reader = tf.WholeFileReader("test_reader")
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ enqueue = queue.enqueue_many([self._filenames])
+ key, value = reader.read(queue)
+
+ enqueue.run()
+ self._ExpectRead(sess, key, value, 0)
+ self._ExpectRead(sess, key, value, 1)
+ enqueue.run()
+ self._ExpectRead(sess, key, value, 2)
+ self._ExpectRead(sess, key, value, 0)
+ self._ExpectRead(sess, key, value, 1)
+ enqueue.run()
+ self._ExpectRead(sess, key, value, 2)
+ self._ExpectRead(sess, key, value, 0)
+
+
+class TextLineReaderTest(tf.test.TestCase):
+
+ def setUp(self):
+ super(TextLineReaderTest, self).setUp()
+ self._num_files = 2
+ self._num_lines = 5
+
+ def _LineText(self, f, l):
+ return "%d: %d" % (f, l)
+
+ def _CreateFiles(self):
+ filenames = []
+ for i in range(self._num_files):
+ fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
+ filenames.append(fn)
+ f = open(fn, "w")
+ for j in range(self._num_lines):
+ f.write(self._LineText(i, j))
+ # Always include a newline after the record unless it is
+ # at the end of the file, in which case we include it sometimes.
+ if j + 1 != self._num_lines or i == 0:
+ f.write("\n")
+ return filenames
+
+ def testOneEpoch(self):
+ files = self._CreateFiles()
+ with self.test_session() as sess:
+ reader = tf.TextLineReader(name="test_reader")
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ key, value = reader.read(queue)
+
+ queue.enqueue_many([files]).run()
+ queue.close().run()
+ for i in range(self._num_files):
+ for j in range(self._num_lines):
+ k, v = sess.run([key, value])
+ self.assertAllEqual("%s:%d" % (files[i], j + 1), k)
+ self.assertAllEqual(self._LineText(i, j), v)
+
+ with self.assertRaisesOpError("is closed and has insufficient elements "
+ "\\(requested 1, current size 0\\)"):
+ k, v = sess.run([key, value])
+
+ def testSkipHeaderLines(self):
+ files = self._CreateFiles()
+ with self.test_session() as sess:
+ reader = tf.TextLineReader(skip_header_lines=1, name="test_reader")
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ key, value = reader.read(queue)
+
+ queue.enqueue_many([files]).run()
+ queue.close().run()
+ for i in range(self._num_files):
+ for j in range(self._num_lines - 1):
+ k, v = sess.run([key, value])
+ self.assertAllEqual("%s:%d" % (files[i], j + 2), k)
+ self.assertAllEqual(self._LineText(i, j + 1), v)
+
+ with self.assertRaisesOpError("is closed and has insufficient elements "
+ "\\(requested 1, current size 0\\)"):
+ k, v = sess.run([key, value])
+
+
+class FixedLengthRecordReaderTest(tf.test.TestCase):
+
+ def setUp(self):
+ super(FixedLengthRecordReaderTest, self).setUp()
+ self._num_files = 2
+ self._num_records = 7
+ self._header_bytes = 5
+ self._record_bytes = 3
+ self._footer_bytes = 2
+
+ def _Record(self, f, r):
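+ # Each record is a single digit, str(f * 2 + r), repeated record_bytes times.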
+ return str(f * 2 + r) * self._record_bytes
+
+ def _CreateFiles(self):
+ filenames = []
+ for i in range(self._num_files):
+ fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
+ filenames.append(fn)
+ f = open(fn, "w")
+ f.write("H" * self._header_bytes)
+ for j in range(self._num_records):
+ f.write(self._Record(i, j))
+ f.write("F" * self._footer_bytes)
+ return filenames
+
+ def testOneEpoch(self):
+ files = self._CreateFiles()
+ with self.test_session() as sess:
+ reader = tf.FixedLengthRecordReader(
+ header_bytes=self._header_bytes,
+ record_bytes=self._record_bytes,
+ footer_bytes=self._footer_bytes,
+ name="test_reader")
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ key, value = reader.read(queue)
+
+ queue.enqueue_many([files]).run()
+ queue.close().run()
+ for i in range(self._num_files):
+ for j in range(self._num_records):
+ k, v = sess.run([key, value])
+ self.assertAllEqual("%s:%d" % (files[i], j), k)
+ self.assertAllEqual(self._Record(i, j), v)
+
+ with self.assertRaisesOpError("is closed and has insufficient elements "
+ "\\(requested 1, current size 0\\)"):
+ k, v = sess.run([key, value])
+
+
+class TFRecordReaderTest(tf.test.TestCase):
+
+ def setUp(self):
+ super(TFRecordReaderTest, self).setUp()
+ self._num_files = 2
+ self._num_records = 7
+
+ def _Record(self, f, r):
+ return "Record %d of file %d" % (r, f)
+
+ def _CreateFiles(self):
+ filenames = []
+ for i in range(self._num_files):
+ fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
+ filenames.append(fn)
+ writer = tf.python_io.TFRecordWriter(fn)
+ for j in range(self._num_records):
+ writer.write(self._Record(i, j))
+ return filenames
+
+ def testOneEpoch(self):
+ files = self._CreateFiles()
+ with self.test_session() as sess:
+ reader = tf.TFRecordReader(name="test_reader")
+ queue = tf.FIFOQueue(99, [tf.string], shapes=())
+ key, value = reader.read(queue)
+
+ queue.enqueue_many([files]).run()
+ queue.close().run()
+ for i in range(self._num_files):
+ for j in range(self._num_records):
+ k, v = sess.run([key, value])
+ self.assertTrue(k.startswith("%s:" % files[i]))
+ self.assertAllEqual(self._Record(i, j), v)
+
+ with self.assertRaisesOpError("is closed and has insufficient elements "
+ "\\(requested 1, current size 0\\)"):
+ k, v = sess.run([key, value])
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/reduction_ops_test.py b/tensorflow/python/kernel_tests/reduction_ops_test.py
new file mode 100644
index 0000000000..e5cab62c09
--- /dev/null
+++ b/tensorflow/python/kernel_tests/reduction_ops_test.py
@@ -0,0 +1,533 @@
+"""Functional tests for reduction ops."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.kernel_tests import gradient_checker
+
+
+class SumReductionTest(tf.test.TestCase):
+
+ def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
+ np_ans = x
+ if reduction_axes is None:
+ np_ans = np.sum(np_ans, keepdims=keep_dims)
+ else:
+ reduction_axes = np.array(reduction_axes).astype(np.int32)
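+ # Reduce from the last axis to the first so the remaining axis indices
+ # stay valid after each reduction.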
+ for ra in reduction_axes.ravel()[::-1]:
+ np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
+ with self.test_session(use_gpu=use_gpu):
+ tf_ans = tf.reduce_sum(x, reduction_axes, keep_dims)
+ out = tf_ans.eval()
+ self.assertAllClose(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareAll(self, x, reduction_axes):
+ if reduction_axes is not None and np.shape(reduction_axes) == (1,):
+ # Test scalar reduction_axes argument
+ self._compareAll(x, reduction_axes[0])
+ self._compare(x, reduction_axes, False, use_gpu=True)
+ self._compare(x, reduction_axes, False, use_gpu=False)
+ self._compare(x, reduction_axes, True, use_gpu=True)
+ self._compare(x, reduction_axes, True, use_gpu=False)
+
+ def testFloatReduce1D(self):
+ # Create a 1D array of floats
+ np_arr = np.arange(1, 6).reshape([5]).astype(np.float32)
+ self._compareAll(np_arr, [0])
+
+ def testFloatReduce2D(self):
+ # Create a 2D array of floats and reduce across all possible
+ # dimensions
+ np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
+ self._compareAll(np_arr, None)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [0, 1])
+
+ def testFloatReduce3D(self):
+ # Create a 3D array of floats and reduce across all possible
+ # dimensions
+ np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
+ self._compareAll(np_arr, None)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [2])
+ self._compareAll(np_arr, [0, 1])
+ self._compareAll(np_arr, [1, 2])
+ self._compareAll(np_arr, [0, 2])
+ self._compareAll(np_arr, [0, 1, 2])
+
+ def testFloatReduce4D(self):
+ # Create a 4D array of floats and reduce across some
+ # dimensions
+ np_arr = np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32)
+ self._compareAll(np_arr, None)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [2])
+ self._compareAll(np_arr, [0, 1])
+ self._compareAll(np_arr, [1, 2])
+ # Need specialization for reduce(4D, [0, 2])
+ # self._compareAll(np_arr, [0, 2])
+ self._compareAll(np_arr, [0, 1, 2])
+ self._compareAll(np_arr, [1, 2, 3])
+ self._compareAll(np_arr, [0, 1, 2, 3])
+
+ def testFloatReduce5D(self):
+ # Create a 5D array of floats and reduce across some dimensions
+ np_arr = np.arange(0, 840).reshape([2, 3, 5, 7, 4]).astype(np.float32)
+ self._compareAll(np_arr, None)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [2])
+ self._compareAll(np_arr, [0, 1])
+ self._compareAll(np_arr, [1, 2])
+ # Need specialization for reduce(5D, [0, 2])
+ # self._compareAll(np_arr, [0, 2])
+ self._compareAll(np_arr, [0, 1, 2])
+ self._compareAll(np_arr, [1, 2, 3])
+ self._compareAll(np_arr, [0, 1, 2, 3])
+ self._compareAll(np_arr, [1, 2, 3, 4])
+ self._compareAll(np_arr, [0, 1, 2, 3, 4])
+
+ # Simple tests for other data types.
+ def testDoubleReduce1D(self):
+ np_arr = np.arange(1, 6).reshape([5]).astype(np.float64)
+ self._compare(np_arr, [], False)
+ self._compare(np_arr, [0], False)
+
+ def testInt32Reduce1D(self):
+ np_arr = np.arange(1, 6).reshape([5]).astype(np.int32)
+ self._compare(np_arr, [], False)
+ self._compare(np_arr, [0], False)
+
+ def testInvalidIndex(self):
+ np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
+ input_tensor = tf.convert_to_tensor(np_arr)
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: "Invalid reduction dimension" in e.message):
+ tf.reduce_sum(input_tensor, [-1])
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: "Invalid reduction dimension" in e.message):
+ tf.reduce_sum(input_tensor, [2])
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: "Invalid reduction dimension" in e.message):
+ tf.reduce_sum(input_tensor, [0, 2])
+
+ # TODO: Add a test for int64 reductions.
+
+ def _compareGradient(self, shape, sum_shape, reduction_axes):
+ if reduction_axes is not None and np.shape(reduction_axes) == (1,):
+ # Test scalar reduction_axes argument
+ self._compareGradient(shape, sum_shape, reduction_axes[0])
+ x = np.arange(1.0, 49.0).reshape(shape).astype(np.float64)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_sum(t, reduction_axes)
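+ # delta=1 is safe here: reduce_sum is linear in its input, so the
+ # finite-difference gradient estimate is exact up to rounding.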
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t,
+ shape,
+ su,
+ sum_shape,
+ x_init_value=x,
+ delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
+
+ def testGradient(self):
+ self._compareGradient([2, 3, 4, 2], [2, 2], [1, 2])
+
+ def testGradient2(self):
+ self._compareGradient([2, 3, 4, 2], [2, 4, 2], [1])
+
+ def testGradient3(self):
+ self._compareGradient([2, 3, 4, 2], [2, 3, 2], [2])
+
+ def testGradient4(self):
+ self._compareGradient([2, 3, 4, 2], [], None)
+
+
+class MeanReductionTest(tf.test.TestCase):
+
+ def _compare(self, x, reduction_axes, keep_dims):
+ np_sum = x
+ count = 1
+ for ra in reduction_axes[::-1]:
+ np_sum = np.sum(np_sum, axis=ra, keepdims=keep_dims)
+ count *= x.shape[ra]
+ np_ans = np_sum / count
+ with self.test_session():
+ reduction_axes = np.array(reduction_axes).astype(np.int32)
+ tf_ans = tf.reduce_mean(x, reduction_axes, keep_dims)
+ out = tf_ans.eval()
+ self.assertAllClose(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareAll(self, x, reduction_axes):
+ self._compare(x, reduction_axes, False)
+ self._compare(x, reduction_axes, True)
+
+ def testFloatReduce3D(self):
+ # Create a 3D array of floats and reduce across all possible
+ # dimensions
+ np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [2])
+ self._compareAll(np_arr, [0, 1])
+ self._compareAll(np_arr, [1, 2])
+ self._compareAll(np_arr, [0, 2])
+ self._compareAll(np_arr, [0, 1, 2])
+
+ def testGradient(self):
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float32)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_mean(t, [1, 2])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+
+ su = tf.reduce_mean(t, [0, 1, 2, 3])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [1], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+
+ su = tf.reduce_mean(t, [])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+
+
+class ProdReductionTest(tf.test.TestCase):
+
+ def _compare(self, x, reduction_axes, keep_dims):
+ np_ans = x
+ if reduction_axes is None:
+ np_ans = np.prod(np_ans, keepdims=keep_dims)
+ else:
+ for ra in reduction_axes[::-1]:
+ np_ans = np.prod(np_ans, axis=ra, keepdims=keep_dims)
+ with self.test_session():
+ if reduction_axes is not None:
+ reduction_axes = np.array(reduction_axes).astype(np.int32)
+ tf_ans = tf.reduce_prod(x, reduction_axes, keep_dims)
+ out = tf_ans.eval()
+ self.assertAllClose(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareAll(self, x, reduction_axes):
+ self._compare(x, reduction_axes, False)
+ self._compare(x, reduction_axes, True)
+
+ def testFloatReduce3D(self):
+ # Create a 3D array of floats and reduce across all possible
+ # dimensions
+ np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
+ self._compareAll(np_arr, None)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [2])
+ self._compareAll(np_arr, [0, 1])
+ self._compareAll(np_arr, [1, 2])
+ self._compareAll(np_arr, [0, 2])
+ self._compareAll(np_arr, [0, 1, 2])
+
+ def testGradient(self):
+ s = [2, 3, 4, 2]
+ # NOTE(kearnes): divide by 20 so product is a reasonable size
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float32) / 20.
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+
+ su = tf.reduce_prod(t, [])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+
+ su = tf.reduce_prod(t, [1, 2])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+
+ su = tf.reduce_prod(t, [0, 1, 2, 3])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [1], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+
+ # NOTE(kearnes): the current gradient calculation gives NaNs for 0 inputs
+ x = np.arange(0.0, 48.0).reshape(s).astype(np.float32) / 20.
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_prod(t, [])
+ jacob_t, _ = gradient_checker.ComputeGradient(
+ t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
+ with self.assertRaisesOpError("Tensor had NaN values"):
+ tf.check_numerics(jacob_t, message="_ProdGrad NaN test").op.run()
+
+
+class MinReductionTest(tf.test.TestCase):
+
+ def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
+ np_ans = x
+ if reduction_axes is None:
+ np_ans = np.amin(np_ans, keepdims=keep_dims)
+ else:
+ for ra in reduction_axes[::-1]:
+ np_ans = np.amin(np_ans, axis=ra, keepdims=keep_dims)
+ with self.test_session(use_gpu=use_gpu):
+ if reduction_axes is not None:
+ reduction_axes = np.array(reduction_axes).astype(np.int32)
+ tf_ans = tf.reduce_min(x, reduction_axes, keep_dims)
+ out = tf_ans.eval()
+ self.assertAllClose(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareAll(self, x, reduction_axes):
+ self._compare(x, reduction_axes, False, use_gpu=True)
+ self._compare(x, reduction_axes, False, use_gpu=False)
+ self._compare(x, reduction_axes, True, use_gpu=True)
+ self._compare(x, reduction_axes, True, use_gpu=False)
+
+ def testFloatReduce3D(self):
+ # Create a 3D array of floats and reduce across all possible
+ # dimensions
+ np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [2])
+ self._compareAll(np_arr, [0, 1])
+ self._compareAll(np_arr, [1, 2])
+ self._compareAll(np_arr, [0, 2])
+ self._compareAll(np_arr, [0, 1, 2])
+
+ def testGradient(self):
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_min(t, [1, 2])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
+
+ def testGradient2(self):
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_min(t, [1])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 4, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
+
+ def testGradient3(self):
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_min(t, [2])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 3, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
+
+ def testGradient4(self):
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_min(t)
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [1], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
+
+
+class MaxReductionTest(tf.test.TestCase):
+
+ def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
+ np_ans = x
+ if reduction_axes is None:
+ np_ans = np.amax(np_ans, keepdims=keep_dims)
+ else:
+ for ra in reduction_axes[::-1]:
+ np_ans = np.amax(np_ans, axis=ra, keepdims=keep_dims)
+ with self.test_session(use_gpu=use_gpu):
+ if reduction_axes is not None:
+ reduction_axes = np.array(reduction_axes).astype(np.int32)
+ tf_ans = tf.reduce_max(x, reduction_axes, keep_dims)
+ out = tf_ans.eval()
+ self.assertAllClose(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareAll(self, x, reduction_axes):
+ self._compare(x, reduction_axes, False, use_gpu=True)
+ self._compare(x, reduction_axes, False, use_gpu=False)
+ self._compare(x, reduction_axes, True, use_gpu=True)
+ self._compare(x, reduction_axes, True, use_gpu=False)
+
+ def testFloatReduce3D(self):
+ # Create a 3D array of floats and reduce across all possible
+ # dimensions
+ np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
+ self._compareAll(np_arr, None)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [2])
+ self._compareAll(np_arr, [0, 1])
+ self._compareAll(np_arr, [1, 2])
+ self._compareAll(np_arr, [0, 2])
+ self._compareAll(np_arr, [0, 1, 2])
+
+ def testGradient(self):
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_max(t, [1, 2])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
+
+ def testGradient2(self):
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_max(t, [1])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 4, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
+
+ def testGradient3(self):
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_max(t, [2])
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [2, 3, 2], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
+
+ def testGradient4(self):
+ s = [2, 3, 4, 2]
+ x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
+ with self.test_session():
+ t = tf.convert_to_tensor(x)
+ su = tf.reduce_max(t)
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ t, s, su, [1], x_init_value=x, delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
+
+
+class AllReductionTest(tf.test.TestCase):
+
+ def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
+ np_ans = x
+ if reduction_axes is None:
+ np_ans = np.all(np_ans, keepdims=keep_dims)
+ else:
+ for ra in reduction_axes[::-1]:
+ np_ans = np.all(np_ans, axis=ra, keepdims=keep_dims)
+ with self.test_session(use_gpu=use_gpu):
+ if reduction_axes is not None:
+ reduction_axes = np.array(reduction_axes).astype(np.int32)
+ tf_ans = tf.reduce_all(x, reduction_axes, keep_dims)
+ out = tf_ans.eval()
+ self.assertAllEqual(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareAll(self, x, reduction_axes):
+ self._compare(x, reduction_axes, False, use_gpu=True)
+ self._compare(x, reduction_axes, False, use_gpu=False)
+ self._compare(x, reduction_axes, True, use_gpu=True)
+ self._compare(x, reduction_axes, True, use_gpu=False)
+
+ def testAll3D(self):
+ # Create a 3D array of bools and reduce across all possible
+ # dimensions
+ np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
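+ # About 90% of the entries are True, giving a mix of all-True and
+ # not-all-True reductions.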
+ self._compareAll(np_arr, None)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [2])
+ self._compareAll(np_arr, [0, 1])
+ self._compareAll(np_arr, [1, 2])
+ self._compareAll(np_arr, [0, 2])
+ self._compareAll(np_arr, [0, 1, 2])
+
+
+class AnyReductionTest(tf.test.TestCase):
+
+ def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
+ np_ans = x
+ if reduction_axes is None:
+ np_ans = np.any(np_ans, keepdims=keep_dims)
+ else:
+ for ra in reduction_axes[::-1]:
+ np_ans = np.any(np_ans, axis=ra, keepdims=keep_dims)
+ with self.test_session(use_gpu=use_gpu):
+ if reduction_axes is not None:
+ reduction_axes = np.array(reduction_axes).astype(np.int32)
+ tf_ans = tf.reduce_any(x, reduction_axes, keep_dims)
+ out = tf_ans.eval()
+ self.assertAllEqual(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareAll(self, x, reduction_axes):
+ self._compare(x, reduction_axes, False, use_gpu=True)
+ self._compare(x, reduction_axes, False, use_gpu=False)
+ self._compare(x, reduction_axes, True, use_gpu=True)
+ self._compare(x, reduction_axes, True, use_gpu=False)
+
+ def testAll3D(self):
+ # Create a 3D array of bools and reduce across all possible
+ # dimensions
+ np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
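+ # About 10% of the entries are True, giving a mix of any-True and
+ # all-False reductions.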
+ self._compareAll(np_arr, None)
+ self._compareAll(np_arr, [])
+ self._compareAll(np_arr, [0])
+ self._compareAll(np_arr, [1])
+ self._compareAll(np_arr, [2])
+ self._compareAll(np_arr, [0, 1])
+ self._compareAll(np_arr, [1, 2])
+ self._compareAll(np_arr, [0, 2])
+ self._compareAll(np_arr, [0, 1, 2])
+
+ def testPartialShapes(self):
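+ # These assertions exercise static shape inference only, so no session is
+ # needed.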
+ # Input shape is unknown.
+ c_unknown = tf.placeholder(tf.float32)
+ s_unknown = tf.reduce_sum(c_unknown, [1, 2])
+ self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
+
+ # Input shape only has known rank.
+ c_known_rank = tf.placeholder(tf.float32)
+ c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
+ s_known_rank = tf.reduce_sum(c_known_rank, [1, 2], keep_dims=True)
+ self.assertEqual(3, s_known_rank.get_shape().ndims)
+
+ # Reduction indices are unknown.
+ unknown_indices = tf.placeholder(tf.int32)
+ c_unknown_indices = tf.constant([[10.0], [20.0]])
+ s_unknown_indices = tf.reduce_sum(c_unknown_indices, unknown_indices,
+ keep_dims=False)
+ self.assertEqual(tensor_shape.unknown_shape(),
+ s_unknown_indices.get_shape())
+ s_unknown_indices_keep = tf.reduce_sum(c_unknown_indices, unknown_indices,
+ keep_dims=True)
+ self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/relu_op_test.py b/tensorflow/python/kernel_tests/relu_op_test.py
new file mode 100644
index 0000000000..a4b353f253
--- /dev/null
+++ b/tensorflow/python/kernel_tests/relu_op_test.py
@@ -0,0 +1,181 @@
+"""Tests for Relu and ReluGrad."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class ReluTest(tf.test.TestCase):
+
+ def _npRelu(self, np_features):
+ return np.maximum(np_features, np.zeros(np_features.shape))
+
+ def testNpRelu(self):
+ self.assertAllClose(
+ np.array([[0.0, 0.7, 0.0, 0.3, 0.0],
+ [0.1, 0.0, 0.5, 0.0, 0.9]]),
+ self._npRelu(np.array([[-0.9, 0.7, -0.5, 0.3, -0.1],
+ [0.1, -0.3, 0.5, -0.7, 0.9]])))
+
+ def _testRelu(self, np_features, use_gpu=False):
+ np_relu = self._npRelu(np_features)
+ with self.test_session(use_gpu=use_gpu):
+ relu = tf.nn.relu(np_features)
+ tf_relu = relu.eval()
+ self.assertAllClose(np_relu, tf_relu)
+ self.assertShapeEqual(np_relu, relu)
+
+ def testNumbers(self):
+ for t in [np.int32, np.int64, np.float, np.double]:
+ self._testRelu(
+ np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
+ use_gpu=False)
+ if t in [np.float, np.double]:
+ self._testRelu(
+ np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
+ use_gpu=True)
+
+ # The gradient test for ReLU is a bit tricky because the derivative is not
+ # well defined at zero, so we avoid inputs near zero.
+ def testGradientFloat(self):
+ with self.test_session():
+ x = tf.constant(
+ [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+ shape=[2, 5], name="x")
+ y = tf.nn.relu(x, name="relu")
+ x_init = np.asarray(
+ [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
+ dtype=np.float32, order="F")
+ err = gc.ComputeGradientError(x, [2, 5], y, [2, 5], x_init_value=x_init)
+ print "relu (float) gradient err = ", err
+ self.assertLess(err, 1e-4)
+
+ def testGradientNaN(self):
+ with self.test_session():
+ # Note the NaN is injected as an input to the gradient calculation.
+ x = tf.constant(
+ [-0.9, -0.7, -0.5, -0.3, np.nan, 0.1, 0.3, 0.5, 0.7, 0.9],
+ shape=[2, 5], name="x")
+ y = tf.nn.relu(x, name="relu")
+ grad_ys = tf.constant(
+ [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+ shape=[2, 5], name="ys")
+ g_op = tf.gradients(
+ [y], [x], grad_ys=[grad_ys], name="gradients")[0]
+ try:
+ g_op.op.run()
+ assert False, "ReluGrad should have failed due to CheckNumerics."
+ except Exception as e: # pylint: disable=broad-except
+ assert "ReluGrad input is not finite." in str(e)
+
+ def testGradientDouble(self):
+ with self.test_session():
+ x = tf.constant(
+ [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+ shape=[2, 5], dtype=tf.float64, name="x")
+ y = tf.nn.relu(x, name="relu")
+ x_init = np.asarray(
+ [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
+ dtype=np.float64, order="F")
+ err = gc.ComputeGradientError(x, [2, 5], y, [2, 5], x_init_value=x_init)
+ print "relu (double) gradient err = ", err
+ self.assertLess(err, 1e-10)
+
+ def testGradGradFloat(self):
+ with self.test_session():
+ x = tf.constant(
+ [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+ shape=[2, 5], name="x")
+ y = tf.nn.relu(x, name="relu")
+ z = tf.gradients(y, x)
+ x_init = np.asarray(
+ [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
+ dtype=np.float32, order="F")
+ err = gc.ComputeGradientError(x, [2, 5], z[0], [2, 5],
+ x_init_value=x_init)
+ print "relu (float) gradient of gradient err = ", err
+ self.assertLess(err, 1e-4)
+
+ def testGradGradDouble(self):
+ with self.test_session():
+ x = tf.constant(
+ [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+ shape=[2, 5], dtype=tf.float64, name="x")
+ y = tf.nn.relu(x, name="relu")
+ z = tf.gradients(y, x)
+ x_init = np.asarray(
+ [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
+ dtype=np.float64, order="F")
+ err = gc.ComputeGradientError(x, [2, 5], z[0], [2, 5],
+ x_init_value=x_init)
+ print "relu (double) gradient of gradient err = ", err
+ self.assertLess(err, 1e-10)
+
+
+class Relu6Test(tf.test.TestCase):
+
+ def _npRelu6(self, np_features):
+ sixes = np.copy(np_features)
+ sixes.fill(6.0)
+ return np.minimum(np.maximum(np_features, np.zeros(np_features.shape)),
+ sixes)
+
+ def testNpRelu6(self):
+ self.assertAllClose(
+ np.array([[0.0, 0.7, 0.0, 0.3, 6.0],
+ [0.1, 0.0, 6.0, 0.0, 0.9]]),
+ self._npRelu6(np.array([[-0.9, 0.7, -0.5, 0.3, 6.0],
+ [0.1, -0.3, 6.5, -0.7, 0.9]])))
+
+ def _testRelu6(self, np_features, use_gpu=False):
+ np_relu6 = self._npRelu6(np_features)
+ with self.test_session(use_gpu=use_gpu):
+ relu6 = tf.nn.relu6(np_features)
+ tf_relu6 = relu6.eval()
+ self.assertAllClose(np_relu6, tf_relu6)
+ self.assertShapeEqual(np_relu6, relu6)
+
+ def testNumbers(self):
+ for t in [np.int32, np.int64, np.float, np.double]:
+ self._testRelu6(
+ np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
+ use_gpu=False)
+ if t in [np.float, np.double]:
+ self._testRelu6(
+ np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
+ use_gpu=True)
+
+ # The gradient test for ReLU6 is a bit tricky because the derivative is
+ # not well defined at zero and six, so we avoid inputs near those values.
+ def testGradientFloat(self):
+ with self.test_session():
+ x = tf.constant(
+ [-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9],
+ shape=[2, 5], name="x")
+ y = tf.nn.relu6(x, name="relu6")
+ x_init = np.asarray(
+ [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
+ dtype=np.float32, order="F")
+ err = gc.ComputeGradientError(x, [2, 5], y, [2, 5], x_init_value=x_init)
+ print "relu6 (float) gradient err = ", err
+ self.assertLess(err, 1e-4)
+
+ def testGradientDouble(self):
+ with self.test_session():
+ x = tf.constant(
+ [-0.9, -0.7, -0.5, -0.3, -0.1, 6.1, 6.3, 6.5, 6.7, 6.9],
+ shape=[2, 5], dtype=tf.float64, name="x")
+ y = tf.nn.relu6(x, name="relu6")
+ x_init = np.asarray(
+ [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
+ dtype=np.float64, order="F")
+ err = gc.ComputeGradientError(x, [2, 5], y, [2, 5], x_init_value=x_init)
+ print "relu6 (double) gradient err = ", err
+ self.assertLess(err, 1e-10)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/reshape_op_test.py b/tensorflow/python/kernel_tests/reshape_op_test.py
new file mode 100644
index 0000000000..65b0e6d4bf
--- /dev/null
+++ b/tensorflow/python/kernel_tests/reshape_op_test.py
@@ -0,0 +1,106 @@
+"""Tests for tensorflow.ops.reshape_op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class ReshapeTest(tf.test.TestCase):
+
+ def _testReshape(self, x, y, use_gpu=False):
+ with self.test_session(use_gpu=use_gpu):
+ np_ans = x.reshape(y)
+ tf_ans = tf.reshape(x, y)
+ out = tf_ans.eval()
+ self.assertEqual(tf_ans.get_shape(), out.shape)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _testBothReshape(self, x, y):
+ self._testReshape(x, y, False)
+ self._testReshape(x, y, True)
+
+ def testFloatBasic(self):
+ x = np.arange(1., 7.).reshape([1, 6]).astype(np.float32)
+ self._testBothReshape(x, [2, 3])
+
+ def testDoubleBasic(self):
+ x = np.arange(1., 7.).reshape([1, 6]).astype(np.float64)
+ self._testBothReshape(x, [2, 3])
+
+ def testInt32Basic(self):
+ x = np.arange(1., 7.).reshape([1, 6]).astype(np.int32)
+ self._testBothReshape(x, [2, 3])
+
+ def testSComplexBasic(self):
+ x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex64)
+ self._testBothReshape(x, [2, 3])
+
+ def testFloatReshapeThreeDimensions(self):
+ x = np.arange(1., 28.).reshape([1, 27]).astype(np.float32)
+ self._testBothReshape(x, [3, 3, 3])
+
+ def testFloatUnspecifiedDimOnly(self):
+ x = np.arange(1., 7.).reshape([6]).astype(np.float32)
+ self._testBothReshape(x, [-1])
+
+ def testFloatUnspecifiedDimBegin(self):
+ x = np.arange(1., 7.).reshape([6]).astype(np.float32)
+ self._testBothReshape(x, [-1, 2])
+
+ def testFloatUnspecifiedDimEnd(self):
+ x = np.arange(1., 7.).reshape([6]).astype(np.float32)
+ self._testBothReshape(x, [3, -1])
+
+ # TODO(vrv): Add tests for failure conditions once python test_util
+ # reports errors.
+
+ def testFloatReshapeGradThreeDimensions(self):
+ x = np.arange(1., 25.).reshape([1, 24]).astype(np.float32)
+ s = list(np.shape(x))
+ with self.test_session():
+ input_tensor = tf.constant(x, shape=[2, 3, 4])
+ reshape_out = tf.reshape(input_tensor, [1, 8, 3])
+ err = gc.ComputeGradientError(input_tensor, s,
+ reshape_out, s, x_init_value=x)
+ print "Reshape gradient error = " % err
+ self.assertLess(err, 1e-3)
+
+ def testFloatEmpty(self):
+ x = np.empty((0, 0, 0, 0), dtype=np.float32)
+ self._testBothReshape(x, [1, 2, 3, 0])
+ self._testBothReshape(x, [1, 0, 0, 4])
+ self._testBothReshape(x, [0, 0, 0, 0])
+ self._testBothReshape(x, [1, 2, 0])
+ self._testBothReshape(x, [0, 0, 0])
+ self._testBothReshape(x, [1, -1, 5])
+
+ def testErrors(self):
+ x = tf.constant(0.0, shape=[1, 0, 3])
+ with self.assertRaisesRegexp(
+ ValueError, "cannot infer the missing input size"):
+ tf.reshape(x, [0, -1, 5])
+
+ y = tf.constant(0.0, shape=[23, 29, 31])
+ with self.assertRaisesRegexp(ValueError, "isn't divisible by 17"):
+ tf.reshape(y, [17, -1])
+
+ def testPartialShapes(self):
+ x = tf.placeholder(tf.float32)
+
+ # Unknown input shape, partial new shape.
+ y = tf.reshape(x, [1, 1, -1, 1])
+ self.assertEqual([1, 1, None, 1], y.get_shape().as_list())
+
+ # Unknown input shape, unknown new shape.
+ y = tf.reshape(x, tf.placeholder(tf.int32))
+ self.assertEqual(None, y.get_shape().ndims)
+
+ # Unknown input shape, known rank for new shape.
+ y = tf.reshape(x, tf.placeholder(tf.int32, shape=(3,)))
+ self.assertEqual([None, None, None], y.get_shape().as_list())
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/reverse_sequence_op_test.py b/tensorflow/python/kernel_tests/reverse_sequence_op_test.py
new file mode 100644
index 0000000000..7cfbcd7946
--- /dev/null
+++ b/tensorflow/python/kernel_tests/reverse_sequence_op_test.py
@@ -0,0 +1,109 @@
+"""Tests for tensorflow.ops.reverse_sequence_op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class ReverseSequenceTest(tf.test.TestCase):
+
+ def _testReverseSequence(self, x, seq_dim, seq_lengths,
+ truth, use_gpu=False, expected_err_re=None):
+ with self.test_session(use_gpu=use_gpu):
+ ans = tf.reverse_sequence(x,
+ seq_dim=seq_dim,
+ seq_lengths=seq_lengths)
+ if expected_err_re is None:
+ tf_ans = ans.eval()
+ self.assertAllClose(tf_ans, truth, atol=1e-10)
+ self.assertShapeEqual(truth, ans)
+ else:
+ with self.assertRaisesOpError(expected_err_re):
+ ans.eval()
+
+ def _testBothReverseSequence(self, x, seq_dim, seq_lengths,
+ truth, expected_err_re=None):
+ self._testReverseSequence(x, seq_dim, seq_lengths,
+ truth, True, expected_err_re)
+ self._testReverseSequence(x, seq_dim, seq_lengths,
+ truth, False, expected_err_re)
+
+ def _testBasic(self, dtype):
+ x = np.asarray([
+ [[1, 2, 3, 4], [5, 6, 7, 8]],
+ [[9, 10, 11, 12], [13, 14, 15, 16]],
+ [[17, 18, 19, 20], [21, 22, 23, 24]]], dtype=dtype)
+ x = x.reshape(3, 2, 4, 1, 1)
+
+    # For each index along dim 0, reverse dim 2 up to the matching
+    # seq_length: 3 elements, none, and all 4 elements respectively.
+ seq_dim = 2
+ seq_lengths = np.asarray([3, 0, 4], dtype=np.int64)
+
+ truth = np.asarray(
+ [[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
+ [[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
+ [[20, 19, 18, 17], [24, 23, 22, 21]]], # reverse 0:4 (all)
+ dtype=dtype)
+ truth = truth.reshape(3, 2, 4, 1, 1)
+ self._testBothReverseSequence(x, seq_dim, seq_lengths, truth)
+
+ def testFloatBasic(self):
+ self._testBasic(np.float32)
+
+ def testDoubleBasic(self):
+ self._testBasic(np.float64)
+
+ def testInt32Basic(self):
+ self._testBasic(np.int32)
+
+ def testInt64Basic(self):
+ self._testBasic(np.int64)
+
+ def testSComplexBasic(self):
+ self._testBasic(np.complex64)
+
+ def testFloatReverseSequenceGrad(self):
+ x = np.asarray([
+ [[1, 2, 3, 4], [5, 6, 7, 8]],
+ [[9, 10, 11, 12], [13, 14, 15, 16]],
+ [[17, 18, 19, 20], [21, 22, 23, 24]]], dtype=np.float)
+ x = x.reshape(3, 2, 4, 1, 1)
+
+    # For each index along dim 0, reverse dim 2 up to the matching
+    # seq_length: 3 elements, none, and all 4 elements respectively.
+ seq_dim = 2
+ seq_lengths = np.asarray([3, 0, 4], dtype=np.int64)
+
+ with self.test_session():
+ input_t = tf.constant(x, shape=x.shape)
+ seq_lengths_t = tf.constant(seq_lengths, shape=seq_lengths.shape)
+ reverse_sequence_out = tf.reverse_sequence(input_t,
+ seq_dim=seq_dim,
+ seq_lengths=seq_lengths_t)
+ err = gc.ComputeGradientError(input_t,
+ x.shape,
+ reverse_sequence_out,
+ x.shape,
+ x_init_value=x)
+ print "ReverseSequence gradient error = %g" % err
+ self.assertLess(err, 1e-8)
+
+ def testShapeFunctionEdgeCases(self):
+ # Batch size mismatched between input and seq_lengths.
+ with self.assertRaises(ValueError):
+ tf.reverse_sequence(
+ tf.placeholder(tf.float32, shape=(32, 2, 3)),
+ seq_lengths=tf.placeholder(tf.int64, shape=(33,)),
+ seq_dim=3)
+
+ # seq_dim out of bounds.
+ with self.assertRaisesRegexp(ValueError, "seq_dim must be < input.dims()"):
+ tf.reverse_sequence(
+ tf.placeholder(tf.float32, shape=(32, 2, 3)),
+ seq_lengths=tf.placeholder(tf.int64, shape=(32,)),
+ seq_dim=3)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/save_restore_ops_test.py b/tensorflow/python/kernel_tests/save_restore_ops_test.py
new file mode 100644
index 0000000000..d59d76c58f
--- /dev/null
+++ b/tensorflow/python/kernel_tests/save_restore_ops_test.py
@@ -0,0 +1,21 @@
+"""Tests for tensorflow.ops.io_ops."""
+import tensorflow.python.platform
+
+import tensorflow as tf
+from tensorflow.python.ops import gen_io_ops
+
+
+class ShardedFileOpsTest(tf.test.TestCase):
+
+ def testShardedFileName(self):
+ with tf.Session(
+ target="",
+ config=tf.ConfigProto(device_count={"CPU": 2})):
+ self.assertEqual(gen_io_ops._sharded_filename("foo", 4, 100).eval(),
+ "foo-00004-of-00100")
+ self.assertEqual(gen_io_ops._sharded_filespec("foo", 100).eval(),
+ "foo-?????-of-00100")
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/scatter_ops_test.py b/tensorflow/python/kernel_tests/scatter_ops_test.py
new file mode 100644
index 0000000000..dd645819a3
--- /dev/null
+++ b/tensorflow/python/kernel_tests/scatter_ops_test.py
@@ -0,0 +1,49 @@
+"""Tests for tensorflow.ops.tf.scatter."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class ScatterTest(tf.test.TestCase):
+
+ def _VariableRankTest(self, np_scatter, tf_scatter):
+ np.random.seed(8)
+ with self.test_session():
+ for indices_shape in (), (2,), (2, 3), (2, 3, 4):
+ for extra_shape in (), (5,), (5, 6):
+ # Generate random indices with no duplicates for easy numpy comparison
+ size = np.prod(indices_shape, dtype=np.int32)
+ indices = np.arange(2 * size)
+ np.random.shuffle(indices)
+ indices = indices[:size].reshape(indices_shape)
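+          # Duplicate-free indices matter: numpy's fancy "+=" applies each
+          # duplicated index only once, whereas tf.scatter_add accumulates
+          # every update, so the two would disagree on duplicates.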
+ updates = np.random.randn(*(indices_shape + extra_shape))
+ old = np.random.randn(*((2 * size,) + extra_shape))
+ # Scatter via numpy
+ new = old.copy()
+ np_scatter(new, indices, updates)
+ # Scatter via tensorflow
+ ref = tf.Variable(old)
+ ref.initializer.run()
+ tf_scatter(ref, indices, updates).eval()
+ # Compare
+ self.assertAllClose(ref.eval(), new)
+
+ def testVariableRankUpdate(self):
+ def update(ref, indices, updates):
+ ref[indices] = updates
+ self._VariableRankTest(update, tf.scatter_update)
+
+ def testVariableRankAdd(self):
+ def add(ref, indices, updates):
+ ref[indices] += updates
+ self._VariableRankTest(add, tf.scatter_add)
+
+ def testVariableRankSub(self):
+ def sub(ref, indices, updates):
+ ref[indices] -= updates
+ self._VariableRankTest(sub, tf.scatter_sub)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
new file mode 100644
index 0000000000..558ce06285
--- /dev/null
+++ b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
@@ -0,0 +1,269 @@
+"""Functional tests for segment reduction ops."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker
+
+
+class SegmentReductionHelper(tf.test.TestCase):
+
+ def _input(self, input_shape, dtype=tf.int32):
+ num_elem = 1
+ for x in input_shape:
+ num_elem *= x
+ values = range(1, num_elem + 1)
+ np_values = np.array(values).reshape(input_shape).astype(
+ dtype.as_numpy_dtype)
+ return tf.constant(values, shape=input_shape,
+ dtype=dtype), np_values
+
+ def _segmentReduce(self, indices, x, op1, op2=None, num_out_rows=None):
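+    # Reference implementation: rows of x that share a segment id are folded
+    # together with op1; op2, when given, finishes each bucket (used for mean).
+    # E.g. indices=[0, 0, 1] and x=[[1, 2], [3, 4], [5, 6]] with op1=np.add
+    # yields [[4, 6], [5, 6]].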
+ if not x.size: return np.array([])
+ indices = np.asarray(indices)
+ if num_out_rows is None:
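+      # Assumes segment ids arrive sorted, so the last id is the largest.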
+ num_out_rows = indices[-1] + 1
+ output = [None] * num_out_rows
+ slice_shape = x.shape[indices.ndim:]
+ x_flat = x.reshape((indices.size,) + slice_shape)
+ for i, index in enumerate(indices.ravel()):
+ if output[index] is not None:
+ output[index] = op1(output[index], x_flat[i])
+ else:
+ output[index] = x_flat[i]
+    # Zero-initialize values that are still uncalculated.
+ output = [o if o is not None else np.zeros(slice_shape) for o in output]
+ if op2 is not None:
+ output = [op2(o) for o in output]
+ output = [o.reshape(slice_shape) for o in output]
+ return np.array(output)
+
+ def _assertAllClose(self, indices, np_x, tf_x):
+ for i in set(np.asarray(indices).ravel()):
+ self.assertAllClose(np_x[i], tf_x[i])
+
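+  # _mean_cum_op threads a (running_sum, count) tuple through the fold and
+  # _mean_reduce_op divides at the end. A segment with a single row never
+  # becomes a tuple, so its mean is the row itself.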
+ def _mean_cum_op(self, x, y):
+ return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
+
+ def _mean_reduce_op(self, x):
+ return x[0] / x[1] if isinstance(x, tuple) else x
+
+
+class SegmentReductionOpTest(SegmentReductionHelper):
+
+ def testValues(self):
+ dtypes = [tf.float32,
+ tf.float64,
+ tf.int64,
+ tf.int32]
+
+ # Each item is np_op1, np_op2, tf_op
+ ops_list = [(np.add, None, tf.segment_sum),
+ (self._mean_cum_op, self._mean_reduce_op,
+ tf.segment_mean),
+ (np.ndarray.__mul__, None, tf.segment_prod),
+ (np.minimum, None, tf.segment_min),
+ (np.maximum, None, tf.segment_max)]
+
+ n = 10
+ shape = [n, 2]
+ indices = [int(i / 3) for i in range(n)]
+ for dtype in dtypes:
+ with self.test_session(use_gpu=False):
+ tf_x, np_x = self._input(shape, dtype=dtype)
+ for np_op1, np_op2, tf_op in ops_list:
+ np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
+ s = tf_op(data=tf_x, segment_ids=indices)
+ tf_ans = s.eval()
+ self._assertAllClose(indices, np_ans, tf_ans)
+          # NOTE(mrry): The static shape inference that computes
+          # `tf_ans.shape` can only infer the sizes from dimension 1
+          # onwards, because the size of dimension 0 is data-dependent
+          # and may therefore vary dynamically.
+ self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
+
+ def testSegmentIdsShape(self):
+ shape = [4, 4]
+ tf_x, _ = self._input(shape)
+ indices = tf.constant([0, 1, 2, 2], shape=[2, 2])
+ with self.assertRaises(ValueError):
+ tf.segment_sum(data=tf_x, segment_ids=indices)
+
+ def testSegmentIdsSize(self):
+ shape = [4, 4]
+ with self.test_session():
+ tf_x, _ = self._input(shape)
+ indices = [0, 1]
+ s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ with self.assertRaisesOpError("segment_ids should be the same size"):
+ s.eval()
+
+ def testGradient(self):
+ shape = [4, 4]
+ indices = [0, 1, 2, 2]
+ for tf_op in [tf.segment_sum,
+ tf.segment_mean,
+ tf.segment_min,
+ tf.segment_max]:
+ with self.test_session():
+ tf_x, np_x = self._input(shape, dtype=tf.float64)
+ s = tf_op(data=tf_x, segment_ids=indices)
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ tf_x, shape, s, [3, 4], x_init_value=np_x.astype(np.double),
+ delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+
+
+class UnsortedSegmentSumTest(SegmentReductionHelper):
+
+ def testValues(self):
+ dtypes = [tf.float32,
+ tf.float64,
+ tf.int64,
+ tf.int32]
+ indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
+ num_segments = 12
+ for indices in indices_flat, indices_flat.reshape(5, 2):
+ shape = indices.shape + (2,)
+ for dtype in dtypes:
+ with self.test_session(use_gpu=False):
+ tf_x, np_x = self._input(shape, dtype=dtype)
+ np_ans = self._segmentReduce(indices,
+ np_x,
+ np.add,
+ op2=None,
+ num_out_rows=num_segments)
+ s = tf.unsorted_segment_sum(data=tf_x,
+ segment_ids=indices,
+ num_segments=num_segments)
+ tf_ans = s.eval()
+ self._assertAllClose(indices, np_ans, tf_ans)
+ self.assertShapeEqual(np_ans, s)
+
+ def testGradient(self):
+ num_cols = 2
+ indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
+ num_segments = max(indices_flat) + 3
+ for indices in indices_flat, indices_flat.reshape(5, 2):
+ shape = indices.shape + (num_cols,)
+ with self.test_session():
+ tf_x, np_x = self._input(shape, dtype=tf.float64)
+ s = tf.unsorted_segment_sum(data=tf_x,
+ segment_ids=indices,
+ num_segments=num_segments)
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ tf_x,
+ shape,
+ s,
+ [num_segments, num_cols],
+ x_init_value=np_x.astype(np.double),
+ delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+
+ def testGradientMatchesSegmentSum(self):
+ # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
+ # and compare the outputs, which should be identical.
+ # NB: for this test to work, indices must be valid for SegmentSum, namely
+ # it must be sorted, the indices must be contiguous, and num_segments
+ # must be max(indices) + 1.
+ indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
+ n = len(indices)
+ num_cols = 2
+ shape = [n, num_cols]
+ num_segments = max(indices) + 1
+ with self.test_session():
+ tf_x, np_x = self._input(shape, dtype=tf.float64)
+ # Results from UnsortedSegmentSum
+ unsorted_s = tf.unsorted_segment_sum(data=tf_x,
+ segment_ids=indices,
+ num_segments=num_segments)
+ unsorted_jacob_t, unsorted_jacob_n = gradient_checker.ComputeGradient(
+ tf_x, shape, unsorted_s, [num_segments, num_cols],
+ x_init_value=np_x.astype(np.double),
+ delta=1)
+ # Results from SegmentSum
+ sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices)
+ sorted_jacob_t, sorted_jacob_n = gradient_checker.ComputeGradient(
+ tf_x, shape, sorted_s, [num_segments, num_cols],
+ x_init_value=np_x.astype(np.double),
+ delta=1)
+ self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
+ self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3)
+
+
+class SparseSegmentReductionHelper(SegmentReductionHelper):
+
+ def _sparse_input(self, input_shape, num_indices,
+ dtype=tf.int32):
+ a, b = super(SparseSegmentReductionHelper, self)._input(input_shape,
+ dtype)
+ indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
+ return (tf.constant(indices, dtype=tf.int32),
+ indices, a, b)
+
+ def _sparseSegmentReduce(self, x, indices, segment_indices, op1, op2=None):
+ return self._segmentReduce(segment_indices, x[indices], op1, op2)
+
+
+class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
+
+ def testValues(self):
+ dtypes = [tf.float32,
+ tf.float64,
+ tf.int64,
+ tf.int32]
+
+ mean_dtypes = [tf.float32,
+ tf.float64]
+
+ # Each item is np_op1, np_op2, tf_op
+ ops_list = [(np.add, None, tf.sparse_segment_sum),
+ (self._mean_cum_op, self._mean_reduce_op,
+ tf.sparse_segment_mean)]
+
+ n = 400
+ shape = [n, 2]
+ segment_indices = []
+ for i in range(20):
+ for _ in range(i + 1):
+ segment_indices.append(i)
+ num_indices = len(segment_indices)
+ for dtype in dtypes:
+ with self.test_session(use_gpu=False):
+ tf_indices, np_indices, tf_x, np_x = self._sparse_input(shape,
+ num_indices,
+ dtype=dtype)
+ for np_op1, np_op2, tf_op in ops_list:
+ if tf_op == tf.sparse_segment_mean and dtype not in mean_dtypes:
+ continue
+ np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,
+ np_op1, np_op2)
+ s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
+ tf_ans = s.eval()
+ self._assertAllClose(segment_indices, np_ans, tf_ans)
+          # NOTE(mrry): The static shape inference that computes
+          # `tf_ans.shape` can only infer the sizes from dimension 1
+          # onwards, because the size of dimension 0 is data-dependent
+          # and may therefore vary dynamically.
+ self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
+
+ def testGradient(self):
+ shape = [10, 4]
+
+ segment_indices = [0, 1, 2, 2]
+ num_indices = len(segment_indices)
+ for tf_op in [tf.sparse_segment_sum,
+ tf.sparse_segment_mean]:
+ with self.test_session():
+ tf_indices, _, tf_x, np_x = self._sparse_input(
+ shape, num_indices, dtype=tf.float64)
+ s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
+ jacob_t, jacob_n = gradient_checker.ComputeGradient(
+ tf_x, shape, s, [3, 4], x_init_value=np_x.astype(np.double),
+ delta=1)
+ self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/shape_ops_test.py b/tensorflow/python/kernel_tests/shape_ops_test.py
new file mode 100644
index 0000000000..ac97180dbe
--- /dev/null
+++ b/tensorflow/python/kernel_tests/shape_ops_test.py
@@ -0,0 +1,389 @@
+"""Tests for various tensorflow.ops.tf."""
+import tensorflow.python.platform
+
+import numpy as np
+
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class ShapeOpsTest(tf.test.TestCase):
+
+ def _compareShape(self, x, use_gpu=False):
+ np_ans = np.array(np.shape(x))
+ with self.test_session(use_gpu=use_gpu):
+ tf_ans = tf.shape(x)
+ result = tf_ans.eval()
+ self.assertAllEqual(np_ans, result)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareRank(self, x, use_gpu=False):
+ np_ans = np.asarray(np.ndim(x))
+ with self.test_session(use_gpu=use_gpu):
+ tf_ans = tf.rank(x)
+ result = tf_ans.eval()
+ self.assertAllEqual(np_ans, result)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _compareSize(self, x, use_gpu=False):
+ np_ans = np.asarray(np.size(x))
+ with self.test_session(use_gpu=use_gpu):
+ tf_ans = tf.size(x)
+ result = tf_ans.eval()
+ self.assertAllEqual(np_ans, result)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def _testCpu(self, x):
+ self._compareShape(x, use_gpu=False)
+ self._compareRank(x, use_gpu=False)
+ self._compareSize(x, use_gpu=False)
+
+ def _testGpu(self, x):
+ self._compareShape(x, use_gpu=True)
+ self._compareRank(x, use_gpu=True)
+ self._compareSize(x, use_gpu=True)
+
+ def _testAll(self, x):
+ self._testCpu(x)
+ self._testGpu(x)
+
+ def testBasic(self):
+ self._testAll(np.zeros([2]))
+ self._testAll(np.zeros([2, 3]))
+ self._testAll(np.zeros([2, 3, 5]))
+ self._testAll(np.zeros([2, 3, 5, 7]))
+ self._testAll(np.zeros([2, 3, 5, 7, 11]))
+ self._testAll(np.zeros([2, 3, 5, 7, 11, 13]))
+
+ def _compareExpandDims(self, x, dim, use_gpu):
+ np_ans = np.expand_dims(x, axis=dim)
+ with self.test_session(use_gpu=use_gpu):
+ tensor = tf.expand_dims(x, dim)
+ tf_ans = tensor.eval()
+ self.assertShapeEqual(np_ans, tensor)
+ self.assertAllEqual(np_ans, tf_ans)
+
+ def _compareExpandDimsAll(self, x, dim):
+ self._compareExpandDims(x, dim, False)
+ self._compareExpandDims(x, dim, True)
+
+ def testExpandDims(self):
+ self._compareExpandDimsAll(np.zeros([2]), 0)
+ self._compareExpandDimsAll(np.zeros([2]), 1)
+ self._compareExpandDimsAll(np.zeros([2]), -1)
+
+ self._compareExpandDimsAll(np.zeros([2, 3]), 0)
+ self._compareExpandDimsAll(np.zeros([2, 3]), 1)
+ self._compareExpandDimsAll(np.zeros([2, 3]), 2)
+ self._compareExpandDimsAll(np.zeros([2, 3]), -1)
+ self._compareExpandDimsAll(np.zeros([2, 3]), -2)
+
+ self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
+ self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
+ self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
+ self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
+
+ self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
+ self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
+ self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
+ self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)
+
+ def testExpandDimsErrors(self):
+ with self.test_session():
+ self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), -5)
+ self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), 4)
+
+ def testExpandDimsGradient(self):
+ with self.test_session():
+ inp = tf.constant(np.random.rand(4, 2).astype("f"),
+ dtype=tf.float32)
+ squeezed = tf.expand_dims(inp, 1)
+
+ err = gc.ComputeGradientError(inp, [4, 2], squeezed, [4, 1, 2])
+ self.assertLess(err, 1e-3)
+
+ def testExpandDimsScalar(self):
+ with self.test_session():
+ inp = tf.constant(7)
+ self.assertAllEqual([7], tf.expand_dims(inp, 0).eval())
+ self.assertAllEqual([7], tf.expand_dims(inp, -1).eval())
+
+ def _compareSqueeze(self, x, squeeze_dims, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ if squeeze_dims:
+ np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
+ tensor = tf.squeeze(x, squeeze_dims)
+ tf_ans = tensor.eval()
+ else:
+ np_ans = np.squeeze(x)
+ tensor = tf.squeeze(x)
+ tf_ans = tensor.eval()
+ self.assertShapeEqual(np_ans, tensor)
+ self.assertAllEqual(np_ans, tf_ans)
+
+ def _compareSqueezeAll(self, x, squeeze_dims=None):
+ if squeeze_dims is None:
+ squeeze_dims = []
+ self._compareSqueeze(x, squeeze_dims, False)
+ self._compareSqueeze(x, squeeze_dims, True)
+
+ def testSqueeze(self):
+ # Nothing to squeeze.
+ self._compareSqueezeAll(np.zeros([2]))
+ self._compareSqueezeAll(np.zeros([2, 3]))
+
+ # Squeeze the middle element away.
+ self._compareSqueezeAll(np.zeros([2, 1, 2]))
+
+ # Squeeze on both ends.
+ self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))
+
+ def testSqueezeSpecificDimension(self):
+ # Positive squeeze dim index.
+ self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
+ self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
+ self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])
+
+ # Negative squeeze dim index.
+ self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
+ self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
+ self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])
+
+ def testSqueezeAllOnes(self):
+ # Numpy squeezes a 1 element tensor into a zero dimensional tensor.
+ # Verify that we do the same.
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ tensor = tf.squeeze(np.zeros([1, 1, 1]), [])
+ self.assertEqual(np.shape(1), tensor.get_shape())
+ tf_ans = tensor.eval()
+ self.assertEqual(np.shape(1), tf_ans.shape)
+
+ def testSqueezeOnlyOnes(self):
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ input_1x1x3 = np.zeros([1, 1, 3])
+ self._compareSqueezeAll(input_1x1x3)
+ self._compareSqueezeAll(input_1x1x3, [0])
+ self._compareSqueezeAll(input_1x1x3, [1])
+ self.assertRaises(ValueError, tf.squeeze, input_1x1x3, [2])
+
+ def testSqueezeErrors(self):
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [-4])
+ self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [0, -4])
+ self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [3])
+ self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [2, 3])
+
+ def testSqueezeGradient(self):
+ with self.test_session():
+ inp = np.random.rand(4, 2).astype("f")
+ a = tf.reshape(inp, [4, 1, 2])
+ squeezed = tf.squeeze(a, [])
+
+ err = gc.ComputeGradientError(a, [4, 1, 2], squeezed, [4, 2])
+ self.assertLess(err, 1e-3)
+
+ def testSqueezeGradientWithSqueezeDims(self):
+ with self.test_session():
+ inp = np.random.rand(4, 2).astype("f")
+ a = tf.reshape(inp, [4, 1, 2, 1])
+ squeezed = tf.squeeze(a, [1])
+
+ err = gc.ComputeGradientError(a, [4, 1, 2, 1], squeezed, [4, 2, 1])
+ self.assertLess(err, 1e-3)
+
+
+class TileTest(tf.test.TestCase):
+
+ def testScalar(self):
+ with self.test_session():
+ a = tf.constant(7, shape=[], dtype=tf.float32)
+ tiled = tf.tile(a, [])
+ result = tiled.eval()
+ self.assertEqual(result.shape, ())
+ self.assertEqual([], tiled.get_shape())
+ self.assertEqual(7, result)
+
+ def testSimple(self):
+ with self.test_session():
+ inp = np.random.rand(4, 1).astype("f")
+ a = tf.constant([float(x) for x in inp.ravel(order="C")],
+ shape=[4, 1], dtype=tf.float32)
+ tiled = tf.tile(a, [1, 4])
+ result = tiled.eval()
+ self.assertEqual(result.shape, (4, 4))
+ self.assertEqual([4, 4], tiled.get_shape())
+ self.assertTrue((result == np.tile(inp, (1, 4))).all())
+
+ def testTypes(self):
+ types_to_test = {
+ "bool": (tf.bool, bool),
+ "float32": (tf.float32, float),
+ "float64": (tf.float64, float),
+ "uint8": (tf.uint8, int),
+ "int32": (tf.int32, int),
+ "int64": (tf.int64, int),
+ "string": (tf.string, str)
+ }
+ for dtype_np, v in types_to_test.iteritems():
+ with self.test_session():
+ dtype_tf = v[0]
+ cast = v[1]
+ inp = np.random.rand(4, 1).astype(dtype_np)
+ a = tf.constant([cast(x) for x in inp.ravel(order="C")],
+ shape=[4, 1],
+ dtype=dtype_tf)
+ tiled = tf.tile(a, [1, 4])
+ result = tiled.eval()
+ self.assertEqual(result.shape, (4, 4))
+ self.assertEqual([4, 4], tiled.get_shape())
+ self.assertTrue((result == np.tile(inp, (1, 4))).all())
+
+ def testInvalidDim(self):
+ with self.test_session():
+ inp = np.random.rand(4, 1).astype("f")
+ a = tf.constant([float(x) for x in inp.ravel(order="C")],
+ shape=[4, 1], dtype=tf.float32)
+ # Wrong length of multiples.
+ with self.assertRaises(ValueError):
+ tf.tile(a, [1, 4, 2])
+ # Wrong rank for multiples.
+ with self.assertRaises(ValueError):
+ tf.tile(a, [[2, 3], [3, 4]]).eval()
+
+ def _RunAndVerifyResult(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ # Random dims of rank 5
+ input_shape = np.random.randint(1, 4, size=5)
+ inp = np.random.rand(*input_shape).astype("f")
+ a = tf.constant([float(x) for x in inp.ravel(order="C")],
+ shape=input_shape, dtype=tf.float32)
+ multiples = np.random.randint(1, 4, size=5).astype(np.int32)
+ tiled = tf.tile(a, multiples)
+ result = tiled.eval()
+ self.assertTrue((np.array(multiples) * np.array(inp.shape) ==
+ np.array(result.shape)).all())
+ self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
+ self.assertShapeEqual(result, tiled)
+
+ def testRandom(self):
+ for _ in range(5):
+ self._RunAndVerifyResult(use_gpu=False)
+ for _ in range(5):
+ self._RunAndVerifyResult(use_gpu=True)
+
+ def testGradientSimpleReduction(self):
+ with self.test_session():
+ inp = np.random.rand(4, 1).astype("f")
+ a = tf.constant([float(x) for x in inp.flatten()],
+ shape=[4, 1], dtype=tf.float32)
+ tiled = tf.tile(a, [1, 4])
+ grad_shape = [4, 4]
+ grad_inp = np.random.rand(*grad_shape).astype("f")
+ grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
+ shape=grad_shape)
+ grad = tf.gradients([tiled], [a], [grad_tensor])[0]
+ self.assertShapeEqual(inp, grad)
+ result = grad.eval()
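+      # Each input element is copied four times along dim 1, so its gradient
+      # is the sum of the four corresponding output gradients.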
+ self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
+
+ def testGradientStridedReduction(self):
+ with self.test_session():
+ inp = np.random.rand(4, 2).astype("f")
+ a = tf.constant([float(x) for x in inp.flatten()],
+ shape=[4, 2], dtype=tf.float32)
+ tiled = tf.tile(a, [1, 2])
+ grad_shape = [4, 4]
+ grad_inp = np.random.rand(*grad_shape).astype("f")
+ grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
+ shape=grad_shape)
+ grad = tf.gradients([tiled], [a], [grad_tensor])[0]
+ self.assertShapeEqual(inp, grad)
+ result = grad.eval()
+ expected_shape = [4, 2]
+ expected = np.zeros(expected_shape)
+ expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
+ expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
+ self.assertTrue((np.abs(expected - result) < 1e-3).all())
+
+ def testGradientSimpleReductionOnGPU(self):
+ with self.test_session(use_gpu=True):
+ inp = np.random.rand(4, 1).astype("f")
+ a = tf.constant([float(x) for x in inp.flatten()],
+ shape=[4, 1], dtype=tf.float32)
+ tiled = tf.tile(a, [1, 4])
+ grad_shape = [4, 4]
+ grad_inp = np.random.rand(*grad_shape).astype("f")
+ grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
+ shape=grad_shape)
+ grad = tf.gradients([tiled], [a], [grad_tensor])[0]
+ result = grad.eval()
+ self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
+
+ def testGradientStridedReductionOnGPU(self):
+ with self.test_session(use_gpu=True):
+ inp = np.random.rand(4, 2).astype("f")
+ a = tf.constant([float(x) for x in inp.flatten()],
+ shape=[4, 2], dtype=tf.float32)
+ tiled = tf.tile(a, [1, 2])
+ grad_shape = [4, 4]
+ grad_inp = np.random.rand(*grad_shape).astype("f")
+ grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
+ shape=grad_shape)
+ grad = tf.gradients([tiled], [a], [grad_tensor])[0]
+ result = grad.eval()
+ expected_shape = [4, 2]
+ expected = np.zeros(expected_shape)
+ expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
+ expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
+ self.assertAllClose(expected, result, 1e-3)
+
+ def _RunAndVerifyGradientResult(self, input_shape, multiples):
+ with self.test_session():
+ # Random values
+ inp = np.random.rand(*input_shape)
+ a = tf.constant([float(x) for x in inp.flatten()],
+ shape=input_shape, dtype=tf.float64)
+ tiled = tf.tile(a, multiples)
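+      # The tiled (and hence gradient) shape is the elementwise product of
+      # the input shape and the multiples.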
+ grad_shape = list(np.array(multiples) * np.array(inp.shape))
+ err = gc.ComputeGradientError(a, list(input_shape), tiled, grad_shape,
+ x_init_value=inp)
+ print "tile(float) error = ", err
+ self.assertLess(err, 1e-3)
+
+ def testGradientRandom(self):
+ self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
+ self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
+ self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])
+
+ def testGradientStridedReductionGC(self):
+ with self.test_session():
+ inp = np.random.rand(4, 2).astype("f")
+ a = tf.constant([float(x) for x in inp.flatten()],
+ shape=[4, 2], dtype=tf.float32)
+ tiled = tf.tile(a, [1, 2])
+ err = gc.ComputeGradientError(a, [4, 2], tiled, [4, 4])
+ self.assertLess(err, 1e-3)
+
+ def testShapeFunctionEdgeCases(self):
+ # Unknown multiples shape.
+ inp = tf.constant(0.0, shape=[4, 4, 4, 4])
+ tiled = tf.tile(inp, tf.placeholder(tf.int32))
+ self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
+
+ # Unknown input shape.
+ inp = tf.placeholder(tf.float32)
+ tiled = tf.tile(inp, [2, 2, 2, 2])
+ self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
+
+ # Unknown input and multiples shape.
+ inp = tf.placeholder(tf.float32)
+ tiled = tf.tile(inp, tf.placeholder(tf.int32))
+ self.assertIs(None, tiled.get_shape().ndims)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/slice_op_test.py b/tensorflow/python/kernel_tests/slice_op_test.py
new file mode 100644
index 0000000000..62d7e31dfc
--- /dev/null
+++ b/tensorflow/python/kernel_tests/slice_op_test.py
@@ -0,0 +1,235 @@
+"""Functional tests for slice op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class SliceTest(tf.test.TestCase):
+
+ def _testEmpty(self, use_gpu):
+ inp = np.random.rand(4, 4).astype("f")
+ for k in xrange(4):
+ with self.test_session(use_gpu=use_gpu):
+ a = tf.constant(inp, shape=[4, 4], dtype=tf.float32)
+ slice_t = a[2, k:k]
+ slice_val = slice_t.eval()
+ self.assertAllEqual(slice_val, inp[2, k:k])
+
+ def testEmptyAll(self):
+ self._testEmpty(use_gpu=False)
+ self._testEmpty(use_gpu=True)
+
+ def _testInt32(self, use_gpu):
+ inp = np.random.rand(4, 4).astype("i")
+ for k in xrange(4):
+ with self.test_session(use_gpu=use_gpu):
+ a = tf.constant(inp, shape=[4, 4], dtype=tf.int32)
+ slice_t = a[2, k:k]
+ slice_val = slice_t.eval()
+ self.assertAllEqual(slice_val, inp[2, k:k])
+
+ def testInt32(self):
+    self._testInt32(use_gpu=False)
+    self._testInt32(use_gpu=True)
+
+ def _testSelectAll(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ inp = np.random.rand(4, 4, 4, 4).astype("f")
+ a = tf.constant(inp, shape=[4, 4, 4, 4],
+ dtype=tf.float32)
+
+ slice_explicit_t = tf.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
+ slice_implicit_t = a[:, :, :, :]
+
+ self.assertAllEqual(inp, slice_explicit_t.eval())
+ self.assertAllEqual(inp, slice_implicit_t.eval())
+ self.assertEqual(inp.shape, slice_explicit_t.get_shape())
+ self.assertEqual(inp.shape, slice_implicit_t.get_shape())
+
+ def testSelectAll(self):
+ for _ in range(10):
+ self._testSelectAll(use_gpu=False)
+ self._testSelectAll(use_gpu=True)
+
+ def _testSingleDimension(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ inp = np.random.rand(10).astype("f")
+ a = tf.constant(inp, shape=[10], dtype=tf.float32)
+
+ hi = np.random.random_integers(0, 9)
+ scalar_t = a[hi]
+ scalar_val = scalar_t.eval()
+ self.assertAllEqual(scalar_val, inp[hi])
+
+ lo = np.random.random_integers(0, hi)
+ slice_t = a[lo:hi]
+ slice_val = slice_t.eval()
+ self.assertAllEqual(slice_val, inp[lo:hi])
+
+ def testSingleDimension(self):
+ for _ in range(10):
+ self._testSingleDimension(use_gpu=False)
+ self._testSingleDimension(use_gpu=True)
+
+ def _testSliceMatrixDim0(self, x, begin, size, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ tf_ans = tf.slice(x, [begin, 0], [size, x.shape[1]]).eval()
+ np_ans = x[begin:begin+size, :]
+ self.assertAllEqual(tf_ans, np_ans)
+
+ def testSliceMatrixDim0(self):
+ for use_gpu in [False, True]:
+ x = np.random.rand(8, 4).astype("f")
+ self._testSliceMatrixDim0(x, 1, 2, use_gpu)
+ self._testSliceMatrixDim0(x, 3, 3, use_gpu)
+ y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned
+ self._testSliceMatrixDim0(y, 1, 2, use_gpu)
+ self._testSliceMatrixDim0(y, 3, 3, use_gpu)
+
+ def _testIndexAndSlice(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ inp = np.random.rand(4, 4).astype("f")
+ a = tf.constant(inp, shape=[4, 4], dtype=tf.float32)
+
+ x, y = np.random.random_integers(0, 3, size=2).tolist()
+ slice_t = a[x, 0:y]
+ slice_val = slice_t.eval()
+ self.assertAllEqual(slice_val, inp[x, 0:y])
+
+ def testSingleElementAll(self):
+ for _ in range(10):
+ self._testIndexAndSlice(use_gpu=False)
+ self._testIndexAndSlice(use_gpu=True)
+
+ def _testSimple(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu) as sess:
+ inp = np.random.rand(4, 4).astype("f")
+ a = tf.constant([float(x) for x in inp.ravel(order="C")],
+ shape=[4, 4], dtype=tf.float32)
+ slice_t = tf.slice(a, [0, 0], [2, 2])
+ slice2_t = a[:2, :2]
+ slice_val, slice2_val = sess.run([slice_t, slice2_t])
+ self.assertAllEqual(slice_val, inp[:2, :2])
+ self.assertAllEqual(slice2_val, inp[:2, :2])
+ self.assertEqual(slice_val.shape, slice_t.get_shape())
+ self.assertEqual(slice2_val.shape, slice2_t.get_shape())
+
+ def testSimpleAll(self):
+ self._testSimple(use_gpu=False)
+ self._testSimple(use_gpu=True)
+
+ def _testComplex(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ inp = np.random.rand(4, 10, 10, 4).astype("f")
+ a = tf.constant(inp, dtype=tf.float32)
+
+ x = np.random.random_integers(0, 9)
+ z = np.random.random_integers(0, 9)
+ y = np.random.random_integers(0, z)
+ slice_t = a[:, x, y:z, :]
+ self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :])
+
+ def testComplex(self):
+ for _ in range(10):
+ self._testComplex(use_gpu=False)
+ self._testComplex(use_gpu=True)
+
+ def _RunAndVerifyResult(self, use_gpu):
+ # Random dims of rank 5
+ input_shape = np.random.randint(0, 20, size=5)
+ inp = np.random.rand(*input_shape).astype("f")
+ with self.test_session(use_gpu=use_gpu) as sess:
+ a = tf.constant([float(x) for x in inp.ravel(order="C")],
+ shape=input_shape, dtype=tf.float32)
+ indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
+ sizes = [np.random.randint(0, input_shape[i] - indices[i] + 1)
+ for i in range(5)]
+ slice_t = tf.slice(a, indices, sizes)
+ slice2_t = a[indices[0]:indices[0]+sizes[0],
+ indices[1]:indices[1]+sizes[1],
+ indices[2]:indices[2]+sizes[2],
+ indices[3]:indices[3]+sizes[3],
+ indices[4]:indices[4]+sizes[4]]
+
+ slice_val, slice2_val = sess.run([slice_t, slice2_t])
+
+ expected_val = inp[indices[0]:indices[0]+sizes[0],
+ indices[1]:indices[1]+sizes[1],
+ indices[2]:indices[2]+sizes[2],
+ indices[3]:indices[3]+sizes[3],
+ indices[4]:indices[4]+sizes[4]]
+ self.assertAllEqual(slice_val, expected_val)
+ self.assertAllEqual(slice2_val, expected_val)
+ self.assertEqual(expected_val.shape, slice_t.get_shape())
+ self.assertEqual(expected_val.shape, slice2_t.get_shape())
+
+ def testRandom(self):
+ for _ in range(10):
+ self._RunAndVerifyResult(use_gpu=False)
+ self._RunAndVerifyResult(use_gpu=True)
+
+ def _testGradientSlice(self, input_shape, slice_begin, slice_size, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ num_inputs = np.prod(input_shape)
+ num_grads = np.prod(slice_size)
+ inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
+ a = tf.constant([float(x) for x in inp.ravel(order="C")],
+ shape=input_shape, dtype=tf.float32)
+ slice_t = tf.slice(a, slice_begin, slice_size)
+ grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
+ grad_tensor = tf.constant(grads)
+ grad = tf.gradients(slice_t, [a], grad_tensor)[0]
+ result = grad.eval()
+
+      # Create a zero tensor of the input shape and place
+ # the grads into the right location to compare against TensorFlow.
+ np_ans = np.zeros(input_shape)
+ slices = []
+ for i in xrange(len(input_shape)):
+ slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
+ np_ans[slices] = grads
+
+ self.assertAllClose(np_ans, result)
+
+ def _testGradientVariableSize(self, use_gpu):
+ with self.test_session(use_gpu=use_gpu):
+ inp = tf.constant([1.0, 2.0, 3.0], name="in")
+ out = tf.slice(inp, [1], [-1])
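+      # Only inp[1] and inp[2] reach the output, so their gradients are 1.0
+      # while inp[0] receives 0.0.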
+ grad_actual = tf.gradients(out, inp)[0].eval()
+ self.assertAllClose([0., 1., 1.], grad_actual)
+
+ def _testGradientsSimple(self, use_gpu):
+ # Slice the middle square out of a 4x4 input
+ self._testGradientSlice([4, 4], [1, 1], [2, 2], use_gpu)
+
+ # Slice the upper left square out of a 4x4 input
+ self._testGradientSlice([4, 4], [0, 0], [2, 2], use_gpu)
+
+ # Slice a non-square input starting from (2,1)
+ self._testGradientSlice([4, 4], [2, 1], [1, 2], use_gpu)
+
+ # Slice a 3D tensor
+ self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1], use_gpu)
+
+ # Use -1 as a slice dimension.
+ self._testGradientVariableSize(use_gpu)
+
+ def testGradientsAll(self):
+ self._testGradientsSimple(use_gpu=False)
+ self._testGradientsSimple(use_gpu=True)
+
+ def testNotIterable(self):
+ # NOTE(mrry): If we register __getitem__ as an overloaded
+ # operator, Python will valiantly attempt to iterate over the
+ # Tensor from 0 to infinity. This test ensures that this
+ # unintended behavior is prevented.
+ c = tf.constant(5.0)
+ with self.assertRaisesWithPredicateMatch(
+ TypeError,
+ lambda e: "'Tensor' object is not iterable" in e.message):
+ for _ in c:
+ pass
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/softmax_op_test.py b/tensorflow/python/kernel_tests/softmax_op_test.py
new file mode 100644
index 0000000000..fd25970093
--- /dev/null
+++ b/tensorflow/python/kernel_tests/softmax_op_test.py
@@ -0,0 +1,65 @@
+"""Tests for SoftmaxOp."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class SoftmaxTest(tf.test.TestCase):
+
+ def _npSoftmax(self, features):
+ batch_dim = 0
+ class_dim = 1
+ batch_size = features.shape[batch_dim]
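+    # Shift each row by its max before exponentiating: softmax(x) equals
+    # softmax(x - c) for any constant c, and the shift keeps np.exp from
+    # overflowing.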
+ e = np.exp(features -
+ np.reshape(np.amax(features, axis=class_dim), [batch_size, 1]))
+ return e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
+
+ def _testSoftmax(self, np_features, use_gpu=False):
+ np_softmax = self._npSoftmax(np_features)
+ with self.test_session(use_gpu=use_gpu):
+ tf_softmax = tf.nn.softmax(np_features)
+ out = tf_softmax.eval()
+ self.assertAllClose(np_softmax, out)
+ self.assertShapeEqual(np_softmax, tf_softmax)
+ # Bonus check: the softmaxes should add to one in each
+ # batch element.
+ self.assertAllClose(np.ones(out.shape[0]),
+ np.sum(out, axis=1))
+
+ def _testAll(self, features):
+ self._testSoftmax(features, use_gpu=False)
+ self._testSoftmax(features, use_gpu=True)
+
+ def testNpSoftmax(self):
+ features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
+ # Batch 0: All exps are 1. The expected result is
+ # [0.25, 0.25, 0.25, 0.25]
+ #
+ # Batch 1:
+ # exps = [1., 2.718, 7.389, 20.085]
+ # sum = 31.192
+ # Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
+ np_sm = self._npSoftmax(np.array(features))
+ self.assertAllClose(
+ np.array([[0.25, 0.25, 0.25, 0.25],
+ [0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
+ np_sm,
+ rtol=1.e-5, atol=1.e-5)
+
+ def testShapeMismatch(self):
+ with self.assertRaises(ValueError):
+ tf.nn.softmax([0., 1., 2., 3.])
+
+ def testFloat(self):
+ self._testAll(
+ np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
+
+ def testDouble(self):
+ self._testSoftmax(
+ np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
+ use_gpu=False)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/softplus_op_test.py b/tensorflow/python/kernel_tests/softplus_op_test.py
new file mode 100644
index 0000000000..25b68aa659
--- /dev/null
+++ b/tensorflow/python/kernel_tests/softplus_op_test.py
@@ -0,0 +1,47 @@
+"""Tests for Softplus and SoftplusGrad."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class SoftplusTest(tf.test.TestCase):
+
+ def _npSoftplus(self, np_features):
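+    # Naive formula; np.exp overflows for large inputs, but the test values
+    # below are small. A stable variant would be
+    # np.maximum(x, 0) + np.log1p(np.exp(-np.abs(x))).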
+ return np.log(1 + np.exp(np_features))
+
+ def _testSoftplus(self, np_features, use_gpu=False):
+ np_softplus = self._npSoftplus(np_features)
+ with self.test_session(use_gpu=use_gpu):
+ softplus = tf.nn.softplus(np_features)
+ tf_softplus = softplus.eval()
+ self.assertAllClose(np_softplus, tf_softplus)
+ self.assertShapeEqual(np_softplus, softplus)
+
+ def testNumbers(self):
+ for t in [np.float, np.double]:
+ self._testSoftplus(
+ np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
+ use_gpu=False)
+ self._testSoftplus(
+ np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
+ use_gpu=True)
+
+ def testGradient(self):
+ with self.test_session():
+ x = tf.constant(
+ [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+ shape=[2, 5], name="x")
+ y = tf.nn.softplus(x, name="softplus")
+ x_init = np.asarray(
+ [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
+ dtype=np.float32, order="F")
+ err = gc.ComputeGradientError(x, [2, 5], y, [2, 5], x_init_value=x_init)
+ print "softplus (float) gradient err = ", err
+ self.assertLess(err, 1e-4)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/sparse_concat_op_test.py b/tensorflow/python/kernel_tests/sparse_concat_op_test.py
new file mode 100644
index 0000000000..0f5650b89c
--- /dev/null
+++ b/tensorflow/python/kernel_tests/sparse_concat_op_test.py
@@ -0,0 +1,260 @@
+"""Tests for SparseConcat."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class SparseConcatTest(tf.test.TestCase):
+
+ def _SparseTensor_UnknownShape(self, ind_shape=None, val_shape=None,
+ shape_shape=None):
+ return tf.SparseTensor(
+ tf.placeholder(tf.int64, shape=ind_shape),
+ tf.placeholder(tf.float32, shape=val_shape),
+ tf.placeholder(tf.int64, shape=shape_shape))
+
+ def _SparseTensor_3x3(self):
+ # [ 1]
+ # [2 ]
+ # [3 4]
+ ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
+ val = np.array([1, 2, 3, 4])
+ shape = np.array([3, 3])
+ return tf.SparseTensor(
+ tf.constant(ind, tf.int64),
+ tf.constant(val, tf.float32),
+ tf.constant(shape, tf.int64))
+
+ def _SparseTensor_3x5(self):
+ # [ ]
+ # [ 1 ]
+ # [2 1 0]
+ ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
+ val = np.array([1, 2, 1, 0])
+ shape = np.array([3, 5])
+ return tf.SparseTensor(
+ tf.constant(ind, tf.int64),
+ tf.constant(val, tf.float32),
+ tf.constant(shape, tf.int64))
+
+ def _SparseTensor_3x2(self):
+ # [ ]
+ # [1 ]
+ # [2 ]
+ ind = np.array([[1, 0], [2, 0]])
+ val = np.array([1, 2])
+ shape = np.array([3, 2])
+ return tf.SparseTensor(
+ tf.constant(ind, tf.int64),
+ tf.constant(val, tf.float32),
+ tf.constant(shape, tf.int64))
+
+ def _SparseTensor_2x3(self):
+ # [ 1 ]
+ # [1 2]
+ ind = np.array([[0, 1], [1, 0], [1, 2]])
+ val = np.array([1, 1, 2])
+ shape = np.array([2, 3])
+ return tf.SparseTensor(
+ tf.constant(ind, tf.int64),
+ tf.constant(val, tf.float32),
+ tf.constant(shape, tf.int64))
+
+ def _SparseTensor_2x3x4(self):
+ ind = np.array([
+ [0, 0, 1],
+ [0, 1, 0], [0, 1, 2],
+ [1, 0, 3],
+ [1, 1, 1], [1, 1, 3],
+ [1, 2, 2]])
+ val = np.array([1, 10, 12, 103, 111, 113, 122])
+ shape = np.array([2, 3, 4])
+ return tf.SparseTensor(
+ tf.constant(ind, tf.int64),
+ tf.constant(val, tf.float32),
+ tf.constant(shape, tf.int64))
+
+ def _SparseTensor_String3x3(self):
+ # [ a]
+ # [b ]
+ # [c d]
+ ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
+ val = np.array(["a", "b", "c", "d"])
+ shape = np.array([3, 3])
+ return tf.SparseTensor(
+ tf.constant(ind, tf.int64),
+ tf.constant(val, tf.string),
+ tf.constant(shape, tf.int64))
+
+ def _SparseTensor_String3x5(self):
+ # [ ]
+ # [ e ]
+ # [f g h]
+ ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
+ val = np.array(["e", "f", "g", "h"])
+ shape = np.array([3, 5])
+ return tf.SparseTensor(
+ tf.constant(ind, tf.int64),
+ tf.constant(val, tf.string),
+ tf.constant(shape, tf.int64))
+
+ def testConcat1(self):
+ with self.test_session(use_gpu=False) as sess:
+ # concat(A):
+ # [ 1]
+ # [2 ]
+ # [3 4]
+ sp_a = self._SparseTensor_3x3()
+
+ sp_concat = tf.sparse_concat(1, [sp_a])
+
+ self.assertEqual(sp_concat.indices.get_shape(), [4, 2])
+ self.assertEqual(sp_concat.values.get_shape(), [4])
+ self.assertEqual(sp_concat.shape.get_shape(), [2])
+
+ concat_out = sess.run(sp_concat)
+
+ self.assertAllEqual(
+ concat_out.indices, [[0, 2], [1, 0], [2, 0], [2, 2]])
+ self.assertAllEqual(concat_out.values, [1, 2, 3, 4])
+ self.assertAllEqual(concat_out.shape, [3, 3])
+
+ def testConcat2(self):
+ with self.test_session(use_gpu=False) as sess:
+ # concat(A, B):
+ # [ 1 ]
+ # [2 1 ]
+ # [3 4 2 1 0]
+ sp_a = self._SparseTensor_3x3()
+ sp_b = self._SparseTensor_3x5()
+
+ sp_concat = tf.sparse_concat(1, [sp_a, sp_b])
+
+ self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
+ self.assertEqual(sp_concat.values.get_shape(), [8])
+ self.assertEqual(sp_concat.shape.get_shape(), [2])
+
+ concat_out = sess.run(sp_concat)
+
+ self.assertAllEqual(
+ concat_out.indices,
+ [[0, 2], [1, 0], [1, 4], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7]])
+ self.assertAllEqual(concat_out.values, [1, 2, 1, 3, 4, 2, 1, 0])
+ self.assertAllEqual(concat_out.shape, [3, 8])
+
+ def testConcatDim0(self):
+ with self.test_session(use_gpu=False) as sess:
+ # concat(A, D):
+ # [ 1]
+ # [2 ]
+ # [3 4]
+ # [ 1 ]
+ # [1 2]
+ sp_a = self._SparseTensor_3x3()
+ sp_d = self._SparseTensor_2x3()
+
+ sp_concat = tf.sparse_concat(0, [sp_a, sp_d])
+
+ self.assertEqual(sp_concat.indices.get_shape(), [7, 2])
+ self.assertEqual(sp_concat.values.get_shape(), [7])
+ self.assertEqual(sp_concat.shape.get_shape(), [2])
+
+ concat_out = sess.run(sp_concat)
+
+ self.assertAllEqual(
+ concat_out.indices,
+ [[0, 2], [1, 0], [2, 0], [2, 2], [3, 1], [4, 0], [4, 2]])
+ self.assertAllEqual(
+ concat_out.values, np.array([1, 2, 3, 4, 1, 1, 2]))
+ self.assertAllEqual(
+ concat_out.shape, np.array([5, 3]))
+
+ def testConcat3(self):
+ with self.test_session(use_gpu=False) as sess:
+ # concat(A, B, C):
+ # [ 1 ]
+ # [2 1 1 ]
+ # [3 4 2 1 0 2 ]
+ sp_a = self._SparseTensor_3x3()
+ sp_b = self._SparseTensor_3x5()
+ sp_c = self._SparseTensor_3x2()
+
+ sp_concat = tf.sparse_concat(1, [sp_a, sp_b, sp_c])
+
+ self.assertEqual(sp_concat.indices.get_shape(), [10, 2])
+ self.assertEqual(sp_concat.values.get_shape(), [10])
+ self.assertEqual(sp_concat.shape.get_shape(), [2])
+
+ concat_out = sess.run(sp_concat)
+
+ self.assertAllEqual(
+ concat_out.indices,
+ [[0, 2], [1, 0], [1, 4], [1, 8], [2, 0], [2, 2], [2, 3], [2, 6],
+ [2, 7], [2, 8]])
+ self.assertAllEqual(concat_out.values, [1, 2, 1, 1, 3, 4, 2, 1, 0, 2])
+ self.assertAllEqual(concat_out.shape, [3, 10])
+
+ def testConcatNonNumeric(self):
+ with self.test_session(use_gpu=False) as sess:
+ # concat(A, B):
+ # [ a ]
+ # [b e ]
+ # [c d f g h]
+ sp_a = self._SparseTensor_String3x3()
+ sp_b = self._SparseTensor_String3x5()
+
+ sp_concat = tf.sparse_concat(1, [sp_a, sp_b])
+
+ self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
+ self.assertEqual(sp_concat.values.get_shape(), [8])
+ self.assertEqual(sp_concat.shape.get_shape(), [2])
+
+ concat_out = sess.run(sp_concat)
+
+ self.assertAllEqual(
+ concat_out.indices,
+ [[0, 2], [1, 0], [1, 4], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7]])
+ self.assertAllEqual(
+ concat_out.values, ["a", "b", "e", "c", "d", "f", "g", "h"])
+ self.assertAllEqual(concat_out.shape, [3, 8])
+
+ def testMismatchedRank(self):
+ with self.test_session(use_gpu=False):
+ sp_a = self._SparseTensor_3x3()
+ sp_e = self._SparseTensor_2x3x4()
+
+ # Rank mismatches can be caught at shape-inference time
+ with self.assertRaises(ValueError):
+ tf.sparse_concat(1, [sp_a, sp_e])
+
+ def testMismatchedShapes(self):
+ with self.test_session(use_gpu=False) as sess:
+ sp_a = self._SparseTensor_3x3()
+ sp_b = self._SparseTensor_3x5()
+ sp_c = self._SparseTensor_3x2()
+ sp_d = self._SparseTensor_2x3()
+ sp_concat = tf.sparse_concat(1, [sp_a, sp_b, sp_c, sp_d])
+
+ # Shape mismatches can only be caught when the op is run
+ with self.assertRaisesOpError("Input shapes must match"):
+ sess.run(sp_concat)
+
+ def testShapeInferenceUnknownShapes(self):
+ with self.test_session(use_gpu=False):
+ sp_inputs = [
+ self._SparseTensor_UnknownShape(),
+ self._SparseTensor_UnknownShape(val_shape=[3]),
+ self._SparseTensor_UnknownShape(ind_shape=[1, 3]),
+ self._SparseTensor_UnknownShape(shape_shape=[3])]
+
+ sp_concat = tf.sparse_concat(0, sp_inputs)
+
+ self.assertEqual(sp_concat.indices.get_shape().as_list(), [None, 3])
+ self.assertEqual(sp_concat.values.get_shape().as_list(), [None])
+ self.assertEqual(sp_concat.shape.get_shape(), [3])
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/sparse_matmul_op_test.py b/tensorflow/python/kernel_tests/sparse_matmul_op_test.py
new file mode 100644
index 0000000000..d87d15cae9
--- /dev/null
+++ b/tensorflow/python/kernel_tests/sparse_matmul_op_test.py
@@ -0,0 +1,82 @@
+"""Tests for tensorflow.ops.tf.matmul."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+def RandMatrix(rows, cols, tr):
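+  # Clipping the uniform draw at zero zeroes out roughly half the entries,
+  # so the matrices are sparse enough to exercise the *_is_sparse code paths.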
+ if tr:
+ rows, cols = cols, rows
+ return (np.clip(np.random.uniform(low=-100.0, high=100.0, size=rows * cols),
+ 0, 100) / 100).reshape([rows, cols]).astype(np.float32)
+
+
+class SparseMatMulTest(tf.test.TestCase):
+
+ def _testCpuMatmul(self, x, y, tr_a=False, tr_b=False,
+ sp_a=True, sp_b=False):
+ x_mat = np.matrix(x)
+ if tr_a:
+ x_mat = np.transpose(x_mat)
+ y_mat = np.matrix(y)
+ if tr_b:
+ y_mat = np.transpose(y_mat)
+ np_ans = x_mat * y_mat
+ with self.test_session(use_gpu=False):
+ tf_ans = tf.matmul(x, y,
+ transpose_a=tr_a, transpose_b=tr_b,
+ a_is_sparse=sp_a,
+ b_is_sparse=sp_b)
+ out = tf_ans.eval()
+ self.assertAllClose(np_ans, out)
+ self.assertShapeEqual(np_ans, tf_ans)
+
+ def testFloatBasic(self):
+ x = np.arange(0., 4.).reshape([4, 1]).astype(np.float32)
+ y = np.arange(-1., 1.).reshape([1, 2]).astype(np.float32)
+ self._testCpuMatmul(x, y)
+
+  # Tests with randomly sized matrices.
+ def testFloatRandom(self):
+ for _ in range(10):
+ for tr_a in [True, False]:
+ for tr_b in [True, False]:
+ for sp_a in [True, False]:
+ for sp_b in [True, False]:
+ n, k, m = np.random.randint(1, 100, size=3)
+ x = RandMatrix(n, k, tr_a)
+ y = RandMatrix(k, m, tr_b)
+ self._testCpuMatmul(x, y, tr_a, tr_b, sp_a, sp_b)
+
+
+class MatMulGradientTest(tf.test.TestCase):
+
+ def _testGradients(self, tr_a, tr_b, sp_a, sp_b, name):
+ with self.test_session():
+ a = tf.constant(RandMatrix(3, 2, tr_a), dtype=tf.float32)
+ b = tf.constant(RandMatrix(2, 4, tr_b), dtype=tf.float32)
+ m = tf.matmul(a, b,
+ name=name,
+ transpose_a=tr_a,
+ transpose_b=tr_b,
+ a_is_sparse=sp_a,
+ b_is_sparse=sp_b)
+ err = (gc.ComputeGradientError(a, [2, 3] if tr_a else [3, 2], m, [3, 4]) +
+ gc.ComputeGradientError(b, [4, 2] if tr_b else [2, 4], m, [3, 4]))
+ print "sparse_matmul gradient err = ", err
+ self.assertLess(err, 1e-3)
+
+ def testGradientInput(self):
+ for tr_a in [True, False]:
+ for tr_b in [True, False]:
+ for sp_a in [True, False]:
+ for sp_b in [True, False]:
+ name = "sparse_matmul_%s_%s_%s_%s" % (tr_a, tr_b, sp_a, sp_b)
+ self._testGradients(tr_a, tr_b, sp_a, sp_b, name)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/sparse_reorder_op_test.py b/tensorflow/python/kernel_tests/sparse_reorder_op_test.py
new file mode 100644
index 0000000000..c3bcc25311
--- /dev/null
+++ b/tensorflow/python/kernel_tests/sparse_reorder_op_test.py
@@ -0,0 +1,56 @@
+"""Tests for SparseReorder."""
+
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class SparseReorderTest(tf.test.TestCase):
+
+ def _SparseTensorPlaceholder(self):
+ return tf.SparseTensor(
+ tf.placeholder(tf.int64),
+ tf.placeholder(tf.int32),
+ tf.placeholder(tf.int64))
+
+ def _SparseTensorValue_5x6(self, permutation):
+ ind = np.array([
+ [0, 0],
+ [1, 0], [1, 3], [1, 4],
+ [3, 2], [3, 3]]).astype(np.int64)
+ val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
+
+ ind = ind[permutation]
+ val = val[permutation]
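+    # Permuting indices and values in lockstep leaves the logical tensor
+    # unchanged; only the storage order of its entries differs.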
+
+ shape = np.array([5, 6]).astype(np.int64)
+ return tf.SparseTensorValue(ind, val, shape)
+
+ def testAlreadyInOrder(self):
+ with self.test_session(use_gpu=False) as sess:
+ sp_input = self._SparseTensorPlaceholder()
+ input_val = self._SparseTensorValue_5x6(np.arange(6))
+ sp_output = tf.sparse_reorder(sp_input)
+
+ output_val = sess.run(sp_output, {sp_input: input_val})
+ self.assertAllEqual(output_val.indices, input_val.indices)
+ self.assertAllEqual(output_val.values, input_val.values)
+ self.assertAllEqual(output_val.shape, input_val.shape)
+
+ def testOutOfOrder(self):
+ expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
+ with self.test_session(use_gpu=False) as sess:
+ for _ in range(5): # To test various random permutations
+ sp_input = self._SparseTensorPlaceholder()
+ input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
+ sp_output = tf.sparse_reorder(sp_input)
+
+ output_val = sess.run(sp_output, {sp_input: input_val})
+ self.assertAllEqual(output_val.indices, expected_output_val.indices)
+ self.assertAllEqual(output_val.values, expected_output_val.values)
+ self.assertAllEqual(output_val.shape, expected_output_val.shape)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py b/tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py
new file mode 100644
index 0000000000..2bab89923e
--- /dev/null
+++ b/tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py
@@ -0,0 +1,111 @@
+"""Tests for tensorflow.kernels.sparse_op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+def _SparseToDense(sparse_indices, output_size, sparse_values,
+ default_value):
+ return tf.sparse_to_dense(sparse_indices, output_size,
+ sparse_values, default_value)
+
+
+class SparseToDenseTest(tf.test.TestCase):
+
+ def testInt(self):
+ with self.test_session(use_gpu=False):
+ tf_ans = _SparseToDense([1, 3], [5], 1, 0).eval()
+ np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testFloat(self):
+ with self.test_session(use_gpu=False):
+ tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0).eval()
+ np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testString(self):
+ with self.test_session(use_gpu=False):
+ tf_ans = _SparseToDense([1, 3], [5], "a", "b").eval()
+ np_ans = np.array(["b", "a", "b", "a", "b"]).astype(np.string_)
+ self.assertAllEqual(np_ans, tf_ans)
+
+ def testSetValue(self):
+ with self.test_session(use_gpu=False):
+ tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1).eval()
+ np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testSetSingleValue(self):
+ with self.test_session(use_gpu=False):
+ tf_ans = _SparseToDense([1, 3], [5], 1, -1).eval()
+ np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def test2d(self):
+ # pylint: disable=bad-whitespace
+ with self.test_session(use_gpu=False):
+ tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1).eval()
+ np_ans = np.array([[-1, -1, -1, -1],
+ [-1, -1, -1, 1],
+ [ 1, -1, -1, -1]]).astype(np.int32)
+ self.assertAllClose(np_ans, tf_ans)
+
+ def test3d(self):
+ with self.test_session(use_gpu=False):
+ tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1).eval()
+ np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
+ np_ans[1, 3, 0] = 1
+ np_ans[2, 0, 1] = 1
+ self.assertAllClose(np_ans, tf_ans)
+
+ def testBadShape(self):
+ with self.test_session():
+ with self.assertRaisesWithPredicateMatch(
+ ValueError, lambda e: ("Input shape should be a vector" == str(e))):
+ _SparseToDense([1, 3], [[5], [3]], 1, -1)
+
+ def testBadValue(self):
+ with self.test_session():
+ dense = _SparseToDense([1, 3], [5], [[5], [3]], -1)
+ with self.assertRaisesOpError(
+ r"sparse_values has incorrect shape \[2,1\], "
+ r"should be \[\] or \[2\]"):
+ dense.eval()
+
+ def testBadNumValues(self):
+ with self.test_session():
+ dense = _SparseToDense([1, 3], [5], [1, 2, 3], -1)
+ with self.assertRaisesOpError(
+ r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
+ dense.eval()
+
+ def testBadDefault(self):
+ with self.test_session():
+ dense = _SparseToDense([1, 3], [5], [1, 2], [1, 2])
+ with self.assertRaisesOpError("default_value should be a scalar"):
+ dense.eval()
+
+ def testShapeInferenceKnownShape(self):
+ with self.test_session(use_gpu=False):
+ indices = tf.placeholder(tf.int64)
+
+ shape = [4, 5, 6]
+ output = tf.sparse_to_dense(indices, shape, 1, 0)
+ self.assertEqual(output.get_shape(), [4, 5, 6])
+
+ shape = tf.placeholder(tf.int64, shape=(3,))
+ output = tf.sparse_to_dense(indices, shape, 1, 0)
+ self.assertEqual(output.get_shape().as_list(), [None, None, None])
+
+ def testShapeInferenceUnknownShape(self):
+ with self.test_session(use_gpu=False):
+ indices = tf.placeholder(tf.int64)
+ shape = tf.placeholder(tf.int64)
+ output = tf.sparse_to_dense(indices, shape, 1, 0)
+ self.assertEqual(output.get_shape().ndims, None)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/sparsemask_op_test.py b/tensorflow/python/kernel_tests/sparsemask_op_test.py
new file mode 100644
index 0000000000..ffde8f7944
--- /dev/null
+++ b/tensorflow/python/kernel_tests/sparsemask_op_test.py
@@ -0,0 +1,32 @@
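+"""Tests for tf.sparse_mask."""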
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class SparseMaskTest(tf.test.TestCase):
+
+ def testBasic(self):
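+    # tf.sparse_mask drops the rows of an IndexedSlices whose indices appear
+    # in mask_indices; masking index 0 leaves the rows for indices 2, 3, 4.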
+ values = np.random.rand(4, 4).astype(np.single)
+ indices = np.array([0, 2, 3, 4], dtype=np.int32)
+ mask_indices = np.array([0], dtype=np.int32)
+
+ out_values = values[1:, :]
+ out_indices = np.array([2, 3, 4], dtype=np.int32)
+
+ with self.test_session() as sess:
+ values_tensor = tf.convert_to_tensor(values)
+ indices_tensor = tf.convert_to_tensor(indices)
+ mask_indices_tensor = tf.convert_to_tensor(mask_indices)
+
+ t = tf.IndexedSlices(values_tensor, indices_tensor)
+ masked_t = tf.sparse_mask(t, mask_indices_tensor)
+
+ tf_out_values, tf_out_indices = sess.run([masked_t.values,
+ masked_t.indices])
+
+ self.assertAllEqual(tf_out_values, out_values)
+ self.assertAllEqual(tf_out_indices, out_indices)
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/split_op_test.py b/tensorflow/python/kernel_tests/split_op_test.py
new file mode 100644
index 0000000000..19906aa02b
--- /dev/null
+++ b/tensorflow/python/kernel_tests/split_op_test.py
@@ -0,0 +1,132 @@
+"""Functional tests for Split Op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class SplitOpTest(tf.test.TestCase):
+
+ def _compare(self, x, dim, num, use_gpu):
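+ # Compare tf.split against np.split: both cut x into `num` equal
+ # pieces along dimension `dim`.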
+ np_ans = np.split(x, num, dim)
+ with self.test_session(use_gpu=use_gpu) as sess:
+ tf_ans = tf.split(dim, num, x)
+ out = sess.run(tf_ans)
+ self.assertEqual(num, len(np_ans))
+ self.assertEqual(num, len(out))
+ for i in range(num):
+ self.assertAllEqual(np_ans[i], out[i])
+ self.assertShapeEqual(np_ans[i], tf_ans[i])
+
+ def _testSplitRows(self, use_gpu):
+ inp = np.random.rand(4, 4).astype("f")
+ self._compare(inp, 0, 4, use_gpu)
+
+ def testSplitRowsAll(self):
+ self._testSplitRows(use_gpu=False)
+ self._testSplitRows(use_gpu=True)
+
+ def _testSplitCols(self, use_gpu):
+ inp = np.random.rand(4, 4).astype("f")
+ self._compare(inp, 1, 4, use_gpu)
+
+ def testSplitColsAll(self):
+ self._testSplitCols(use_gpu=False)
+ self._testSplitCols(use_gpu=True)
+
+ def _testEmpty(self, x, dim, num, expected_shape):
+ with self.test_session() as sess:
+ tf_ans = tf.split(dim, num, x)
+ out = sess.run(tf_ans)
+ self.assertEqual(x.size, 0)
+ self.assertEqual(len(out), num)
+ for i in range(num):
+ self.assertEqual(out[i].shape, expected_shape)
+ self.assertEqual(expected_shape, tf_ans[i].get_shape())
+
+ def testEmpty(self):
+ # Note: np.split returns a rank-0 empty ndarray
+ # if the input ndarray is empty.
+ inp = np.random.rand(8, 0, 21).astype("f")
+ self._testEmpty(inp, 0, 2, (4, 0, 21))
+ self._testEmpty(inp, 0, 4, (2, 0, 21))
+ self._testEmpty(inp, 1, 4, (8, 0, 21))
+ self._testEmpty(inp, 2, 3, (8, 0, 7))
+ self._testEmpty(inp, 2, 7, (8, 0, 3))
+
+ def testIdentity(self):
+ inp = np.random.rand(2, 2, 2).astype("f")
+ for use_gpu in [False, True]:
+ self._compare(inp, 0, 1, use_gpu)
+ self._compare(inp, 1, 1, use_gpu)
+ self._compare(inp, 2, 1, use_gpu)
+
+ def testSplitDim0(self):
+ for use_gpu in [False, True]:
+ self._compare(np.random.rand(6, 10, 18).astype("f"), 0, 3, use_gpu)
+ self._compare(np.random.rand(6, 7, 18).astype("f"), 0, 3, use_gpu)
+ self._compare(np.random.rand(6, 7, 9).astype("f"), 0, 3, use_gpu)
+
+ def _RunAndVerify(self, use_gpu):
+ # Random dims of rank 5
+ shape = np.random.randint(0, 5, size=5)
+ split_dim = np.random.randint(0, 5)
+ num_split = np.random.randint(2, 8)
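+ # Make the size of the split dimension evenly divisible by num_split.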
+ shape[split_dim] = np.random.randint(2, 5) * num_split
+ inp = np.random.rand(*shape).astype("f")
+ with self.test_session(use_gpu=use_gpu) as sess:
+ result = sess.run(tf.split(split_dim, num_split, inp))
+ slices = [slice(0, x) for x in shape]
+ offset = 0
+ length = shape[split_dim] / num_split
+ for i in range(num_split):
+ slices[split_dim] = slice(offset, offset + length)
+ offset += length
+ self.assertAllEqual(result[i], inp[slices])
+
+ def testRandom(self):
+ for _ in range(5):
+ self._RunAndVerify(use_gpu=False)
+ self._RunAndVerify(use_gpu=True)
+
+ def _testGradientsSimple(self, use_gpu):
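+ # The gradient of split w.r.t. its input concatenates the per-output
+ # gradients along the split dimension, so each column block of `grad`
+ # should equal the gradient fed to the corresponding output.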
+ inp = np.random.rand(4, 4).astype("f")
+ with self.test_session(use_gpu=use_gpu):
+ inp_tensor = tf.convert_to_tensor(inp)
+ s = tf.split(1, 4, inp_tensor)
+ inp_grads = [np.random.rand(4, 1).astype("f") for _ in range(4)]
+ grad_tensors = [tf.constant(x) for x in inp_grads]
+ grad = tf.gradients(s, [inp_tensor], grad_tensors)[0]
+ result = grad.eval()
+ for i in range(4):
+ self.assertAllEqual(result[:, i:i+1], inp_grads[i])
+
+ def testGradientsAll(self):
+ self._testGradientsSimple(use_gpu=False)
+ self._testGradientsSimple(use_gpu=True)
+
+ def testShapeFunctionEdgeCases(self):
+ # split_dim greater than rank of input.
+ with self.assertRaises(ValueError):
+ tf.split(2, 4, [[0, 1], [2, 3]])
+
+ # num_split does not evenly divide the size in split_dim.
+ with self.assertRaisesRegexp(ValueError, "should evenly divide"):
+ tf.split(0, 3, [0, 1, 2, 3])
+
+ # Unknown split_dim.
+ splits = tf.split(tf.placeholder(tf.int32),
+ 4, [[0, 1, 2, 3]])
+ for s in splits:
+ self.assertEqual([None, None], s.get_shape().as_list())
+
+ # Unknown split_dim and input shape.
+ splits = tf.split(tf.placeholder(tf.int32),
+ 4, tf.placeholder(tf.float32))
+ for s in splits:
+ self.assertEqual(None, s.get_shape().ndims)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py b/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py
new file mode 100644
index 0000000000..8615b271b8
--- /dev/null
+++ b/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py
@@ -0,0 +1,34 @@
+"""Tests for StringToHashBucket op from string_ops."""
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+
+class StringToHashBucketOpTest(tf.test.TestCase):
+
+ def testStringToOneHashBucket(self):
+ with self.test_session():
+ input_string = tf.placeholder(tf.string)
+ output = tf.string_to_hash_bucket(input_string, 1)
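+ # With a single bucket, every input string must map to bucket 0.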
+ result = output.eval(feed_dict={
+ input_string: ['a', 'b', 'c']
+ })
+
+ self.assertAllEqual([0, 0, 0], result)
+
+ def testStringToHashBuckets(self):
+ with self.test_session():
+ input_string = tf.placeholder(tf.string)
+ output = tf.string_to_hash_bucket(input_string, 10)
+ result = output.eval(feed_dict={
+ input_string: ['a', 'b', 'c']
+ })
+
+ # Hash64('a') -> 2996632905371535868 -> mod 10 -> 8
+ # Hash64('b') -> 5795986006276551370 -> mod 10 -> 0
+ # Hash64('c') -> 14899841994519054197 -> mod 10 -> 7
+ self.assertAllEqual([8, 0, 7], result)
+
+
+if __name__ == '__main__':
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/string_to_number_op_test.py b/tensorflow/python/kernel_tests/string_to_number_op_test.py
new file mode 100644
index 0000000000..39505e18ba
--- /dev/null
+++ b/tensorflow/python/kernel_tests/string_to_number_op_test.py
@@ -0,0 +1,66 @@
+"""Tests for StringToNumber op from parsing_ops."""
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+
+_ERROR_MESSAGE = "StringToNumberOp could not correctly convert string: "
+
+
+class StringToNumberOpTest(tf.test.TestCase):
+
+ def testToFloat(self):
+ with self.test_session():
+ input_string = tf.placeholder(tf.string)
+ output = tf.string_to_number(
+ input_string,
+ out_type=tf.float32)
+
+ result = output.eval(feed_dict={
+ input_string: ["0",
+ "3",
+ "-1",
+ "1.12",
+ "0xF",
+ " -10.5",
+ "3.40282e+38",
+ # The next two exceed maximum value for float, so we
+ # expect +/-INF to be returned instead.
+ "3.40283e+38",
+ "-3.40283e+38",
+ "NAN",
+ "INF"]
+ })
+
+ self.assertAllClose([0, 3, -1, 1.12, 0xF, -10.5, 3.40282e+38,
+ float("INF"), float("-INF"), float("NAN"),
+ float("INF")], result)
+
+ with self.assertRaisesOpError(_ERROR_MESSAGE + "10foobar"):
+ output.eval(feed_dict={input_string: ["10foobar"]})
+
+ def testToInt32(self):
+ with self.test_session():
+ input_string = tf.placeholder(tf.string)
+ output = tf.string_to_number(
+ input_string,
+ out_type=tf.int32)
+
+ result = output.eval(feed_dict={
+ input_string: ["0", "3", "-1", " -10", "-2147483648", "2147483647"]
+ })
+
+ self.assertAllEqual([0, 3, -1, -10, -2147483648, 2147483647], result)
+
+ with self.assertRaisesOpError(_ERROR_MESSAGE + "2.9"):
+ output.eval(feed_dict={input_string: ["2.9"]})
+
+ # The next two exceed maximum value of int32.
+ for in_string in ["-2147483649", "2147483648"]:
+ with self.assertRaisesOpError(_ERROR_MESSAGE + in_string):
+ output.eval(feed_dict={input_string: [in_string]})
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/summary_image_op_test.py b/tensorflow/python/kernel_tests/summary_image_op_test.py
new file mode 100644
index 0000000000..dfdb2c8938
--- /dev/null
+++ b/tensorflow/python/kernel_tests/summary_image_op_test.py
@@ -0,0 +1,63 @@
+"""Tests for summary image op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.ops import image_ops
+
+
+class SummaryImageOpTest(tf.test.TestCase):
+
+ def _AsSummary(self, s):
+ summ = tf.Summary()
+ summ.ParseFromString(s)
+ return summ
+
+ def testImageSummary(self):
+ np.random.seed(7)
+ with self.test_session() as sess:
+ for depth in 1, 3, 4:
+ shape = (4, 5, 7) + (depth,)
+ bad_color = [255, 0, 0, 255][:depth]
+ for positive in False, True:
+ # Build a mostly random image with one nan
+ const = np.random.randn(*shape)
+ const[0, 1, 2] = 0 # Make the nan entry not the max
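+ # Mirror the per-image normalization the summary op is expected to
+ # apply: positive images are scaled onto [0, 255], signed images are
+ # scaled by 127 / max|x| and shifted to center on 128.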
+ if positive:
+ const = 1 + np.maximum(const, 0)
+ scale = 255 / const.reshape(4, -1).max(axis=1)
+ offset = 0
+ else:
+ scale = 127 / np.abs(const.reshape(4, -1)).max(axis=1)
+ offset = 128
+ adjusted = np.floor(scale[:, None, None, None] * const + offset)
+ const[0, 1, 2, depth / 2] = np.nan
+
+ # Summarize
+ summ = tf.image_summary("img", const)
+ value = sess.run(summ)
+ self.assertEqual([], summ.get_shape())
+ image_summ = self._AsSummary(value)
+
+ # Decode the first image and check consistency
+ image = image_ops.decode_png(
+ image_summ.value[0].image.encoded_image_string).eval()
+ self.assertAllEqual(image[1, 2], bad_color)
+ image[1, 2] = adjusted[0, 1, 2]
+ self.assertAllClose(image, adjusted[0])
+
+ # Check the rest of the proto
+ # Only the first 3 images are returned.
+ for v in image_summ.value:
+ v.image.ClearField("encoded_image_string")
+ expected = '\n'.join("""
+ value {
+ tag: "img/image/%d"
+ image { height: %d width: %d colorspace: %d }
+ }""" % ((i,) + shape[1:]) for i in xrange(3))
+ self.assertProtoEquals(expected, image_summ)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/summary_ops_test.py b/tensorflow/python/kernel_tests/summary_ops_test.py
new file mode 100644
index 0000000000..13e5021ccc
--- /dev/null
+++ b/tensorflow/python/kernel_tests/summary_ops_test.py
@@ -0,0 +1,83 @@
+"""Tests for summary ops."""
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+
+class SummaryOpsTest(tf.test.TestCase):
+
+ def _AsSummary(self, s):
+ summ = tf.Summary()
+ summ.ParseFromString(s)
+ return summ
+
+ def testScalarSummary(self):
+ with self.test_session() as sess:
+ const = tf.constant([10.0, 20.0])
+ summ = tf.scalar_summary(["c1", "c2"], const, name="mysumm")
+ value = sess.run(summ)
+ self.assertEqual([], summ.get_shape())
+ self.assertProtoEquals("""
+ value { tag: "c1" simple_value: 10.0 }
+ value { tag: "c2" simple_value: 20.0 }
+ """, self._AsSummary(value))
+
+ def testScalarSummaryDefaultName(self):
+ with self.test_session() as sess:
+ const = tf.constant([10.0, 20.0])
+ summ = tf.scalar_summary(["c1", "c2"], const)
+ value = sess.run(summ)
+ self.assertEqual([], summ.get_shape())
+ self.assertProtoEquals("""
+ value { tag: "c1" simple_value: 10.0 }
+ value { tag: "c2" simple_value: 20.0 }
+ """, self._AsSummary(value))
+
+ def testMergeSummary(self):
+ with self.test_session() as sess:
+ const = tf.constant(10.0)
+ summ1 = tf.histogram_summary("h", const, name="histo")
+ summ2 = tf.scalar_summary("c", const, name="summ")
+ merge = tf.merge_summary([summ1, summ2])
+ value = sess.run(merge)
+ self.assertEqual([], merge.get_shape())
+ self.assertProtoEquals("""
+ value {
+ tag: "h"
+ histo {
+ min: 10.0
+ max: 10.0
+ num: 1.0
+ sum: 10.0
+ sum_squares: 100.0
+ bucket_limit: 9.93809490288
+ bucket_limit: 10.9319043932
+ bucket_limit: 1.79769313486e+308
+ bucket: 0.0
+ bucket: 1.0
+ bucket: 0.0
+ }
+ }
+ value { tag: "c" simple_value: 10.0 }
+ """, self._AsSummary(value))
+
+ def testMergeAllSummaries(self):
+ with tf.Graph().as_default():
+ const = tf.constant(10.0)
+ summ1 = tf.histogram_summary("h", const, name="histo")
+ summ2 = tf.scalar_summary("o", const, name="oops",
+ collections=["foo_key"])
+ summ3 = tf.scalar_summary("c", const, name="summ")
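+ # merge_all_summaries() only collects summaries from the default
+ # collection, so summ2 (stored under "foo_key") is expected to be
+ # excluded from the first merge.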
+ merge = tf.merge_all_summaries()
+ self.assertEqual("MergeSummary", merge.op.type)
+ self.assertEqual(2, len(merge.op.inputs))
+ self.assertEqual(summ1, merge.op.inputs[0])
+ self.assertEqual(summ3, merge.op.inputs[1])
+ merge = tf.merge_all_summaries("foo_key")
+ self.assertEqual("MergeSummary", merge.op.type)
+ self.assertEqual(1, len(merge.op.inputs))
+ self.assertEqual(summ2, merge.op.inputs[0])
+ self.assertTrue(tf.merge_all_summaries("bar_key") is None)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/topk_op_test.py b/tensorflow/python/kernel_tests/topk_op_test.py
new file mode 100644
index 0000000000..497dc9ac1e
--- /dev/null
+++ b/tensorflow/python/kernel_tests/topk_op_test.py
@@ -0,0 +1,52 @@
+"""Tests for TopK op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class TopKTest(tf.test.TestCase):
+
+ def _validateTopK(self, inputs, k, expected_values, expected_indices):
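+ # top_k returns the k largest entries of each row together with their
+ # column indices; check both against the expected arrays.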
+ np_values = np.array(expected_values)
+ np_indices = np.array(expected_indices)
+ with self.test_session():
+ values_op, indices_op = tf.nn.top_k(inputs, k)
+ values = values_op.eval()
+ indices = indices_op.eval()
+ self.assertAllClose(np_values, values)
+ self.assertAllEqual(np_indices, indices)
+ self.assertShapeEqual(np_values, values_op)
+ self.assertShapeEqual(np_indices, indices_op)
+
+ def testTop1(self):
+ inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
+ self._validateTopK(inputs, 1,
+ [[0.4], [0.3]],
+ [[3], [1]])
+
+ def testTop2(self):
+ inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
+ self._validateTopK(inputs, 2,
+ [[0.4, 0.3], [0.3, 0.3]],
+ [[3, 1], [1, 2]])
+
+ def testTopAll(self):
+ inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
+ self._validateTopK(inputs, 4,
+ [[0.4, 0.3, 0.2, 0.1], [0.3, 0.3, 0.2, 0.1]],
+ [[3, 1, 2, 0], [1, 2, 3, 0]])
+
+ def testKNegative(self):
+ inputs = [[0.1, 0.2], [0.3, 0.4]]
+ with self.assertRaisesRegexp(ValueError, "less than minimum 1"):
+ tf.nn.top_k(inputs, -1)
+
+ def testKTooLarge(self):
+ inputs = [[0.1, 0.2], [0.3, 0.4]]
+ with self.assertRaisesRegexp(ValueError, "input must have at least k"):
+ tf.nn.top_k(inputs, 4)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/transpose_op_test.py b/tensorflow/python/kernel_tests/transpose_op_test.py
new file mode 100644
index 0000000000..2786eaf37b
--- /dev/null
+++ b/tensorflow/python/kernel_tests/transpose_op_test.py
@@ -0,0 +1,176 @@
+"""Functional tests for Transpose op."""
+import itertools
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests.gradient_checker import ComputeGradient
+
+
+class TransposeTest(tf.test.TestCase):
+
+ def _np_transpose(self, x, perm):
+ ret = np.copy(x)
+ ret = ret.transpose(perm)
+ return ret
+
+ def _compareCpu(self, x, p):
+ np_ans = self._np_transpose(x, p)
+ with self.test_session(use_gpu=False):
+ inx = tf.convert_to_tensor(x)
+ y = tf.transpose(inx, p)
+ tf_ans = y.eval()
+ self.assertAllEqual(np_ans, tf_ans)
+ self.assertShapeEqual(np_ans, y)
+
+ jacob_t = None
+ # Gradient check on CPU.
+ xs = list(np.shape(x))
+ ys = list(np.shape(tf_ans))
+ if x.dtype == np.float32:
+ jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
+ self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
+ elif x.dtype == np.float64:
+ jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
+ self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
+
+ return tf_ans, jacob_t
+
+ def _compareGpu(self, x, p):
+ np_ans = self._np_transpose(x, p)
+ with self.test_session(use_gpu=True):
+ inx = tf.convert_to_tensor(x)
+ y = tf.transpose(inx, p)
+ tf_ans = y.eval()
+ self.assertAllEqual(np_ans, tf_ans)
+ self.assertShapeEqual(np_ans, y)
+
+ jacob_t = None
+ # Gradient check on GPU.
+ xs = list(np.shape(x))
+ ys = list(np.shape(tf_ans))
+ if x.dtype == np.float32:
+ jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
+ self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
+ elif x.dtype == np.float64:
+ jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
+ self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
+
+ return tf_ans, jacob_t
+
+ def _compare(self, x, use_gpu=False):
+ n = np.ndim(x)
+ # generate all permutations of [0, 1, ... n-1] in random order.
+ all_perm = np.random.permutation(
+ [p for p in itertools.permutations(range(n))]).astype(np.int32)
+ for p in all_perm[0:2]:
+ self._compareCpu(x, p)
+ if use_gpu:
+ self._compareGpu(x, p)
+
+ def _compare_cpu_gpu(self, x):
+ n = np.ndim(x)
+ # generate all permutations of [0, 1, ... n-1] in random order.
+ all_perm = np.random.permutation(
+ [p for p in itertools.permutations(range(n))]).astype(np.int32)
+ for p in all_perm[0:2]:
+ tf_a_cpu, tf_g_cpu = self._compareCpu(x, p)
+ tf_a_gpu, tf_g_gpu = self._compareGpu(x, p)
+ assert tf_g_cpu is not None
+ assert tf_g_gpu is not None
+ if x.dtype == np.float32:
+ self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-3, 1e-3)
+ self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-3, 1e-3)
+ elif x.dtype == np.float64:
+ self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-6, 1e-6)
+ self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-6, 1e-6)
+
+ def _testCpu(self, x):
+ self._compare(x, use_gpu=False)
+
+ def test1D(self):
+ self._compareCpu(np.arange(0., 2), [0])
+
+ def testNop(self):
+ self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])
+
+ def testSimple(self):
+ self._compareCpu(np.arange(0, 8).reshape([2, 4]).astype(np.float32),
+ np.array([1, 0]).astype(np.int32))
+
+ def testFloat(self):
+ self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float32))
+ self._compare_cpu_gpu(
+ np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32))
+
+ def testDouble(self):
+ self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float64))
+ self._compare_cpu_gpu(
+ np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float64))
+
+ def testSComplex(self):
+ self._testCpu(np.complex(1, 2) * np.arange(0, 21).reshape(
+ [3, 7]).astype(np.complex64))
+ self._testCpu(np.complex(1, 2) * np.arange(0, 210).reshape(
+ [2, 3, 5, 7]).astype(np.complex64))
+
+ def testInt8(self):
+ self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
+ self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))
+
+ def testInt16(self):
+ self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
+ self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))
+
+ def testInt32(self):
+ self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
+ self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))
+
+ def testInt64(self):
+ self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
+ self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))
+
+ def testTranspose2DAuto(self):
+ x_np = [[1, 2, 3], [4, 5, 6]]
+ for use_gpu in [False, True]:
+ with self.test_session(use_gpu=use_gpu):
+ x_tf = tf.transpose(x_np).eval()
+ self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])
+
+ def testTransposeShapes(self):
+ self.assertEqual([], tf.transpose(
+ tf.placeholder(tf.int32, shape=[])).get_shape().dims)
+ self.assertEqual([100], tf.transpose(
+ tf.placeholder(tf.int32, shape=[100])).get_shape().dims)
+ self.assertEqual([37, 100], tf.transpose(
+ tf.placeholder(tf.int32, shape=[100, 37])).get_shape().dims)
+ self.assertEqual([100, 37], tf.transpose(
+ tf.placeholder(tf.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
+ self.assertEqual([15, 37, 100], tf.transpose(
+ tf.placeholder(tf.int32, shape=[100, 37, 15])).get_shape().dims)
+ self.assertEqual([15, 100, 37], tf.transpose(
+ tf.placeholder(tf.int32,
+ shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
+ self.assertEqual(tf.TensorShape(None), tf.transpose(
+ tf.placeholder(tf.int32)).get_shape())
+
+ def _testError(self, x, p, err):
+ with self.test_session():
+ with self.assertRaisesOpError(err):
+ tf.transpose(x, p).eval()
+
+ def testError(self):
+ with self.assertRaises(ValueError):
+ tf.transpose(np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
+ self._testError(np.arange(0., 2 ** 10).reshape([2] * 10),
+ range(10),
+ "not implemented")
+ with self.assertRaises(IndexError):
+ tf.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
+ self._testError(np.arange(0., 30).reshape([2, 3, 5]),
+ [0, 1, 1],
+ "2 is missing")
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/unique_op_test.py b/tensorflow/python/kernel_tests/unique_op_test.py
new file mode 100644
index 0000000000..4d6543a206
--- /dev/null
+++ b/tensorflow/python/kernel_tests/unique_op_test.py
@@ -0,0 +1,22 @@
+"""Tests for tensorflow.kernels.unique_op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class UniqueTest(tf.test.TestCase):
+
+ def testInt32(self):
+ x = list(np.random.randint(2, high=10, size=7000))
+ with self.test_session() as sess:
+ y, idx = tf.unique(x)
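+ # y holds the distinct values of x and idx maps each element of x to
+ # its position in y, so x[i] == y[idx[i]] for every i.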
+ tf_y, tf_idx = sess.run([y, idx])
+
+ self.assertEqual(len(x), len(tf_idx))
+ self.assertEqual(len(tf_y), len(np.unique(x)))
+ for i in range(len(x)):
+ self.assertEqual(x[i], tf_y[tf_idx[i]])
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/unpack_op_test.py b/tensorflow/python/kernel_tests/unpack_op_test.py
new file mode 100644
index 0000000000..4929af035f
--- /dev/null
+++ b/tensorflow/python/kernel_tests/unpack_op_test.py
@@ -0,0 +1,56 @@
+"""Functional tests for Unpack Op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker
+
+
+class UnpackOpTest(tf.test.TestCase):
+
+ def testSimple(self):
+ np.random.seed(7)
+ for use_gpu in False, True:
+ with self.test_session(use_gpu=use_gpu):
+ for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+ data = np.random.randn(*shape)
+ # Convert data to a single tensorflow tensor
+ x = tf.constant(data)
+ # Unpack into a list of tensors
+ cs = tf.unpack(x, num=shape[0])
+ self.assertEqual(type(cs), list)
+ self.assertEqual(len(cs), shape[0])
+ cs = [c.eval() for c in cs]
+ self.assertAllEqual(cs, data)
+
+ def testGradients(self):
+ for use_gpu in False, True:
+ for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+ data = np.random.randn(*shape)
+ shapes = [shape[1:]] * shape[0]
+ for i in xrange(shape[0]):
+ with self.test_session(use_gpu=use_gpu):
+ x = tf.constant(data)
+ cs = tf.unpack(x, num=shape[0])
+ err = gradient_checker.ComputeGradientError(x, shape, cs[i],
+ shapes[i])
+ self.assertLess(err, 1e-6)
+
+ def testInferNum(self):
+ with self.test_session():
+ for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+ x = tf.placeholder(np.float32, shape=shape)
+ cs = tf.unpack(x)
+ self.assertEqual(type(cs), list)
+ self.assertEqual(len(cs), shape[0])
+
+ def testCannotInferNum(self):
+ x = tf.placeholder(np.float32)
+ with self.assertRaisesRegexp(
+ ValueError, r'Cannot infer num from shape TensorShape\(None\)'):
+ tf.unpack(x)
+
+
+if __name__ == '__main__':
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/variable_ops_test.py b/tensorflow/python/kernel_tests/variable_ops_test.py
new file mode 100644
index 0000000000..aaa4237260
--- /dev/null
+++ b/tensorflow/python/kernel_tests/variable_ops_test.py
@@ -0,0 +1,225 @@
+"""Tests for tensorflow.ops.tf.variable_op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.framework import errors
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import gen_state_ops
+from tensorflow.python.ops import state_ops
+
+
+_NP_TO_TF = {
+ np.float32: tf.float32,
+ np.float64: tf.float64,
+ np.int32: tf.int32,
+ np.int64: tf.int64,
+}
+
+
+class VariableOpTest(tf.test.TestCase):
+
+ def _initFetch(self, x, tftype, use_gpu=None):
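+ # Creates a variable op with x's shape, assigns x to it, and returns
+ # the fetched value.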
+ with self.test_session(use_gpu=use_gpu):
+ p = state_ops.variable_op(x.shape, tftype)
+ op = tf.assign(p, x)
+ op.op.run()
+ return p.eval()
+
+ def _testTypes(self, vals):
+ for dtype in [np.float32, np.float64, np.int32, np.int64]:
+ self.setUp()
+ x = vals.astype(dtype)
+ tftype = _NP_TO_TF[dtype]
+ self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
+ # NOTE(mdevin): the GPU test should pass for all types, whether or not
+ # the Variable op has a GPU implementation for that type, since we
+ # expect Variable and Assign to have GPU implementations for all
+ # matching tf types.
+ self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
+
+ def testBasic(self):
+ self._testTypes(np.arange(0, 20).reshape([4, 5]))
+
+ def testSetShape(self):
+ p = state_ops.variable_op([1, 2], tf.float32)
+ self.assertEqual([1, 2], p.get_shape())
+ p = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
+ self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
+
+ def testAssign(self):
+ value = np.array([[42.0, 43.0]])
+ var = state_ops.variable_op(value.shape, tf.float32)
+ self.assertShapeEqual(value, var)
+ assigned = tf.assign(var, value)
+ self.assertShapeEqual(value, assigned)
+
+ def testAssignNoValidateShape(self):
+ value = np.array([[42.0, 43.0]])
+ var = state_ops.variable_op(value.shape, tf.float32)
+ self.assertShapeEqual(value, var)
+ assigned = tf.assign(var, value, validate_shape=False)
+ self.assertShapeEqual(value, assigned)
+
+ def testAssignNoVarShape(self):
+ value = np.array([[42.0, 43.0]])
+ var = state_ops.variable_op(value.shape, tf.float32, set_shape=False)
+ self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
+ assigned = tf.assign(var, value)
+ self.assertShapeEqual(value, assigned)
+
+ def testAssignNoVarShapeNoValidateShape(self):
+ value = np.array([[42.0, 43.0]])
+ var = state_ops.variable_op(value.shape, tf.float32, set_shape=False)
+ self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
+ assigned = tf.assign(var, value, validate_shape=False)
+ self.assertShapeEqual(value, assigned)
+
+ def _NewShapelessTensor(self):
+ tensor = tf.placeholder(tf.float32)
+ self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
+ return tensor
+
+ def testAssignNoValueShape(self):
+ value = self._NewShapelessTensor()
+ shape = [1, 2]
+ var = state_ops.variable_op(shape, tf.float32)
+ assigned = tf.assign(var, value)
+ self.assertEqual(shape, var.get_shape())
+ self.assertEqual(shape, assigned.get_shape())
+
+ def testAssignNoValueShapeNoValidateShape(self):
+ value = self._NewShapelessTensor()
+ shape = [1, 2]
+ var = state_ops.variable_op(shape, tf.float32)
+ self.assertEqual(shape, var.get_shape())
+ assigned = tf.assign(var, value, validate_shape=False)
+ self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
+
+ def testAssignNoShape(self):
+ with self.test_session():
+ value = self._NewShapelessTensor()
+ var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
+ self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
+ self.assertEqual(tensor_shape.unknown_shape(),
+ tf.assign(var, value).get_shape())
+
+ def testAssignNoShapeNoValidateShape(self):
+ with self.test_session():
+ value = self._NewShapelessTensor()
+ var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
+ self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
+ self.assertEqual(tensor_shape.unknown_shape(),
+ tf.assign(var, value, validate_shape=False).get_shape())
+
+ def testAssignUpdate(self):
+ var = state_ops.variable_op([1, 2], tf.float32)
+ added = tf.assign_add(var, [[2.0, 3.0]])
+ self.assertEqual([1, 2], added.get_shape())
+ subbed = tf.assign_sub(var, [[12.0, 13.0]])
+ self.assertEqual([1, 2], subbed.get_shape())
+
+ def testAssignUpdateNoVarShape(self):
+ var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
+ added = tf.assign_add(var, [[2.0, 3.0]])
+ self.assertEqual([1, 2], added.get_shape())
+ subbed = tf.assign_sub(var, [[12.0, 13.0]])
+ self.assertEqual([1, 2], subbed.get_shape())
+
+ def testAssignUpdateNoValueShape(self):
+ var = state_ops.variable_op([1, 2], tf.float32)
+ added = tf.assign_add(var, self._NewShapelessTensor())
+ self.assertEqual([1, 2], added.get_shape())
+ subbed = tf.assign_sub(var, self._NewShapelessTensor())
+ self.assertEqual([1, 2], subbed.get_shape())
+
+ def testAssignUpdateNoShape(self):
+ var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
+ added = tf.assign_add(var, self._NewShapelessTensor())
+ self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
+ subbed = tf.assign_sub(var, self._NewShapelessTensor())
+ self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
+
+ def testTemporaryVariable(self):
+ with self.test_session(use_gpu=True):
+ var = gen_state_ops._temporary_variable(
+ [1, 2],
+ tf.float32,
+ var_name="foo")
+ var = tf.assign(var, [[4.0, 5.0]])
+ var = tf.assign_add(var, [[6.0, 7.0]])
+ final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
+ self.assertAllClose([[10.0, 12.0]], final.eval())
+
+ def testDestroyNonexistentTemporaryVariable(self):
+ with self.test_session(use_gpu=True):
+ var = gen_state_ops._temporary_variable([1, 2], tf.float32)
+ final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
+ with self.assertRaises(errors.NotFoundError):
+ final.eval()
+
+ def testDuplicateTemporaryVariable(self):
+ with self.test_session(use_gpu=True):
+ var1 = gen_state_ops._temporary_variable(
+ [1, 2],
+ tf.float32,
+ var_name="dup")
+ var1 = tf.assign(var1, [[1.0, 2.0]])
+ var2 = gen_state_ops._temporary_variable(
+ [1, 2],
+ tf.float32,
+ var_name="dup")
+ var2 = tf.assign(var2, [[3.0, 4.0]])
+ final = var1 + var2
+ with self.assertRaises(errors.AlreadyExistsError):
+ final.eval()
+
+ def testDestroyTemporaryVariableTwice(self):
+ with self.test_session(use_gpu=True):
+ var = gen_state_ops._temporary_variable([1, 2], tf.float32)
+ val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
+ val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
+ final = val1 + val2
+ with self.assertRaises(errors.NotFoundError):
+ final.eval()
+
+ def testTemporaryVariableNoLeak(self):
+ with self.test_session(use_gpu=True):
+ var = gen_state_ops._temporary_variable(
+ [1, 2],
+ tf.float32,
+ var_name="bar")
+ final = tf.identity(var)
+ final.eval()
+
+ def testTwoTemporaryVariablesNoLeaks(self):
+ with self.test_session(use_gpu=True):
+ var1 = gen_state_ops._temporary_variable(
+ [1, 2],
+ tf.float32,
+ var_name="var1")
+ var2 = gen_state_ops._temporary_variable(
+ [1, 2],
+ tf.float32,
+ var_name="var2")
+ final = var1 + var2
+ final.eval()
+
+ def testAssignDependencyAcrossDevices(self):
+ with self.test_session(use_gpu=True):
+ # The variable and an op to increment it are on the GPU.
+ var = state_ops.variable_op([1], tf.float32)
+ tf.assign(var, [1.0]).eval()
+ increment = tf.assign_add(var, [1.0])
+ with tf.control_dependencies([increment]):
+ with tf.device("/cpu:0"):
+ # This mul op is pinned to the CPU, but reads the variable from the
+ # GPU. The test ensures that the dependency on 'increment' is still
+ # honored, i.e., the Send and Recv from GPU to CPU should take place
+ # only after the increment.
+ result = tf.mul(var, var)
+ self.assertAllClose([4.0], result.eval())
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/variable_scope_test.py b/tensorflow/python/kernel_tests/variable_scope_test.py
new file mode 100644
index 0000000000..bb538198ea
--- /dev/null
+++ b/tensorflow/python/kernel_tests/variable_scope_test.py
@@ -0,0 +1,160 @@
+"""Tests for variable store."""
+import tensorflow.python.platform
+
+import tensorflow as tf
+
+from tensorflow.python.ops import variable_scope
+
+
+class VariableStoreTest(tf.test.TestCase):
+
+ def testGetVar(self):
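+ # Requesting an existing name with the same shape returns the same
+ # variable object.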
+ vs = variable_scope._get_default_variable_store()
+ v = vs.get_variable("v", [1])
+ v1 = vs.get_variable("v", [1])
+ assert v == v1
+
+ def testNameExists(self):
+ vs = variable_scope._get_default_variable_store()
+ # No check by default, so we can both create and get existing names.
+ v = vs.get_variable("v", [1])
+ v1 = vs.get_variable("v", [1])
+ assert v == v1
+ # When reuse is False, we fail when variables are already there.
+ vs.get_variable("w", [1], reuse=False) # That's ok.
+ with self.assertRaises(ValueError):
+ vs.get_variable("v", [1], reuse=False) # That fails.
+ # When reuse is True, we fail when variables are new.
+ vs.get_variable("v", [1], reuse=True) # That's ok.
+ with self.assertRaises(ValueError):
+ vs.get_variable("u", [1], reuse=True) # That fails.
+
+ def testNamelessStore(self):
+ vs = variable_scope._get_default_variable_store()
+ vs.get_variable("v1", [2])
+ vs.get_variable("v2", [2])
+ expected_names = ["%s:0" % name for name in ["v1", "v2"]]
+ self.assertEqual(set(expected_names),
+ set([v.name for v in vs._vars.values()]))
+
+ def testVarScopeInitializer(self):
+ with self.test_session() as sess:
+ init = tf.constant_initializer(0.3)
+ with variable_scope.variable_scope("tower") as tower:
+ with variable_scope.variable_scope("foo", initializer=init):
+ v = variable_scope.get_variable("v", [])
+ sess.run(tf.initialize_variables([v]))
+ self.assertAllClose(v.eval(), 0.3)
+ with variable_scope.variable_scope(tower, initializer=init):
+ w = variable_scope.get_variable("w", [])
+ sess.run(tf.initialize_variables([w]))
+ self.assertAllClose(w.eval(), 0.3)
+
+ def testGetVariableScope(self):
+ # Test the get_variable_scope() function and setting properties of result.
+ with self.test_session() as sess:
+ init = tf.constant_initializer(0.3)
+ with variable_scope.variable_scope("foo"):
+ new_init1 = variable_scope.get_variable_scope().initializer
+ self.assertEqual(new_init1, None)
+ # Check that we can set initializer like this.
+ variable_scope.get_variable_scope().set_initializer(init)
+ v = variable_scope.get_variable("v", [])
+ sess.run(tf.initialize_variables([v]))
+ self.assertAllClose(v.eval(), 0.3)
+ # Check that we can set reuse.
+ variable_scope.get_variable_scope().reuse_variables()
+ with self.assertRaises(ValueError): # Fail, w does not exist yet.
+ variable_scope.get_variable("w", [1])
+ # Check that the set initializer goes away.
+ new_init = variable_scope.get_variable_scope().initializer
+ self.assertEqual(new_init, None)
+
+ def testVarScope(self):
+ with self.test_session():
+ with variable_scope.variable_scope("tower") as tower:
+ self.assertEqual(tower.name, "tower")
+ with tf.name_scope("scope") as sc:
+ self.assertEqual(sc, "tower/scope/")
+
+ with variable_scope.variable_scope("foo"):
+ with variable_scope.variable_scope("bar") as bar:
+ self.assertEqual(bar.name, "foo/bar")
+ with tf.name_scope("scope") as sc:
+ self.assertEqual(sc, "foo/bar/scope/")
+
+ with variable_scope.variable_scope("foo"):
+ with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
+ self.assertEqual(tower_shared.name, "tower")
+ with tf.name_scope("scope") as sc:
+ self.assertEqual(sc, "foo_1/scope/")
+
+ def testVarScopeNameScope(self):
+ with self.test_session():
+ with tf.name_scope("scope1"):
+ with variable_scope.variable_scope("tower") as tower:
+ with tf.name_scope("scope2") as sc2:
+ self.assertEqual(sc2, "scope1/tower/scope2/")
+ with variable_scope.variable_scope("tower"): # Re-enter adds suffix.
+ with tf.name_scope("scope2") as sc2:
+ self.assertEqual(sc2, "scope1/tower_1/scope2/")
+
+ with tf.name_scope("scope3"):
+ with variable_scope.variable_scope("tower"):
+ with tf.name_scope("scope2") as sc2:
+ self.assertEqual(sc2, "scope3/tower/scope2/")
+ with variable_scope.variable_scope(tower):
+ with tf.name_scope("scope2") as sc2:
+ self.assertEqual(sc2, "scope3/scope2/")
+
+ def testVarScopeGetVar(self):
+ with self.test_session():
+ with variable_scope.variable_scope("root"):
+ with variable_scope.variable_scope("towerA") as tower_a:
+ va = variable_scope.get_variable("v", [1])
+ self.assertEqual(va.name, "root/towerA/v:0")
+
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ va2 = variable_scope.get_variable("v", [1])
+ self.assertEqual(va2, va)
+
+ with variable_scope.variable_scope("towerB"):
+ vb = variable_scope.get_variable("v", [1])
+ self.assertEqual(vb.name, "root/towerB/v:0")
+
+ with self.assertRaises(ValueError) as exc:
+ with variable_scope.variable_scope("towerA"):
+ va2 = variable_scope.get_variable("v", [1])
+ self.assertEqual(exc.exception.message[:12], "Over-sharing")
+
+ with variable_scope.variable_scope("towerA", reuse=True):
+ va2 = variable_scope.get_variable("v", [1])
+ self.assertEqual(va2, va)
+
+ with variable_scope.variable_scope("foo"):
+ with variable_scope.variable_scope("bar"):
+ v = variable_scope.get_variable("v", [1])
+ self.assertEqual(v.name, "root/foo/bar/v:0")
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ va3 = variable_scope.get_variable("v", [1])
+ self.assertEqual(va, va3)
+
+ with self.assertRaises(ValueError) as exc:
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ with variable_scope.variable_scope("baz"):
+ variable_scope.get_variable("v", [1])
+ self.assertEqual(exc.exception.message[:13], "Under-sharing")
+
+ with self.assertRaises(ValueError) as exc:
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ variable_scope.get_variable("v", [2]) # Different shape.
+ self.assertEqual("shape" in exc.exception.message, True)
+
+ with self.assertRaises(ValueError) as exc:
+ with variable_scope.variable_scope(tower_a, reuse=True):
+ variable_scope.get_variable("v", [1], dtype=tf.int32)
+ self.assertEqual("dtype" in exc.exception.message, True)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/variables_test.py b/tensorflow/python/kernel_tests/variables_test.py
new file mode 100644
index 0000000000..f2a7ea0af8
--- /dev/null
+++ b/tensorflow/python/kernel_tests/variables_test.py
@@ -0,0 +1,242 @@
+"""Tests for tf.py."""
+import operator
+
+import tensorflow.python.platform
+
+import numpy as np
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+from tensorflow.python.ops import random_ops
+
+
+class VariablesTestCase(tf.test.TestCase):
+
+ def testInitialization(self):
+ with self.test_session():
+ var0 = tf.Variable(0.0)
+ self.assertEqual("Variable:0", var0.name)
+ self.assertEqual([], var0.get_shape())
+
+ var1 = tf.Variable(1.1)
+ self.assertEqual("Variable_1:0", var1.name)
+ self.assertEqual([], var1.get_shape())
+
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ var0.eval()
+
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ var1.eval()
+
+ tf.initialize_all_variables().run()
+
+ self.assertAllClose(0.0, var0.eval())
+ self.assertAllClose(1.1, var1.eval())
+
+ def testInitializationOrder(self):
+ with self.test_session():
+ rnd = tf.Variable(random_ops.random_uniform([3, 6]), name="rnd")
+ self.assertEqual("rnd:0", rnd.name)
+ self.assertEqual([3, 6], rnd.get_shape())
+
+ dep = tf.Variable(rnd.initialized_value(), name="dep")
+ self.assertEqual("dep:0", dep.name)
+ self.assertEqual([3, 6], dep.get_shape())
+
+ # Currently have to set the shape manually for Add.
+ added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
+ added_val.set_shape(rnd.get_shape())
+
+ depdep = tf.Variable(added_val, name="depdep")
+ self.assertEqual("depdep:0", depdep.name)
+ self.assertEqual([3, 6], depdep.get_shape())
+
+ tf.initialize_all_variables().run()
+
+ self.assertAllClose(rnd.eval(), dep.eval())
+ self.assertAllClose(rnd.eval() + dep.eval() + 2.0,
+ depdep.eval())
+
+ def testAssignments(self):
+ with self.test_session():
+ var = tf.Variable(0.0)
+ plus_one = var.assign_add(1.0)
+ minus_one = var.assign_sub(2.0)
+ four = var.assign(4.0)
+ tf.initialize_all_variables().run()
+ self.assertAllClose(0.0, var.eval())
+
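+ # Each eval() below runs the corresponding assign op as a side effect,
+ # so the order of these assertions matters.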
+ self.assertAllClose(1.0, plus_one.eval())
+ self.assertAllClose(1.0, var.eval())
+
+ self.assertAllClose(-1.0, minus_one.eval())
+ self.assertAllClose(-1.0, var.eval())
+
+ self.assertAllClose(4.0, four.eval())
+ self.assertAllClose(4.0, var.eval())
+
+ def _countUpToTest(self, dtype):
+ with self.test_session():
+ zero = tf.constant(0, dtype=dtype)
+ var = tf.Variable(zero)
+ count_up_to = var.count_up_to(3)
+
+ tf.initialize_all_variables().run()
+ self.assertEqual(0, var.eval())
+
+ self.assertEqual(0, count_up_to.eval())
+ self.assertEqual(1, var.eval())
+
+ self.assertEqual(1, count_up_to.eval())
+ self.assertEqual(2, var.eval())
+
+ self.assertEqual(2, count_up_to.eval())
+ self.assertEqual(3, var.eval())
+
+ with self.assertRaisesOpError("Reached limit of 3"):
+ count_up_to.eval()
+ self.assertEqual(3, var.eval())
+
+ with self.assertRaisesOpError("Reached limit of 3"):
+ count_up_to.eval()
+ self.assertEqual(3, var.eval())
+
+ def testCountUpToInt32(self):
+ self._countUpToTest(tf.int32)
+
+ def testCountUpToInt64(self):
+ self._countUpToTest(tf.int64)
+
+ def testUseVariableAsTensor(self):
+ with self.test_session():
+ var_x = tf.Variable(2.0)
+ var_y = tf.Variable(3.0)
+ tf.initialize_all_variables().run()
+ self.assertAllClose(2.0, var_x.eval())
+ self.assertAllClose(3.0, var_y.eval())
+ self.assertAllClose(5.0, tf.add(var_x, var_y).eval())
+
+ def testCollections(self):
+ with self.test_session():
+ var_x = tf.Variable(2.0)
+ var_y = tf.Variable(2.0, trainable=False)
+ var_z = tf.Variable(2.0, trainable=True)
+ var_t = tf.Variable(
+ 2.0, trainable=True,
+ collections=[tf.GraphKeys.TRAINABLE_VARIABLES,
+ tf.GraphKeys.VARIABLES])
+ self.assertEqual([var_x, var_y, var_z, var_t], tf.all_variables())
+ self.assertEqual([var_x, var_z, var_t], tf.trainable_variables())
+
+ def testOperators(self):
+ with self.test_session():
+ var_f = tf.Variable([2.0])
+ add = var_f + 0.0
+ radd = 1.0 + var_f
+ sub = var_f - 1.0
+ rsub = 1.0 - var_f
+ mul = var_f * 10.0
+ rmul = 10.0 * var_f
+ div = var_f / 10.0
+ rdiv = 10.0 / var_f
+ lt = var_f < 3.0
+ rlt = 3.0 < var_f
+ le = var_f <= 2.0
+ rle = 2.0 <= var_f
+ gt = var_f > 3.0
+ rgt = 3.0 > var_f
+ ge = var_f >= 2.0
+ rge = 2.0 >= var_f
+ neg = -var_f
+ abs_v = abs(var_f)
+
+ var_i = tf.Variable([20])
+ mod = var_i % 7
+ rmod = 103 % var_i
+
+ var_b = tf.Variable([True, False])
+ and_v = operator.and_(var_b, [True, True])
+ or_v = operator.or_(var_b, [False, True])
+ xor_v = operator.xor(var_b, [False, False])
+ invert_v = ~var_b
+
+ rnd = np.random.rand(4, 4).astype("f")
+ var_t = tf.Variable(rnd)
+ slice_v = var_t[2, 0:0]
+
+ tf.initialize_all_variables().run()
+ self.assertAllClose([2.0], add.eval())
+ self.assertAllClose([3.0], radd.eval())
+ self.assertAllClose([1.0], sub.eval())
+ self.assertAllClose([-1.0], rsub.eval())
+ self.assertAllClose([20.0], mul.eval())
+ self.assertAllClose([20.0], rmul.eval())
+ self.assertAllClose([0.2], div.eval())
+ self.assertAllClose([5.0], rdiv.eval())
+ self.assertAllClose([-2.0], neg.eval())
+ self.assertAllClose([2.0], abs_v.eval())
+ self.assertAllClose([True], lt.eval())
+ self.assertAllClose([False], rlt.eval())
+ self.assertAllClose([True], le.eval())
+ self.assertAllClose([True], rle.eval())
+ self.assertAllClose([False], gt.eval())
+ self.assertAllClose([True], rgt.eval())
+ self.assertAllClose([True], ge.eval())
+ self.assertAllClose([True], rge.eval())
+
+ self.assertAllClose([6], mod.eval())
+ self.assertAllClose([3], rmod.eval())
+
+ self.assertAllClose([True, False], and_v.eval())
+ self.assertAllClose([True, True], or_v.eval())
+ self.assertAllClose([True, False], xor_v.eval())
+ self.assertAllClose([False, True], invert_v.eval())
+
+ self.assertAllClose(rnd[2, 0:0], slice_v.eval())
+
+ def testSession(self):
+ with self.test_session() as sess:
+ var = tf.Variable([1, 12])
+ tf.initialize_all_variables().run()
+ self.assertAllClose([1, 12], sess.run(var))
+
+
+class IsInitializedTest(tf.test.TestCase):
+
+ def testNoVars(self):
+ with tf.Graph().as_default():
+ self.assertEqual(None, tf.assert_variables_initialized())
+
+ def testVariables(self):
+ with tf.Graph().as_default(), self.test_session() as sess:
+ v = tf.Variable([1, 2])
+ w = tf.Variable([3, 4])
+ _ = v, w
+ inited = tf.assert_variables_initialized()
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ sess.run(inited)
+ tf.initialize_all_variables().run()
+ sess.run(inited)
+
+ def testVariableList(self):
+ with tf.Graph().as_default(), self.test_session() as sess:
+ v = tf.Variable([1, 2])
+ w = tf.Variable([3, 4])
+ inited = tf.assert_variables_initialized([v])
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ inited.op.run()
+ sess.run(w.initializer)
+ with self.assertRaisesOpError("Attempting to use uninitialized value"):
+ inited.op.run()
+ v.initializer.run()
+ inited.op.run()
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/where_op_test.py b/tensorflow/python/kernel_tests/where_op_test.py
new file mode 100644
index 0000000000..263f98f622
--- /dev/null
+++ b/tensorflow/python/kernel_tests/where_op_test.py
@@ -0,0 +1,43 @@
+"""Tests for tensorflow.ops.reverse_sequence_op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+
+class WhereOpTest(tf.test.TestCase):
+
+ def _testWhere(self, x, truth, expected_err_re=None):
+ with self.test_session():
+ ans = tf.where(x)
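+ # tf.where returns one row of coordinates per True entry, in row-major
+ # order; the number of rows is unknown until evaluation.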
+ self.assertEqual([None, x.ndim], ans.get_shape().as_list())
+ if expected_err_re is None:
+ tf_ans = ans.eval()
+ self.assertAllClose(tf_ans, truth, atol=1e-10)
+ else:
+ with self.assertRaisesOpError(expected_err_re):
+ ans.eval()
+
+ def testBasicMat(self):
+ x = np.asarray([[True, False], [True, False]])
+
+ # Ensure RowMajor mode
+ truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
+
+ self._testWhere(x, truth)
+
+ def testBasic3Tensor(self):
+ x = np.asarray(
+ [[[True, False], [True, False]], [[False, True], [False, True]],
+ [[False, False], [False, True]]])
+
+ # Ensure RowMajor mode
+ truth = np.asarray(
+ [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]],
+ dtype=np.int64)
+
+ self._testWhere(x, truth)
+
+
+if __name__ == "__main__":
+ tf.test.main()
diff --git a/tensorflow/python/kernel_tests/xent_op_test.py b/tensorflow/python/kernel_tests/xent_op_test.py
new file mode 100644
index 0000000000..4e44472c0d
--- /dev/null
+++ b/tensorflow/python/kernel_tests/xent_op_test.py
@@ -0,0 +1,110 @@
+"""Tests for SoftmaxCrossEntropyWithLogits op."""
+import tensorflow.python.platform
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.kernel_tests import gradient_checker as gc
+
+
+class XentTest(tf.test.TestCase):
+
+ def _npXent(self, features, labels):
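+ # NumPy reference implementation: numerically stable softmax (shift by
+ # the row max), loss l = -sum(labels * log(probs)) per row, and its
+ # gradient bp = probs - labels.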
+ batch_dim = 0
+ class_dim = 1
+ batch_size = features.shape[batch_dim]
+ e = np.exp(features -
+ np.reshape(np.amax(features, axis=class_dim), [batch_size, 1]))
+ probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
+ bp = (probs - labels)
+ l = -np.sum(labels * np.log(probs + 1.0e-20), axis=1)
+ return l, bp
+
+ def _testXent(self, np_features, np_labels, use_gpu=False):
+ np_loss, np_backprop = self._npXent(np_features, np_labels)
+ with self.test_session(use_gpu=use_gpu) as sess:
+ loss = tf.nn.softmax_cross_entropy_with_logits(np_features, np_labels)
+ backprop = loss.op.outputs[1]
+ tf_loss, tf_backprop = sess.run([loss, backprop])
+ self.assertAllClose(np_loss, tf_loss)
+ self.assertAllClose(np_backprop, tf_backprop)
+
+ def _testAll(self, features, labels):
+ self._testXent(features, labels, use_gpu=False)
+ self._testXent(features, labels, use_gpu=True)
+
+ def testNpXent(self):
+ # We create 2 batches of logits for testing.
+ # batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
+ # batch 1 has a bit of difference: 1, 2, 3, 4, with soft targets (1, 2).
+ features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
+ labels = [[0., 0., 0., 1.], [0., .5, .5, 0.]]
+
+ # For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
+ # With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
+ # The loss for this batch is -log(0.25) = 1.386
+ #
+ # For batch 1, we have:
+ # exp(0) = 1
+ # exp(1) = 2.718
+ # exp(2) = 7.389
+ # exp(3) = 20.085
+ # SUM = 31.192
+ # So we have as probabilities:
+ # exp(0) / SUM = 0.032
+ # exp(1) / SUM = 0.087
+ # exp(2) / SUM = 0.237
+ # exp(3) / SUM = 0.644
+ # With a soft target (1, 2), the backprop is
+ # [0.032, 0.087 - 0.5 = -0.413, 0.237 - 0.5 = -0.263, 0.644]
+ # The loss for this batch is [0.5 * -log(0.087), 0.5 * -log(0.237)]
+ # = [1.3862, 1.9401]
+ np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
+ self.assertAllClose(np.array([[0.25, 0.25, 0.25, -0.75],
+ [0.0321, -0.4129, -0.2632, 0.6439]]),
+ np_backprop,
+ rtol=1.e-3, atol=1.e-3)
+ self.assertAllClose(np.array([1.3862, 1.9401]), np_loss,
+ rtol=1.e-3, atol=1.e-3)
+
+ def testShapeMismatch(self):
+ with self.test_session():
+ with self.assertRaises(ValueError):
+ tf.nn.softmax_cross_entropy_with_logits(
+ [[0., 1.], [2., 3.]], [[0., 1., 0.], [1., 0., 0.]])
+
+ def testNotMatrix(self):
+ with self.test_session():
+ with self.assertRaises(ValueError):
+ tf.nn.softmax_cross_entropy_with_logits([0., 1., 2., 3.],
+ [0., 1., 0., 1.])
+
+ def testFloat(self):
+ self._testAll(
+ np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
+ np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
+
+ def testDouble(self):
+ self._testXent(
+ np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
+ np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float64),
+ use_gpu=False)
+
+ def testGradient(self):
+ with self.test_session():
+ l = tf.constant([0.0, 0.0, 1.0, 0.0,
+ 1.0, 0.0, 0.0, 0.0,
+ 0.0, 0.5, 0.0, 0.5], shape=[3, 4],
+ dtype=tf.float64, name="l")
+ f = tf.constant([0.1, 0.2, 0.3, 0.4,
+ 0.1, 0.4, 0.9, 1.6,
+ 0.1, 0.8, 2.7, 6.4], shape=[3, 4],
+ dtype=tf.float64, name="f")
+ x = tf.nn.softmax_cross_entropy_with_logits(f, l, name="xent")
+ err = gc.ComputeGradientError(f, [3, 4], x, [3])
+ print "cross entropy gradient err = ", err
+ self.assertLess(err, 5e-8)
+
+
+if __name__ == "__main__":
+ tf.test.main()