author    Gunhan Gulsoy <gunan@google.com>    2018-10-09 14:38:55 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-10-09 14:49:32 -0700
commit    a6fcb9d3d81e9207650eda1c899051ccbb97dec7 (patch)
tree      ce203abc097918c2940a489dec2e3357fac9ceb9
parent    950cf87104bfee28e2165fe368f66337b8a1336d (diff)
Avoid creating sparse tensor objects before the library is initialized.
PiperOrigin-RevId: 216425002
-rw-r--r--  tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_test.py  482
1 file changed, 255 insertions(+), 227 deletions(-)
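
The diff below rewrites the @parameterized.named_parameters test cases so that they receive plain keyword-argument dicts instead of pre-built SparseTensorValue objects. Decorator arguments are evaluated at module import time, so constructing the sparse values there happens before the TensorFlow library is initialized; passing dicts and building the value inside the test body defers that work until the test actually runs. A minimal sketch of the pattern follows (the test class, test name, and assertion are illustrative only, not from the original file; the imports match those already used by the test module):

from absl.testing import parameterized
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import test


class ExampleSketchTest(test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(
      {'testcase_name': '2D',
       # Plain dict: nothing TensorFlow-specific is constructed at import time.
       'sparse_input_args': {
           'indices': ((0, 0), (1, 0), (1, 1)),
           'values': (2, 0, 1),
           'dense_shape': (2, 2)}},
  )
  def test_example(self, sparse_input_args):
    # The SparseTensorValue is built only when the test runs, after the
    # library has been initialized.
    sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
    self.assertAllEqual((2, 2), sparse_input.dense_shape)
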
diff --git a/tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_test.py b/tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_test.py
index 929e83523a..707f93b2da 100644
--- a/tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_test.py
+++ b/tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_test.py
@@ -39,18 +39,18 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'sparse_input_a': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
- indices=((0, 0), (1, 0), (1, 1)),
- values=(2, 0, 1),
- dense_shape=(2, 2)),
- 'sparse_input_b': sparse_tensor.SparseTensorValue(
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': (2, 0, 1),
+ 'dense_shape': (2, 2)},
+ 'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [2, 0]
- indices=((0, 0), (1, 0), (1, 1)),
- values=(1, 2, 0),
- dense_shape=(2, 2)),
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': (1, 2, 0),
+ 'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 14., 15., 16.], [0., 0., 0., 0., 0.]],
@@ -58,20 +58,20 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]],],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
- 'sparse_input_a': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
- indices=(
+ 'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
- values=(2, 0, 1, 0, 0, 1),
- dense_shape=(2, 2, 2)),
- 'sparse_input_b': sparse_tensor.SparseTensorValue(
+ 'values': (2, 0, 1, 0, 0, 1),
+ 'dense_shape': (2, 2, 2)},
+ 'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[2], [0]]
- indices=((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
- values=(1, 1, 1, 2, 0),
- dense_shape=(2, 2, 2)),
+ 'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
+ 'values': (1, 1, 1, 2, 0),
+ 'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[5., 6., 14., 15., 16.], [2., 3., 14., 15., 16.]],
@@ -80,9 +80,11 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
'expected_sequence_length': [2, 2]},
)
def test_embedding_column(
- self, sparse_input_a, sparse_input_b, expected_input_layer,
+ self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
+ sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
+ sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size = 3
embedding_dimension_a = 2
embedding_values_a = (
@@ -261,18 +263,18 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'sparse_input_a': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
- indices=((0, 0), (1, 0), (1, 1)),
- values=(2, 0, 1),
- dense_shape=(2, 2)),
- 'sparse_input_b': sparse_tensor.SparseTensorValue(
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': (2, 0, 1),
+ 'dense_shape': (2, 2)},
+ 'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [1, 0]
- indices=((0, 0), (1, 0), (1, 1)),
- values=(1, 1, 0),
- dense_shape=(2, 2)),
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': (1, 1, 0),
+ 'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[0., 0., 1., 0., 1.], [0., 0., 0., 0., 0.]],
@@ -280,20 +282,20 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
[[1., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
- 'sparse_input_a': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
- indices=(
+ 'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
- values=(2, 0, 1, 0, 0, 1),
- dense_shape=(2, 2, 2)),
- 'sparse_input_b': sparse_tensor.SparseTensorValue(
+ 'values': (2, 0, 1, 0, 0, 1),
+ 'dense_shape': (2, 2, 2)},
+ 'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[1], [0]]
- indices=((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
- values=(1, 1, 1, 1, 0),
- dense_shape=(2, 2, 2)),
+ 'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
+ 'values': (1, 1, 1, 1, 0),
+ 'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[0., 0., 1., 0., 2.], [1., 1., 0., 0., 1.]],
@@ -302,8 +304,11 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
'expected_sequence_length': [2, 2]},
)
def test_indicator_column(
- self, sparse_input_a, sparse_input_b, expected_input_layer,
+ self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
+ sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
+ sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
+
vocabulary_size_a = 3
vocabulary_size_b = 2
@@ -350,30 +355,32 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args': {
# example 0, values [0., 1]
# example 1, [10.]
- indices=((0, 0), (0, 1), (1, 0)),
- values=(0., 1., 10.),
- dense_shape=(2, 2)),
+ 'indices': ((0, 0), (0, 1), (1, 0)),
+ 'values': (0., 1., 10.),
+ 'dense_shape': (2, 2)},
'expected_input_layer': [
[[0.], [1.]],
[[10.], [0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args': {
# feature 0, ids [[20, 3], [5]]
# feature 1, ids [[3], [8]]
- indices=((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
- values=(20, 3, 5., 3., 8.),
- dense_shape=(2, 2, 2)),
+ 'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
+ 'values': (20, 3, 5., 3., 8.),
+ 'dense_shape': (2, 2, 2)},
'expected_input_layer': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]],
'expected_sequence_length': [2, 2]},
)
def test_numeric_column(
- self, sparse_input, expected_input_layer, expected_sequence_length):
+ self, sparse_input_args, expected_input_layer, expected_sequence_length):
+ sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
+
numeric_column = sfc.sequence_numeric_column('aaa')
input_layer, sequence_length = sfc.sequence_input_layer(
@@ -387,27 +394,27 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args': {
# example 0, values [0., 1., 2., 3., 4., 5., 6., 7.]
# example 1, [10., 11., 12., 13.]
- indices=((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
- (0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
- values=(0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
- dense_shape=(2, 8)),
+ 'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
+ (0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
+ 'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
+ 'dense_shape': (2, 8)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
- indices=((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
- (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
- (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
- values=(0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
- dense_shape=(2, 2, 4)),
+ 'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
+ (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
+ (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
+ 'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
+ 'dense_shape': (2, 2, 4)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
@@ -415,8 +422,10 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
'expected_sequence_length': [2, 1]},
)
def test_numeric_column_multi_dim(
- self, sparse_input, expected_input_layer, expected_sequence_length):
+ self, sparse_input_args, expected_input_layer, expected_sequence_length):
"""Tests sequence_input_layer for multi-dimensional numeric_column."""
+ sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
+
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
input_layer, sequence_length = sfc.sequence_input_layer(
@@ -460,28 +469,29 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
- indices=((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
- (0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
- values=(0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
- dense_shape=(2, 8)),
+ 'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
+ (0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
+ 'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
+ 'dense_shape': (2, 8)},
'expected_shape': [2, 2, 4]},
{'testcase_name': '3D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
- indices=((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
- (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 2),
- (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
- values=(0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
- dense_shape=(2, 2, 4)),
+ 'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
+ (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 2),
+ (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
+ 'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
+ 'dense_shape': (2, 2, 4)},
'expected_shape': [2, 2, 4]},
)
def test_static_shape_from_tensors_numeric(
- self, sparse_input, expected_shape):
+ self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
+ sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
input_layer, _ = sfc.sequence_input_layer(
@@ -492,30 +502,31 @@ class SequenceInputLayerTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
- indices=((0, 0), (1, 0), (1, 1), (3, 0)),
- values=(2, 0, 1, 1),
- dense_shape=(4, 2)),
+ 'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
+ 'values': (2, 0, 1, 1),
+ 'dense_shape': (4, 2)},
'expected_shape': [4, 2, 3]},
{'testcase_name': '3D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
- indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
- (3, 0, 0), (3, 1, 0), (3, 1, 1)),
- values=(2, 0, 1, 2, 1, 0, 2),
- dense_shape=(4, 2, 2)),
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
+ (3, 0, 0), (3, 1, 0), (3, 1, 1)),
+ 'values': (2, 0, 1, 2, 1, 0, 2),
+ 'dense_shape': (4, 2, 2)},
'expected_shape': [4, 2, 3]}
)
def test_static_shape_from_tensors_indicator(
- self, sparse_input, expected_shape):
+ self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
+ sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=3)
indicator_column = fc.indicator_column(categorical_column)
@@ -546,11 +557,12 @@ class ConcatenateContextInputTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_3',
- 'seq_input': ops.convert_to_tensor(np.arange(100).reshape(10, 10))},
+ 'seq_input_arg': np.arange(100).reshape(10, 10)},
{'testcase_name': 'rank_gt_3',
- 'seq_input': ops.convert_to_tensor(np.arange(100).reshape(5, 5, 2, 2))}
+ 'seq_input_arg': np.arange(100).reshape(5, 5, 2, 2)}
)
- def test_sequence_input_throws_error(self, seq_input):
+ def test_sequence_input_throws_error(self, seq_input_arg):
+ seq_input = ops.convert_to_tensor(seq_input_arg)
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
@@ -559,11 +571,12 @@ class ConcatenateContextInputTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_2',
- 'context_input': ops.convert_to_tensor(np.arange(100))},
+ 'context_input_arg': np.arange(100)},
{'testcase_name': 'rank_gt_2',
- 'context_input': ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))}
+ 'context_input_arg': np.arange(100).reshape(5, 5, 4)}
)
- def test_context_input_throws_error(self, context_input):
+ def test_context_input_throws_error(self, context_input_arg):
+ context_input = ops.convert_to_tensor(context_input_arg)
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
@@ -657,25 +670,27 @@ class SequenceCategoricalColumnWithIdentityTest(
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
- indices=((0, 0), (1, 0), (1, 1)),
- values=(1, 2, 0),
- dense_shape=(2, 2)),
- 'expected': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 0), (1, 0, 0), (1, 1, 0)),
- values=np.array((1, 2, 0), dtype=np.int64),
- dense_shape=(2, 2, 1))},
+ 'inputs_args': {
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': (1, 2, 0),
+ 'dense_shape': (2, 2)},
+ 'expected_args': {
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
+ 'values': np.array((1, 2, 0), dtype=np.int64),
+ 'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 2), (1, 0, 0), (1, 2, 0)),
- values=(6, 7, 8),
- dense_shape=(2, 2, 2)),
- 'expected': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 2), (1, 0, 0), (1, 2, 0)),
- values=(6, 7, 8),
- dense_shape=(2, 2, 2))}
+ 'inputs_args': {
+ 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
+ 'values': (6, 7, 8),
+ 'dense_shape': (2, 2, 2)},
+ 'expected_args': {
+ 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
+ 'values': (6, 7, 8),
+ 'dense_shape': (2, 2, 2)}}
)
- def test_get_sparse_tensors(self, inputs, expected):
+ def test_get_sparse_tensors(self, inputs_args, expected_args):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
+ expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_identity('aaa', num_buckets=9)
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
@@ -691,27 +706,29 @@ class SequenceCategoricalColumnWithHashBucketTest(
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
- indices=((0, 0), (1, 0), (1, 1)),
- values=('omar', 'stringer', 'marlo'),
- dense_shape=(2, 2)),
- 'expected': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 0), (1, 0, 0), (1, 1, 0)),
+ 'inputs_args': {
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': ('omar', 'stringer', 'marlo'),
+ 'dense_shape': (2, 2)},
+ 'expected_args': {
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
# Ignored to avoid hash dependence in test.
- values=np.array((0, 0, 0), dtype=np.int64),
- dense_shape=(2, 2, 1))},
+ 'values': np.array((0, 0, 0), dtype=np.int64),
+ 'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 2), (1, 0, 0), (1, 2, 0)),
- values=('omar', 'stringer', 'marlo'),
- dense_shape=(2, 2, 2)),
- 'expected': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 2), (1, 0, 0), (1, 2, 0)),
+ 'inputs_args': {
+ 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
+ 'values': ('omar', 'stringer', 'marlo'),
+ 'dense_shape': (2, 2, 2)},
+ 'expected_args': {
+ 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
# Ignored to avoid hash dependence in test.
- values=np.array((0, 0, 0), dtype=np.int64),
- dense_shape=(2, 2, 2))}
+ 'values': np.array((0, 0, 0), dtype=np.int64),
+ 'dense_shape': (2, 2, 2)}}
)
- def test_get_sparse_tensors(self, inputs, expected):
+ def test_get_sparse_tensors(self, inputs_args, expected_args):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
+ expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_hash_bucket(
'aaa', hash_bucket_size=10)
@@ -742,25 +759,27 @@ class SequenceCategoricalColumnWithVocabularyFileTest(
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
- indices=((0, 0), (1, 0), (1, 1)),
- values=('marlo', 'skywalker', 'omar'),
- dense_shape=(2, 2)),
- 'expected': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 0), (1, 0, 0), (1, 1, 0)),
- values=np.array((2, -1, 0), dtype=np.int64),
- dense_shape=(2, 2, 1))},
+ 'inputs_args': {
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': ('marlo', 'skywalker', 'omar'),
+ 'dense_shape': (2, 2)},
+ 'expected_args': {
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
+ 'values': np.array((2, -1, 0), dtype=np.int64),
+ 'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 2), (1, 0, 0), (1, 2, 0)),
- values=('omar', 'skywalker', 'marlo'),
- dense_shape=(2, 2, 2)),
- 'expected': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 2), (1, 0, 0), (1, 2, 0)),
- values=np.array((0, -1, 2), dtype=np.int64),
- dense_shape=(2, 2, 2))}
+ 'inputs_args': {
+ 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
+ 'values': ('omar', 'skywalker', 'marlo'),
+ 'dense_shape': (2, 2, 2)},
+ 'expected_args': {
+ 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
+ 'values': np.array((0, -1, 2), dtype=np.int64),
+ 'dense_shape': (2, 2, 2)}}
)
- def test_get_sparse_tensors(self, inputs, expected):
+ def test_get_sparse_tensors(self, inputs_args, expected_args):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
+ expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
@@ -779,25 +798,27 @@ class SequenceCategoricalColumnWithVocabularyListTest(
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
- indices=((0, 0), (1, 0), (1, 1)),
- values=('marlo', 'skywalker', 'omar'),
- dense_shape=(2, 2)),
- 'expected': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 0), (1, 0, 0), (1, 1, 0)),
- values=np.array((2, -1, 0), dtype=np.int64),
- dense_shape=(2, 2, 1))},
+ 'inputs_args': {
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': ('marlo', 'skywalker', 'omar'),
+ 'dense_shape': (2, 2)},
+ 'expected_args': {
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
+ 'values': np.array((2, -1, 0), dtype=np.int64),
+ 'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 2), (1, 0, 0), (1, 2, 0)),
- values=('omar', 'skywalker', 'marlo'),
- dense_shape=(2, 2, 2)),
- 'expected': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 2), (1, 0, 0), (1, 2, 0)),
- values=np.array((0, -1, 2), dtype=np.int64),
- dense_shape=(2, 2, 2))}
+ 'inputs_args': {
+ 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
+ 'values': ('omar', 'skywalker', 'marlo'),
+ 'dense_shape': (2, 2, 2)},
+ 'expected_args': {
+ 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
+ 'values': np.array((0, -1, 2), dtype=np.int64),
+ 'dense_shape': (2, 2, 2)}}
)
- def test_get_sparse_tensors(self, inputs, expected):
+ def test_get_sparse_tensors(self, inputs_args, expected_args):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
+ expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
@@ -815,14 +836,14 @@ class SequenceEmbeddingColumnTest(
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
- indices=((0, 0), (1, 0), (1, 1), (3, 0)),
- values=(2, 0, 1, 1),
- dense_shape=(4, 2)),
+ 'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
+ 'values': (2, 0, 1, 1),
+ 'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
@@ -833,15 +854,15 @@ class SequenceEmbeddingColumnTest(
# example 3, ids [1]
[[3., 5.], [0., 0.]]]},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
- indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
- (3, 0, 0), (3, 1, 0), (3, 1, 1)),
- values=(2, 0, 1, 2, 1, 0, 2),
- dense_shape=(4, 2, 2)),
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
+ (3, 0, 0), (3, 1, 0), (3, 1, 1)),
+ 'values': (2, 0, 1, 2, 1, 0, 2),
+ 'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[7., 11.], [0., 0.]],
@@ -852,7 +873,8 @@ class SequenceEmbeddingColumnTest(
# example 3, ids [[1], [0, 2]]
[[3., 5.], [4., 6.5]]]}
)
- def test_get_sequence_dense_tensor(self, inputs, expected):
+ def test_get_sequence_dense_tensor(self, inputs_args, expected):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
@@ -884,23 +906,24 @@ class SequenceEmbeddingColumnTest(
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
- indices=((0, 0), (1, 0), (1, 1)),
- values=(2, 0, 1),
- dense_shape=(2, 2)),
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': (2, 0, 1),
+ 'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
- indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
- values=(2, 0, 1, 2),
- dense_shape=(2, 2, 2)),
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
+ 'values': (2, 0, 1, 2),
+ 'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
- def test_sequence_length(self, inputs, expected_sequence_length):
+ def test_sequence_length(self, inputs_args, expected_sequence_length):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
@@ -1124,14 +1147,14 @@ class SequenceIndicatorColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
- indices=((0, 0), (1, 0), (1, 1), (3, 0)),
- values=(2, 0, 1, 1),
- dense_shape=(4, 2)),
+ 'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
+ 'values': (2, 0, 1, 1),
+ 'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[0., 0., 1.], [0., 0., 0.]],
@@ -1142,15 +1165,15 @@ class SequenceIndicatorColumnTest(test.TestCase, parameterized.TestCase):
# example 3, ids [1]
[[0., 1., 0.], [0., 0., 0.]]]},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [2, 2]]
- indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
- (3, 0, 0), (3, 1, 0), (3, 1, 1)),
- values=(2, 0, 1, 2, 1, 2, 2),
- dense_shape=(4, 2, 2)),
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
+ (3, 0, 0), (3, 1, 0), (3, 1, 1)),
+ 'values': (2, 0, 1, 2, 1, 2, 2),
+ 'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[0., 0., 1.], [0., 0., 0.]],
@@ -1161,7 +1184,8 @@ class SequenceIndicatorColumnTest(test.TestCase, parameterized.TestCase):
# example 3, ids [[1], [2, 2]]
[[0., 1., 0.], [0., 0., 2.]]]}
)
- def test_get_sequence_dense_tensor(self, inputs, expected):
+ def test_get_sequence_dense_tensor(self, inputs_args, expected):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
@@ -1176,23 +1200,24 @@ class SequenceIndicatorColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
- indices=((0, 0), (1, 0), (1, 1)),
- values=(2, 0, 1),
- dense_shape=(2, 2)),
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': (2, 0, 1),
+ 'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
- indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
- values=(2, 0, 1, 2),
- dense_shape=(2, 2, 2)),
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
+ 'values': (2, 0, 1, 2),
+ 'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
- def test_sequence_length(self, inputs, expected_sequence_length):
+ def test_sequence_length(self, inputs_args, expected_sequence_length):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
@@ -1269,27 +1294,28 @@ class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, values [0., 1]
# example 1, [10.]
- indices=((0, 0), (0, 1), (1, 0)),
- values=(0., 1., 10.),
- dense_shape=(2, 2)),
+ 'indices': ((0, 0), (0, 1), (1, 0)),
+ 'values': (0., 1., 10.),
+ 'dense_shape': (2, 2)},
'expected': [
[[0.], [1.]],
[[10.], [0.]]]},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# feature 0, ids [[20, 3], [5]]
# feature 1, ids [[3], [8]]
- indices=((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
- values=(20, 3, 5., 3., 8.),
- dense_shape=(2, 2, 2)),
+ 'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
+ 'values': (20, 3, 5., 3., 8.),
+ 'dense_shape': (2, 2, 2)},
'expected': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]]},
)
- def test_get_sequence_dense_tensor(self, inputs, expected):
+ def test_get_sequence_dense_tensor(self, inputs_args, expected):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa')
dense_tensor, _ = numeric_column._get_sequence_dense_tensor(
@@ -1335,23 +1361,23 @@ class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
+ 'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
- indices=((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
- (0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
- values=(0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
- dense_shape=(2, 8)),
+ 'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
+ (0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
+ 'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
+ 'dense_shape': (2, 8)},
'expected_dense_tensor': [
[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]],
[[[10., 11.], [12., 13.]], [[0., 0.], [0., 0.]]]]},
{'testcase_name': '3D',
- 'sparse_input': sparse_tensor.SparseTensorValue(
- indices=((0, 0, 0), (0, 0, 2), (0, 0, 4), (0, 0, 6),
- (0, 1, 0), (0, 1, 2), (0, 1, 4), (0, 1, 6),
- (1, 0, 0), (1, 0, 2), (1, 0, 4), (1, 0, 6)),
- values=(0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
- dense_shape=(2, 2, 8)),
+ 'sparse_input_args': {
+ 'indices': ((0, 0, 0), (0, 0, 2), (0, 0, 4), (0, 0, 6),
+ (0, 1, 0), (0, 1, 2), (0, 1, 4), (0, 1, 6),
+ (1, 0, 0), (1, 0, 2), (1, 0, 4), (1, 0, 6)),
+ 'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
+ 'dense_shape': (2, 2, 8)},
'expected_dense_tensor': [
[[[0., 0.], [1., 0.]], [[2., 0.], [3., 0.]],
[[4., 0.], [5., 0.]], [[6., 0.], [7., 0.]]],
@@ -1359,8 +1385,9 @@ class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
[[0., 0.], [0., 0.]], [[0., 0.], [0., 0.]]]]},
)
def test_get_dense_tensor_multi_dim(
- self, sparse_input, expected_dense_tensor):
+ self, sparse_input_args, expected_dense_tensor):
"""Tests get_sequence_dense_tensor for multi-dim numeric_column."""
+ sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
dense_tensor, _ = numeric_column._get_sequence_dense_tensor(
@@ -1372,43 +1399,44 @@ class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
- indices=((0, 0), (1, 0), (1, 1)),
- values=(2., 0., 1.),
- dense_shape=(2, 2)),
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': (2., 0., 1.),
+ 'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '3D',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
- indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
- values=(2., 0., 1., 2.),
- dense_shape=(2, 2, 2)),
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
+ 'values': (2., 0., 1., 2.),
+ 'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '2D_with_shape',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
- indices=((0, 0), (1, 0), (1, 1)),
- values=(2., 0., 1.),
- dense_shape=(2, 2)),
+ 'indices': ((0, 0), (1, 0), (1, 1)),
+ 'values': (2., 0., 1.),
+ 'dense_shape': (2, 2)},
'expected_sequence_length': [1, 1],
'shape': (2,)},
{'testcase_name': '3D_with_shape',
- 'inputs': sparse_tensor.SparseTensorValue(
+ 'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
- indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
- values=(2., 0., 1., 2.),
- dense_shape=(2, 2, 2)),
+ 'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
+ 'values': (2., 0., 1., 2.),
+ 'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (2,)},
)
- def test_sequence_length(self, inputs, expected_sequence_length, shape):
+ def test_sequence_length(self, inputs_args, expected_sequence_length, shape):
+ inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=shape)
_, sequence_length = numeric_column._get_sequence_dense_tensor(