author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-09-10 14:36:05 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>   2018-09-10 14:43:27 -0700
commit    55ad6406b8e0e1f50d27f619aa150cc2f827311a (patch)
tree      f198767fb56e4eb32f177102a2fffe41b7da5f43 /tensorflow/contrib/layers
parent    4cbe494e87437213a7cb464ec23c12cb5788eb66 (diff)
Move from deprecated self.test_session() to self.cached_session().
self.test_session() has been deprecated in 9962eb5e84b15e309410071b06c2ed2d6148ed44 as its name confuses readers of the test. Moving to cached_session() instead, which is more explicit about:
* the fact that the session may be reused.
* the fact that the session is not closed even when doing a "with self.test_session()" statement.

PiperOrigin-RevId: 212336206
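For illustration, a minimal sketch of the pattern these tests migrate to, assuming the TensorFlow 1.x graph-mode tf.test.TestCase API (the test class, test method, and values below are hypothetical examples, not part of this change):

import tensorflow as tf

class SquareTest(tf.test.TestCase):  # hypothetical example, not from this commit

  def testSquare(self):
    # cached_session() installs the session as the default session inside the
    # `with` block, so bare .eval()/.run() work. Unlike a plain tf.Session it
    # is not closed when the block exits, and later cached_session() calls in
    # the same test reuse the same session.
    with self.cached_session():
      x = tf.square([2, 3])
      self.assertAllEqual([4, 9], x.eval())

if __name__ == "__main__":
  tf.test.main()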
Diffstat (limited to 'tensorflow/contrib/layers')
-rw-r--r--  tensorflow/contrib/layers/python/layers/embedding_ops_test.py       54
-rw-r--r--  tensorflow/contrib/layers/python/layers/encoders_test.py            20
-rw-r--r--  tensorflow/contrib/layers/python/layers/feature_column_ops_test.py  206
-rw-r--r--  tensorflow/contrib/layers/python/layers/feature_column_test.py      26
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers_test.py              316
-rw-r--r--  tensorflow/contrib/layers/python/layers/normalization_test.py       8
-rw-r--r--  tensorflow/contrib/layers/python/layers/optimizers_test.py          14
-rw-r--r--  tensorflow/contrib/layers/python/layers/regularizers_test.py        14
-rw-r--r--  tensorflow/contrib/layers/python/layers/rev_block_lib_test.py       10
-rw-r--r--  tensorflow/contrib/layers/python/layers/summaries_test.py           12
-rw-r--r--  tensorflow/contrib/layers/python/layers/utils_test.py               24
11 files changed, 352 insertions(+), 352 deletions(-)
diff --git a/tensorflow/contrib/layers/python/layers/embedding_ops_test.py b/tensorflow/contrib/layers/python/layers/embedding_ops_test.py
index 7ede193029..124515e5a6 100644
--- a/tensorflow/contrib/layers/python/layers/embedding_ops_test.py
+++ b/tensorflow/contrib/layers/python/layers/embedding_ops_test.py
@@ -109,7 +109,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
return sparse_ids, sparse_weights
def test_safe_embedding_lookup_sparse_return_zero_vector(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
@@ -122,7 +122,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
def test_safe_embedding_lookup_sparse_return_special_vector(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
@@ -136,7 +136,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
embedding_weights[0][2], embedding_weights[0][3]])
def test_safe_embedding_lookup_sparse_no_weights(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_2d()
@@ -150,7 +150,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
def test_safe_embedding_lookup_sparse_partitioned(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_2d()
@@ -164,7 +164,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
(embedding_weights[0] + embedding_weights[1]) / 2.0])
def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_2d()
@@ -179,7 +179,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
embedding_weights, sparse_ids, sparse_weights)
def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
@@ -192,7 +192,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
], [embedding_weights[0][2], [0] * 4, [0] * 4]])
def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
@@ -208,7 +208,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
]])
def test_safe_embedding_lookup_sparse_3d_no_weights(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_3d()
@@ -224,7 +224,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
]])
def test_safe_embedding_lookup_sparse_3d_partitioned(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_3d()
@@ -241,7 +241,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_3d()
@@ -276,7 +276,7 @@ class ScatteredEmbeddingLookupTest(test.TestCase):
return embedding_weights
def test_scattered_embedding_consistency(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
values = constant_op.constant(["foo", "foo"])
@@ -288,7 +288,7 @@ class ScatteredEmbeddingLookupTest(test.TestCase):
embedding_lookup_result[1])
def test_scattered_embedding_multiple_partition(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights(num_shards=7)
values = constant_op.constant([4, 4, 5])
@@ -304,7 +304,7 @@ class ScatteredEmbeddingLookupTest(test.TestCase):
self.assertGreater(embedding_diff, 0)
def test_scattered_embedding_coverage(self):
- with self.test_session():
+ with self.cached_session():
size = 8
embedding_weights = self._random_weights(size=size, num_shards=3)
values = constant_op.constant(["foo"])
@@ -316,7 +316,7 @@ class ScatteredEmbeddingLookupTest(test.TestCase):
self.assertEqual(len(np.unique(embedding_lookup_result[0])), size)
def test_scattered_embedding_multi_dimension(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
values = constant_op.constant([["foo", "bar", "bar"],
["bar", "bar", "foo"]])
@@ -329,7 +329,7 @@ class ScatteredEmbeddingLookupTest(test.TestCase):
embedding_lookup_result[1][2])
def test_scattered_embedding_lookup_sparse(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_tensor = sparse_tensor_lib.SparseTensor(
values=["foo", "bar", "foo", "bar"],
@@ -358,7 +358,7 @@ class ScatteredEmbeddingLookupTest(test.TestCase):
embeds = np.random.randn(n_embed, d_embed)
idx = np.random.randint(0, n_embed, idx_shape)
- with self.test_session():
+ with self.cached_session():
embedded_np = embeds[idx]
embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()
@@ -370,7 +370,7 @@ class ScatteredEmbeddingLookupTest(test.TestCase):
idx = np.random.randint(0, 5, 10)
idx2d = np.random.randint(0, 5, (10, 2))
- with self.test_session():
+ with self.cached_session():
embedded_np = embeds[idx]
embedded_np2d = embeds[idx2d]
embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()
@@ -408,7 +408,7 @@ class SampledScatteredEmbeddingLookupTest(test.TestCase):
return embedding_weights
def test_hashed_embedding_consistency(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
values = constant_op.constant(["foo", "foo"])
# The first three sampled_candidates are equal, so the first three
@@ -429,7 +429,7 @@ class SampledScatteredEmbeddingLookupTest(test.TestCase):
embedding_lookup_result[1][3])
def test_hashed_embedding_multi_dimension(self):
- with self.test_session():
+ with self.cached_session():
embedding_weights = self._random_weights()
values = constant_op.constant([["foo", "bar", "bar"],
["bar", "bar", "foo"]])
@@ -467,7 +467,7 @@ class SampledScatteredEmbeddingLookupSparseTest(test.TestCase):
def test_output_shape(self):
"""Verifies the shape of the output tensor."""
- with self.test_session():
+ with self.cached_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a", "a", "b", "c", "d", "e", "f"],
indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
@@ -481,7 +481,7 @@ class SampledScatteredEmbeddingLookupSparseTest(test.TestCase):
def test_output_values(self):
"""Verifies the values in a trivial case."""
- with self.test_session():
+ with self.cached_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
params = constant_op.constant([.1, .2, .3])
@@ -495,7 +495,7 @@ class SampledScatteredEmbeddingLookupSparseTest(test.TestCase):
def test_output_values_with_sampled_candidates(self):
"""Verifies the values for given sampled_candidates."""
- with self.test_session():
+ with self.cached_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a", "a", "b", "c", "d", "e", "f"],
indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
@@ -520,7 +520,7 @@ class SampledScatteredEmbeddingLookupSparseTest(test.TestCase):
def test_output_values_with_sign_hash(self):
"""Verifies the values in a trivial case with hash_signs=True."""
- with self.test_session():
+ with self.cached_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
params = constant_op.constant([.1, .1, .1])
@@ -537,7 +537,7 @@ class SampledScatteredEmbeddingLookupSparseTest(test.TestCase):
def test_distributive_property(self):
"""Verifies the distributive property of matrix multiplication."""
- with self.test_session():
+ with self.cached_session():
params = constant_op.constant([.1, .2, .3])
sp_values_a = sparse_tensor_lib.SparseTensor(
values=["a"], indices=[[0, 0]], dense_shape=[3, 1])
@@ -710,7 +710,7 @@ class EmbeddingLookupSparseWithDistributedAggregationTest(test.TestCase):
[1, 5], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
- with self.test_session():
+ with self.cached_session():
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
embedding_sum = \
@@ -749,7 +749,7 @@ class EmbeddingLookupSparseWithDistributedAggregationTest(test.TestCase):
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
- with self.test_session():
+ with self.cached_session():
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
@@ -767,7 +767,7 @@ class EmbeddingLookupSparseWithDistributedAggregationTest(test.TestCase):
self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
def testIncompatibleShapes(self):
- with self.test_session():
+ with self.cached_session():
x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
sp_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
diff --git a/tensorflow/contrib/layers/python/layers/encoders_test.py b/tensorflow/contrib/layers/python/layers/encoders_test.py
index e8528e9890..1a2aa710d5 100644
--- a/tensorflow/contrib/layers/python/layers/encoders_test.py
+++ b/tensorflow/contrib/layers/python/layers/encoders_test.py
@@ -34,14 +34,14 @@ def _get_const_var(name, shape, value):
class EncodersTest(test.TestCase):
def testBowEncoderSparse(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
enc = encoders.bow_encoder(docs, 4, 3)
sess.run(variables.global_variables_initializer())
self.assertAllEqual([2, 3], enc.eval().shape)
def testBowEncoderSparseTensor(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
sparse_docs = sparse_ops.dense_to_sparse_tensor(docs)
enc = encoders.bow_encoder(sparse_docs, 4, 3)
@@ -49,28 +49,28 @@ class EncodersTest(test.TestCase):
self.assertAllEqual([2, 3], enc.eval().shape)
def testBowEncoderSparseEmptyRow(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
docs = [[0, 1], [2, 3], [0, 0]]
enc = encoders.bow_encoder(docs, 4, 5)
sess.run(variables.global_variables_initializer())
self.assertAllEqual([3, 5], enc.eval().shape)
def testBowEncoderDense(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
docs = [[0, 1], [2, 3], [0, 0], [0, 0]]
enc = encoders.bow_encoder(docs, 4, 3, sparse_lookup=False)
sess.run(variables.global_variables_initializer())
self.assertAllEqual([4, 3], enc.eval().shape)
def testBowEncoderSparseTensorDenseLookup(self):
- with self.test_session():
+ with self.cached_session():
docs = [[0, 1]]
sparse_docs = sparse_ops.dense_to_sparse_tensor(docs)
with self.assertRaises(TypeError):
encoders.bow_encoder(sparse_docs, 4, 3, sparse_lookup=False)
def testBowEncodersSharingEmbeddings(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
enc_1 = encoders.bow_encoder(docs, 4, 3, scope='test')
enc_2 = encoders.bow_encoder(docs, 4, 3, scope='test', reuse=True)
@@ -79,7 +79,7 @@ class EncodersTest(test.TestCase):
self.assertAllEqual(avg_1, avg_2)
def testBowEncodersSharingEmbeddingsInheritedScopes(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
with variable_scope.variable_scope('test'):
enc_1 = encoders.bow_encoder(docs, 4, 3)
@@ -90,7 +90,7 @@ class EncodersTest(test.TestCase):
self.assertAllEqual(avg_1, avg_2)
def testBowEncodersSharingEmbeddingsSharedScope(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
docs = [[0, 1], [2, 3]]
enc_1 = encoders.bow_encoder(docs, 4, 3, scope='bow')
variable_scope.get_variable_scope().reuse_variables()
@@ -100,7 +100,7 @@ class EncodersTest(test.TestCase):
self.assertAllEqual(avg_1, avg_2)
def testBowEncoderReuseEmbeddingsVariable(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
docs = [[1, 1], [2, 3]]
with variable_scope.variable_scope('test'):
v = _get_const_var('embeddings', (4, 3),
@@ -111,7 +111,7 @@ class EncodersTest(test.TestCase):
self.assertAllClose([[3., 4., 5.], [7.5, 8.5, 9.5]], enc.eval())
def testEmbedSequence(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
docs = [[1, 1], [2, 3]]
with variable_scope.variable_scope('test'):
v = _get_const_var('embeddings', (4, 3),
diff --git a/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py b/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
index e6bbd86ab7..6fb4b9ff35 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
@@ -49,7 +49,7 @@ class TransformerTest(test.TestCase):
real_valued = feature_column.real_valued_column("price")
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output = feature_column_ops._Transformer(features).transform(real_valued)
- with self.test_session():
+ with self.cached_session():
self.assertAllEqual(output.eval(), [[20.], [110], [-3]])
def testSparseRealValuedColumnIdentityTransformation(self):
@@ -60,7 +60,7 @@ class TransformerTest(test.TestCase):
features = {"rating": rating_tensor}
output = feature_column_ops._Transformer(features).transform(
sparse_real_valued)
- with self.test_session():
+ with self.cached_session():
self.assertAllEqual(output.values.eval(), rating_tensor.values.eval())
self.assertAllEqual(output.indices.eval(), rating_tensor.indices.eval())
self.assertAllEqual(output.dense_shape.eval(),
@@ -80,7 +80,7 @@ class TransformerTest(test.TestCase):
[sparse_real_valued])
self.assertTrue(sparse_real_valued in output_dict)
output = output_dict[sparse_real_valued]
- with self.test_session():
+ with self.cached_session():
self.assertArrayNear(output.values.eval(), [4.0, 25.0], 1e-5)
self.assertAllEqual(output.indices.eval(), rating_tensor.indices.eval())
self.assertAllEqual(output.dense_shape.eval(),
@@ -97,7 +97,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[bucket])
self.assertEqual(len(output), 1)
self.assertIn(bucket, output)
- with self.test_session():
+ with self.cached_session():
self.assertAllEqual(output[bucket].eval(), [[2], [3], [0]])
def testBucketizedColumnWithMultiDimensions(self):
@@ -109,7 +109,7 @@ class TransformerTest(test.TestCase):
"price": constant_op.constant([[20., 110], [110., 20], [-3, -3]])
}
output = feature_column_ops._Transformer(features).transform(bucket)
- with self.test_session():
+ with self.cached_session():
self.assertAllEqual(output.eval(), [[2, 3], [3, 2], [0, 0]])
def testCachedTransformation(self):
@@ -118,7 +118,7 @@ class TransformerTest(test.TestCase):
# buckets 2, 3, 0
features = {"price": constant_op.constant([[20.], [110], [-3]])}
transformer = feature_column_ops._Transformer(features)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
transformer.transform(bucket)
num_of_ops = len(sess.graph.get_operations())
# Verify that the second call to transform the same feature
@@ -138,7 +138,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[hashed_sparse])
self.assertEqual(len(output), 1)
self.assertIn(hashed_sparse, output)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(output[hashed_sparse].values.dtype, dtypes.int64)
self.assertTrue(
all(x < 10 and x >= 0 for x in output[hashed_sparse].values.eval()))
@@ -161,7 +161,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[hashed_sparse])
self.assertEqual(len(output), 1)
self.assertIn(hashed_sparse, output)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(output[hashed_sparse].values.dtype, dtypes.int64)
self.assertTrue(
all(x < 10 and x >= 0 for x in output[hashed_sparse].values.eval()))
@@ -177,7 +177,7 @@ class TransformerTest(test.TestCase):
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(hashed_sparse)
- with self.test_session():
+ with self.cached_session():
# While the input is a dense Tensor, the output should be a SparseTensor.
self.assertIsInstance(output, sparse_tensor.SparseTensor)
self.assertEqual(output.values.dtype, dtypes.int64)
@@ -203,7 +203,7 @@ class TransformerTest(test.TestCase):
self.assertEqual(len(output), 2)
self.assertIn(hashed_sparse, output)
self.assertIn(wire_embedding, output)
- with self.test_session():
+ with self.cached_session():
self.assertAllEqual(output[wire_embedding].indices.eval(),
wire_tensor.indices.eval())
self.assertAllEqual(output[wire_embedding].dense_shape.eval(), [2, 2])
@@ -223,7 +223,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[keys_sparse])
self.assertEqual(len(output), 1)
self.assertIn(keys_sparse, output)
- with self.test_session():
+ with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[keys_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[keys_sparse].values.eval(), [1, 2, 0])
@@ -241,7 +241,7 @@ class TransformerTest(test.TestCase):
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(keys_sparse)
- with self.test_session():
+ with self.cached_session():
lookup_ops.tables_initializer().run()
# While the input is a dense Tensor, the output should be a SparseTensor.
self.assertIsInstance(output, sparse_tensor.SparseTensor)
@@ -264,7 +264,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[hashed_sparse])
self.assertEqual(len(output), 1)
self.assertIn(hashed_sparse, output)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(output[hashed_sparse].values.dtype, dtypes.int32)
self.assertTrue(
all(x < 10 and x >= 0 for x in output[hashed_sparse].values.eval()))
@@ -282,7 +282,7 @@ class TransformerTest(test.TestCase):
wire_tensor = constant_op.constant([[100, 0], [1, 25]])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(hashed_sparse)
- with self.test_session():
+ with self.cached_session():
# While the input is a dense Tensor, the output should be a SparseTensor.
self.assertIsInstance(output, sparse_tensor.SparseTensor)
self.assertEqual(output.values.dtype, dtypes.int32)
@@ -310,7 +310,7 @@ class TransformerTest(test.TestCase):
self.assertEqual(len(output), 1)
self.assertIn(weighted_ids, output)
- with self.test_session():
+ with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertAllEqual(output[weighted_ids][0].dense_shape.eval(),
ids_tensor.dense_shape.eval())
@@ -340,7 +340,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[vocab_sparse])
self.assertEqual(len(output), 1)
self.assertIn(vocab_sparse, output)
- with self.test_session():
+ with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0])
@@ -362,7 +362,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[vocab_sparse])
self.assertEqual(len(output), 1)
self.assertIn(vocab_sparse, output)
- with self.test_session():
+ with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0, 1])
@@ -386,7 +386,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[vocab_sparse])
self.assertEqual(len(output), 1)
self.assertIn(vocab_sparse, output)
- with self.test_session():
+ with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0])
@@ -408,7 +408,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[vocab_sparse])
self.assertEqual(len(output), 1)
self.assertIn(vocab_sparse, output)
- with self.test_session():
+ with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertEqual(output[vocab_sparse].values.dtype, dtypes.int64)
self.assertAllEqual(output[vocab_sparse].values.eval(), [1, 2, 0, 1])
@@ -440,7 +440,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[country_language])
self.assertEqual(len(output), 1)
self.assertIn(country_language, output)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(output[country_language].values.dtype, dtypes.int64)
self.assertTrue(
all(x < 15 and x >= 0 for x in output[country_language].values.eval(
@@ -467,7 +467,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[country_price])
self.assertEqual(len(output), 1)
self.assertIn(country_price, output)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(output[country_price].values.dtype, dtypes.int64)
self.assertTrue(
all(x < 15 and x >= 0 for x in output[country_price].values.eval()))
@@ -498,7 +498,7 @@ class TransformerTest(test.TestCase):
weights = column_to_variable[country_price][0]
grad = array_ops.squeeze(
gradients_impl.gradients(output, weights)[0].values)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertEqual(len(grad.eval()), 6)
@@ -537,7 +537,7 @@ class TransformerTest(test.TestCase):
features=features, feature_columns=[wire_country_price])
self.assertEqual(len(output), 1)
self.assertIn(wire_country_price, output)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(output[wire_country_price].values.dtype, dtypes.int64)
self.assertTrue(
all(x < 15 and x >= 0 for x in output[wire_country_price].values.eval(
@@ -600,7 +600,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
columns = [one_hot_column, embedding_column, real_valued_column]
output = feature_column_ops.input_from_feature_columns(features, columns)
output_core = fc_core.input_layer(features, columns)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual(output.eval().shape, [3, 2 + 4 + 10])
@@ -626,7 +626,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
cols_to_outs = {}
feature_column_ops.input_from_feature_columns(
features, columns, cols_to_outs=cols_to_outs)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
for column in columns:
@@ -637,7 +637,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output = feature_column_ops.input_from_feature_columns(features,
[real_valued])
- with self.test_session():
+ with self.cached_session():
self.assertAllClose(output.eval(), features["price"].eval())
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllClose(output.eval(),
@@ -650,7 +650,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
}
output = feature_column_ops.input_from_feature_columns(features,
[real_valued])
- with self.test_session():
+ with self.cached_session():
self.assertAllClose(output.eval(), features["price"].eval())
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllClose(output.eval(),
@@ -662,7 +662,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
rating = np.array([[0., 1., 2., -1.],
[3., 4., 5., 6.]])
features = {"rating": constant_op.constant(rating)}
- with self.test_session() as sess:
+ with self.cached_session() as sess:
output = sess.run(feature_column_ops.input_from_feature_columns(
features, [var_len_real_valued]))
self.assertAllClose(rating, output)
@@ -673,7 +673,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
rating = np.array([[0, 1, 2, -1],
[3, 4, 5, 6]])
features = {"rating": constant_op.constant(rating, dtype=dtypes.int64)}
- with self.test_session() as sess:
+ with self.cached_session() as sess:
output = sess.run(feature_column_ops.input_from_feature_columns(
features, [var_len_real_valued]))
self.assertAllClose(rating.astype(np.float32), output)
@@ -684,7 +684,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output = feature_column_ops.input_from_feature_columns(features,
[real_valued])
- with self.test_session():
+ with self.cached_session():
self.assertAllClose(output.eval(), features["price"].eval() - 2)
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllClose(output.eval(),
@@ -698,7 +698,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
}
output = feature_column_ops.input_from_feature_columns(features,
[real_valued])
- with self.test_session():
+ with self.cached_session():
self.assertAllClose(output.eval(), features["price"].eval() - 2)
# Verify cross compatibility: Core builder output should equal to contrib.
self.assertAllClose(output.eval(),
@@ -713,7 +713,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
features = {"price": constant_op.constant([[20.], [110], [-3]])}
output = feature_column_ops.input_from_feature_columns(features, [bucket])
expected = [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]
- with self.test_session():
+ with self.cached_session():
self.assertAllClose(output.eval(), expected)
self.assertAllClose(output.eval(),
fc_core.input_layer(features, [bucket]).eval())
@@ -729,7 +729,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
output = feature_column_ops.input_from_feature_columns(features, [bucket])
expected = [[0, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 0]]
- with self.test_session():
+ with self.cached_session():
self.assertAllClose(output.eval(), expected)
self.assertAllClose(output.eval(),
fc_core.input_layer(features, [bucket]).eval())
@@ -752,7 +752,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
output = feature_column_ops.input_from_feature_columns(features,
[one_hot_column])
output_core = fc_core.input_layer(features, [one_hot_column])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual([[0, 0, 10., 0], [0, 20., 0, 0], [30., 0, 40., 0]],
@@ -773,7 +773,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
[one_hot_sparse])
output_core = fc_core.input_layer(features, [one_hot_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]],
@@ -794,7 +794,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
[one_hot_sparse])
output_core = fc_core.input_layer(features, [one_hot_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],
@@ -816,7 +816,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
output = feature_column_ops.input_from_feature_columns(features,
[one_hot_sparse])
output_core = fc_core.input_layer(features, [one_hot_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],
output.eval())
@@ -834,7 +834,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
output = feature_column_ops.input_from_feature_columns(features,
[one_hot_sparse])
output_core = fc_core.input_layer(features, [one_hot_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual([3, 10], output.eval().shape)
@@ -852,7 +852,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
output_core = fc_core.input_layer(features, [embeded_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(output.eval().shape, [4, 10])
# Verify cross compatibility: Core builder output should equal to contrib.
@@ -878,7 +878,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
features, [embedded_sparse], weight_collections=["my_collection_core"])
weights_core = ops.get_collection("my_collection_core")
grad_core = gradients_impl.gradients(output_core, weights_core)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
gradient_values = []
gradient_values_core = []
@@ -907,7 +907,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
[embeded_sparse])
output_core = fc_core.input_layer(features, [embeded_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
output_eval = output.eval()
self.assertAllEqual(output_eval.shape, [2, 10])
@@ -935,7 +935,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
# Makes sure that trying to use different initializers with the same
# embedding column explicitly fails.
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
ValueError,
"Duplicate feature column key found for column: wire_embedding"):
@@ -961,7 +961,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
[embeded_sparse])
output_core = fc_core.input_layer(features, [embeded_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual(output.eval().shape, [2, 10])
@@ -986,7 +986,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
embeded_sparse = feature_column.embedding_column(weighted_ids, 10)
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual(output.eval().shape, [2, 10])
@@ -1005,7 +1005,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
embeded_sparse = feature_column.embedding_column(crossed, 10)
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(output.eval().shape, [2, 10])
@@ -1016,7 +1016,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"wire": wire_tensor}
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
ValueError, "Error creating input layer for column: wire"):
variables_lib.global_variables_initializer().run()
@@ -1035,7 +1035,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"ids": ids_tensor, "weights": weights_tensor}
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
ValueError,
"Error creating input layer for column: ids_weighted_by_weights"):
@@ -1053,7 +1053,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
ValueError, "Error creating input layer for column: aaa_X_bbb"):
variables_lib.global_variables_initializer().run()
@@ -1080,7 +1080,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
hashed_sparse, 10, initializer=init_ops.constant_initializer(133.7))
output = feature_column_ops.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
# size of output = 3 (real_valued) + 2 * 4 (bucket) + 10 (embedding) = 21
self.assertAllEqual(output.eval().shape, [3, 21])
@@ -1099,7 +1099,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
initializer=init_ops.ones_initializer())
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
# score: (number of values)
self.assertAllEqual(output.eval(), [[1.], [2.], [0.]])
@@ -1119,7 +1119,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
max_norm=0.5)
output = feature_column_ops.input_from_feature_columns(features,
[embedded_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
# score: (number of values * 0.5)
self.assertAllClose(output.eval(), [[0.5], [1.], [0.]])
@@ -1144,7 +1144,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
initializer=init_ops.ones_initializer())
output = feature_column_ops.input_from_feature_columns(features,
[embeded_sparse])
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
# score: (sum of weights)
@@ -1236,7 +1236,7 @@ class CreateInputLayersForDNNsTest(test.TestCase):
# There should be one trainable variables for sparse_2
self.assertEqual(1, len(variables_lib.trainable_variables()))
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
output_1_eval = output_1.eval()
output_2_eval = output_2.eval()
@@ -1295,7 +1295,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(measurement_input, model_inputs)
@@ -1305,7 +1305,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
rating = np.array([[0., 1., 2., -1.],
[3., 4., 5., 6.]])
features = {"rating": constant_op.constant(rating)}
- with self.test_session() as sess:
+ with self.cached_session() as sess:
output = sess.run(
feature_column_ops.sequence_input_from_feature_columns(
features, [var_len_real_valued]))
@@ -1329,7 +1329,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
expected_shape = [batch_size, sequence_length, np.prod(dimensions)]
reshaped_measurements = np.reshape(measurement_input, expected_shape)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(reshaped_measurements, model_inputs)
@@ -1350,7 +1350,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(normalizer(measurement_input), model_inputs)
@@ -1373,7 +1373,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
expected_shape = [batch_size, sequence_length, np.prod(dimensions)]
reshaped_measurements = np.reshape(measurement_input, expected_shape)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(normalizer(reshaped_measurements), model_inputs)
@@ -1395,7 +1395,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [one_hot_column])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
@@ -1429,7 +1429,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [one_hot_column])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
@@ -1459,7 +1459,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [embedded_column])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
@@ -1488,7 +1488,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
model_input_tensor = feature_column_ops.sequence_input_from_feature_columns(
columns_to_tensors, [embedded_column])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
@@ -1518,7 +1518,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
embedding_weights = ops.get_collection("my_collection")
gradient_tensor = gradients_impl.gradients(model_input_tensor,
embedding_weights)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input, gradients = sess.run([model_input_tensor, gradient_tensor])
@@ -1585,7 +1585,7 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
columns_to_tensors, model_input_columns)
self.assertEqual(dtypes.float32, model_input_tensor.dtype)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
model_input = sess.run(model_input_tensor)
@@ -1622,7 +1622,7 @@ class WeightedSumTest(test.TestCase):
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
logits_core = fc_core.linear_model(features, [hashed_sparse], units=5)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
@@ -1640,7 +1640,7 @@ class WeightedSumTest(test.TestCase):
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
logits_core = fc_core.linear_model(features, [hashed_sparse], units=5)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
@@ -1654,7 +1654,7 @@ class WeightedSumTest(test.TestCase):
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
logits_core = fc_core.linear_model(features, [hashed_sparse], units=5)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
@@ -1676,7 +1676,7 @@ class WeightedSumTest(test.TestCase):
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [weighted_ids], num_outputs=5)
logits_core = fc_core.linear_model(features, [weighted_ids], units=5)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
@@ -1695,7 +1695,7 @@ class WeightedSumTest(test.TestCase):
features, [weighted_ids], num_outputs=5)
logits_core = fc_core.linear_model(features, [weighted_ids], units=5)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
@@ -1716,7 +1716,7 @@ class WeightedSumTest(test.TestCase):
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [crossed], num_outputs=5)
logits_core = fc_core.linear_model(features, [crossed], units=5)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
@@ -1730,7 +1730,7 @@ class WeightedSumTest(test.TestCase):
dense_shape=[2, 2])
features = {"wire": wire_tensor}
embeded_sparse = feature_column.embedding_column(hashed_sparse, 10)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
ValueError, "Error creating weighted sum for column: wire_embedding"):
variables_lib.global_variables_initializer().run()
@@ -1756,7 +1756,7 @@ class WeightedSumTest(test.TestCase):
features, [movies], num_outputs=1))
logits_core = fc_core.linear_model(features, [movies])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.initialize_all_variables().run()
lookup_ops.tables_initializer().run()
@@ -1776,7 +1776,7 @@ class WeightedSumTest(test.TestCase):
}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [real_valued], num_outputs=5)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
@@ -1789,7 +1789,7 @@ class WeightedSumTest(test.TestCase):
}
logits, _, _ = feature_column_ops.weighted_sum_from_feature_columns(
features, [bucket], num_outputs=5)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
@@ -1814,7 +1814,7 @@ class WeightedSumTest(test.TestCase):
features, [real_valued, bucket, hashed_sparse, crossed], num_outputs=5)
output_core = fc_core.linear_model(
features, [real_valued, bucket, hashed_sparse, crossed], units=5)
- with self.test_session():
+ with self.cached_session():
variables_lib.global_variables_initializer().run()
self.assertAllEqual(output.eval().shape, [3, 5])
# Verify cross compatibility: Core builder output should equal to contrib.
@@ -1837,7 +1837,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, bias = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [age, language], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -1877,7 +1877,7 @@ class WeightedSumTest(test.TestCase):
features, [country, language], num_outputs=1))
# Assert that only a single weight is created.
self.assertEqual(len(variables), 1)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -1941,7 +1941,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, bias = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [weighted_language], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -1969,7 +1969,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, bias = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [language], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -1992,7 +1992,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [movies], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2026,7 +2026,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_language], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2050,7 +2050,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [language_language], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2083,7 +2083,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_language], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2124,7 +2124,7 @@ class WeightedSumTest(test.TestCase):
features, [country, language, country_language],
num_outputs=1,
scope=scope))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2161,7 +2161,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country, age, incomes], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2197,7 +2197,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country, age, height, incomes], num_outputs=5))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2228,7 +2228,7 @@ class WeightedSumTest(test.TestCase):
feature_column_ops.weighted_sum_from_feature_columns(
features, [bucket], num_outputs=1))
output_core = fc_core.linear_model(features, [bucket])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
# Cross compatibility: Core builder output should equal to contrib.
@@ -2259,7 +2259,7 @@ class WeightedSumTest(test.TestCase):
feature_column_ops.weighted_sum_from_feature_columns(
features, [bucket, country], num_outputs=1))
output_core = fc_core.linear_model(features, [bucket, country])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
# Cross compatibility: Core builder output should equal to contrib.
@@ -2290,7 +2290,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [bucket, country], num_outputs=5))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2326,7 +2326,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_price], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2365,7 +2365,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [country_language_price], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2389,7 +2389,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
@@ -2404,7 +2404,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
@@ -2419,7 +2419,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
@@ -2440,7 +2440,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [product], num_outputs=1))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
product_weights = column_to_variable[product][0]
@@ -2452,7 +2452,7 @@ class WeightedSumTest(test.TestCase):
features = {"age": constant_op.constant([[10.], [20.], [30.], [40.]])}
output, _, bias = feature_column_ops.weighted_sum_from_feature_columns(
features, [feature_column.real_valued_column("age")], num_outputs=3)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
sess.run(bias.assign([0.1, 0.2, 0.3]))
@@ -2466,7 +2466,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[column][0]
@@ -2490,7 +2490,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
weights = column_to_variable[column][0]
@@ -2516,7 +2516,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2556,7 +2556,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2585,7 +2585,7 @@ class WeightedSumTest(test.TestCase):
output, column_to_variable, _ = (
feature_column_ops.weighted_sum_from_feature_columns(
features, [column], num_outputs=3))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
lookup_ops.tables_initializer().run()
@@ -2651,7 +2651,7 @@ class ParseExampleTest(test.TestCase):
feature_columns=[bucket, wire_cast])
self.assertIn(bucket, output)
self.assertIn(wire_cast, output)
- with self.test_session():
+ with self.cached_session():
lookup_ops.tables_initializer().run()
self.assertAllEqual(output[bucket].eval(), [[2, 3, 0]])
self.assertAllEqual(output[wire_cast].indices.eval(), [[0, 0], [0, 1]])
@@ -2713,7 +2713,7 @@ class ParseExampleTest(test.TestCase):
self.assertIn("measurements", seq)
self.assertIsInstance(seq["measurements"], ops.Tensor)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
location_val, wire_cast_val, measurement_val = sess.run(
[ctx["location"], seq["wire_cast"], seq["measurements"]])
diff --git a/tensorflow/contrib/layers/python/layers/feature_column_test.py b/tensorflow/contrib/layers/python/layers/feature_column_test.py
index eaaf9f8d5f..d90d6ecf7f 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column_test.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column_test.py
@@ -201,7 +201,7 @@ class FeatureColumnTest(test.TestCase):
b2 = feature_column_ops.input_from_feature_columns({
b[1]: input_tensor_c2
}, [b[1]])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
b1_value = b1.eval()
b2_value = b2.eval()
@@ -230,7 +230,7 @@ class FeatureColumnTest(test.TestCase):
e1 = feature_column_ops.input_from_feature_columns({
e[0]: input_tensor_c1
}, [e[0]])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
d1_value = d1.eval()
e1_value = e1.eval()
@@ -340,7 +340,7 @@ class FeatureColumnTest(test.TestCase):
with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
one_hot_output = one_hot._to_dnn_input_layer(
id_tensor, output_rank=output_rank)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
one_hot_value = sess.run(one_hot_output)
expected_shape = (id_tensor_shape[:output_rank - 1] + [vocab_size])
self.assertEquals(expected_shape, list(one_hot_value.shape))
@@ -376,7 +376,7 @@ class FeatureColumnTest(test.TestCase):
one_hot_output_shape = one_hot_output.get_shape().as_list()
expected_shape = id_tensor_shape[:-1] + [vocab_size]
self.assertEquals(expected_shape, one_hot_output_shape)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
one_hot_value = sess.run(one_hot_output)
self.assertEquals(expected_shape, list(one_hot_value.shape))
@@ -399,7 +399,7 @@ class FeatureColumnTest(test.TestCase):
expected = np.array([[0., 1., 0., 0., 0., 0., 0., 1., 0.,
0.], [0., 1., 0., 0., 0., 0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
one_hot_value = sess.run(one_hot_output)
self.assertTrue(np.array_equal(one_hot_value, expected))
@@ -440,7 +440,7 @@ class FeatureColumnTest(test.TestCase):
}
one_hot_tensor = feature_column_ops.input_from_feature_columns(
features, [one_hot])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
self.assertAllEqual([[2., 6., 0.]], one_hot_tensor.eval())
@@ -451,7 +451,7 @@ class FeatureColumnTest(test.TestCase):
features = {"ids": constant_op.constant([["marlo", "unknown", "omar"]])}
one_hot_tensor = feature_column_ops.input_from_feature_columns(
features, [one_hot])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
self.assertAllEqual([[1., 1., 0.]], one_hot_tensor.eval())
@@ -603,7 +603,7 @@ class FeatureColumnTest(test.TestCase):
real_valued_output = real_valued_column._to_dnn_input_layer(
constant_op.constant(real_valued_input, dtype=dtypes.float32),
output_rank=output_rank)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
real_valued_eval = sess.run(real_valued_output)
expected_shape = (
input_shape[:output_rank - 1] +
@@ -797,7 +797,7 @@ class FeatureColumnTest(test.TestCase):
sparse_column.insert_transformed_feature(features)
sparse_output = features[sparse_column]
expected_shape = [batch_size, 1]
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sparse_result = sess.run(sparse_output)
self.assertEquals(expected_shape, list(sparse_result.dense_shape))
@@ -1110,7 +1110,7 @@ class FeatureColumnTest(test.TestCase):
ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
saved_embedding = embeddings.eval()
save.save(sess, checkpoint_path)
@@ -1131,7 +1131,7 @@ class FeatureColumnTest(test.TestCase):
embedding_col_initialized: input_tensor
}, [embedding_col_initialized])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loaded_embedding = pretrained_embeddings.eval()
@@ -1176,7 +1176,7 @@ class FeatureColumnTest(test.TestCase):
ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(assign_op)
saved_col_weights = col_weights[crossed_col][0].eval()
@@ -1201,7 +1201,7 @@ class FeatureColumnTest(test.TestCase):
}, [crossed_col_initialized], 1))
col_weights_from_ckpt = col_weights[crossed_col_initialized][0]
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loaded_col_weights = col_weights_from_ckpt.eval()
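Several hunks above use the bare "with self.cached_session():" form with no "as sess" and then call .eval() directly. A hypothetical sketch of why that works, assuming TensorFlow 1.x graph mode: the context manager installs its session as the default session, which Tensor.eval() looks up.

# Hypothetical sketch; tensor values chosen only for illustration.
import tensorflow as tf

class DefaultSessionEvalExample(tf.test.TestCase):

  def test_eval_without_explicit_run(self):
    one_hot = tf.one_hot([0, 1, 2], depth=3)
    with self.cached_session():
      # No explicit sess.run(): eval() finds the default session
      # installed by the context manager.
      self.assertAllEqual(
          [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], one_hot.eval())

if __name__ == '__main__':
  tf.test.main()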
diff --git a/tensorflow/contrib/layers/python/layers/layers_test.py b/tensorflow/contrib/layers/python/layers/layers_test.py
index 52c9c4f3be..85af9de4e4 100644
--- a/tensorflow/contrib/layers/python/layers/layers_test.py
+++ b/tensorflow/contrib/layers/python/layers/layers_test.py
@@ -281,7 +281,7 @@ class BiasAddTest(test.TestCase):
def testCreate(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.bias_add(images)
self.assertEqual(output.op.name, 'BiasAdd/BiasAdd')
@@ -289,7 +289,7 @@ class BiasAddTest(test.TestCase):
def testCreateWithActivation(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.bias_add(images, activation_fn=nn_ops.relu)
self.assertEqual(output.op.name, 'BiasAdd/Relu')
@@ -298,7 +298,7 @@ class BiasAddTest(test.TestCase):
def testCreateDimensions(self):
dims = (2, 3, 4)
shape = [5, 2, 3, 4]
- with self.test_session():
+ with self.cached_session():
for d in dims:
input_shape = shape[:d]
inputs = random_ops.random_uniform(input_shape, seed=1)
@@ -311,7 +311,7 @@ class BiasAddTest(test.TestCase):
class ConvolutionTest(test.TestCase):
def testInvalidShape(self):
- with self.test_session():
+ with self.cached_session():
images_2d = random_ops.random_uniform((5, 7, 9, 3), seed=1)
with self.assertRaisesRegexp(
ValueError, 'Convolution expects input with rank 5, got 4'):
@@ -323,14 +323,14 @@ class ConvolutionTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
layers_lib.convolution2d(images, 32, 3, data_format='CHWN')
def testCreateConv(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32)
output = layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
@@ -342,7 +342,7 @@ class ConvolutionTest(test.TestCase):
def testCreateConvNCHW(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, 4, height, width)).astype(np.float32)
output = layers_lib.convolution2d(images, 32, [3, 3], data_format='NCHW')
self.assertEqual(output.op.name, 'Conv/Relu')
@@ -354,7 +354,7 @@ class ConvolutionTest(test.TestCase):
def testCreateSquareConv(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, 3)
self.assertEqual(output.op.name, 'Conv/Relu')
@@ -362,7 +362,7 @@ class ConvolutionTest(test.TestCase):
def testCreateConvWithTensorShape(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, images.get_shape()[1:3])
self.assertEqual(output.op.name, 'Conv/Relu')
@@ -370,7 +370,7 @@ class ConvolutionTest(test.TestCase):
def testCreateFullyConv(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 32), seed=1)
output = layers_lib.convolution2d(
images, 64, images.get_shape()[1:3], padding='VALID')
@@ -381,7 +381,7 @@ class ConvolutionTest(test.TestCase):
def testFullyConvWithCustomGetter(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
called = [0]
def custom_getter(getter, *args, **kwargs):
@@ -395,7 +395,7 @@ class ConvolutionTest(test.TestCase):
def testCreateVerticalConv(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 4), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 1])
self.assertEqual(output.op.name, 'Conv/Relu')
@@ -407,7 +407,7 @@ class ConvolutionTest(test.TestCase):
def testCreateHorizontalConv(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 4), seed=1)
output = layers_lib.convolution2d(images, 32, [1, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
@@ -417,7 +417,7 @@ class ConvolutionTest(test.TestCase):
def testCreateConvWithStride(self):
height, width = 6, 8
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], stride=2)
self.assertEqual(output.op.name, 'Conv/Relu')
@@ -427,7 +427,7 @@ class ConvolutionTest(test.TestCase):
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
- with self.test_session():
+ with self.cached_session():
self.assertFalse(variables.get_variables('conv1/weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
@@ -436,7 +436,7 @@ class ConvolutionTest(test.TestCase):
def testCreateConvWithScope(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(output.op.name, 'conv1/Relu')
@@ -453,14 +453,14 @@ class ConvolutionTest(test.TestCase):
def testCreateConvWithoutActivation(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], activation_fn=None)
self.assertEqual(output.op.name, 'Conv/BiasAdd')
def testCreateConvValid(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 5, 7, 32])
@@ -468,7 +468,7 @@ class ConvolutionTest(test.TestCase):
def testCreateConvWithWD(self):
height, width = 7, 9
weight_decay = 0.01
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform((5, height, width, 3), seed=1)
regularizer = regularizers.l2_regularizer(weight_decay)
layers_lib.convolution2d(
@@ -481,7 +481,7 @@ class ConvolutionTest(test.TestCase):
def testCreateConvNoRegularizers(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(
@@ -489,7 +489,7 @@ class ConvolutionTest(test.TestCase):
def testReuseVars(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(len(variables.get_variables()), 2)
@@ -498,7 +498,7 @@ class ConvolutionTest(test.TestCase):
def testNonReuseVars(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(len(variables.get_variables()), 2)
@@ -507,7 +507,7 @@ class ConvolutionTest(test.TestCase):
def testReuseConvWithWD(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
weight_decay = regularizers.l2_regularizer(0.01)
with arg_scope(
@@ -523,7 +523,7 @@ class ConvolutionTest(test.TestCase):
def testConvWithBatchNorm(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 32), seed=1)
with arg_scope(
[layers_lib.convolution2d],
@@ -539,7 +539,7 @@ class ConvolutionTest(test.TestCase):
def testReuseConvWithBatchNorm(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 32), seed=1)
with arg_scope(
[layers_lib.convolution2d],
@@ -557,7 +557,7 @@ class ConvolutionTest(test.TestCase):
def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
- with self.test_session():
+ with self.cached_session():
self.assertFalse(variables.get_variables('conv1/weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.convolution2d(images, 32, [3, 3], rate=2, scope='conv1')
@@ -573,7 +573,7 @@ class ConvolutionTest(test.TestCase):
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -587,7 +587,7 @@ class ConvolutionTest(test.TestCase):
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -601,7 +601,7 @@ class ConvolutionTest(test.TestCase):
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=[2, 3], padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEquals(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -612,7 +612,7 @@ class ConvolutionTest(test.TestCase):
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 7, 9, num_filters]
- with self.test_session():
+ with self.cached_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.convolution2d(
@@ -651,7 +651,7 @@ class ConvolutionTest(test.TestCase):
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 5, 7, num_filters]
- with self.test_session():
+ with self.cached_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.convolution2d(
@@ -670,7 +670,7 @@ class ConvolutionTest(test.TestCase):
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='VALID', scope='conv7')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -688,7 +688,7 @@ class ConvolutionTest(test.TestCase):
padding='VALID',
activation_fn=None,
scope='conv7')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/BiasAdd')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -712,7 +712,7 @@ class Convolution2dTransposeTests(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(
ValueError, 'data_format has to be either NCHW or NHWC.'):
@@ -915,7 +915,7 @@ class Convolution2dTransposeTests(test.TestCase):
images, num_filters, [3, 3], stride=1, padding='SAME')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -929,7 +929,7 @@ class Convolution2dTransposeTests(test.TestCase):
images, num_filters, [3, 3], stride=1, padding='VALID')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -944,7 +944,7 @@ class Convolution2dTransposeTests(test.TestCase):
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -958,7 +958,7 @@ class Convolution2dTransposeTests(test.TestCase):
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -971,7 +971,7 @@ class Convolution2dTransposeTests(test.TestCase):
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -984,7 +984,7 @@ class Convolution2dTransposeTests(test.TestCase):
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -997,7 +997,7 @@ class Convolution2dTransposeTests(test.TestCase):
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -1010,7 +1010,7 @@ class Convolution2dTransposeTests(test.TestCase):
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 1], padding='VALID')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -1023,7 +1023,7 @@ class Convolution2dTransposeTests(test.TestCase):
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 4], padding='VALID')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -1036,7 +1036,7 @@ class Convolution2dTransposeTests(test.TestCase):
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 5], padding='VALID')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -1083,7 +1083,7 @@ class Convolution2dTransposeTests(test.TestCase):
images, num_filters, [3, 3], stride=[2, 2], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), expected_size)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
@@ -1095,7 +1095,7 @@ class Convolution2dTransposeTests(test.TestCase):
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 18, 22, num_filters]
- with self.test_session():
+ with self.cached_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.conv2d_transpose(
@@ -1116,7 +1116,7 @@ class Convolution2dTransposeTests(test.TestCase):
images, num_filters, [3, 3], stride=2, padding='VALID', scope='conv7')
self.assertEqual(output.op.name, 'conv7/Relu')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -1135,7 +1135,7 @@ class Convolution2dTransposeTests(test.TestCase):
scope='conv7')
self.assertEqual(output.op.name, 'conv7/BiasAdd')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
@@ -1146,7 +1146,7 @@ class Convolution2dTransposeTests(test.TestCase):
stride = 2
padding = 'VALID'
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform(input_size, seed=1)
output_deconv = layers_lib.conv2d_transpose(
images,
@@ -1184,7 +1184,7 @@ class ConvolutionInPlaneTest(test.TestCase):
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
expected = np.zeros((1, 10, 9, 1))
@@ -1201,7 +1201,7 @@ class ConvolutionInPlaneTest(test.TestCase):
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(
horz_gradients, feed_dict={
@@ -1225,7 +1225,7 @@ class ConvolutionInPlaneTest(test.TestCase):
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
@@ -1245,7 +1245,7 @@ class ConvolutionInPlaneTest(test.TestCase):
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
@@ -1267,7 +1267,7 @@ class ConvolutionInPlaneTest(test.TestCase):
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
@@ -1283,7 +1283,7 @@ class ConvolutionInPlaneTest(test.TestCase):
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(vert_gradients)
expected = np.zeros((1, 9, 10, 1))
@@ -1306,7 +1306,7 @@ class ConvolutionInPlaneTest(test.TestCase):
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(init_op)
result = sess.run(vert_gradients)
@@ -1314,7 +1314,7 @@ class ConvolutionInPlaneTest(test.TestCase):
def testConv1dShape(self):
width = 7
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, width, 3), seed=1)
output = layers_lib.convolution1d(images, 32, 3)
self.assertEqual(output.op.name, 'Conv/Relu')
@@ -1322,7 +1322,7 @@ class ConvolutionInPlaneTest(test.TestCase):
def testConvInferSpatialDims(self):
depth, height, width = 7, 9, 11
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, width, 4)).astype(np.float32)
output = layers_lib.convolution(images, 32, [3])
self.assertListEqual(output.get_shape().as_list(), [5, width, 32])
@@ -1344,7 +1344,7 @@ class DenseToSparseTest(test.TestCase):
sparse = _layers.dense_to_sparse(tensor)
dense = sparse_ops.sparse_to_dense(sparse.indices, sparse.dense_shape,
sparse.values)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
constant = sess.run(dense)
self.assertAllEqual(expected_constant, constant)
@@ -1353,7 +1353,7 @@ class DropoutTest(test.TestCase):
def testCreateDropout(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.dropout(images)
self.assertEqual(output.op.name, 'Dropout/dropout_1/mul')
@@ -1362,7 +1362,7 @@ class DropoutTest(test.TestCase):
def testCreateDropoutWithConstantTrue(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
is_training = constant_op.constant(True)
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, is_training=is_training)
@@ -1370,7 +1370,7 @@ class DropoutTest(test.TestCase):
def testCreateDropoutWithConstantFalse(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
is_training = constant_op.constant(False)
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, is_training=is_training)
@@ -1378,7 +1378,7 @@ class DropoutTest(test.TestCase):
def testCreateDropoutWithPlaceholder(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
is_training = array_ops.placeholder(dtype=dtypes.bool, shape=[])
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, is_training=is_training)
@@ -1387,7 +1387,7 @@ class DropoutTest(test.TestCase):
def testCollectOutputs(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, outputs_collections='outputs')
c_output = ops.get_collection('outputs')[0]
@@ -1396,7 +1396,7 @@ class DropoutTest(test.TestCase):
def testDropout(self):
height, width = 10, 10
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
num_elem_initial = math_ops.reduce_mean(math_ops.to_float(images > 0))
@@ -1409,7 +1409,7 @@ class DropoutTest(test.TestCase):
def testDropoutSeed(self):
"""Test that providing the same seed produces the same result."""
height, width = 10, 10
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output1 = _layers.dropout(images, seed=1)
@@ -1418,7 +1418,7 @@ class DropoutTest(test.TestCase):
def testCreateDropoutNoTraining(self):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
num_elem_initial = math_ops.reduce_mean(math_ops.to_float(images > 0))
@@ -1431,7 +1431,7 @@ class DropoutTest(test.TestCase):
def testCreateFCFollowByDropout(self):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.fully_connected(images, 50)
@@ -1445,7 +1445,7 @@ class DropoutTest(test.TestCase):
def testCreateFCWithDropout(self):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.fully_connected(
@@ -1475,7 +1475,7 @@ class FlattenTest(test.TestCase):
def testCollectOutputs(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.flatten(images, outputs_collections='outputs')
c_output = ops.get_collection('outputs')[0]
@@ -1484,7 +1484,7 @@ class FlattenTest(test.TestCase):
def testFlatten4D(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.flatten(images)
@@ -1494,7 +1494,7 @@ class FlattenTest(test.TestCase):
def testFlatten3D(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height, width), seed=1, name='images')
output = _layers.flatten(images)
@@ -1504,7 +1504,7 @@ class FlattenTest(test.TestCase):
def testFlattenBatchSize(self):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
inputs = array_ops.placeholder(dtypes.int32, (None, height, width, 3))
@@ -1516,7 +1516,7 @@ class FlattenTest(test.TestCase):
def testUnknownDims(self):
height = width = depth = 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, depth), seed=1, name='images')
inputs = array_ops.placeholder(dtypes.int32, (None, None, None, None))
@@ -1551,7 +1551,7 @@ class PartialFlattenTest(test.TestCase):
flattened_t = _layers._inner_flatten(inputs, new_rank)
static_shape = flattened_t.get_shape().as_list()
self.assertEqual(static_shape, expected_new_shape)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
flattened = sess.run(flattened_t)
np.testing.assert_array_equal(expected_flattened, flattened)
@@ -1571,7 +1571,7 @@ class PartialFlattenTest(test.TestCase):
flattened_t = _layers._inner_flatten(inputs_t, new_rank)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
flattened = sess.run(flattened_t)
np.testing.assert_array_equal(expected_indices, flattened.indices)
@@ -1641,7 +1641,7 @@ class FCTest(test.TestCase):
def testCreateFCWithScope(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
output = _layers.fully_connected(inputs, 32, scope='fc1')
self.assertEqual(output.op.name, 'fc1/Relu')
@@ -1659,7 +1659,7 @@ class FCTest(test.TestCase):
def testCreateFcCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
- with self.test_session():
+ with self.cached_session():
self.assertFalse(variables.get_variables('fc1/weights'))
self.assertFalse(variables.get_variables('fc1/biases'))
_layers.fully_connected(inputs, 32, scope='fc1')
@@ -1669,7 +1669,7 @@ class FCTest(test.TestCase):
def testReuseVars(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
- with self.test_session():
+ with self.cached_session():
_layers.fully_connected(inputs, 32, scope='fc1')
self.assertEqual(len(variables.get_variables('fc1')), 2)
_layers.fully_connected(inputs, 32, scope='fc1', reuse=True)
@@ -1678,7 +1678,7 @@ class FCTest(test.TestCase):
def testNonReuseVars(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
- with self.test_session():
+ with self.cached_session():
_layers.fully_connected(inputs, 32)
self.assertEqual(len(variables.get_variables('fully_connected')), 2)
_layers.fully_connected(inputs, 32)
@@ -1713,14 +1713,14 @@ class FCTest(test.TestCase):
def testCreateFCWithoutActivation(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
output = _layers.fully_connected(inputs, 32, activation_fn=None)
self.assertEqual(output.op.name, 'fully_connected/BiasAdd')
def testCreateFCWithWD(self):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
weight_decay = regularizers.l2_regularizer(0.01)
_layers.fully_connected(inputs, 32, weights_regularizer=weight_decay)
@@ -1732,7 +1732,7 @@ class FCTest(test.TestCase):
def testCreateFCWithBD(self):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
bias_decay = regularizers.l2_regularizer(0.01)
_layers.fully_connected(inputs, 32, biases_regularizer=bias_decay)
@@ -1744,7 +1744,7 @@ class FCTest(test.TestCase):
def testCreateNoRegularizers(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
_layers.fully_connected(inputs, 32)
self.assertEqual(
@@ -1752,7 +1752,7 @@ class FCTest(test.TestCase):
def testReuseFCWithWD(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
weight_decay = regularizers.l2_regularizer(0.01)
_layers.fully_connected(
@@ -1768,7 +1768,7 @@ class FCTest(test.TestCase):
def testFCWithBatchNorm(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height * width * 3), seed=1)
with arg_scope(
[_layers.fully_connected],
@@ -1786,7 +1786,7 @@ class FCTest(test.TestCase):
def testReuseFCWithBatchNorm(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height * width * 3), seed=1)
with arg_scope(
[_layers.fully_connected],
@@ -1844,7 +1844,7 @@ class BatchNormTest(test.TestCase):
if dtype is None:
dtype = dtypes.float32
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3)).astype(
dtype.as_numpy_dtype)
output = _layers.batch_norm(images, fused=fused)
@@ -1866,7 +1866,7 @@ class BatchNormTest(test.TestCase):
def _testCreateOpBetaRegularizer(self, fused=True):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
images = np.random.uniform(size=(5, height, width, 3)).astype('f')
_layers.batch_norm(images, param_regularizers={'beta': reg}, fused=fused)
@@ -1883,7 +1883,7 @@ class BatchNormTest(test.TestCase):
def _testCreateOpGammaRegularizer(self, fused=True):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
images = np.random.uniform(size=(5, height, width, 3)).astype('f')
_layers.batch_norm(
@@ -1901,7 +1901,7 @@ class BatchNormTest(test.TestCase):
def testCreateVariables(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True)
beta = variables.get_variables_by_name('beta')[0]
@@ -1915,7 +1915,7 @@ class BatchNormTest(test.TestCase):
def testMovingAverageVariables(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True)
self.assertEqual(len(variables.get_model_variables()), 4)
@@ -1926,7 +1926,7 @@ class BatchNormTest(test.TestCase):
def testMovingAverageVariablesZeroDebias(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(
images, scale=True, zero_debias_moving_mean=True, fused=False)
@@ -1943,7 +1943,7 @@ class BatchNormTest(test.TestCase):
def testUpdatesCollection(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, updates_collections='my_update_ops')
update_layers = ops.get_collection('my_update_ops')
@@ -1971,7 +1971,7 @@ class BatchNormTest(test.TestCase):
def testReuseVariables(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True, scope='bn')
_layers.batch_norm(images, scale=True, scope='bn', reuse=True)
@@ -1986,7 +1986,7 @@ class BatchNormTest(test.TestCase):
def testReuseUpdateOps(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with arg_scope([_layers.batch_norm], updates_collections='update_ops'):
_layers.batch_norm(images, scope='bn')
@@ -1996,7 +1996,7 @@ class BatchNormTest(test.TestCase):
def testCreateMovingVars(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_ = _layers.batch_norm(images)
moving_mean = variables.get_variables('BatchNorm/moving_mean')
@@ -2029,7 +2029,7 @@ class BatchNormTest(test.TestCase):
moving_variance = variables.get_variables_by_name('moving_variance')[0]
biased = variables.get_variables_by_name('biased')[0]
local_step = variables.get_variables_by_name('local_step')[0]
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertAllClose(local_step.eval(), 0)
self.assertAllClose(moving_mean.eval(), [0] * channels)
@@ -2213,7 +2213,7 @@ class BatchNormTest(test.TestCase):
def _testEvalMovingVars(self, zero_debias_moving_mean=False):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
@@ -2264,7 +2264,7 @@ class BatchNormTest(test.TestCase):
height, width = 3, 3
batch_size = 10
channels = 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
image_shape = (batch_size, height, width, channels)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
@@ -2435,7 +2435,7 @@ class BatchNormTest(test.TestCase):
def testNoUpdatesWhenIsTrainingFalse(self):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = constant_op.constant(
@@ -2460,7 +2460,7 @@ class BatchNormTest(test.TestCase):
def testNoneUpdatesCollectionNoTraining(self):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = constant_op.constant(
@@ -2647,7 +2647,7 @@ class BatchNormTest(test.TestCase):
def testCustomInitializer(self):
height, width = 3, 3
channels = 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = (np.ones((5, height, width, channels)) * 9.0).astype('f')
beta = init_ops.constant_initializer(
(np.ones(channels) * 5.0).astype('f'))
@@ -2728,7 +2728,7 @@ class BatchNormTest(test.TestCase):
def testBatchNormBeta(self):
# Test case for 11673
- with self.test_session() as sess:
+ with self.cached_session() as sess:
a_32 = array_ops.placeholder(dtypes.float32, shape=(10, 10, 10, 10))
_layers.batch_norm(
a_32, center=False, data_format='NCHW', zero_debias_moving_mean=True)
@@ -2739,7 +2739,7 @@ class BatchNormTest(test.TestCase):
def testVariablesAreFloat32(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.float16)
_layers.batch_norm(images, scale=True)
@@ -2824,7 +2824,7 @@ class LayerNormTest(test.TestCase):
def testCreateOp(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.layer_norm(images)
self.assertTrue(output.op.name.startswith('LayerNorm/batchnorm'))
@@ -2832,7 +2832,7 @@ class LayerNormTest(test.TestCase):
def testCreateVariables(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.layer_norm(images)
beta = variables.get_variables_by_name('beta')[0]
@@ -2842,7 +2842,7 @@ class LayerNormTest(test.TestCase):
def testReuseVariables(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.layer_norm(images, scope='ln')
_layers.layer_norm(images, scope='ln', reuse=True)
@@ -2853,7 +2853,7 @@ class LayerNormTest(test.TestCase):
def testReuseVars(self):
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = constant_op.constant(
@@ -2940,7 +2940,7 @@ class GDNTest(test.TestCase):
def _runGDN(self, x, shape, inverse, data_format):
inputs = array_ops.placeholder(dtypes.float32, shape)
outputs = _layers.gdn(inputs, inverse=inverse, data_format=data_format)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
y, = sess.run([outputs], {inputs: x})
return y
@@ -3152,14 +3152,14 @@ class MaxPool3DTest(test.TestCase):
class OneHotEncodingTest(test.TestCase):
def testOneHotEncodingCreate(self):
- with self.test_session():
+ with self.cached_session():
labels = np.array([0, 1, 2])
output = _layers.one_hot_encoding(labels, num_classes=3)
self.assertEqual(output.op.name, 'OneHotEncoding/one_hot')
self.assertListEqual(output.get_shape().as_list(), [3, 3])
def testCollectOutputs(self):
- with self.test_session():
+ with self.cached_session():
labels = constant_op.constant([0, 1, 2])
output = _layers.one_hot_encoding(
labels, num_classes=3, outputs_collections='outputs')
@@ -3168,14 +3168,14 @@ class OneHotEncodingTest(test.TestCase):
self.assertEqual(c_output, output)
def testOneHotEncoding(self):
- with self.test_session():
+ with self.cached_session():
labels = constant_op.constant([0, 1, 2])
one_hot_labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
output = _layers.one_hot_encoding(labels, num_classes=3)
self.assertAllClose(output.eval(), one_hot_labels.eval())
def testOneHotEncodingInt32(self):
- with self.test_session():
+ with self.cached_session():
labels = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
one_hot_labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
output = _layers.one_hot_encoding(labels, num_classes=3)
@@ -3186,7 +3186,7 @@ class RepeatTests(test.TestCase):
def testRepeat(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, height, width, 3)).astype(np.float32)
output = _layers.repeat(images, 3, layers_lib.conv2d, 32, [3, 3])
self.assertEqual(output.op.name, 'Repeat/convolution2d_3/Relu')
@@ -3194,7 +3194,7 @@ class RepeatTests(test.TestCase):
def testRepeatWithScope(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.repeat(
@@ -3207,7 +3207,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateConvInt32(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.int32, maxval=12345)
with self.assertRaisesRegexp(TypeError, 'non-floating point type'):
@@ -3215,7 +3215,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateConvFloat32(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.float32)
output = layers_lib.separable_conv2d(images, 32, [3, 3], 2)
@@ -3224,7 +3224,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateDepthwiseConv(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(images, None, [3, 3], 2)
self.assertEqual(output.op.name, 'SeparableConv2d/Relu')
@@ -3233,7 +3233,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
- with self.test_session():
+ with self.cached_session():
self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
@@ -3245,7 +3245,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateAtrousConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
- with self.test_session():
+ with self.cached_session():
self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
@@ -3257,7 +3257,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateDepthwiseConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
- with self.test_session():
+ with self.cached_session():
self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
@@ -3268,14 +3268,14 @@ class SeparableConv2dTest(test.TestCase):
def testCreateConvWithScope(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(images, 32, [3, 3], 6, scope='conv1')
self.assertEqual(output.op.name, 'conv1/Relu')
def testCreateConvWithoutActivation(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, 32, [3, 3], 8, activation_fn=None)
@@ -3283,7 +3283,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateConvValid(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, 32, [3, 3], 2, padding='VALID')
@@ -3291,7 +3291,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateAtrousConvValid(self):
height, width = 5, 5
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, 32, [3, 3], 2, padding='VALID', rate=2)
@@ -3299,7 +3299,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateDepthwiseConvValid(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, None, [3, 3], 2, padding='VALID')
@@ -3307,7 +3307,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateAtrousDepthwiseConvValid(self):
height, width = 5, 5
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, None, [3, 3], 2, padding='VALID', rate=2)
@@ -3316,7 +3316,7 @@ class SeparableConv2dTest(test.TestCase):
def testCreateConvWithWeightDecay(self):
random_seed.set_random_seed(0)
height, width = 3, 3
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = random_ops.random_uniform((5, height, width, 3), seed=1)
regularizer = regularizers.l2_regularizer(0.01)
layers_lib.separable_conv2d(
@@ -3360,7 +3360,7 @@ class SeparableConv2dTest(test.TestCase):
def testReuseConvWithWeightDecay(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
regularizer = regularizers.l2_regularizer(0.01)
layers_lib.separable_conv2d(
@@ -3419,7 +3419,7 @@ class SeparableConv2dTest(test.TestCase):
normalizer_params={},
scope='conv1')
init_op = variables_lib.global_variables_initializer()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
images = np.random.rand(5, height, width, 3)
sess.run(init_op)
sess.run(net, feed_dict={images_placeholder: images})
@@ -3440,7 +3440,7 @@ class SeparableConv2dTest(test.TestCase):
def testSepConvNCHW(self):
for num_filters, correct_output_filters in zip((None, 5), (6, 5)):
- with self.test_session():
+ with self.cached_session():
batch, height, width = 4, 10, 12
kernel_dim, stride = 3, 2
images = random_ops.random_uniform((batch, 3, height, width), seed=1)
@@ -3462,7 +3462,7 @@ class ScaleGradientTests(test.TestCase):
"""Simple tests of the scale_gradient function."""
def testBasic(self):
- with self.test_session():
+ with self.cached_session():
x = np.array([42], np.float32)
gradient_scale = np.array([2], np.float32)
@@ -3513,7 +3513,7 @@ class SoftmaxTests(test.TestCase):
exp_prediction = np.array([[self.low, self.high], [0.5, 0.5],
[self.high, self.low]])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
prediction = sess.run(prediction)
self.assertAllClose(exp_prediction, prediction)
@@ -3529,7 +3529,7 @@ class SoftmaxTests(test.TestCase):
exp_prediction[1, 1, 1] = self.low
prediction = _layers.softmax(logits)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
prediction = sess.run(prediction)
self.assertAllClose(exp_prediction, prediction)
@@ -3547,7 +3547,7 @@ class SoftmaxTests(test.TestCase):
exp_prediction[1, 1, 1] = self.low
prediction = _layers.softmax(logit_placeholder)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
prediction = sess.run(prediction, feed_dict=feed_dict)
self.assertAllClose(exp_prediction, prediction)
@@ -3575,7 +3575,7 @@ class SpatialSoftmaxTests(test.TestCase):
features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
np_features = np.zeros(batch_shape, dtype=np.float32)
spatial_softmax = _layers.spatial_softmax(features)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
@@ -3586,7 +3586,7 @@ class SpatialSoftmaxTests(test.TestCase):
features = array_ops.placeholder(dtypes.float32, shape=batch_shape)
np_features = np.zeros(batch_shape, dtype=np.float32)
spatial_softmax = _layers.spatial_softmax(features, data_format='NCHW')
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
@@ -3613,7 +3613,7 @@ class SpatialSoftmaxTests(test.TestCase):
nchannels)
# Make sure expected location keypoints matches actual location keypoints.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
@@ -3637,7 +3637,7 @@ class SpatialSoftmaxTests(test.TestCase):
nchannels)
# Make sure expected location keypoints matches actual location keypoints.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
@@ -3669,7 +3669,7 @@ class SpatialSoftmaxTests(test.TestCase):
batch_size, nchannels)
# Make sure expected location keypoints matches actual location keypoints.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features1}
tf_keypoints1 = sess.run(spatial_softmax, feed_dict)
@@ -3696,7 +3696,7 @@ class SpatialSoftmaxTests(test.TestCase):
nchannels)
# Make sure expected location keypoints matches actual location keypoints.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
@@ -3719,7 +3719,7 @@ class SpatialSoftmaxTests(test.TestCase):
nchannels)
# Make sure expected location keypoints matches actual location keypoints.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
keypoints = sess.run(spatial_softmax, feed_dict)
@@ -3731,7 +3731,7 @@ class SpatialSoftmaxTests(test.TestCase):
spatial_softmax = _layers.spatial_softmax(features)
net = _layers.fully_connected(spatial_softmax, 10)
np_features = np.zeros(batch_shape, dtype=np.float32)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables_lib.global_variables_initializer())
feed_dict = {features: np_features}
sess.run(net, feed_dict)
@@ -3741,7 +3741,7 @@ class StackTests(test.TestCase):
def testStackFullyConnected(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = np.random.uniform(size=(5, height * width * 3))
output = _layers.stack(images, _layers.fully_connected, [10, 20, 30])
self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')
@@ -3749,7 +3749,7 @@ class StackTests(test.TestCase):
def testStackFullyConnectedFailOnReuse(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
with variable_scope.variable_scope('test', reuse=True):
images = np.random.uniform(size=(5, height * width * 3))
with self.assertRaises(ValueError):
@@ -3757,7 +3757,7 @@ class StackTests(test.TestCase):
def testStackRelu(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height * width * 3), seed=1, name='images')
output = _layers.stack(images, layers_lib.relu, [10, 20, 30])
@@ -3766,7 +3766,7 @@ class StackTests(test.TestCase):
def testStackElu(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height * width * 3), seed=1, name='images')
output = _layers.stack(images, layers_lib.elu, [10, 20, 30])
@@ -3775,7 +3775,7 @@ class StackTests(test.TestCase):
def testStackConvolution2d(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.stack(
@@ -3788,7 +3788,7 @@ class StackTests(test.TestCase):
def testStackWithScope(self):
height, width = 3, 3
- with self.test_session():
+ with self.cached_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.stack(
@@ -3817,7 +3817,7 @@ class UnitNormTests(test.TestCase):
del shape[dim]
expected = np.ones(shape)
- with self.test_session():
+ with self.cached_session():
actual = norms.eval()
self.assertAllClose(expected, actual, 1e-4, 1e-4)
@@ -3849,7 +3849,7 @@ class UnitNormTests(test.TestCase):
norms = math_ops.sqrt(
math_ops.reduce_sum(math_ops.square(output), reduction_indices=dim))
- with self.test_session():
+ with self.cached_session():
actual = norms.eval({image: placeholder_value})
self.assertAllClose(expected, actual, 1e-4, 1e-4)
@@ -3875,7 +3875,7 @@ class PoincareNormalizeTest(test.TestCase):
x_np = np.random.random_sample(x_shape).astype(np.float32)
for dim in range(len(x_shape)):
y_np = self._PoincareNormalize(x_np, dim, epsilon)
- with self.test_session():
+ with self.cached_session():
x_tf = constant_op.constant(x_np, name='x')
y_tf = _layers.poincare_normalize(x_tf, dim, epsilon)
y_tf_eval = y_tf.eval()
@@ -3893,7 +3893,7 @@ class PoincareNormalizeTest(test.TestCase):
x_np = np.random.random_sample(x_shape).astype(np.float32)
dim = [1, 2]
y_np = self._PoincareNormalize(x_np, dim, epsilon)
- with self.test_session():
+ with self.cached_session():
x_tf = constant_op.constant(x_np, name='x')
y_tf = _layers.poincare_normalize(x_tf, dim, epsilon)
y_tf_eval = y_tf.eval()
@@ -3908,7 +3908,7 @@ class PoincareNormalizeTest(test.TestCase):
np.random.seed(1)
x_np = np.random.random_sample(x_shape).astype(np.float64)
for dim in range(len(x_shape)):
- with self.test_session():
+ with self.cached_session():
x_tf = constant_op.constant(x_np, name='x')
y_tf = _layers.poincare_normalize(x_tf, dim)
err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,
@@ -4117,7 +4117,7 @@ class LegacyFullyConnectedTest(test.TestCase):
# Empty x is common if someone masks their input with tf.boolean_mask in
# order to drop missing entries, and in a particular batch all entries are
# missing.
- with self.test_session():
+ with self.cached_session():
x = np.array([]).reshape(0, 3)
self.assertEqual(0, array_ops.size(x).eval())
y = _layers.legacy_fully_connected(x, 2, activation_fn=nn_ops.softmax)
@@ -4131,7 +4131,7 @@ class LegacyFullyConnectedTest(test.TestCase):
y = _layers.legacy_fully_connected(x, 1)
# in the output we still only know the 2nd and 3rd dimensions statically.
self.assertEqual(y.get_shape().as_list(), [None, 4, 1])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
# we can feed in input with first dimension 2
shape_value = sess.run(
@@ -4162,7 +4162,7 @@ class LegacyFullyConnectedTest(test.TestCase):
self._unknown_dim_invalid_input(last_dim=None)
def test_1d_invalid_input(self):
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError,
'rank of x must be at least 2 not: 1'):
x = constant_op.constant([[]], shape=[0])
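Every hunk in this file is the same one-line substitution. For reference, a minimal self-contained sketch of the pattern, assuming a TF 1.x `tf.test.TestCase` subclass (the test class and values here are illustrative, not taken from the diff):

import tensorflow as tf

class MigrationSketchTest(tf.test.TestCase):

  def test_eval_under_cached_session(self):
    # Old spelling (deprecated): with self.test_session(): ...
    # New spelling, as applied throughout the hunks above:
    with self.cached_session():
      # The context installs a default session, so .eval() works unchanged.
      self.assertAllClose([4.0], tf.constant([4.0]).eval())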
diff --git a/tensorflow/contrib/layers/python/layers/normalization_test.py b/tensorflow/contrib/layers/python/layers/normalization_test.py
index 55272e5fd1..c8d3c91b10 100644
--- a/tensorflow/contrib/layers/python/layers/normalization_test.py
+++ b/tensorflow/contrib/layers/python/layers/normalization_test.py
@@ -106,7 +106,7 @@ class InstanceNormTest(test.TestCase):
images = random_ops.random_uniform(image_shape, seed=1)
output_train = normalization.instance_norm(images, scope='IN')
output_eval = normalization.instance_norm(images, scope='IN', reuse=True)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
# output_train and output_eval should be the same.
train_np, eval_np = sess.run([output_train, output_eval])
@@ -130,7 +130,7 @@ class InstanceNormTest(test.TestCase):
inputs = random_ops.random_uniform(input_shape, seed=0) * sigma + mu
output_op = normalization.instance_norm(
inputs, center=False, scale=False, data_format=data_format)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
outputs = sess.run(output_op)
# Make sure that there are no NaNs
@@ -287,7 +287,7 @@ class GroupNormTest(test.TestCase):
output_train = normalization.group_norm(images, groups=2, scope='IN')
output_eval = normalization.group_norm(images, groups=2, scope='IN',
reuse=True)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
# output_train and output_eval should be the same.
train_np, eval_np = sess.run([output_train, output_eval])
@@ -349,7 +349,7 @@ class GroupNormTest(test.TestCase):
channels_axis=channels_axis,
reduction_axes=reduction_axes,
mean_close_to_zero=mean_close_to_zero)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
outputs = sess.run(output_op)
# Make sure that there are no NaNs
diff --git a/tensorflow/contrib/layers/python/layers/optimizers_test.py b/tensorflow/contrib/layers/python/layers/optimizers_test.py
index 0f037e24ad..29dede2a49 100644
--- a/tensorflow/contrib/layers/python/layers/optimizers_test.py
+++ b/tensorflow/contrib/layers/python/layers/optimizers_test.py
@@ -165,7 +165,7 @@ class OptimizersTest(test.TestCase):
def testGradientNoise(self):
random_seed.set_random_seed(42)
- with self.test_session() as session:
+ with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
@@ -182,7 +182,7 @@ class OptimizersTest(test.TestCase):
def testGradientNoiseWithClipping(self):
random_seed.set_random_seed(42)
- with self.test_session() as session:
+ with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
@@ -198,7 +198,7 @@ class OptimizersTest(test.TestCase):
self.assertEqual(global_step_value, 1)
def testGradientClip(self):
- with self.test_session() as session:
+ with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
@@ -213,7 +213,7 @@ class OptimizersTest(test.TestCase):
self.assertEqual(global_step_value, 1)
def testAdaptiveGradientClip(self):
- with self.test_session() as session:
+ with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
clip_gradients = optimizers_lib.adaptive_clipping_fn()
train = optimizers_lib.optimize_loss(
@@ -234,7 +234,7 @@ class OptimizersTest(test.TestCase):
self.assertEqual(2, var_count)
def testGradientMultiply(self):
- with self.test_session() as session:
+ with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
@@ -433,7 +433,7 @@ class OptimizersTest(test.TestCase):
class AdaptiveClipping(test.TestCase):
def testAverages(self):
- with self.test_session() as session:
+ with self.cached_session() as session:
scale = 2.
grad = array_ops.ones([3, 4]) * scale
log_norm = np.log(np.sqrt(scale**2 * grad.get_shape().num_elements()))
@@ -463,7 +463,7 @@ class AdaptiveClipping(test.TestCase):
self.assertAlmostEqual(float(sq_mean), log_norm**2, places=4)
def testClip(self):
- with self.test_session() as session:
+ with self.cached_session() as session:
spike = 1000.
multiplier = array_ops.placeholder(dtypes.float32, [], "multiplier")
step = array_ops.placeholder(dtypes.int32, [], "step")
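The optimizers tests above bind the session (`as session`) and run the variable initializer inside the context. A hedged sketch of why the substitution composes when a test enters the context more than once; that both `with` blocks yield the same cached session is an assumption about the caching behavior, not something shown in this diff:

import tensorflow as tf

class CachedSessionSketchTest(tf.test.TestCase):

  def test_state_survives_reentry(self):
    v = tf.Variable(3.0)
    with self.cached_session() as sess:
      sess.run(tf.global_variables_initializer())
    with self.cached_session() as sess:
      # Assumption: this second `with` yields the same cached session as the
      # first, so `v` is still initialized and needs no re-initialization.
      self.assertAllClose(3.0, sess.run(v))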
diff --git a/tensorflow/contrib/layers/python/layers/regularizers_test.py b/tensorflow/contrib/layers/python/layers/regularizers_test.py
index 07191eeda7..51faba30c7 100644
--- a/tensorflow/contrib/layers/python/layers/regularizers_test.py
+++ b/tensorflow/contrib/layers/python/layers/regularizers_test.py
@@ -71,7 +71,7 @@ class RegularizerTest(test.TestCase):
with self.assertRaises(ValueError):
regularizers.l1_l2_regularizer(0.5, 0)
- with self.test_session():
+ with self.cached_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
@@ -84,7 +84,7 @@ class RegularizerTest(test.TestCase):
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(0.0, 1.0)(tensor)
- with self.test_session():
+ with self.cached_session():
self.assertEquals(loss.op.name, 'l1_l2_regularizer')
self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
@@ -93,7 +93,7 @@ class RegularizerTest(test.TestCase):
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(1.0, 0.0)(tensor)
- with self.test_session():
+ with self.cached_session():
self.assertEquals(loss.op.name, 'l1_l2_regularizer')
self.assertAlmostEqual(loss.eval(), num_elem, 5)
@@ -104,7 +104,7 @@ class RegularizerTest(test.TestCase):
self.assertEquals(loss, None)
def testL1L2RegularizerWithScope(self):
- with self.test_session():
+ with self.cached_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
@@ -142,7 +142,7 @@ class RegularizerTest(test.TestCase):
array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
tensor_weights_list = [constant_op.constant(x) for x in array_weights_list]
expected = sum([2 * x for l in array_weights_list for x in l])
- with self.test_session():
+ with self.cached_session():
result = regularizers.apply_regularization(dummy_regularizer,
tensor_weights_list)
self.assertAllClose(expected, result.eval())
@@ -151,7 +151,7 @@ class RegularizerTest(test.TestCase):
regularizer = regularizers.l2_regularizer(0.0)
array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
tensor_weights_list = [constant_op.constant(x) for x in array_weights_list]
- with self.test_session():
+ with self.cached_session():
result = regularizers.apply_regularization(regularizer,
tensor_weights_list)
self.assertAllClose(0.0, result.eval())
@@ -161,7 +161,7 @@ class RegularizerTest(test.TestCase):
tensor_weights_list = [
constant_op.constant(x) for x in [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
]
- with self.test_session():
+ with self.cached_session():
with self.assertRaises(ValueError):
regularizers.apply_regularization(non_scalar_regularizer,
tensor_weights_list)
diff --git a/tensorflow/contrib/layers/python/layers/rev_block_lib_test.py b/tensorflow/contrib/layers/python/layers/rev_block_lib_test.py
index c34b5a8017..2c7463acc0 100644
--- a/tensorflow/contrib/layers/python/layers/rev_block_lib_test.py
+++ b/tensorflow/contrib/layers/python/layers/rev_block_lib_test.py
@@ -58,7 +58,7 @@ class RevBlockTest(test.TestCase):
y1, y2 = block.forward(x1, x2)
x1_inv, x2_inv = block.backward(y1, y2)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
x1, x2, x1_inv, x2_inv = sess.run([x1, x2, x1_inv, x2_inv])
@@ -81,7 +81,7 @@ class RevBlockTest(test.TestCase):
x1, x2 = block.backward(y1, y2)
y1_inv, y2_inv = block.forward(x1, x2)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
y1, y2, y1_inv, y2_inv = sess.run([y1, y2, y1_inv, y2_inv])
@@ -151,7 +151,7 @@ class RevBlockTest(test.TestCase):
grads_rev = gradients_impl.gradients(loss_rev, wrt)
grads = gradients_impl.gradients(loss, wrt)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
y_val, yd_val, gd_val, g_val = sess.run([y, y_rev, grads_rev, grads])
self.assertAllClose(y_val, yd_val)
@@ -286,7 +286,7 @@ class RecomputeTest(test.TestCase):
for out, scope_vars in outputs_and_vars:
all_grads.append(gradients_impl.gradients(out, scope_vars))
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
outputs = list(zip(*outputs_and_vars))[0]
outs, all_grads_val = sess.run([outputs, all_grads])
@@ -389,7 +389,7 @@ class RecomputeTest(test.TestCase):
layer_list.append(math_ops.sqrt(concat_n_wrap(*layer_list)))
grads = gradients_impl.gradients(layer_list[-1], layer_list[0])
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(grads)
def testErrorOnClosedOverTensor(self):
diff --git a/tensorflow/contrib/layers/python/layers/summaries_test.py b/tensorflow/contrib/layers/python/layers/summaries_test.py
index a1ef06feec..2ec2af9d44 100644
--- a/tensorflow/contrib/layers/python/layers/summaries_test.py
+++ b/tensorflow/contrib/layers/python/layers/summaries_test.py
@@ -29,19 +29,19 @@ from tensorflow.python.platform import test
class SummariesTest(test.TestCase):
def test_summarize_scalar_tensor(self):
- with self.test_session():
+ with self.cached_session():
scalar_var = variables.Variable(1)
summary_op = summaries_lib.summarize_tensor(scalar_var)
self.assertEquals(summary_op.op.type, 'ScalarSummary')
def test_summarize_multidim_tensor(self):
- with self.test_session():
+ with self.cached_session():
tensor_var = variables.Variable([1, 2, 3])
summary_op = summaries_lib.summarize_tensor(tensor_var)
self.assertEquals(summary_op.op.type, 'HistogramSummary')
def test_summarize_activation(self):
- with self.test_session():
+ with self.cached_session():
var = variables.Variable(1)
op = array_ops.identity(var, name='SummaryTest')
summary_op = summaries_lib.summarize_activation(op)
@@ -52,7 +52,7 @@ class SummariesTest(test.TestCase):
self.assertIn(u'SummaryTest/activation', names)
def test_summarize_activation_relu(self):
- with self.test_session():
+ with self.cached_session():
var = variables.Variable(1)
op = nn_ops.relu(var, name='SummaryTest')
summary_op = summaries_lib.summarize_activation(op)
@@ -64,7 +64,7 @@ class SummariesTest(test.TestCase):
self.assertIn(u'SummaryTest/activation', names)
def test_summarize_activation_relu6(self):
- with self.test_session():
+ with self.cached_session():
var = variables.Variable(1)
op = nn_ops.relu6(var, name='SummaryTest')
summary_op = summaries_lib.summarize_activation(op)
@@ -77,7 +77,7 @@ class SummariesTest(test.TestCase):
self.assertIn(u'SummaryTest/activation', names)
def test_summarize_collection_regex(self):
- with self.test_session():
+ with self.cached_session():
var = variables.Variable(1)
array_ops.identity(var, name='Test1')
ops.add_to_collection('foo', array_ops.identity(var, name='Test2'))
diff --git a/tensorflow/contrib/layers/python/layers/utils_test.py b/tensorflow/contrib/layers/python/layers/utils_test.py
index a9bd89532a..34f63f5d86 100644
--- a/tensorflow/contrib/layers/python/layers/utils_test.py
+++ b/tensorflow/contrib/layers/python/layers/utils_test.py
@@ -42,7 +42,7 @@ class ConstantValueTest(test.TestCase):
c = constant_op.constant(v)
value = utils.constant_value(c)
self.assertEqual(value, v)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(c.eval(), v)
def test_variable(self):
@@ -60,7 +60,7 @@ class ConstantValueTest(test.TestCase):
x = array_ops.identity(p)
value = utils.constant_value(p)
self.assertEqual(value, None)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(x.eval(feed_dict={p: v}), v)
@@ -80,7 +80,7 @@ class StaticCondTest(test.TestCase):
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(o.eval(), expected(v))
def test_variable(self):
@@ -89,7 +89,7 @@ class StaticCondTest(test.TestCase):
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(), expected(v))
@@ -99,7 +99,7 @@ class StaticCondTest(test.TestCase):
expected = lambda v: -1 if v else -2
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(o.eval(), expected(v))
@@ -119,7 +119,7 @@ class SmartCondStaticTest(test.TestCase):
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(o.eval(), expected(v))
def test_variable(self):
@@ -128,7 +128,7 @@ class SmartCondStaticTest(test.TestCase):
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(), expected(v))
@@ -138,7 +138,7 @@ class SmartCondStaticTest(test.TestCase):
expected = lambda v: -1 if v else -2
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(o.eval(), expected(v))
@@ -151,7 +151,7 @@ class SmartCondDynamicTest(test.TestCase):
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_constant(self):
@@ -161,7 +161,7 @@ class SmartCondDynamicTest(test.TestCase):
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_variable(self):
@@ -171,7 +171,7 @@ class SmartCondDynamicTest(test.TestCase):
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
@@ -182,7 +182,7 @@ class SmartCondDynamicTest(test.TestCase):
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
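Since the substitution is purely textual, a change of this shape can be produced mechanically. A hypothetical helper in the same spirit (the root path and glob are assumptions, and this is a sketch rather than the tool actually used for this commit):

import pathlib

# Rewrite the deprecated spelling across a test tree; replacing up to the
# open paren preserves any arguments passed at each call site.
ROOT = pathlib.Path('tensorflow/contrib/layers')  # assumed starting point
for path in ROOT.rglob('*_test.py'):
  text = path.read_text()
  new_text = text.replace('self.test_session(', 'self.cached_session(')
  if new_text != text:
    path.write_text(new_text)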