author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-09-10 14:37:06 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-09-10 15:04:14 -0700
commit     b828f89263e054bfa7c7a808cab1506834ab906d (patch)
tree       e31816a6850d177306f19ee8670e0836060fcfc9 /tensorflow
parent     acf0ee82092727afc2067316982407cf5e496f75 (diff)
Move from deprecated self.test_session() to self.cached_session().
self.test_session() has been deprecated in 9962eb5e84b15e309410071b06c2ed2d6148ed44 as its name confuses readers of the test. Moving to cached_session() instead which is more explicit about:
* the fact that the session may be reused.
* the session is not closed even when doing a "with self.test_session()" statement.

PiperOrigin-RevId: 212336464
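For reference, a minimal sketch (not part of this change) of the mechanical pattern the diff below applies in each test file. The ExampleTest class and the toy constant ops are hypothetical; only self.test_session() and self.cached_session() come from the commit itself.

import tensorflow as tf


class ExampleTest(tf.test.TestCase):

  def testOldStyle(self):
    # Deprecated spelling: the name suggests a fresh session per test, but the
    # returned session may be cached and is not closed on exiting the `with`.
    with self.test_session() as sess:
      self.assertEqual(3, sess.run(tf.constant(1) + tf.constant(2)))

  def testNewStyle(self):
    # Preferred spelling: cached_session() makes the reuse/no-close behavior
    # explicit while keeping the same usage pattern.
    with self.cached_session() as sess:
      self.assertEqual(3, sess.run(tf.constant(1) + tf.constant(2)))


if __name__ == "__main__":
  tf.test.main()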
Diffstat (limited to 'tensorflow')
-rw-r--r--  tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py            4
-rw-r--r--  tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py              8
-rw-r--r--  tensorflow/contrib/constrained_optimization/python/external_regret_optimizer_test.py   4
-rw-r--r--  tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer_test.py      10
-rw-r--r--  tensorflow/contrib/data/python/kernel_tests/optimization/latency_all_edges_test.py     2
-rw-r--r--  tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py 4
-rw-r--r--  tensorflow/contrib/eager/python/evaluator_test.py                                       4
-rw-r--r--  tensorflow/contrib/eager/python/metrics_test.py                                         4
-rw-r--r--  tensorflow/contrib/framework/python/framework/checkpoint_utils_test.py                18
-rw-r--r--  tensorflow/contrib/framework/python/framework/tensor_util_test.py                     20
-rw-r--r--  tensorflow/contrib/gan/python/losses/python/losses_impl_test.py                       52
-rw-r--r--  tensorflow/contrib/gan/python/losses/python/tuple_losses_test.py                       8
-rw-r--r--  tensorflow/contrib/learn/python/learn/ops/ops_test.py                                   6
-rw-r--r--  tensorflow/contrib/learn/python/learn/ops/seq2seq_ops_test.py                           6
-rw-r--r--  tensorflow/contrib/specs/python/specs_test.py                                          22
-rw-r--r--  tensorflow/contrib/specs/python/summaries_test.py                                       8
-rw-r--r--  tensorflow/python/data/util/convert_test.py                                            16
-rw-r--r--  tensorflow/python/data/util/sparse_test.py                                              2
-rw-r--r--  tensorflow/python/estimator/canned/boosted_trees_test.py                               16
-rw-r--r--  tensorflow/python/estimator/canned/head_test.py                                       208
-rw-r--r--  tensorflow/python/estimator/inputs/numpy_io_test.py                                    34
-rw-r--r--  tensorflow/python/estimator/inputs/pandas_io_test.py                                   24
-rw-r--r--  tensorflow/python/training/checkpointable/tracking_test.py                              2
-rw-r--r--  tensorflow/python/training/checkpointable/util_test.py                                  2
24 files changed, 242 insertions, 242 deletions
diff --git a/tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py b/tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py
index 4278a30ba9..46dfbdefeb 100644
--- a/tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py
+++ b/tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py
@@ -331,7 +331,7 @@ class PredictionOpsTest(test_util.TensorFlowTestCase):
self.assertAllEqual([[], []], dropout_info.eval())
def testObliviousEnsemble(self):
- with self.test_session():
+ with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
@@ -1399,7 +1399,7 @@ class PartitionExamplesOpsTest(test_util.TensorFlowTestCase):
self.assertAllEqual([0, 0], result.eval())
def testObliviousTreeNonFinalized(self):
- with self.test_session():
+ with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Depth 3 tree.
tree1 = tree_ensemble_config.trees.add()
diff --git a/tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py b/tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py
index b3e4c2e5f7..86fd5770a0 100644
--- a/tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py
+++ b/tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py
@@ -411,7 +411,7 @@ class GrowTreeEnsembleOpTest(test_util.TensorFlowTestCase):
def testGrowEmptyEnsembleObliviousCase(self):
"""Test growing an empty ensemble in the oblivious case."""
- with self.test_session() as session:
+ with self.cached_session() as session:
# Create empty ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
@@ -1620,7 +1620,7 @@ class GrowTreeEnsembleOpTest(test_util.TensorFlowTestCase):
def testGrowEnsembleTreeLayerByLayerObliviousCase(self):
"""Test growing an existing ensemble with the last tree not finalized."""
- with self.test_session() as session:
+ with self.cached_session() as session:
# Create existing ensemble with one root split
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
@@ -1810,7 +1810,7 @@ class GrowTreeEnsembleOpTest(test_util.TensorFlowTestCase):
def testGrowEnsembleWithEmptyNodesMiddleCase(self):
"""Test case: The middle existing leaves don't have examples."""
- with self.test_session() as session:
+ with self.cached_session() as session:
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
@@ -2071,7 +2071,7 @@ class GrowTreeEnsembleOpTest(test_util.TensorFlowTestCase):
def testGrowEnsembleWithEmptyNodesBorderCase(self):
"""Test case: The first and last existing leaves don't have examples."""
- with self.test_session() as session:
+ with self.cached_session() as session:
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(
"""
diff --git a/tensorflow/contrib/constrained_optimization/python/external_regret_optimizer_test.py b/tensorflow/contrib/constrained_optimization/python/external_regret_optimizer_test.py
index 9b4bf62710..3e25079e02 100644
--- a/tensorflow/contrib/constrained_optimization/python/external_regret_optimizer_test.py
+++ b/tensorflow/contrib/constrained_optimization/python/external_regret_optimizer_test.py
@@ -75,7 +75,7 @@ class ExternalRegretOptimizerTest(test.TestCase):
multipliers3 = standard_ops.constant([0.4, 0.7, -0.2, 0.5, 0.1])
expected_projected_multipliers3 = np.array([0.2, 0.5, 0.0, 0.3, 0.0])
- with self.test_session() as session:
+ with self.cached_session() as session:
projected_multipliers1 = session.run(
external_regret_optimizer._project_multipliers_wrt_euclidean_norm(
multipliers1, 1.0))
@@ -122,7 +122,7 @@ class ExternalRegretOptimizerTest(test.TestCase):
]
multipliers = []
- with self.test_session() as session:
+ with self.cached_session() as session:
session.run(standard_ops.global_variables_initializer())
while len(multipliers) < len(expected_multipliers):
multipliers.append(session.run(optimizer.lagrange_multipliers))
diff --git a/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer_test.py b/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer_test.py
index 34c4543dca..df0eced631 100644
--- a/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer_test.py
+++ b/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer_test.py
@@ -97,7 +97,7 @@ class SwapRegretOptimizerTest(test.TestCase):
matrix1 = np.matrix([[0.6, 0.1, 0.1], [0.0, 0.6, 0.9], [0.4, 0.3, 0.0]])
matrix2 = np.matrix([[0.4, 0.4, 0.2], [0.2, 0.1, 0.5], [0.4, 0.5, 0.3]])
- with self.test_session() as session:
+ with self.cached_session() as session:
eigenvector1 = session.run(
swap_regret_optimizer._maximal_eigenvector_power_method(
standard_ops.constant(matrix1)))
@@ -119,7 +119,7 @@ class SwapRegretOptimizerTest(test.TestCase):
expected_projected_matrix = np.array([[0.6, 0.1, 0.1], [0.0, 0.6, 0.9],
[0.4, 0.3, 0.0]])
- with self.test_session() as session:
+ with self.cached_session() as session:
projected_matrix = session.run(
swap_regret_optimizer._project_stochastic_matrix_wrt_euclidean_norm(
matrix))
@@ -134,7 +134,7 @@ class SwapRegretOptimizerTest(test.TestCase):
expected_projected_matrix = np.array([[0.4, 0.4, 0.2], [0.2, 0.1, 0.5],
[0.4, 0.5, 0.3]])
- with self.test_session() as session:
+ with self.cached_session() as session:
projected_matrix = session.run(
standard_ops.exp(
swap_regret_optimizer.
@@ -165,7 +165,7 @@ class SwapRegretOptimizerTest(test.TestCase):
]
matrices = []
- with self.test_session() as session:
+ with self.cached_session() as session:
session.run(standard_ops.global_variables_initializer())
while len(matrices) < len(expected_matrices):
matrices.append(session.run(optimizer.stochastic_matrix))
@@ -198,7 +198,7 @@ class SwapRegretOptimizerTest(test.TestCase):
]
matrices = []
- with self.test_session() as session:
+ with self.cached_session() as session:
session.run(standard_ops.global_variables_initializer())
while len(matrices) < len(expected_matrices):
matrices.append(session.run(optimizer.stochastic_matrix))
diff --git a/tensorflow/contrib/data/python/kernel_tests/optimization/latency_all_edges_test.py b/tensorflow/contrib/data/python/kernel_tests/optimization/latency_all_edges_test.py
index 1850b6921a..db380c02a9 100644
--- a/tensorflow/contrib/data/python/kernel_tests/optimization/latency_all_edges_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/optimization/latency_all_edges_test.py
@@ -40,7 +40,7 @@ class OptimizeStatsDatasetTest(stats_dataset_test_base.StatsDatasetTestBase):
get_next = iterator.get_next()
summary_t = stats_aggregator.get_summary()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(iterator.initializer)
self.assertEqual(1 * 1, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
diff --git a/tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py b/tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py
index 6a7ef877f9..dde115925e 100644
--- a/tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py
@@ -74,7 +74,7 @@ class MapAndFilterFusionTest(test.TestCase, parameterized.TestCase):
dataset = dataset.prefetch(0).apply(optimization.optimize(["map_fusion"]))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
for x in range(5):
result = sess.run(get_next)
r = x
@@ -131,7 +131,7 @@ class MapAndFilterFusionTest(test.TestCase, parameterized.TestCase):
def _testMapAndFilter(self, dataset, function, predicate):
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
- with self.test_session() as sess:
+ with self.cached_session() as sess:
for x in range(10):
r = function(x)
if isinstance(r, tuple):
diff --git a/tensorflow/contrib/eager/python/evaluator_test.py b/tensorflow/contrib/eager/python/evaluator_test.py
index 7d2274db9b..48d093e075 100644
--- a/tensorflow/contrib/eager/python/evaluator_test.py
+++ b/tensorflow/contrib/eager/python/evaluator_test.py
@@ -117,7 +117,7 @@ class EvaluatorTest(test.TestCase):
self.assertEqual(6.0, results["mean"].numpy())
def testDatasetGraph(self):
- with context.graph_mode(), ops.Graph().as_default(), self.test_session():
+ with context.graph_mode(), ops.Graph().as_default(), self.cached_session():
e = SimpleEvaluator(IdentityModel())
ds = dataset_ops.Dataset.from_tensor_slices([3.0, 5.0, 7.0, 9.0])
init_op, call_op, results_op = e.evaluate_on_dataset(ds)
@@ -126,7 +126,7 @@ class EvaluatorTest(test.TestCase):
self.assertEqual(6.0, results["mean"])
def testWriteSummariesGraph(self):
- with context.graph_mode(), ops.Graph().as_default(), self.test_session():
+ with context.graph_mode(), ops.Graph().as_default(), self.cached_session():
e = SimpleEvaluator(IdentityModel())
ds = dataset_ops.Dataset.from_tensor_slices([3.0, 5.0, 7.0, 9.0])
training_util.get_or_create_global_step()
diff --git a/tensorflow/contrib/eager/python/metrics_test.py b/tensorflow/contrib/eager/python/metrics_test.py
index dcc7b71d79..9d2d172752 100644
--- a/tensorflow/contrib/eager/python/metrics_test.py
+++ b/tensorflow/contrib/eager/python/metrics_test.py
@@ -216,7 +216,7 @@ class MetricsTest(test.TestCase):
self.assertEqual(m1.numer.name, "has_space/numer:0")
def testGraphWithPlaceholder(self):
- with context.graph_mode(), self.test_session() as sess:
+ with context.graph_mode(), self.cached_session() as sess:
m = metrics.Mean()
p = array_ops.placeholder(dtypes.float32)
accumulate = m(p)
@@ -309,7 +309,7 @@ class MetricsTest(test.TestCase):
self.assertTrue(old_numer is m.numer)
def testMetricsChain(self):
- with context.graph_mode(), self.test_session():
+ with context.graph_mode(), self.cached_session():
m1 = metrics.Mean()
m2 = metrics.Mean(name="m2")
update_m2 = m2(3.0)
diff --git a/tensorflow/contrib/framework/python/framework/checkpoint_utils_test.py b/tensorflow/contrib/framework/python/framework/checkpoint_utils_test.py
index 4f591367fd..77a424145a 100644
--- a/tensorflow/contrib/framework/python/framework/checkpoint_utils_test.py
+++ b/tensorflow/contrib/framework/python/framework/checkpoint_utils_test.py
@@ -82,7 +82,7 @@ class CheckpointsTest(test.TestCase):
def testNoTensor(self):
checkpoint_dir = self.get_temp_dir()
- with self.test_session() as session:
+ with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
@@ -90,7 +90,7 @@ class CheckpointsTest(test.TestCase):
def testGetTensor(self):
checkpoint_dir = self.get_temp_dir()
- with self.test_session() as session:
+ with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
@@ -103,7 +103,7 @@ class CheckpointsTest(test.TestCase):
def testGetAllVariables(self):
checkpoint_dir = self.get_temp_dir()
- with self.test_session() as session:
+ with self.cached_session() as session:
_create_checkpoints(session, checkpoint_dir)
self.assertEqual(
checkpoint_utils.list_variables(checkpoint_dir),
@@ -112,7 +112,7 @@ class CheckpointsTest(test.TestCase):
def testInitFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
- with self.test_session() as session:
+ with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
@@ -146,7 +146,7 @@ class CheckpointsTest(test.TestCase):
def testInitWithScopeDoesNotCaptureSuffixes(self):
checkpoint_dir = self.get_temp_dir()
- with self.test_session() as session:
+ with self.cached_session() as session:
_, _, _, v4 = _create_checkpoints(session, checkpoint_dir)
with ops.Graph().as_default() as g:
@@ -165,7 +165,7 @@ class CheckpointsTest(test.TestCase):
def testInitFromRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
- with self.test_session() as session:
+ with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
@@ -189,7 +189,7 @@ class CheckpointsTest(test.TestCase):
def testInitToRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
- with self.test_session() as session:
+ with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
@@ -212,7 +212,7 @@ class CheckpointsTest(test.TestCase):
def testInitFromPartitionVar(self):
checkpoint_dir = self.get_temp_dir()
- with self.test_session() as session:
+ with self.cached_session() as session:
v1 = _create_partition_checkpoints(session, checkpoint_dir)
# New graph and session.
@@ -266,7 +266,7 @@ class CheckpointsTest(test.TestCase):
def testInitFromCheckpointMissing(self):
checkpoint_dir = self.get_temp_dir()
- with self.test_session() as session:
+ with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
diff --git a/tensorflow/contrib/framework/python/framework/tensor_util_test.py b/tensorflow/contrib/framework/python/framework/tensor_util_test.py
index 2479fe5b8d..b1820c10c8 100644
--- a/tensorflow/contrib/framework/python/framework/tensor_util_test.py
+++ b/tensorflow/contrib/framework/python/framework/tensor_util_test.py
@@ -39,7 +39,7 @@ from tensorflow.python.platform import test
class LocalVariabletest(test.TestCase):
def test_local_variable(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
@@ -55,7 +55,7 @@ class LocalVariabletest(test.TestCase):
class ReduceSumNTest(test.TestCase):
def test_reduce_sum_n(self):
- with self.test_session():
+ with self.cached_session():
a = constant_op.constant(1)
b = constant_op.constant([2])
c = constant_op.constant([[3, 4], [5, 6]])
@@ -119,13 +119,13 @@ class WithShapeTest(test.TestCase):
}))
def test_with_shape_invalid_expected_shape(self):
- with self.test_session():
+ with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid rank",
tensor_util.with_shape, [[1], [2]],
constant_op.constant(1.0))
def test_with_shape_invalid_type(self):
- with self.test_session():
+ with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape, [1.1],
constant_op.constant([1.0]))
@@ -138,7 +138,7 @@ class WithShapeTest(test.TestCase):
constant_op.constant(1.0))
def test_with_shape_0(self):
- with self.test_session():
+ with self.cached_session():
value = 42
shape = [0]
unexpected_shapes = [[1], [2], [1, 1]]
@@ -150,7 +150,7 @@ class WithShapeTest(test.TestCase):
unexpected_shapes)
def test_with_shape_1(self):
- with self.test_session():
+ with self.cached_session():
value = [42]
shape = [1]
unexpected_shapes = [[0], [2], [1, 1]]
@@ -162,7 +162,7 @@ class WithShapeTest(test.TestCase):
unexpected_shapes)
def test_with_shape_2(self):
- with self.test_session():
+ with self.cached_session():
value = [42, 43]
shape = [2]
unexpected_shapes = [[0], [1], [2, 1]]
@@ -174,7 +174,7 @@ class WithShapeTest(test.TestCase):
unexpected_shapes)
def test_with_shape_2x2(self):
- with self.test_session():
+ with self.cached_session():
value = [[42, 43], [44, 45]]
shape = [2, 2]
unexpected_shapes = [[0], [1], [2, 1]]
@@ -196,7 +196,7 @@ class WithShapeTest(test.TestCase):
np.testing.assert_array_equal(value, tensor_with_shape.eval())
def test_with_shape_none(self):
- with self.test_session():
+ with self.cached_session():
tensor_no_shape = array_ops.placeholder(dtypes.float32)
compatible_shape = [2, 2]
@@ -220,7 +220,7 @@ class WithShapeTest(test.TestCase):
@test_util.enable_c_shapes
def test_with_shape_partial(self):
- with self.test_session():
+ with self.cached_session():
tensor_partial_shape = array_ops.placeholder(dtypes.float32)
tensor_partial_shape.set_shape([None, 2])
diff --git a/tensorflow/contrib/gan/python/losses/python/losses_impl_test.py b/tensorflow/contrib/gan/python/losses/python/losses_impl_test.py
index 9f5fee4542..e3c780ac1a 100644
--- a/tensorflow/contrib/gan/python/losses/python/losses_impl_test.py
+++ b/tensorflow/contrib/gan/python/losses/python/losses_impl_test.py
@@ -51,7 +51,7 @@ class _LossesTest(object):
loss = self._g_loss_fn(self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
@@ -59,7 +59,7 @@ class _LossesTest(object):
self._discriminator_real_outputs, self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
@@ -90,7 +90,7 @@ class _LossesTest(object):
loss = self._g_loss_fn(
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
@@ -98,7 +98,7 @@ class _LossesTest(object):
array_ops.reshape(self._discriminator_real_outputs, [2, 2]),
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
@@ -108,7 +108,7 @@ class _LossesTest(object):
loss = self._g_loss_fn(logits, weights=weights)
self.assertEqual(logits.dtype, loss.dtype)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [[10.0, 4.4, -5.5, 3.6]],
@@ -125,7 +125,7 @@ class _LossesTest(object):
logits, logits2, real_weights=real_weights,
generated_weights=generated_weights)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [self._discriminator_real_outputs_np],
@@ -136,7 +136,7 @@ class _LossesTest(object):
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(
self._discriminator_gen_outputs, weights=self._weights)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
@@ -144,14 +144,14 @@ class _LossesTest(object):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=self._weights, generated_weights=self._weights)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(self._discriminator_gen_outputs,
weights=constant_op.constant(self._weights))
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
@@ -160,7 +160,7 @@ class _LossesTest(object):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=weights, generated_weights=weights)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
@@ -284,7 +284,7 @@ class ACGANLossTest(test.TestCase):
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
@@ -292,7 +292,7 @@ class ACGANLossTest(test.TestCase):
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
@@ -319,14 +319,14 @@ class ACGANLossTest(test.TestCase):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._generator_kwargs.items()}
loss = self._g_loss_fn(**patch_args)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._discriminator_kwargs.items()}
loss = self._d_loss_fn(**patch_args)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
@@ -334,7 +334,7 @@ class ACGANLossTest(test.TestCase):
one_hot_labels = array_ops.placeholder(dtypes.int32, shape=(None, 4))
loss = self._g_loss_fn(gen_logits, one_hot_labels)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
@@ -349,7 +349,7 @@ class ACGANLossTest(test.TestCase):
loss = self._d_loss_fn(gen_logits, real_logits, one_hot_labels)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
@@ -360,7 +360,7 @@ class ACGANLossTest(test.TestCase):
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(weights=self._weights, **self._generator_kwargs)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
@@ -368,14 +368,14 @@ class ACGANLossTest(test.TestCase):
loss = self._d_loss_fn(
real_weights=self._weights, generated_weights=self._weights,
**self._discriminator_kwargs)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(
weights=constant_op.constant(self._weights), **self._generator_kwargs)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
@@ -383,7 +383,7 @@ class ACGANLossTest(test.TestCase):
weights = constant_op.constant(self._weights)
loss = self._d_loss_fn(real_weights=weights, generated_weights=weights,
**self._discriminator_kwargs)
- with self.test_session():
+ with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
@@ -404,7 +404,7 @@ class _PenaltyTest(object):
loss = self._penalty_fn(**self._kwargs)
self.assertEqual(self._expected_dtype, loss.dtype)
self.assertEqual(self._expected_op_name, loss.op.name)
- with self.test_session():
+ with self.cached_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss, loss.eval(), 6)
@@ -419,13 +419,13 @@ class _PenaltyTest(object):
def test_python_scalar_weight(self):
loss = self._penalty_fn(weights=2.3, **self._kwargs)
- with self.test_session():
+ with self.cached_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
def test_scalar_tensor_weight(self):
loss = self._penalty_fn(weights=constant_op.constant(2.3), **self._kwargs)
- with self.test_session():
+ with self.cached_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
@@ -472,7 +472,7 @@ class GradientPenaltyTest(test.TestCase, _PenaltyTest):
self._kwargs['discriminator_scope'])
self.assertEqual(generated_data.dtype, loss.dtype)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(loss,
feed_dict={
@@ -494,7 +494,7 @@ class GradientPenaltyTest(test.TestCase, _PenaltyTest):
one_sided=True)
self.assertEqual(generated_data.dtype, loss.dtype)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(loss,
feed_dict={
@@ -516,7 +516,7 @@ class GradientPenaltyTest(test.TestCase, _PenaltyTest):
self._kwargs['discriminator_scope'],
target=2.0)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(
loss,
diff --git a/tensorflow/contrib/gan/python/losses/python/tuple_losses_test.py b/tensorflow/contrib/gan/python/losses/python/tuple_losses_test.py
index a559bbfa11..25d74a8c23 100644
--- a/tensorflow/contrib/gan/python/losses/python/tuple_losses_test.py
+++ b/tensorflow/contrib/gan/python/losses/python/tuple_losses_test.py
@@ -118,7 +118,7 @@ def add_loss_consistency_test(test_class, loss_name_str, loss_args):
def consistency_test(self):
self.assertEqual(arg_loss.__name__, tuple_loss.__name__)
- with self.test_session():
+ with self.cached_session():
self.assertEqual(arg_loss(**loss_args).eval(),
tuple_loss(_tuple_from_dict(loss_args)).eval())
@@ -241,7 +241,7 @@ class StarGANLossWrapperTest(test.TestCase):
self.discriminator_generated_data_source_predication)
wrapped_loss_result_tensor = wrapped_loss_fn(self.model)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loss_result, wrapped_loss_result = sess.run(
[loss_result_tensor, wrapped_loss_result_tensor])
@@ -257,7 +257,7 @@ class StarGANLossWrapperTest(test.TestCase):
self.discriminator_generated_data_source_predication)
wrapped_loss_result_tensor = wrapped_loss_fn(self.model)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loss_result, wrapped_loss_result = sess.run(
[loss_result_tensor, wrapped_loss_result_tensor])
@@ -282,7 +282,7 @@ class StarGANLossWrapperTest(test.TestCase):
discriminator_scope=self.discriminator_scope)
wrapped_loss_result_tensor = wrapped_loss_fn(self.model)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
loss_result, wrapped_loss_result = sess.run(
[loss_result_tensor, wrapped_loss_result_tensor])
diff --git a/tensorflow/contrib/learn/python/learn/ops/ops_test.py b/tensorflow/contrib/learn/python/learn/ops/ops_test.py
index 80d4923db3..ff190110c1 100644
--- a/tensorflow/contrib/learn/python/learn/ops/ops_test.py
+++ b/tensorflow/contrib/learn/python/learn/ops/ops_test.py
@@ -33,7 +33,7 @@ class OpsTest(test.TestCase):
"""Ops tests."""
def test_softmax_classifier(self):
- with self.test_session() as session:
+ with self.cached_session() as session:
features = array_ops.placeholder(dtypes.float32, [None, 3])
labels = array_ops.placeholder(dtypes.float32, [None, 2])
weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])
@@ -52,7 +52,7 @@ class OpsTest(test.TestCase):
ids_shape = (2, 3, 4)
embeds = np.random.randn(n_embed, d_embed)
ids = np.random.randint(0, n_embed, ids_shape)
- with self.test_session():
+ with self.cached_session():
embed_np = embeds[ids]
embed_tf = ops.embedding_lookup(embeds, ids).eval()
self.assertEqual(embed_np.shape, embed_tf.shape)
@@ -60,7 +60,7 @@ class OpsTest(test.TestCase):
def test_categorical_variable(self):
random_seed.set_random_seed(42)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
cat_var_idx = array_ops.placeholder(dtypes.int64, [2, 2])
embeddings = ops.categorical_variable(
cat_var_idx, n_classes=5, embedding_size=10, name="my_cat_var")
diff --git a/tensorflow/contrib/learn/python/learn/ops/seq2seq_ops_test.py b/tensorflow/contrib/learn/python/learn/ops/seq2seq_ops_test.py
index 95aec61955..5a7e4ebfea 100644
--- a/tensorflow/contrib/learn/python/learn/ops/seq2seq_ops_test.py
+++ b/tensorflow/contrib/learn/python/learn/ops/seq2seq_ops_test.py
@@ -31,7 +31,7 @@ class Seq2SeqOpsTest(test.TestCase):
"""Sequence-to-sequence tests."""
def test_sequence_classifier(self):
- with self.test_session() as session:
+ with self.cached_session() as session:
decoding = [
array_ops.placeholder(dtypes.float32, [2, 2]) for _ in range(3)
]
@@ -60,7 +60,7 @@ class Seq2SeqOpsTest(test.TestCase):
def test_seq2seq_inputs(self):
inp = np.array([[[1, 0], [0, 1], [1, 0]], [[0, 1], [1, 0], [0, 1]]])
out = np.array([[[0, 1, 0], [1, 0, 0]], [[1, 0, 0], [0, 1, 0]]])
- with self.test_session() as session:
+ with self.cached_session() as session:
x = array_ops.placeholder(dtypes.float32, [2, 3, 2])
y = array_ops.placeholder(dtypes.float32, [2, 2, 3])
in_x, in_y, out_y = ops.seq2seq_inputs(x, y, 3, 2)
@@ -77,7 +77,7 @@ class Seq2SeqOpsTest(test.TestCase):
[[0, 0, 0], [0, 0, 0]]])
def test_rnn_decoder(self):
- with self.test_session():
+ with self.cached_session():
decoder_inputs = [
array_ops.placeholder(dtypes.float32, [2, 2]) for _ in range(3)
]
diff --git a/tensorflow/contrib/specs/python/specs_test.py b/tensorflow/contrib/specs/python/specs_test.py
index 9a4ad36793..b7ce6aa20a 100644
--- a/tensorflow/contrib/specs/python/specs_test.py
+++ b/tensorflow/contrib/specs/python/specs_test.py
@@ -38,7 +38,7 @@ def _rand(*size):
class SpecsTest(test.TestCase):
def testSimpleConv(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
@@ -53,7 +53,7 @@ class SpecsTest(test.TestCase):
def testUnary(self):
# This is just a quick and dirty check that these ops exist
# and work as unary ops.
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(17, 55))
spec = "net = Do(0.5) | Bn | Unit(1) | Relu | Sig | Tanh | Smax"
outputs = specs.create_net(spec, inputs)
@@ -63,7 +63,7 @@ class SpecsTest(test.TestCase):
self.assertEqual(tuple(result.shape), (17, 55))
def testAdd(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(17, 55))
spec = "net = Fs(10) + Fr(10)"
outputs = specs.create_net(spec, inputs)
@@ -77,7 +77,7 @@ class SpecsTest(test.TestCase):
"<> variablev2 dot variablev2 biasadd relu add")
def testMpPower(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(1, 64, 64, 5))
spec = "M2 = Mp([2, 2]); net = M2**3"
outputs = specs.create_net(spec, inputs)
@@ -90,7 +90,7 @@ class SpecsTest(test.TestCase):
"_ maxpool maxpool maxpool")
def testAbbrevPower(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(1, 64, 64, 5))
spec = "C3 = Cr([3, 3]); M2 = Mp([2, 2]); net = (C3(5) | M2)**3"
outputs = specs.create_net(spec, inputs)
@@ -106,7 +106,7 @@ class SpecsTest(test.TestCase):
" biasadd relu maxpool")
def testAbbrevPower2(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(1, 64, 64, 5))
spec = "C3 = Cr(_1=[3, 3]); M2 = Mp([2, 2]);"
spec += "net = (C3(_0=5) | M2)**3"
@@ -123,7 +123,7 @@ class SpecsTest(test.TestCase):
" maxpool")
def testConc(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(10, 20))
spec = "net = Conc(1, Fs(20), Fs(10))"
outputs = specs.create_net(spec, inputs)
@@ -137,7 +137,7 @@ class SpecsTest(test.TestCase):
"<> variablev2 dot variablev2 biasadd sig _ concatv2")
def testImport(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(10, 20))
spec = ("S = Import('from tensorflow.python.ops" +
" import math_ops; f = math_ops.sigmoid')")
@@ -150,7 +150,7 @@ class SpecsTest(test.TestCase):
self.assertEqual(summaries.tf_spec_structure(spec, inputs), "_ sig sig")
def testKeywordRestriction(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(10, 20))
spec = "import re; net = Conc(1, Fs(20), Fs(10))"
self.assertRaises(ValueError, lambda: specs.create_net(spec, inputs))
@@ -179,7 +179,7 @@ class SpecsTest(test.TestCase):
# XXX: the cleverness of this code is over 9000
# TODO: original author please fix
def DISABLED_testVar(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
with specs.ops:
# pylint: disable=undefined-variable
v = Var("test_var",
@@ -196,7 +196,7 @@ class SpecsTest(test.TestCase):
# XXX: the cleverness of this code is over 9000
# TODO: original author please fix
def DISABLED_testShared(self):
- with self.test_session():
+ with self.cached_session():
with specs.ops:
# pylint: disable=undefined-variable
f = Shared(Fr(100))
diff --git a/tensorflow/contrib/specs/python/summaries_test.py b/tensorflow/contrib/specs/python/summaries_test.py
index 34ff4bc8ca..b82ba06d3f 100644
--- a/tensorflow/contrib/specs/python/summaries_test.py
+++ b/tensorflow/contrib/specs/python/summaries_test.py
@@ -34,7 +34,7 @@ def _rand(*size):
class SummariesTest(test.TestCase):
def testStructure(self):
- with self.test_session():
+ with self.cached_session():
inputs_shape = (1, 18, 19, 5)
inputs = constant_op.constant(_rand(*inputs_shape))
spec = "net = Cr(64, [5, 5])"
@@ -48,7 +48,7 @@ class SummariesTest(test.TestCase):
"_ variablev2 conv variablev2 biasadd relu")
def testStructureFromTensor(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
@@ -60,7 +60,7 @@ class SummariesTest(test.TestCase):
"_ variablev2 conv variablev2 biasadd relu")
def testPrint(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
@@ -70,7 +70,7 @@ class SummariesTest(test.TestCase):
summaries.tf_spec_print(spec, inputs)
def testSummary(self):
- with self.test_session():
+ with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
diff --git a/tensorflow/python/data/util/convert_test.py b/tensorflow/python/data/util/convert_test.py
index 6a67093e48..89c3afb296 100644
--- a/tensorflow/python/data/util/convert_test.py
+++ b/tensorflow/python/data/util/convert_test.py
@@ -30,28 +30,28 @@ class ConvertTest(test.TestCase):
def testInteger(self):
resp = convert.optional_param_to_tensor("foo", 3)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.assertEqual(3, sess.run(resp))
def testIntegerDefault(self):
resp = convert.optional_param_to_tensor("foo", None)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.assertEqual(0, sess.run(resp))
def testStringDefault(self):
resp = convert.optional_param_to_tensor("bar", None, "default",
dtypes.string)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.assertEqual(compat.as_bytes("default"), sess.run(resp))
def testString(self):
resp = convert.optional_param_to_tensor("bar", "value", "default",
dtypes.string)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.assertEqual(compat.as_bytes("value"), sess.run(resp))
def testPartialShapeToTensorKnownDimension(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.assertAllEqual([1], sess.run(convert.partial_shape_to_tensor(
tensor_shape.TensorShape([1]))))
self.assertAllEqual([1], sess.run(convert.partial_shape_to_tensor((1,))))
@@ -60,7 +60,7 @@ class ConvertTest(test.TestCase):
constant_op.constant([1], dtype=dtypes.int64))))
def testPartialShapeToTensorUnknownDimension(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.assertAllEqual([-1], sess.run(convert.partial_shape_to_tensor(
tensor_shape.TensorShape([None]))))
self.assertAllEqual([-1], sess.run(convert.partial_shape_to_tensor(
@@ -84,7 +84,7 @@ class ConvertTest(test.TestCase):
convert.partial_shape_to_tensor(constant_op.constant([1., 1.]))
def testPartialShapeToTensorMultipleDimensions(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.assertAllEqual([3, 6], sess.run(convert.partial_shape_to_tensor(
tensor_shape.TensorShape([3, 6]))))
self.assertAllEqual([3, 6], sess.run(convert.partial_shape_to_tensor(
@@ -113,7 +113,7 @@ class ConvertTest(test.TestCase):
constant_op.constant([-1, -1], dtype=dtypes.int64))))
def testPartialShapeToTensorScalar(self):
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.assertAllEqual([], sess.run(convert.partial_shape_to_tensor(
tensor_shape.TensorShape([]))))
self.assertAllEqual([], sess.run(convert.partial_shape_to_tensor(())))
diff --git a/tensorflow/python/data/util/sparse_test.py b/tensorflow/python/data/util/sparse_test.py
index d49b3ff34b..056b32480f 100644
--- a/tensorflow/python/data/util/sparse_test.py
+++ b/tensorflow/python/data/util/sparse_test.py
@@ -291,7 +291,7 @@ class SparseTest(test.TestCase):
self.assertEqual(a, b)
return
self.assertTrue(isinstance(b, sparse_tensor.SparseTensor))
- with self.test_session():
+ with self.cached_session():
self.assertAllEqual(a.eval().indices, b.eval().indices)
self.assertAllEqual(a.eval().values, b.eval().values)
self.assertAllEqual(a.eval().dense_shape, b.eval().dense_shape)
diff --git a/tensorflow/python/estimator/canned/boosted_trees_test.py b/tensorflow/python/estimator/canned/boosted_trees_test.py
index 08026a93c5..6e28c72151 100644
--- a/tensorflow/python/estimator/canned/boosted_trees_test.py
+++ b/tensorflow/python/estimator/canned/boosted_trees_test.py
@@ -1560,7 +1560,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
self._get_expected_ensembles_for_classification())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train with train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -1593,7 +1593,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
expected_first, expected_second, expected_third, expected_forth = (
self._get_expected_ensembles_for_classification_with_bias())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
boosted_trees._create_classification_head(n_classes=2),
@@ -1633,7 +1633,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
self._get_expected_ensembles_for_classification())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train without train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -1666,7 +1666,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
expected_first, expected_second, expected_third, expected_forth = (
self._get_expected_ensembles_for_classification_with_bias())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
boosted_trees._create_classification_head(n_classes=2),
@@ -1704,7 +1704,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
self._get_expected_ensembles_for_regression())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train with train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -1734,7 +1734,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third, expected_forth = (
self._get_expected_ensembles_for_regression_with_bias())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train with train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -1774,7 +1774,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third = (
self._get_expected_ensembles_for_regression())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train without train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
@@ -1804,7 +1804,7 @@ class ModelFnTests(test_util.TensorFlowTestCase):
ops.reset_default_graph()
expected_first, expected_second, expected_third, expected_forth = (
self._get_expected_ensembles_for_regression_with_bias())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Train with train_in_memory mode.
with sess.graph.as_default():
train_op, ensemble_serialized = self._get_train_op_and_ensemble(
diff --git a/tensorflow/python/estimator/canned/head_test.py b/tensorflow/python/estimator/canned/head_test.py
index bd2e0ae943..de9c84d2ef 100644
--- a/tensorflow/python/estimator/canned/head_test.py
+++ b/tensorflow/python/estimator/canned/head_test.py
@@ -260,7 +260,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
features={'x': np.array(((30.,), (42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
logits_placeholder: logits_2x2
@@ -293,7 +293,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[2 2\]'):
@@ -347,14 +347,14 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesOpError('Labels must <= n_classes - 1'):
training_loss.eval({
labels_placeholder: labels_2x1_with_large_id,
logits_placeholder: logits_2x3
})
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesOpError('Labels must >= 0'):
training_loss.eval({
labels_placeholder: labels_2x1_with_negative_id,
@@ -413,7 +413,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[3 1\]'):
@@ -449,7 +449,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
spec.export_outputs.keys())
# Assert predictions and export_outputs.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
@@ -484,7 +484,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertAllEqual(
expected_classes,
@@ -510,7 +510,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits,
@@ -534,7 +534,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -561,7 +561,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_input,
labels=labels_input)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(np.sum(loss), actual_training_loss.eval())
@@ -581,7 +581,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -632,7 +632,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, and metrics.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -698,7 +698,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, and metrics.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -727,7 +727,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -755,7 +755,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
}
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -804,7 +804,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert loss, and metrics.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -837,7 +837,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
@@ -866,7 +866,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
@@ -921,7 +921,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -962,7 +962,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
optimizer=_Optimizer())
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
@@ -992,7 +992,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
labels=np.array(((1,), (1,)), dtype=np.int64),
train_op_fn=_train_op_fn)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
sess.run(spec.train_op)
w_value, t_value = sess.run([w, t])
@@ -1023,7 +1023,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
summary_str = sess.run(spec.scaffold.summary_op)
@@ -1064,7 +1064,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -1104,7 +1104,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels_rank_1)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
@@ -1153,7 +1153,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -1183,7 +1183,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -1211,7 +1211,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
train_op_fn=_train_op_fn)
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss = sess.run(spec.loss)
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
@@ -1253,7 +1253,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -1292,7 +1292,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
@@ -1327,7 +1327,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
@@ -1353,7 +1353,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -1380,7 +1380,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -1413,7 +1413,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
# Assert predictions, loss, and metrics.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -1506,7 +1506,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
logits_placeholder: logits_2x2
@@ -1536,7 +1536,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[2 2\]'):
@@ -1577,7 +1577,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[3 1\] \[labels_shape: \] \[2 1\]'):
@@ -1585,7 +1585,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
labels_placeholder: values_2x1,
logits_placeholder: values_3x1
})
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[3 1\]'):
@@ -1624,7 +1624,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
@@ -1660,7 +1660,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertAllEqual(
expected_classes,
@@ -1680,7 +1680,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -1733,7 +1733,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -1808,7 +1808,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
}
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -1832,7 +1832,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(41., training_loss.eval())
@@ -1849,7 +1849,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits,
labels=labels)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -1877,7 +1877,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -1924,7 +1924,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
}
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -1957,7 +1957,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -1983,7 +1983,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -2011,7 +2011,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_input,
labels=labels_input)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(np.sum(loss), actual_training_loss.eval())
@@ -2031,7 +2031,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -2086,7 +2086,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -2126,7 +2126,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
labels=labels,
optimizer=_Optimizer())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss)
@@ -2153,7 +2153,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
labels=np.array(((1,), (1,),), dtype=np.float64),
train_op_fn=_train_op_fn)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
sess.run(spec.train_op)
w_value, t_value = sess.run([w, t])
@@ -2182,7 +2182,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
labels=labels,
train_op_fn=_train_op_fn)
# Assert summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
summary_str = sess.run(spec.scaffold.summary_op)
@@ -2227,7 +2227,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
regularization_losses=regularization_losses)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
@@ -2254,7 +2254,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'Labels must <= n_classes - 1'):
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
training_loss.eval()
@@ -2277,7 +2277,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -2309,7 +2309,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
train_op_fn=_train_op_fn)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)
@@ -2334,7 +2334,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)
@@ -2360,7 +2360,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
expected_loss = 1.2484322
# Assert loss.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -2385,7 +2385,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
predictions = sess.run(spec.predictions)
self.assertAllClose(
@@ -2447,7 +2447,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -2483,7 +2483,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(),
@@ -2531,7 +2531,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
self.assertIsNotNone(spec.train_op)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((
@@ -2577,7 +2577,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
self.assertIsNotNone(spec.train_op)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((
@@ -2612,7 +2612,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits,
labels=labels)
tol = 1e-2
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(
expected_training_loss, training_loss.eval(),
@@ -2649,7 +2649,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
@@ -2675,7 +2675,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -2700,7 +2700,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -2744,7 +2744,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
}
tol = 1e-2
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
@@ -2825,7 +2825,7 @@ class RegressionHead(test.TestCase):
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PREDICTIONS].eval({
logits_placeholder: logits_1d
@@ -2857,7 +2857,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.loss.eval({
labels_placeholder: values_3d,
@@ -2868,7 +2868,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 3\] \[labels_shape: \] \[2 1\]'):
@@ -2908,7 +2908,7 @@ class RegressionHead(test.TestCase):
logits=logits_placeholder,
labels=labels_placeholder,
train_op_fn=lambda x: x)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.loss.eval({
labels_placeholder: values_3d,
@@ -2919,7 +2919,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits_placeholder,
labels=labels_placeholder)[0]
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 3\] \[labels_shape: \] \[2 1\]'):
@@ -2957,7 +2957,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions.
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, spec.scaffold)
self.assertAllClose(logits, spec.predictions[prediction_key].eval())
self.assertAllClose(
@@ -2992,7 +2992,7 @@ class RegressionHead(test.TestCase):
spec.export_outputs.keys())
# Assert predictions.
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, spec.scaffold)
self.assertAllClose(
expected_predictions, spec.predictions[keys.PREDICTIONS].eval())
@@ -3019,7 +3019,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
      # loss = [(43-45)^2, (44-41)^2] = [4, 9]
self.assertAllClose(13., training_loss.eval())
@@ -3045,7 +3045,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits_input,
labels=labels_input)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(np.sum(loss), actual_training_loss.eval())
@@ -3064,7 +3064,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -3112,7 +3112,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
@@ -3180,7 +3180,7 @@ class RegressionHead(test.TestCase):
}
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
@@ -3212,7 +3212,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -3237,7 +3237,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -3294,7 +3294,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
@@ -3337,7 +3337,7 @@ class RegressionHead(test.TestCase):
labels=labels,
optimizer=_Optimizer())
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss)
@@ -3364,7 +3364,7 @@ class RegressionHead(test.TestCase):
labels=np.array(((43.,), (44.,),), dtype=np.float64),
train_op_fn=_train_op_fn)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
sess.run(spec.train_op)
w_value, t_value = sess.run([w, t])
@@ -3394,7 +3394,7 @@ class RegressionHead(test.TestCase):
train_op_fn=_train_op_fn)
# Assert summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
summary_str = sess.run(spec.scaffold.summary_op)
@@ -3441,7 +3441,7 @@ class RegressionHead(test.TestCase):
regularization_losses=regularization_losses)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
@@ -3487,7 +3487,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
@@ -3523,7 +3523,7 @@ class RegressionHead(test.TestCase):
labels=np.array(((35,), (42,), (45,)), dtype=np.int32))
# Assert loss.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss = sess.run(spec.loss)
# loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
@@ -3565,7 +3565,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
@@ -3600,7 +3600,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -3648,7 +3648,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
@@ -3679,7 +3679,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
# loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
# weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6
@@ -3718,7 +3718,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
@@ -3750,7 +3750,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
# loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].
# weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6
@@ -3796,7 +3796,7 @@ class RegressionHead(test.TestCase):
_assert_no_hooks(self, spec)
# Evaluate predictions, loss, train_op, and summaries.
- with self.test_session() as sess:
+ with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
predictions, loss, train_result, summary_str = sess.run((
@@ -3857,7 +3857,7 @@ class RegressionHead(test.TestCase):
self.assertIsNone(spec.train_op)
_assert_no_hooks(self, spec)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Finalize graph and initialize variables.
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
@@ -3915,7 +3915,7 @@ class RegressionHead(test.TestCase):
self.assertEqual(dtypes.float32, spec.loss.dtype)
self.assertIsNotNone(spec.train_op)
- with self.test_session() as sess:
+ with self.cached_session() as sess:
# Finalize graph and initialize variables.
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
@@ -3955,7 +3955,7 @@ class RegressionHead(test.TestCase):
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_training_loss, training_loss.eval())
self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())
@@ -3988,7 +3988,7 @@ class RegressionHead(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
self.assertAllClose(expected_loss, spec.loss.eval())
@@ -4013,7 +4013,7 @@ class RegressionHead(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
@@ -4042,7 +4042,7 @@ class RegressionHead(test.TestCase):
logits=logits,
labels=labels,
train_op_fn=_no_op_train_fn)
- with self.test_session():
+ with self.cached_session():
_initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
diff --git a/tensorflow/python/estimator/inputs/numpy_io_test.py b/tensorflow/python/estimator/inputs/numpy_io_test.py
index 4e7b00b307..632908415f 100644
--- a/tensorflow/python/estimator/inputs/numpy_io_test.py
+++ b/tensorflow/python/estimator/inputs/numpy_io_test.py
@@ -42,7 +42,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
@@ -68,7 +68,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -30)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
features, target = input_fn()
@@ -93,7 +93,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=0)
features, target = input_fn()
@@ -114,7 +114,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -27)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
features, target = input_fn()
@@ -150,7 +150,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -29)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=3)
features, target = input_fn()
@@ -196,7 +196,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
features, target = input_fn()
@@ -221,7 +221,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = np.arange(-32, -30)
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
@@ -240,7 +240,7 @@ class NumpyIoTest(test.TestCase):
def testNumpyInputFnWithXAsNonDict(self):
x = list(range(32, 36))
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(TypeError, 'x must be a dict or array'):
failing_input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
@@ -249,7 +249,7 @@ class NumpyIoTest(test.TestCase):
def testNumpyInputFnWithXIsEmptyDict(self):
x = {}
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError, 'x cannot be an empty'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
@@ -257,7 +257,7 @@ class NumpyIoTest(test.TestCase):
def testNumpyInputFnWithXIsEmptyArray(self):
x = np.array([[], []])
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError, 'x cannot be an empty'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
@@ -268,7 +268,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = None
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features_tensor = input_fn()
@@ -291,7 +291,7 @@ class NumpyIoTest(test.TestCase):
def testNumpyInputFnWithNonBoolShuffle(self):
x = np.arange(32, 36)
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError,
'shuffle must be provided and explicitly '
'set as boolean'):
@@ -303,7 +303,7 @@ class NumpyIoTest(test.TestCase):
x = {'__target_key__': array}
y = np.arange(4)
- with self.test_session():
+ with self.cached_session():
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
input_fn()
@@ -318,7 +318,7 @@ class NumpyIoTest(test.TestCase):
x_mismatch_length = {'a': np.arange(1), 'b': b}
y_longer_length = np.arange(10)
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
ValueError, 'Length of tensors in x and y is mismatched.'):
failing_input_fn = numpy_io.numpy_input_fn(
@@ -341,7 +341,7 @@ class NumpyIoTest(test.TestCase):
x = {'a': a, 'b': b}
y = {'y1': np.arange(-32, -28), 'y2': np.arange(32, 28, -1)}
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features_tensor, targets_tensor = input_fn()
@@ -369,7 +369,7 @@ class NumpyIoTest(test.TestCase):
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = {}
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(ValueError, 'y cannot be empty'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
failing_input_fn()
@@ -379,7 +379,7 @@ class NumpyIoTest(test.TestCase):
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = {'y1': np.arange(-32, -28), 'a': a, 'y2': np.arange(32, 28, -1), 'b': b}
- with self.test_session():
+ with self.cached_session():
with self.assertRaisesRegexp(
ValueError, '2 duplicate keys are found in both x and y'):
failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False)
diff --git a/tensorflow/python/estimator/inputs/pandas_io_test.py b/tensorflow/python/estimator/inputs/pandas_io_test.py
index 6f13bc95d2..9e69fc72dc 100644
--- a/tensorflow/python/estimator/inputs/pandas_io_test.py
+++ b/tensorflow/python/estimator/inputs/pandas_io_test.py
@@ -102,7 +102,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
@@ -116,7 +116,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFnWhenYIsDataFrame_ProducesExpectedOutput(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
@@ -131,7 +131,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFnYIsDataFrame_HandlesOverlappingColumns(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
y = y.rename(columns={'a_target': 'a', 'b_target': 'b'})
input_fn = pandas_io.pandas_input_fn(
@@ -147,7 +147,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFnYIsDataFrame_HandlesOverlappingColumnsInTargets(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
y = y.rename(columns={'a_target': 'a', 'b_target': 'a_n'})
input_fn = pandas_io.pandas_input_fn(
@@ -163,7 +163,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
@@ -191,7 +191,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
@@ -230,7 +230,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
@@ -243,7 +243,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
@@ -266,7 +266,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
@@ -276,7 +276,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
@@ -286,7 +286,7 @@ class PandasIoTest(test.TestCase):
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
- with self.test_session() as session:
+ with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
@@ -297,7 +297,7 @@ class PandasIoTest(test.TestCase):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
- with self.test_session() as session:
+ with self.cached_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
diff --git a/tensorflow/python/training/checkpointable/tracking_test.py b/tensorflow/python/training/checkpointable/tracking_test.py
index e85f812ce2..a44c570fb9 100644
--- a/tensorflow/python/training/checkpointable/tracking_test.py
+++ b/tensorflow/python/training/checkpointable/tracking_test.py
@@ -165,7 +165,7 @@ class InterfaceTests(test.TestCase):
self.assertEqual([c], a.attribute["c"].layers)
checkpoint = util.Checkpoint(a=a)
save_path = checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
- with self.test_session():
+ with self.cached_session():
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
@test_util.run_in_graph_and_eager_modes
diff --git a/tensorflow/python/training/checkpointable/util_test.py b/tensorflow/python/training/checkpointable/util_test.py
index 0d32d21426..f8b5bd8501 100644
--- a/tensorflow/python/training/checkpointable/util_test.py
+++ b/tensorflow/python/training/checkpointable/util_test.py
@@ -384,7 +384,7 @@ class CheckpointingTests(test.TestCase):
saver = saver_lib.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
- with self.test_session() as sess:
+ with self.cached_session() as sess:
self.evaluate(v.non_dep_variable.assign(42.))
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))