diff options
author | A. Unique TensorFlower <gardener@tensorflow.org> | 2018-09-10 14:37:06 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2018-09-10 15:04:14 -0700 |
commit | b828f89263e054bfa7c7a808cab1506834ab906d (patch) | |
tree | e31816a6850d177306f19ee8670e0836060fcfc9 /tensorflow/python/estimator | |
parent | acf0ee82092727afc2067316982407cf5e496f75 (diff) |
Move from deprecated self.test_session() to self.cached_session().
self.test_session() has been deprecated in 9962eb5e84b15e309410071b06c2ed2d6148ed44 as its name confuses readers of the test. Moving to cached_session() instead, which is more explicit about:
* the fact that the session may be reused.
* the fact that the session is not closed even when doing a "with self.test_session()" statement.
PiperOrigin-RevId: 212336464
Diffstat (limited to 'tensorflow/python/estimator')
-rw-r--r-- | tensorflow/python/estimator/canned/boosted_trees_test.py | 16 | ||||
-rw-r--r-- | tensorflow/python/estimator/canned/head_test.py | 208 | ||||
-rw-r--r-- | tensorflow/python/estimator/inputs/numpy_io_test.py | 34 | ||||
-rw-r--r-- | tensorflow/python/estimator/inputs/pandas_io_test.py | 24 |
4 files changed, 141 insertions, 141 deletions
diff --git a/tensorflow/python/estimator/canned/boosted_trees_test.py b/tensorflow/python/estimator/canned/boosted_trees_test.py index 08026a93c5..6e28c72151 100644 --- a/tensorflow/python/estimator/canned/boosted_trees_test.py +++ b/tensorflow/python/estimator/canned/boosted_trees_test.py @@ -1560,7 +1560,7 @@ class ModelFnTests(test_util.TensorFlowTestCase): ops.reset_default_graph() expected_first, expected_second, expected_third = ( self._get_expected_ensembles_for_classification()) - with self.test_session() as sess: + with self.cached_session() as sess: # Train with train_in_memory mode. with sess.graph.as_default(): train_op, ensemble_serialized = self._get_train_op_and_ensemble( @@ -1593,7 +1593,7 @@ class ModelFnTests(test_util.TensorFlowTestCase): expected_first, expected_second, expected_third, expected_forth = ( self._get_expected_ensembles_for_classification_with_bias()) - with self.test_session() as sess: + with self.cached_session() as sess: with sess.graph.as_default(): train_op, ensemble_serialized = self._get_train_op_and_ensemble( boosted_trees._create_classification_head(n_classes=2), @@ -1633,7 +1633,7 @@ class ModelFnTests(test_util.TensorFlowTestCase): ops.reset_default_graph() expected_first, expected_second, expected_third = ( self._get_expected_ensembles_for_classification()) - with self.test_session() as sess: + with self.cached_session() as sess: # Train without train_in_memory mode. 
with sess.graph.as_default(): train_op, ensemble_serialized = self._get_train_op_and_ensemble( @@ -1666,7 +1666,7 @@ class ModelFnTests(test_util.TensorFlowTestCase): expected_first, expected_second, expected_third, expected_forth = ( self._get_expected_ensembles_for_classification_with_bias()) - with self.test_session() as sess: + with self.cached_session() as sess: with sess.graph.as_default(): train_op, ensemble_serialized = self._get_train_op_and_ensemble( boosted_trees._create_classification_head(n_classes=2), @@ -1704,7 +1704,7 @@ class ModelFnTests(test_util.TensorFlowTestCase): ops.reset_default_graph() expected_first, expected_second, expected_third = ( self._get_expected_ensembles_for_regression()) - with self.test_session() as sess: + with self.cached_session() as sess: # Train with train_in_memory mode. with sess.graph.as_default(): train_op, ensemble_serialized = self._get_train_op_and_ensemble( @@ -1734,7 +1734,7 @@ class ModelFnTests(test_util.TensorFlowTestCase): ops.reset_default_graph() expected_first, expected_second, expected_third, expected_forth = ( self._get_expected_ensembles_for_regression_with_bias()) - with self.test_session() as sess: + with self.cached_session() as sess: # Train with train_in_memory mode. with sess.graph.as_default(): train_op, ensemble_serialized = self._get_train_op_and_ensemble( @@ -1774,7 +1774,7 @@ class ModelFnTests(test_util.TensorFlowTestCase): ops.reset_default_graph() expected_first, expected_second, expected_third = ( self._get_expected_ensembles_for_regression()) - with self.test_session() as sess: + with self.cached_session() as sess: # Train without train_in_memory mode. 
with sess.graph.as_default(): train_op, ensemble_serialized = self._get_train_op_and_ensemble( @@ -1804,7 +1804,7 @@ class ModelFnTests(test_util.TensorFlowTestCase): ops.reset_default_graph() expected_first, expected_second, expected_third, expected_forth = ( self._get_expected_ensembles_for_regression_with_bias()) - with self.test_session() as sess: + with self.cached_session() as sess: # Train with train_in_memory mode. with sess.graph.as_default(): train_op, ensemble_serialized = self._get_train_op_and_ensemble( diff --git a/tensorflow/python/estimator/canned/head_test.py b/tensorflow/python/estimator/canned/head_test.py index bd2e0ae943..de9c84d2ef 100644 --- a/tensorflow/python/estimator/canned/head_test.py +++ b/tensorflow/python/estimator/canned/head_test.py @@ -260,7 +260,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): features={'x': np.array(((30.,), (42.,),))}, mode=model_fn.ModeKeys.PREDICT, logits=logits_placeholder) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(errors.OpError, 'logits shape'): spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({ logits_placeholder: logits_2x2 @@ -293,7 +293,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_placeholder, labels=labels_placeholder)[0] - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[2 2\]'): @@ -347,14 +347,14 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_placeholder, labels=labels_placeholder)[0] - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError('Labels must <= n_classes - 1'): training_loss.eval({ labels_placeholder: labels_2x1_with_large_id, logits_placeholder: logits_2x3 }) - with self.test_session(): + with self.cached_session(): with 
self.assertRaisesOpError('Labels must >= 0'): training_loss.eval({ labels_placeholder: labels_2x1_with_negative_id, @@ -413,7 +413,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_placeholder, labels=labels_placeholder)[0] - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[3 1\]'): @@ -449,7 +449,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): spec.export_outputs.keys()) # Assert predictions and export_outputs. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) predictions = sess.run(spec.predictions) @@ -484,7 +484,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.PREDICT, logits=logits) - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertAllEqual( expected_classes, @@ -510,7 +510,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.PREDICT, logits=logits) - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) predictions = sess.run(spec.predictions) self.assertAllClose(logits, @@ -534,7 +534,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2) @@ -561,7 +561,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_input, labels=labels_input)[0] - with self.test_session(): + with self.cached_session(): 
_initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(np.sum(loss), actual_training_loss.eval()) @@ -581,7 +581,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) with self.assertRaisesRegexp( errors.InvalidArgumentError, @@ -632,7 +632,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert predictions, loss, and metrics. tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} @@ -698,7 +698,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert predictions, loss, and metrics. tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} @@ -727,7 +727,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2) @@ -755,7 +755,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): } tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops} @@ -804,7 +804,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert loss, 
and metrics. tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} @@ -837,7 +837,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): logits=logits, labels=labels) tol = 1e-2 - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=tol, atol=tol) @@ -866,7 +866,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): logits=logits, labels=labels) tol = 1e-2 - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=tol, atol=tol) @@ -921,7 +921,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert predictions, loss, train_op, and summaries. 
tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) loss, train_result, summary_str = sess.run((spec.loss, spec.train_op, @@ -962,7 +962,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): optimizer=_Optimizer()) tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) loss, train_result = sess.run((spec.loss, spec.train_op)) self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol) @@ -992,7 +992,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): labels=np.array(((1,), (1,)), dtype=np.int64), train_op_fn=_train_op_fn) - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) sess.run(spec.train_op) w_value, t_value = sess.run([w, t]) @@ -1023,7 +1023,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert summaries. tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) summary_str = sess.run(spec.scaffold.summary_op) @@ -1064,7 +1064,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert predictions, loss, train_op, and summaries. 
tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) loss, train_result, summary_str = sess.run((spec.loss, spec.train_op, @@ -1104,7 +1104,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): logits=logits, labels=labels_rank_1) tol = 1e-2 - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=tol, atol=tol) @@ -1153,7 +1153,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert predictions, loss, train_op, and summaries. tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) loss, train_result, summary_str = sess.run((spec.loss, spec.train_op, @@ -1183,7 +1183,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2) @@ -1211,7 +1211,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): train_op_fn=_train_op_fn) tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) loss = sess.run(spec.loss) self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol) @@ -1253,7 +1253,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert predictions, loss, train_op, and summaries. 
tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) loss, train_result, summary_str = sess.run((spec.loss, spec.train_op, @@ -1292,7 +1292,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): logits=logits, labels=labels) tol = 1e-2 - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=tol, atol=tol) @@ -1327,7 +1327,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert predictions, loss, train_op, and summaries. tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) loss, train_result = sess.run((spec.loss, spec.train_op)) self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol) @@ -1353,7 +1353,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): logits=logits, labels=labels, train_op_fn=_no_op_train_fn) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) with self.assertRaisesRegexp( errors.InvalidArgumentError, @@ -1380,7 +1380,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): logits=logits, labels=labels, train_op_fn=_no_op_train_fn) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) with self.assertRaisesRegexp( errors.InvalidArgumentError, @@ -1413,7 +1413,7 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase): # Assert predictions, loss, and metrics. 
tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops} @@ -1506,7 +1506,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): features={'x': np.array(((42.,),))}, mode=model_fn.ModeKeys.PREDICT, logits=logits_placeholder) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(errors.OpError, 'logits shape'): spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({ logits_placeholder: logits_2x2 @@ -1536,7 +1536,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_placeholder, labels=labels_placeholder)[0] - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[2 2\]'): @@ -1577,7 +1577,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_placeholder, labels=labels_placeholder)[0] - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r'\[expected_labels_shape: \] \[3 1\] \[labels_shape: \] \[2 1\]'): @@ -1585,7 +1585,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): labels_placeholder: values_2x1, logits_placeholder: values_3x1 }) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[3 1\]'): @@ -1624,7 +1624,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions. 
- with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) predictions = sess.run(spec.predictions) @@ -1660,7 +1660,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.PREDICT, logits=logits) - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertAllEqual( expected_classes, @@ -1680,7 +1680,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2) @@ -1733,7 +1733,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions, loss, and metrics. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} @@ -1808,7 +1808,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): } # Assert predictions, loss, and metrics. 
- with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} @@ -1832,7 +1832,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(41., training_loss.eval()) @@ -1849,7 +1849,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): logits=logits, labels=labels) - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} @@ -1877,7 +1877,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2) @@ -1924,7 +1924,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): } self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys()) tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} @@ -1957,7 +1957,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) 
self.assertAllClose(expected_training_loss, training_loss.eval()) self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval()) @@ -1983,7 +1983,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(expected_training_loss, training_loss.eval()) self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval()) @@ -2011,7 +2011,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_input, labels=labels_input)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(np.sum(loss), actual_training_loss.eval()) @@ -2031,7 +2031,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) with self.assertRaisesRegexp( errors.InvalidArgumentError, @@ -2086,7 +2086,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions, loss, train_op, and summaries. 
- with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) loss, train_result, summary_str = sess.run((spec.loss, spec.train_op, @@ -2126,7 +2126,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): labels=labels, optimizer=_Optimizer()) - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) loss, train_result = sess.run((spec.loss, spec.train_op)) self.assertAllClose(expected_loss, loss) @@ -2153,7 +2153,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): labels=np.array(((1,), (1,),), dtype=np.float64), train_op_fn=_train_op_fn) - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) sess.run(spec.train_op) w_value, t_value = sess.run([w, t]) @@ -2182,7 +2182,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): labels=labels, train_op_fn=_train_op_fn) # Assert summaries. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) summary_str = sess.run(spec.scaffold.summary_op) @@ -2227,7 +2227,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): regularization_losses=regularization_losses) # Assert predictions, loss, train_op, and summaries. 
- with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) loss, train_result, summary_str = sess.run((spec.loss, spec.train_op, @@ -2254,7 +2254,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): with self.assertRaisesRegexp( errors.InvalidArgumentError, r'Labels must <= n_classes - 1'): - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) training_loss.eval() @@ -2277,7 +2277,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2) @@ -2309,7 +2309,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): train_op_fn=_train_op_fn) # Assert predictions, loss, train_op, and summaries. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) loss, train_result = sess.run((spec.loss, spec.train_op)) self.assertAlmostEqual(expected_loss, loss, delta=1.e-5) @@ -2334,7 +2334,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2) @@ -2360,7 +2360,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): expected_loss = 1.2484322 # Assert loss. 
- with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops} @@ -2385,7 +2385,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): logits=logits) # Assert predictions, loss, and metrics. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) predictions = sess.run(spec.predictions) self.assertAllClose( @@ -2447,7 +2447,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys()) # Assert predictions, loss, and metrics. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops} @@ -2483,7 +2483,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels_rank_1) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), @@ -2531,7 +2531,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): self.assertIsNotNone(spec.train_op) # Assert predictions, loss, and metrics. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) loss, train_result, summary_str = sess.run(( @@ -2577,7 +2577,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): self.assertIsNotNone(spec.train_op) # Assert predictions, loss, and metrics. 
- with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) loss, train_result, summary_str = sess.run(( @@ -2612,7 +2612,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): logits=logits, labels=labels) tol = 1e-2 - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose( expected_training_loss, training_loss.eval(), @@ -2649,7 +2649,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): # Assert predictions, loss, train_op, and summaries. tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) loss, train_result = sess.run((spec.loss, spec.train_op)) self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol) @@ -2675,7 +2675,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): logits=logits, labels=labels, train_op_fn=_no_op_train_fn) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) with self.assertRaisesRegexp( errors.InvalidArgumentError, @@ -2700,7 +2700,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): logits=logits, labels=labels, train_op_fn=_no_op_train_fn) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) with self.assertRaisesRegexp( errors.InvalidArgumentError, @@ -2744,7 +2744,7 @@ class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase): } tol = 1e-2 - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops} @@ -2825,7 +2825,7 @@ class 
RegressionHead(test.TestCase): features={'x': np.array(((42.,),))}, mode=model_fn.ModeKeys.PREDICT, logits=logits_placeholder) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(errors.OpError, 'logits shape'): spec.predictions[prediction_keys.PredictionKeys.PREDICTIONS].eval({ logits_placeholder: logits_1d @@ -2857,7 +2857,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_placeholder, labels=labels_placeholder) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(errors.OpError, 'logits shape'): spec.loss.eval({ labels_placeholder: values_3d, @@ -2868,7 +2868,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_placeholder, labels=labels_placeholder)[0] - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r'\[expected_labels_shape: \] \[2 3\] \[labels_shape: \] \[2 1\]'): @@ -2908,7 +2908,7 @@ class RegressionHead(test.TestCase): logits=logits_placeholder, labels=labels_placeholder, train_op_fn=lambda x: x) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(errors.OpError, 'logits shape'): spec.loss.eval({ labels_placeholder: values_3d, @@ -2919,7 +2919,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits_placeholder, labels=labels_placeholder)[0] - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp( errors.InvalidArgumentError, r'\[expected_labels_shape: \] \[2 3\] \[labels_shape: \] \[2 1\]'): @@ -2957,7 +2957,7 @@ class RegressionHead(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions. 
- with self.test_session(): + with self.cached_session(): _initialize_variables(self, spec.scaffold) self.assertAllClose(logits, spec.predictions[prediction_key].eval()) self.assertAllClose( @@ -2992,7 +2992,7 @@ class RegressionHead(test.TestCase): spec.export_outputs.keys()) # Assert predictions. - with self.test_session(): + with self.cached_session(): _initialize_variables(self, spec.scaffold) self.assertAllClose( expected_predictions, spec.predictions[keys.PREDICTIONS].eval()) @@ -3019,7 +3019,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) # loss = [(43-45)^2, (44-41)] = [4, 9] self.assertAllClose(13., training_loss.eval()) @@ -3045,7 +3045,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits_input, labels=labels_input)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(np.sum(loss), actual_training_loss.eval()) @@ -3064,7 +3064,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) with self.assertRaisesRegexp( errors.InvalidArgumentError, @@ -3112,7 +3112,7 @@ class RegressionHead(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions, loss, and metrics. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[ @@ -3180,7 +3180,7 @@ class RegressionHead(test.TestCase): } # Assert predictions, loss, and metrics. 
- with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops} @@ -3212,7 +3212,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(expected_training_loss, training_loss.eval()) self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval()) @@ -3237,7 +3237,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(expected_training_loss, training_loss.eval()) self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval()) @@ -3294,7 +3294,7 @@ class RegressionHead(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions, loss, train_op, and summaries. 
- with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) predictions, loss, train_result, summary_str = sess.run(( @@ -3337,7 +3337,7 @@ class RegressionHead(test.TestCase): labels=labels, optimizer=_Optimizer()) - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) loss, train_result = sess.run((spec.loss, spec.train_op)) self.assertAllClose(expected_loss, loss) @@ -3364,7 +3364,7 @@ class RegressionHead(test.TestCase): labels=np.array(((43.,), (44.,),), dtype=np.float64), train_op_fn=_train_op_fn) - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) sess.run(spec.train_op) w_value, t_value = sess.run([w, t]) @@ -3394,7 +3394,7 @@ class RegressionHead(test.TestCase): train_op_fn=_train_op_fn) # Assert summaries. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) summary_str = sess.run(spec.scaffold.summary_op) @@ -3441,7 +3441,7 @@ class RegressionHead(test.TestCase): regularization_losses=regularization_losses) # Assert predictions, loss, train_op, and summaries. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) prediction_key = prediction_keys.PredictionKeys.PREDICTIONS @@ -3487,7 +3487,7 @@ class RegressionHead(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions, loss, and metrics. 
- with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[ @@ -3523,7 +3523,7 @@ class RegressionHead(test.TestCase): labels=np.array(((35,), (42,), (45,)), dtype=np.int32)) # Assert loss. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) loss = sess.run(spec.loss) # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6 @@ -3565,7 +3565,7 @@ class RegressionHead(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions, loss, train_op, and summaries. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) predictions, loss, train_result, summary_str = sess.run(( @@ -3600,7 +3600,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels_rank_1) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(expected_training_loss, training_loss.eval()) self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval()) @@ -3648,7 +3648,7 @@ class RegressionHead(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions, loss, train_op, and summaries. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) predictions, loss, train_result, summary_str = sess.run(( @@ -3679,7 +3679,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.EVAL, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) # loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1]. 
# weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6 @@ -3718,7 +3718,7 @@ class RegressionHead(test.TestCase): _assert_no_hooks(self, spec) # Assert predictions, loss, and metrics. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNone(spec.scaffold.summary_op) loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[ @@ -3750,7 +3750,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels)[0] - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) # loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1]. # weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6 @@ -3796,7 +3796,7 @@ class RegressionHead(test.TestCase): _assert_no_hooks(self, spec) # Evaluate predictions, loss, train_op, and summaries. - with self.test_session() as sess: + with self.cached_session() as sess: _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) predictions, loss, train_result, summary_str = sess.run(( @@ -3857,7 +3857,7 @@ class RegressionHead(test.TestCase): self.assertIsNone(spec.train_op) _assert_no_hooks(self, spec) - with self.test_session() as sess: + with self.cached_session() as sess: # Finalize graph and initialize variables. _initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) @@ -3915,7 +3915,7 @@ class RegressionHead(test.TestCase): self.assertEqual(dtypes.float32, spec.loss.dtype) self.assertIsNotNone(spec.train_op) - with self.test_session() as sess: + with self.cached_session() as sess: # Finalize graph and initialize variables. 
_initialize_variables(self, spec.scaffold) self.assertIsNotNone(spec.scaffold.summary_op) @@ -3955,7 +3955,7 @@ class RegressionHead(test.TestCase): mode=model_fn.ModeKeys.TRAIN, logits=logits, labels=labels) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(expected_training_loss, training_loss.eval()) self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval()) @@ -3988,7 +3988,7 @@ class RegressionHead(test.TestCase): logits=logits, labels=labels, train_op_fn=_train_op_fn) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) self.assertAllClose(expected_loss, spec.loss.eval()) @@ -4013,7 +4013,7 @@ class RegressionHead(test.TestCase): logits=logits, labels=labels, train_op_fn=_no_op_train_fn) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) with self.assertRaisesRegexp( errors.InvalidArgumentError, @@ -4042,7 +4042,7 @@ class RegressionHead(test.TestCase): logits=logits, labels=labels, train_op_fn=_no_op_train_fn) - with self.test_session(): + with self.cached_session(): _initialize_variables(self, monitored_session.Scaffold()) with self.assertRaisesRegexp( errors.InvalidArgumentError, diff --git a/tensorflow/python/estimator/inputs/numpy_io_test.py b/tensorflow/python/estimator/inputs/numpy_io_test.py index 4e7b00b307..632908415f 100644 --- a/tensorflow/python/estimator/inputs/numpy_io_test.py +++ b/tensorflow/python/estimator/inputs/numpy_io_test.py @@ -42,7 +42,7 @@ class NumpyIoTest(test.TestCase): x = {'a': a, 'b': b} y = np.arange(-32, -28) - with self.test_session() as session: + with self.cached_session() as session: input_fn = numpy_io.numpy_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) features, target = input_fn() @@ -68,7 +68,7 @@ class NumpyIoTest(test.TestCase): x = {'a': a, 'b': b} y = np.arange(-32, -30) - with 
self.test_session() as session: + with self.cached_session() as session: input_fn = numpy_io.numpy_input_fn( x, y, batch_size=128, shuffle=False, num_epochs=2) features, target = input_fn() @@ -93,7 +93,7 @@ class NumpyIoTest(test.TestCase): x = {'a': a, 'b': b} y = np.arange(-32, -28) - with self.test_session() as session: + with self.cached_session() as session: input_fn = numpy_io.numpy_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=0) features, target = input_fn() @@ -114,7 +114,7 @@ class NumpyIoTest(test.TestCase): x = {'a': a, 'b': b} y = np.arange(-32, -27) - with self.test_session() as session: + with self.cached_session() as session: input_fn = numpy_io.numpy_input_fn( x, y, batch_size=batch_size, shuffle=False, num_epochs=1) features, target = input_fn() @@ -150,7 +150,7 @@ class NumpyIoTest(test.TestCase): x = {'a': a, 'b': b} y = np.arange(-32, -29) - with self.test_session() as session: + with self.cached_session() as session: input_fn = numpy_io.numpy_input_fn( x, y, batch_size=batch_size, shuffle=False, num_epochs=3) features, target = input_fn() @@ -196,7 +196,7 @@ class NumpyIoTest(test.TestCase): x = {'a': a, 'b': b} y = np.arange(-32, -28) - with self.test_session() as session: + with self.cached_session() as session: input_fn = numpy_io.numpy_input_fn( x, y, batch_size=batch_size, shuffle=False, num_epochs=1) features, target = input_fn() @@ -221,7 +221,7 @@ class NumpyIoTest(test.TestCase): x = {'a': a, 'b': b} y = np.arange(-32, -30) - with self.test_session() as session: + with self.cached_session() as session: input_fn = numpy_io.numpy_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) features, target = input_fn() @@ -240,7 +240,7 @@ class NumpyIoTest(test.TestCase): def testNumpyInputFnWithXAsNonDict(self): x = list(range(32, 36)) y = np.arange(4) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(TypeError, 'x must be a dict or array'): failing_input_fn = numpy_io.numpy_input_fn( x, y, 
batch_size=2, shuffle=False, num_epochs=1) @@ -249,7 +249,7 @@ class NumpyIoTest(test.TestCase): def testNumpyInputFnWithXIsEmptyDict(self): x = {} y = np.arange(4) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(ValueError, 'x cannot be an empty'): failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False) failing_input_fn() @@ -257,7 +257,7 @@ class NumpyIoTest(test.TestCase): def testNumpyInputFnWithXIsEmptyArray(self): x = np.array([[], []]) y = np.arange(4) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(ValueError, 'x cannot be an empty'): failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False) failing_input_fn() @@ -268,7 +268,7 @@ class NumpyIoTest(test.TestCase): x = {'a': a, 'b': b} y = None - with self.test_session() as session: + with self.cached_session() as session: input_fn = numpy_io.numpy_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) features_tensor = input_fn() @@ -291,7 +291,7 @@ class NumpyIoTest(test.TestCase): def testNumpyInputFnWithNonBoolShuffle(self): x = np.arange(32, 36) y = np.arange(4) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(ValueError, 'shuffle must be provided and explicitly ' 'set as boolean'): @@ -303,7 +303,7 @@ class NumpyIoTest(test.TestCase): x = {'__target_key__': array} y = np.arange(4) - with self.test_session(): + with self.cached_session(): input_fn = numpy_io.numpy_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) input_fn() @@ -318,7 +318,7 @@ class NumpyIoTest(test.TestCase): x_mismatch_length = {'a': np.arange(1), 'b': b} y_longer_length = np.arange(10) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp( ValueError, 'Length of tensors in x and y is mismatched.'): failing_input_fn = numpy_io.numpy_input_fn( @@ -341,7 +341,7 @@ class NumpyIoTest(test.TestCase): x = {'a': a, 'b': b} y = {'y1': np.arange(-32, -28), 'y2': 
np.arange(32, 28, -1)} - with self.test_session() as session: + with self.cached_session() as session: input_fn = numpy_io.numpy_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) features_tensor, targets_tensor = input_fn() @@ -369,7 +369,7 @@ class NumpyIoTest(test.TestCase): b = np.arange(32, 36) x = {'a': a, 'b': b} y = {} - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(ValueError, 'y cannot be empty'): failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False) failing_input_fn() @@ -379,7 +379,7 @@ class NumpyIoTest(test.TestCase): b = np.arange(32, 36) x = {'a': a, 'b': b} y = {'y1': np.arange(-32, -28), 'a': a, 'y2': np.arange(32, 28, -1), 'b': b} - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp( ValueError, '2 duplicate keys are found in both x and y'): failing_input_fn = numpy_io.numpy_input_fn(x, y, shuffle=False) diff --git a/tensorflow/python/estimator/inputs/pandas_io_test.py b/tensorflow/python/estimator/inputs/pandas_io_test.py index 6f13bc95d2..9e69fc72dc 100644 --- a/tensorflow/python/estimator/inputs/pandas_io_test.py +++ b/tensorflow/python/estimator/inputs/pandas_io_test.py @@ -102,7 +102,7 @@ class PandasIoTest(test.TestCase): def testPandasInputFn_ProducesExpectedOutputs(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) @@ -116,7 +116,7 @@ class PandasIoTest(test.TestCase): def testPandasInputFnWhenYIsDataFrame_ProducesExpectedOutput(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: x, y = self.makeTestDataFrameWithYAsDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) @@ -131,7 +131,7 @@ class PandasIoTest(test.TestCase): def 
testPandasInputFnYIsDataFrame_HandlesOverlappingColumns(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: x, y = self.makeTestDataFrameWithYAsDataFrame() y = y.rename(columns={'a_target': 'a', 'b_target': 'b'}) input_fn = pandas_io.pandas_input_fn( @@ -147,7 +147,7 @@ class PandasIoTest(test.TestCase): def testPandasInputFnYIsDataFrame_HandlesOverlappingColumnsInTargets(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: x, y = self.makeTestDataFrameWithYAsDataFrame() y = y.rename(columns={'a_target': 'a', 'b_target': 'a_n'}) input_fn = pandas_io.pandas_input_fn( @@ -163,7 +163,7 @@ class PandasIoTest(test.TestCase): def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: index = np.arange(100, 102) a = np.arange(2) b = np.arange(32, 34) @@ -191,7 +191,7 @@ class PandasIoTest(test.TestCase): def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: index = np.arange(100, 105) a = np.arange(5) b = np.arange(32, 37) @@ -230,7 +230,7 @@ class PandasIoTest(test.TestCase): def testPandasInputFn_OnlyX(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: x, _ = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y=None, batch_size=2, shuffle=False, num_epochs=1) @@ -243,7 +243,7 @@ class PandasIoTest(test.TestCase): def testPandasInputFn_ExcludesIndex(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=False, num_epochs=1) @@ -266,7 +266,7 @@ class 
PandasIoTest(test.TestCase): def testPandasInputFn_RespectsEpoch_NoShuffle(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=4, shuffle=False, num_epochs=1) @@ -276,7 +276,7 @@ class PandasIoTest(test.TestCase): def testPandasInputFn_RespectsEpoch_WithShuffle(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=4, shuffle=True, num_epochs=1) @@ -286,7 +286,7 @@ class PandasIoTest(test.TestCase): def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self): if not HAS_PANDAS: return - with self.test_session() as session: + with self.cached_session() as session: x, y = self.makeTestDataFrame() input_fn = pandas_io.pandas_input_fn( x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2) @@ -297,7 +297,7 @@ class PandasIoTest(test.TestCase): if not HAS_PANDAS: return x, y = self.makeTestDataFrame() - with self.test_session() as session: + with self.cached_session() as session: input_fn = pandas_io.pandas_input_fn( x, y, batch_size=3, shuffle=False, num_epochs=1) |