diff options
author | A. Unique TensorFlower <gardener@tensorflow.org> | 2018-08-21 19:24:19 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2018-08-21 19:27:54 -0700 |
commit | 496023e9dc84a076caeb2e5e8e13b6a3d819ad6d (patch) | |
tree | 9776c9865f7b98a15817bc6be4c2b683323a67b1 /tensorflow/contrib/seq2seq | |
parent | 361a82d73a50a800510674b3aaa20e4845e56434 (diff) |
Move from deprecated self.test_session() to self.cached_session().
self.test_session() has been deprecated in 9962eb5e84b15e309410071b06c2ed2d6148ed44 as its name confuses readers of the test. Moving to cached_session() instead which is more explicit about:
* the fact that the session may be reused.
* the fact that the session is not closed even when using a "with self.cached_session()" statement.
PiperOrigin-RevId: 209701635
Diffstat (limited to 'tensorflow/contrib/seq2seq')
3 files changed, 15 insertions, 15 deletions
diff --git a/tensorflow/contrib/seq2seq/python/kernel_tests/attention_wrapper_test.py b/tensorflow/contrib/seq2seq/python/kernel_tests/attention_wrapper_test.py index cd162bae25..f2c43f30d4 100644 --- a/tensorflow/contrib/seq2seq/python/kernel_tests/attention_wrapper_test.py +++ b/tensorflow/contrib/seq2seq/python/kernel_tests/attention_wrapper_test.py @@ -512,7 +512,7 @@ class AttentionWrapperTest(test.TestCase): for axis in [0, 1]: for exclusive in [True, False]: - with self.test_session(): + with self.cached_session(): # Compute cumprod with regular tf.cumprod cumprod_output = math_ops.cumprod( test_input, axis=axis, exclusive=exclusive).eval() @@ -548,7 +548,7 @@ class AttentionWrapperTest(test.TestCase): for p, a in zip(p_choose_i, previous_attention)]) # Compute output with TensorFlow function, for both calculation types - with self.test_session(): + with self.cached_session(): recursive_output = wrapper.monotonic_attention( p_choose_i, previous_attention, 'recursive').eval() @@ -569,7 +569,7 @@ class AttentionWrapperTest(test.TestCase): for p, a in zip(p_choose_i, previous_attention)]) # Compute output with TensorFlow function, for both calculation types - with self.test_session(): + with self.cached_session(): parallel_output = wrapper.monotonic_attention( p_choose_i, previous_attention, 'parallel').eval() @@ -594,7 +594,7 @@ class AttentionWrapperTest(test.TestCase): for p, a in zip(p_choose_i, previous_attention)]) # Compute output with TensorFlow function, for both calculation types - with self.test_session(): + with self.cached_session(): hard_output = wrapper.monotonic_attention( # TensorFlow is unhappy when these are not wrapped as tf.constant constant_op.constant(p_choose_i), @@ -634,7 +634,7 @@ class AttentionWrapperTest(test.TestCase): recursive_output = [np.array([1] + [0]*(p_choose_i.shape[1] - 1), np.float32)] # Compute output with TensorFlow function, for both calculation types - with self.test_session(): + with self.cached_session(): for j in 
range(p_choose_i.shape[0]): # Compute attention distribution for this output time step recursive_output.append(wrapper.monotonic_attention( diff --git a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py index 4073b390fc..f5b6b1bde9 100644 --- a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py +++ b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py @@ -66,7 +66,7 @@ class TestGatherTree(test.TestCase): max_sequence_lengths=max_sequence_lengths, end_token=11) - with self.test_session() as sess: + with self.cached_session() as sess: res_ = sess.run(res) self.assertAllEqual(expected_result, res_) @@ -115,7 +115,7 @@ class TestGatherTree(test.TestCase): sorted_array = beam_search_decoder.gather_tree_from_array( array, parent_ids, sequence_length) - with self.test_session() as sess: + with self.cached_session() as sess: sorted_array = sess.run(sorted_array) expected_array = sess.run(expected_array) self.assertAllEqual(expected_array, sorted_array) @@ -170,7 +170,7 @@ class TestGatherTree(test.TestCase): sorted_array = beam_search_decoder.gather_tree_from_array( array, parent_ids, sequence_length) - with self.test_session() as sess: + with self.cached_session() as sess: sorted_array, expected_array = sess.run([sorted_array, expected_array]) self.assertAllEqual(expected_array, sorted_array) @@ -186,7 +186,7 @@ class TestArrayShapeChecks(test.TestCase): batch_size = array_ops.constant(batch_size) check_op = beam_search_decoder._check_batch_beam(t, batch_size, beam_width) # pylint: disable=protected-access - with self.test_session() as sess: + with self.cached_session() as sess: if is_valid: sess.run(check_op) else: @@ -220,7 +220,7 @@ class TestEosMasking(test.TestCase): masked = beam_search_decoder._mask_probs(probs, eos_token, previously_finished) - with self.test_session() as sess: + with self.cached_session() as sess: 
probs = sess.run(probs) masked = sess.run(masked) @@ -283,7 +283,7 @@ class TestBeamStep(test.TestCase): end_token=self.end_token, length_penalty_weight=self.length_penalty_weight) - with self.test_session() as sess: + with self.cached_session() as sess: outputs_, next_state_, state_, log_probs_ = sess.run( [outputs, next_beam_state, beam_state, log_probs]) @@ -338,7 +338,7 @@ class TestBeamStep(test.TestCase): end_token=self.end_token, length_penalty_weight=self.length_penalty_weight) - with self.test_session() as sess: + with self.cached_session() as sess: outputs_, next_state_, state_, log_probs_ = sess.run( [outputs, next_beam_state, beam_state, log_probs]) @@ -436,7 +436,7 @@ class TestLargeBeamStep(test.TestCase): end_token=self.end_token, length_penalty_weight=self.length_penalty_weight) - with self.test_session() as sess: + with self.cached_session() as sess: outputs_, next_state_, _, _ = sess.run( [outputs, next_beam_state, beam_state, log_probs]) @@ -471,7 +471,7 @@ class BeamSearchDecoderTest(test.TestCase): output_layer = layers_core.Dense(vocab_size, use_bias=True, activation=None) beam_width = 3 - with self.test_session() as sess: + with self.cached_session() as sess: batch_size_tensor = constant_op.constant(batch_size) embedding = np.random.randn(vocab_size, embedding_dim).astype(np.float32) cell = rnn_cell.LSTMCell(cell_depth) diff --git a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_ops_test.py b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_ops_test.py index 277c5b6ef7..9662a5780a 100644 --- a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_ops_test.py +++ b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_ops_test.py @@ -67,7 +67,7 @@ class GatherTreeTest(test.TestCase): parent_ids=parent_ids, max_sequence_lengths=max_sequence_lengths, end_token=end_token) - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError( r"parent id -1 at \(batch, time, beam\) == \(0, 0, 
1\)"): _ = beams.eval() |