author      2018-08-22 15:02:24 -0700
committer   2018-08-22 15:13:03 -0700
commit      d44030304e4c0a50a3169ea06b528bd780780acb (patch)
tree        d3a2903a0f2360e37ceae93e46af7cfe62d32d4f /tensorflow/contrib/opt/python
parent      fb3bde1994d4ed7d6cb928326e8e2a1777930e5e (diff)
Move from deprecated self.test_session() to self.cached_session().
self.test_session() was deprecated in 9962eb5e84b15e309410071b06c2ed2d6148ed44 because its name confuses readers of the test. This change moves to cached_session() instead, which is more explicit about:
* the fact that the session may be reused.
* the fact that the session is not closed even at the end of a "with self.test_session()" block.
PiperOrigin-RevId: 209836477
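
The semantics called out above can be demonstrated directly. The following is a minimal illustrative sketch, not part of this change; it assumes the TF 1.x graph-mode test infrastructure (tf.test.TestCase provides cached_session()), and the test class and method names are hypothetical:

    import tensorflow as tf

    class CachedSessionSemanticsTest(tf.test.TestCase):

      def testSessionSurvivesWithBlock(self):
        c = tf.constant(42.0)
        with self.cached_session() as sess:
          self.assertEqual(42.0, sess.run(c))
        # Exiting the `with` block did not close the session: the same
        # cached session is still usable here, and a later cached_session()
        # call in this test returns it again rather than creating a new one.
        self.assertEqual(42.0, sess.run(c))

    if __name__ == '__main__':
      tf.test.main()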
Diffstat (limited to 'tensorflow/contrib/opt/python')
12 files changed, 51 insertions, 51 deletions
diff --git a/tensorflow/contrib/opt/python/training/adamax_test.py b/tensorflow/contrib/opt/python/training/adamax_test.py
index 5790d8a3f1..61d8b94eca 100644
--- a/tensorflow/contrib/opt/python/training/adamax_test.py
+++ b/tensorflow/contrib/opt/python/training/adamax_test.py
@@ -74,7 +74,7 @@ class AdaMaxOptimizerTest(test.TestCase):

   def doTestSparse(self, use_resource=False):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)
         m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots()
@@ -142,7 +142,7 @@ class AdaMaxOptimizerTest(test.TestCase):

   def testSparseRepeatedIndices(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         repeated_index_update_var = variables.Variable(
             [[1.0], [2.0]], dtype=dtype)
         aggregated_update_var = variables.Variable(
@@ -233,7 +233,7 @@ class AdaMaxOptimizerTest(test.TestCase):
                      opt.get_slot(var=var0, name="m").name)

   def testBasic(self):
-    with self.test_session():
+    with self.cached_session():
       self.doTestBasic(use_resource=False)

   @test_util.run_in_graph_and_eager_modes(reset_test=True)
@@ -242,7 +242,7 @@ class AdaMaxOptimizerTest(test.TestCase):

   def testTensorLearningRate(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -278,7 +278,7 @@ class AdaMaxOptimizerTest(test.TestCase):

   def testSharing(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
diff --git a/tensorflow/contrib/opt/python/training/external_optimizer_test.py b/tensorflow/contrib/opt/python/training/external_optimizer_test.py
index 953586ee70..9997103016 100644
--- a/tensorflow/contrib/opt/python/training/external_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/external_optimizer_test.py
@@ -85,7 +85,7 @@ class ExternalOptimizerInterfaceTest(TestCase):

     optimizer = MockOptimizerInterface(loss)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       sess.run(variables.global_variables_initializer())

       optimizer.minimize(sess)
@@ -107,7 +107,7 @@ class ExternalOptimizerInterfaceTest(TestCase):

     optimizer = MockOptimizerInterface(loss)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       sess.run(variables.global_variables_initializer())

       initial_vector_val = sess.run(vector)
@@ -164,7 +164,7 @@ class ScipyOptimizerInterfaceTest(TestCase):
     optimizer = external_optimizer.ScipyOptimizerInterface(
         self._objective(x), method=method, options=options)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       sess.run(variables.global_variables_initializer())
       optimizer.minimize(sess)
@@ -176,7 +176,7 @@ class ScipyOptimizerInterfaceTest(TestCase):
     x = variables.Variable(array_ops.zeros(dimension))
     optimizer = external_optimizer.ScipyOptimizerInterface(self._objective(x))

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       sess.run(variables.global_variables_initializer())
       optimizer.minimize(sess)
@@ -242,7 +242,7 @@ class ScipyOptimizerInterfaceTest(TestCase):
     optimizer = external_optimizer.ScipyOptimizerInterface(
         loss, equalities=equalities, inequalities=inequalities, method='SLSQP')

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       sess.run(variables.global_variables_initializer())
       optimizer.minimize(sess)
       self.assertAllClose(np.ones(2), sess.run(vector))
@@ -260,7 +260,7 @@ class ScipyOptimizerInterfaceTest(TestCase):
     optimizer = external_optimizer.ScipyOptimizerInterface(
         loss, var_to_bounds=var_to_bounds)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       sess.run(variables.global_variables_initializer())
       optimizer.minimize(sess)
       self.assertAllClose(np.ones(2), sess.run(vector))
@@ -277,7 +277,7 @@ class ScipyOptimizerInterfaceTest(TestCase):
     optimizer = external_optimizer.ScipyOptimizerInterface(
         loss, var_to_bounds=var_to_bounds)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       sess.run(variables.global_variables_initializer())
       optimizer.minimize(sess)
       self.assertAllClose([0., 2.], sess.run(vector))
@@ -293,7 +293,7 @@ class ScipyOptimizerInterfaceTest(TestCase):
     optimizer = external_optimizer.ScipyOptimizerInterface(
         loss, method='SLSQP')

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       sess.run(variables.global_variables_initializer())
       optimizer.minimize(sess)
       method = optimizer.optimizer_kwargs.get('method')
@@ -312,7 +312,7 @@ class ScipyOptimizerInterfaceTest(TestCase):
     optimizer = external_optimizer.ScipyOptimizerInterface(loss, method='SLSQP')

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       sess.run(variables.global_variables_initializer())

       initial_vector_val = sess.run(vector)
diff --git a/tensorflow/contrib/opt/python/training/ggt_test.py b/tensorflow/contrib/opt/python/training/ggt_test.py
index 1d2a79957b..1775edabb3 100644
--- a/tensorflow/contrib/opt/python/training/ggt_test.py
+++ b/tensorflow/contrib/opt/python/training/ggt_test.py
@@ -171,7 +171,7 @@ class GGTOptimizerTest(test.TestCase):
       self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

   def testBasic(self):
-    with self.test_session():
+    with self.cached_session():
       self.doTestBasic(use_resource=False)

   @test_util.run_in_graph_and_eager_modes(reset_test=True)
diff --git a/tensorflow/contrib/opt/python/training/lars_optimizer_test.py b/tensorflow/contrib/opt/python/training/lars_optimizer_test.py
index d94249b994..b76db763da 100644
--- a/tensorflow/contrib/opt/python/training/lars_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/lars_optimizer_test.py
@@ -31,7 +31,7 @@ class LARSOptimizerTest(test.TestCase):
   def testLARSGradientOneStep(self):
     for _ in range(10):
       for dtype in [dtypes.float32, dtypes.float64]:
-        with self.test_session() as sess:
+        with self.cached_session() as sess:
          shape = [3, 3]
          var_np = np.ones(shape)
          grad_np = np.ones(shape)
@@ -77,7 +77,7 @@ class LARSOptimizerTest(test.TestCase):
   def testLARSGradientMultiStep(self):
     for _ in range(10):
       for dtype in [dtypes.float32, dtypes.float64]:
-        with self.test_session() as sess:
+        with self.cached_session() as sess:
          shape = [3, 3]
          var_np = np.ones(shape)
          grad_np = np.ones(shape)
diff --git a/tensorflow/contrib/opt/python/training/lazy_adam_optimizer_test.py b/tensorflow/contrib/opt/python/training/lazy_adam_optimizer_test.py
index a16857db7d..dc4c462ce4 100644
--- a/tensorflow/contrib/opt/python/training/lazy_adam_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/lazy_adam_optimizer_test.py
@@ -53,7 +53,7 @@ class AdamOptimizerTest(test.TestCase):

   def testSparse(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -109,7 +109,7 @@ class AdamOptimizerTest(test.TestCase):

   def testSparseRepeatedIndices(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         repeated_index_update_var = variables.Variable(
             [[1.0], [2.0]], dtype=dtype)
         aggregated_update_var = variables.Variable(
diff --git a/tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py b/tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py
index d15716f6f6..f22e724528 100644
--- a/tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/moving_average_optimizer_test.py
@@ -165,7 +165,7 @@ class MovingAverageOptimizerTest(test.TestCase):
         self.assertLess(avg_val1[i], orig_val1[i])

   def testFailWhenSaverCreatedBeforeInitialized(self):
-    with self.test_session():
+    with self.cached_session():
       var = variables.Variable([1.0], name='var', dtype=dtypes.float32)
       opt = moving_average_optimizer.MovingAverageOptimizer(
           gradient_descent.GradientDescentOptimizer(learning_rate=2.0))
@@ -187,7 +187,7 @@ class MovingAverageOptimizerTest(test.TestCase):
         self.apply_gradients_called = True
         return super(WrapperOptimizer, self).apply_gradients(*args, **kwargs)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       var = variables.Variable([1.2], name='var', dtype=dtypes.float32)
       loss = var ** 2
       wrapper_opt = WrapperOptimizer(learning_rate=2.0)
diff --git a/tensorflow/contrib/opt/python/training/multitask_optimizer_wrapper_test.py b/tensorflow/contrib/opt/python/training/multitask_optimizer_wrapper_test.py
index 618d8eb18d..904aa9ab13 100644
--- a/tensorflow/contrib/opt/python/training/multitask_optimizer_wrapper_test.py
+++ b/tensorflow/contrib/opt/python/training/multitask_optimizer_wrapper_test.py
@@ -34,7 +34,7 @@ class MultitaskOptimizerWrapperTest(test.TestCase):
   """

   def testWrapper(self):
-    with self.test_session():
+    with self.cached_session():
       var0 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
       var1 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
       grads0 = constant_op.constant([0.1, 0.1], dtype=dtypes.float32)
@@ -92,7 +92,7 @@ class MultitaskOptimizerWrapperTest(test.TestCase):
                           self.evaluate(slot1))

   def testGradientClipping(self):
-    with self.test_session():
+    with self.cached_session():
       var0 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
       var1 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
       var2 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
diff --git a/tensorflow/contrib/opt/python/training/nadam_optimizer_test.py b/tensorflow/contrib/opt/python/training/nadam_optimizer_test.py
index 825c08a09a..85e05ce71c 100644
--- a/tensorflow/contrib/opt/python/training/nadam_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/nadam_optimizer_test.py
@@ -53,7 +53,7 @@ class NadamOptimizerTest(test.TestCase):

   def doTestSparse(self, use_resource=False):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -106,7 +106,7 @@ class NadamOptimizerTest(test.TestCase):

   def doTestBasic(self, use_resource=False):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         # Initialize variables for numpy implementation.
         m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
         var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
diff --git a/tensorflow/contrib/opt/python/training/reg_adagrad_optimizer_test.py b/tensorflow/contrib/opt/python/training/reg_adagrad_optimizer_test.py
index ea56e1646a..c09e2ac76d 100644
--- a/tensorflow/contrib/opt/python/training/reg_adagrad_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/reg_adagrad_optimizer_test.py
@@ -36,7 +36,7 @@ class RegAdagradOptimizerTest(test.TestCase):

   def doTestBasic(self, use_locking=False, use_resource=False):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         if use_resource:
           var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
           var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
@@ -73,7 +73,7 @@ class RegAdagradOptimizerTest(test.TestCase):

   def testMinimizeSparseResourceVariable(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         var0 = resource_variable_ops.ResourceVariable(
             [[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
         x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
@@ -92,7 +92,7 @@ class RegAdagradOptimizerTest(test.TestCase):

   def testTensorLearningRate(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([3.0, 4.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
@@ -116,7 +116,7 @@ class RegAdagradOptimizerTest(test.TestCase):

   def testSparseBasic(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
         var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
         grads0 = ops.IndexedSlices(
@@ -144,7 +144,7 @@ class RegAdagradOptimizerTest(test.TestCase):

   def testSparseRepeatedIndices(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         repeated_index_update_var = variables.Variable(
             [[1.0], [2.0]], dtype=dtype)
         aggregated_update_var = variables.Variable([[1.0], [2.0]], dtype=dtype)
@@ -170,7 +170,7 @@ class RegAdagradOptimizerTest(test.TestCase):

   def testSparseRepeatedIndicesResourceVariable(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         var_repeated = resource_variable_ops.ResourceVariable(
             [1.0, 2.0], dtype=dtype)
         loss_repeated = math_ops.reduce_sum(
@@ -194,7 +194,7 @@ class RegAdagradOptimizerTest(test.TestCase):

   def testSparseStability(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         shape = [1, 6]
         var0 = variables.Variable(
             [[
@@ -230,7 +230,7 @@ class RegAdagradOptimizerTest(test.TestCase):

   def testSharing(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([3.0, 4.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
@@ -263,7 +263,7 @@ class RegAdagradOptimizerTest(test.TestCase):
         np.array([2.715679168701172, 3.715679168701172]), var1.eval())

   def testDynamicShapeVariable_Ok(self):
-    with self.test_session():
+    with self.cached_session():
       v = variable_scope.get_variable(
           "v", initializer=constant_op.constant(1.), validate_shape=False)
       self.assertFalse(v.shape.is_fully_defined())
@@ -274,7 +274,7 @@ class RegAdagradOptimizerTest(test.TestCase):
   def testSkipUpdatingSlots(self):
     iav = 0.130005  # A value that works with float16
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([3.0, 4.0], dtype=dtype)
         grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
@@ -306,7 +306,7 @@ class RegAdagradOptimizerTest(test.TestCase):
   def testSparseSkipUpdatingSlots(self):
     iav = 0.130005  # A value that works with float16
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
-      with self.test_session():
+      with self.cached_session():
         var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
         var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
         grads0 = ops.IndexedSlices(
diff --git a/tensorflow/contrib/opt/python/training/shampoo_test.py b/tensorflow/contrib/opt/python/training/shampoo_test.py
index 2e0a202ae2..b3688ab181 100644
--- a/tensorflow/contrib/opt/python/training/shampoo_test.py
+++ b/tensorflow/contrib/opt/python/training/shampoo_test.py
@@ -52,7 +52,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
     grad_np = np.random.rand(size)
     grad_np_2 = np.random.rand(size)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
@@ -103,7 +103,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
     grad_np = np.random.rand(size[0], size[1])
     grad_np_2 = np.random.rand(size[0], size[1])

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
@@ -162,7 +162,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
     grad_np = np.random.rand(size[0], size[1], size[2])
     grad_np_2 = np.random.rand(size[0], size[1], size[2])

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
@@ -240,7 +240,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
     grad_np = np.random.rand(size)
     grad_np_2 = np.random.rand(size)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
@@ -294,7 +294,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
     grad_np = np.random.rand(size[0], size[1])
     grad_np_2 = np.random.rand(size[0], size[1])

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
@@ -365,7 +365,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
             replace=False))
     grad_np_2 = np.random.rand(sample_size_2, size[1])

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
@@ -445,7 +445,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
             replace=False))
     grad_np = np.random.rand(sample_size, size[1], size[2])

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
@@ -512,7 +512,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
     gbar_decay = 0.9
     gbar_weight = 0.1

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
@@ -601,7 +601,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
     mat_g3_a = np.eye(size[2])
     mat_g3 = np.zeros_like(mat_g3_a)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
@@ -672,7 +672,7 @@ class ShampooTest(test.TestCase, parameterized.TestCase):
     mat_g3_a = np.eye(size[2])
     mat_g3 = np.zeros_like(mat_g3_a)

-    with self.test_session() as sess:
+    with self.cached_session() as sess:
       global_step = variables.Variable(
           0, dtype=dtypes.int64, use_resource=use_resource_var)
       var = variables.Variable(
diff --git a/tensorflow/contrib/opt/python/training/sign_decay_test.py b/tensorflow/contrib/opt/python/training/sign_decay_test.py
index c31cb924ea..3a84789afd 100644
--- a/tensorflow/contrib/opt/python/training/sign_decay_test.py
+++ b/tensorflow/contrib/opt/python/training/sign_decay_test.py
@@ -66,7 +66,7 @@ class SignDecaysTest(test.TestCase):
     linear_decay_fn = sign_decay.get_linear_decay_fn(num_training_steps)

     for step in range(0, 1000, 100):
-      with self.test_session():
+      with self.cached_session():
         tf_decayed = linear_decay_fn(step).eval()
         py_decayed = py_linear_decay_fn(num_training_steps)(step)
         self.assertAlmostEqual(tf_decayed, py_decayed, places=4)
@@ -78,7 +78,7 @@ class SignDecaysTest(test.TestCase):
         num_training_steps, num_periods=5, zero_after=2)

     for step in range(0, 1000, 100):
-      with self.test_session():
+      with self.cached_session():
         tf_decayed = cosine_decay_fn(step).eval()
         py_decayed = py_cosine_decay_fn(num_training_steps)(step)
         self.assertAlmostEqual(tf_decayed, py_decayed, places=4)
@@ -95,7 +95,7 @@ class SignDecaysTest(test.TestCase):
         num_training_steps, num_periods=5, zero_after=2)

     for step in range(0, 1000, 100):
-      with self.test_session():
+      with self.cached_session():
         tf_decayed = restart_decay_fn(step).eval()
         py_decayed = py_restart_decay_fn(num_training_steps)(step)
         self.assertAlmostEqual(tf_decayed, py_decayed, places=4)
diff --git a/tensorflow/contrib/opt/python/training/variable_clipping_optimizer_test.py b/tensorflow/contrib/opt/python/training/variable_clipping_optimizer_test.py
index fdda86b0b5..ff0ea8d766 100644
--- a/tensorflow/contrib/opt/python/training/variable_clipping_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/variable_clipping_optimizer_test.py
@@ -158,7 +158,7 @@ class VariableClippingOptimizerTest(test.TestCase):

   def testDenseLocal(self):
     for dtype in [dtypes.float32, dtypes.float64, dtypes.half]:
-      with self.test_session():
+      with self.cached_session():
         var0, var1, update_op = self._setupDense(False, dtype)
         self._assertDenseCorrect(var0, var1, update_op)
@@ -171,7 +171,7 @@ class VariableClippingOptimizerTest(test.TestCase):

   def testSparseLocal(self):
     for dtype in [dtypes.float64, dtypes.float32, dtypes.half]:
-      with self.test_session():
+      with self.cached_session():
         var0, var1, update_op = self._setupSparse(False, dtype)
         self._assertSparseCorrect(var0, var1, update_op)