diff options
author | A. Unique TensorFlower <gardener@tensorflow.org> | 2018-08-21 19:10:08 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2018-08-21 19:16:45 -0700 |
commit | 59fa06466894daf708f40368cd2ee56ed4d160c9 (patch) | |
tree | 4b25541aac1fc9eaf2f49b5ee7d47b3c2cde508f /tensorflow/contrib/distributions | |
parent | 754fffb399efa6204bb8aae51ce99042cb2ab18e (diff) |
Move from deprecated self.test_session() to self.cached_session().
self.test_session() has been deprecated in 9962eb5e84b15e309410071b06c2ed2d6148ed44 as its name confuses readers of the test. Moving to cached_session() instead which is more explicit about:
* the fact that the session may be reused.
* the session is not closed even when doing a "with self.cached_session()" statement.
PiperOrigin-RevId: 209700663
Diffstat (limited to 'tensorflow/contrib/distributions')
39 files changed, 462 insertions, 462 deletions
diff --git a/tensorflow/contrib/distributions/python/kernel_tests/autoregressive_test.py b/tensorflow/contrib/distributions/python/kernel_tests/autoregressive_test.py index 0928dc3f35..a22d4d825b 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/autoregressive_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/autoregressive_test.py @@ -53,7 +53,7 @@ class AutogressiveTest(test_util.VectorDistributionTestHelpers, test.TestCase): def testSampleAndLogProbConsistency(self): batch_shape = [] event_size = 2 - with self.test_session() as sess: + with self.cached_session() as sess: batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0) sample0 = array_ops.zeros(batch_event_shape) affine = Affine(scale_tril=self._random_scale_tril(event_size)) @@ -67,7 +67,7 @@ class AutogressiveTest(test_util.VectorDistributionTestHelpers, test.TestCase): sample_shape = np.int32([4, 5]) batch_shape = np.int32([]) event_size = np.int32(2) - with self.test_session() as sess: + with self.cached_session() as sess: batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0) sample0 = array_ops.zeros(batch_event_shape) affine = Affine(scale_tril=self._random_scale_tril(event_size)) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/batch_reshape_test.py b/tensorflow/contrib/distributions/python/kernel_tests/batch_reshape_test.py index f2bb2d3325..62623deccd 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/batch_reshape_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/batch_reshape_test.py @@ -76,7 +76,7 @@ class _BatchReshapeTest(object): wishart.log_prob(x), expected_log_prob_shape) actual_log_prob = reshape_wishart.log_prob(expected_sample) - with self.test_session() as sess: + with self.cached_session() as sess: [ batch_shape_, event_shape_, @@ -132,7 +132,7 @@ class _BatchReshapeTest(object): wishart.variance(), expected_matrix_stat_shape) actual_variance = 
reshape_wishart.variance() - with self.test_session() as sess: + with self.cached_session() as sess: [ expected_entropy_, actual_entropy_, expected_mean_, actual_mean_, @@ -202,7 +202,7 @@ class _BatchReshapeTest(object): normal.log_prob(x), expected_log_prob_shape) actual_log_prob = reshape_normal.log_prob(expected_sample) - with self.test_session() as sess: + with self.cached_session() as sess: [ batch_shape_, event_shape_, @@ -255,7 +255,7 @@ class _BatchReshapeTest(object): normal.variance(), expected_scalar_stat_shape) actual_variance = reshape_normal.variance() - with self.test_session() as sess: + with self.cached_session() as sess: [ expected_entropy_, actual_entropy_, expected_mean_, actual_mean_, @@ -323,7 +323,7 @@ class _BatchReshapeTest(object): mvn.log_prob(x), expected_log_prob_shape) actual_log_prob = reshape_mvn.log_prob(expected_sample) - with self.test_session() as sess: + with self.cached_session() as sess: [ batch_shape_, event_shape_, @@ -385,7 +385,7 @@ class _BatchReshapeTest(object): mvn.covariance(), expected_matrix_stat_shape) actual_covariance = reshape_mvn.covariance() - with self.test_session() as sess: + with self.cached_session() as sess: [ expected_entropy_, actual_entropy_, expected_mean_, actual_mean_, @@ -447,7 +447,7 @@ class _BatchReshapeTest(object): validate_args=True) else: - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError(r"Shape sizes do not match."): batch_reshape_lib.BatchReshape( distribution=mvn, @@ -482,7 +482,7 @@ class _BatchReshapeTest(object): validate_args=True) else: - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError(r".*must be >=-1.*"): batch_reshape_lib.BatchReshape( distribution=mvn, @@ -512,7 +512,7 @@ class _BatchReshapeTest(object): validate_args=True) else: - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError(r".*must be a vector.*"): batch_reshape_lib.BatchReshape( distribution=mvn, @@ -548,11 
+548,11 @@ class _BatchReshapeTest(object): return with self.assertRaisesOpError("too few batch and event dims"): - with self.test_session(): + with self.cached_session(): poisson_141_reshaped.log_prob(x_4).eval() with self.assertRaisesOpError("unexpected batch and event shape"): - with self.test_session(): + with self.cached_session(): poisson_141_reshaped.log_prob(x_114).eval() diff --git a/tensorflow/contrib/distributions/python/kernel_tests/binomial_test.py b/tensorflow/contrib/distributions/python/kernel_tests/binomial_test.py index d30f6e418d..c317393fbc 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/binomial_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/binomial_test.py @@ -28,7 +28,7 @@ from tensorflow.python.platform import test class BinomialTest(test.TestCase): def testSimpleShapes(self): - with self.test_session(): + with self.cached_session(): p = np.float32(np.random.beta(1, 1)) binom = binomial.Binomial(total_count=1., probs=p) self.assertAllEqual([], binom.event_shape_tensor().eval()) @@ -37,7 +37,7 @@ class BinomialTest(test.TestCase): self.assertEqual(tensor_shape.TensorShape([]), binom.batch_shape) def testComplexShapes(self): - with self.test_session(): + with self.cached_session(): p = np.random.beta(1, 1, size=(3, 2)).astype(np.float32) n = [[3., 2], [4, 5], [6, 7]] binom = binomial.Binomial(total_count=n, probs=p) @@ -50,14 +50,14 @@ class BinomialTest(test.TestCase): def testNProperty(self): p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]] n = [[3.], [4]] - with self.test_session(): + with self.cached_session(): binom = binomial.Binomial(total_count=n, probs=p) self.assertEqual((2, 1), binom.total_count.get_shape()) self.assertAllClose(n, binom.total_count.eval()) def testPProperty(self): p = [[0.1, 0.2, 0.7]] - with self.test_session(): + with self.cached_session(): binom = binomial.Binomial(total_count=3., probs=p) self.assertEqual((1, 3), binom.probs.get_shape()) self.assertEqual((1, 3), 
binom.logits.get_shape()) @@ -65,7 +65,7 @@ class BinomialTest(test.TestCase): def testLogitsProperty(self): logits = [[0., 9., -0.5]] - with self.test_session(): + with self.cached_session(): binom = binomial.Binomial(total_count=3., logits=logits) self.assertEqual((1, 3), binom.probs.get_shape()) self.assertEqual((1, 3), binom.logits.get_shape()) @@ -74,7 +74,7 @@ class BinomialTest(test.TestCase): def testPmfAndCdfNandCountsAgree(self): p = [[0.1, 0.2, 0.7]] n = [[5.]] - with self.test_session(): + with self.cached_session(): binom = binomial.Binomial(total_count=n, probs=p, validate_args=True) binom.prob([2., 3, 2]).eval() binom.prob([3., 1, 2]).eval() @@ -92,7 +92,7 @@ class BinomialTest(test.TestCase): def testPmfAndCdfNonIntegerCounts(self): p = [[0.1, 0.2, 0.7]] n = [[5.]] - with self.test_session(): + with self.cached_session(): # No errors with integer n. binom = binomial.Binomial(total_count=n, probs=p, validate_args=True) binom.prob([2., 3, 2]).eval() @@ -116,7 +116,7 @@ class BinomialTest(test.TestCase): binom.cdf([1.0, 2.5, 1.5]).eval() def testPmfAndCdfBothZeroBatches(self): - with self.test_session(): + with self.cached_session(): # Both zero-batches. No broadcast p = 0.5 counts = 1. @@ -129,7 +129,7 @@ class BinomialTest(test.TestCase): self.assertEqual((), cdf.get_shape()) def testPmfAndCdfBothZeroBatchesNontrivialN(self): - with self.test_session(): + with self.cached_session(): # Both zero-batches. No broadcast p = 0.1 counts = 3. 
@@ -142,7 +142,7 @@ class BinomialTest(test.TestCase): self.assertEqual((), cdf.get_shape()) def testPmfAndCdfPStretchedInBroadcastWhenSameRank(self): - with self.test_session(): + with self.cached_session(): p = [[0.1, 0.9]] counts = [[1., 2.]] binom = binomial.Binomial(total_count=3., probs=p) @@ -154,7 +154,7 @@ class BinomialTest(test.TestCase): self.assertEqual((1, 2), cdf.get_shape()) def testPmfAndCdfPStretchedInBroadcastWhenLowerRank(self): - with self.test_session(): + with self.cached_session(): p = [0.1, 0.4] counts = [[1.], [0.]] binom = binomial.Binomial(total_count=1., probs=p) @@ -166,7 +166,7 @@ class BinomialTest(test.TestCase): self.assertEqual((2, 2), cdf.get_shape()) def testBinomialMean(self): - with self.test_session(): + with self.cached_session(): n = 5. p = [0.1, 0.2, 0.7] binom = binomial.Binomial(total_count=n, probs=p) @@ -175,7 +175,7 @@ class BinomialTest(test.TestCase): self.assertAllClose(expected_means, binom.mean().eval()) def testBinomialVariance(self): - with self.test_session(): + with self.cached_session(): n = 5. p = [0.1, 0.2, 0.7] binom = binomial.Binomial(total_count=n, probs=p) @@ -184,7 +184,7 @@ class BinomialTest(test.TestCase): self.assertAllClose(expected_variances, binom.variance().eval()) def testBinomialMode(self): - with self.test_session(): + with self.cached_session(): n = 5. p = [0.1, 0.2, 0.7] binom = binomial.Binomial(total_count=n, probs=p) @@ -193,7 +193,7 @@ class BinomialTest(test.TestCase): self.assertAllClose(expected_modes, binom.mode().eval()) def testBinomialMultipleMode(self): - with self.test_session(): + with self.cached_session(): n = 9. 
p = [0.1, 0.2, 0.7] binom = binomial.Binomial(total_count=n, probs=p) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py b/tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py index afa6ed9255..4411d6f461 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py @@ -56,7 +56,7 @@ class CauchyTest(test.TestCase): self.assertAllEqual(all_true, is_finite) def _testParamShapes(self, sample_shape, expected): - with self.test_session(): + with self.cached_session(): param_shapes = cauchy_lib.Cauchy.param_shapes(sample_shape) loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"] self.assertAllEqual(expected, loc_shape.eval()) @@ -85,7 +85,7 @@ class CauchyTest(test.TestCase): tensor_shape.TensorShape(sample_shape), sample_shape) def testCauchyLogPDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 loc = constant_op.constant([3.0] * batch_size) scale = constant_op.constant([np.sqrt(10.0)] * batch_size) @@ -112,7 +112,7 @@ class CauchyTest(test.TestCase): self.assertAllClose(np.exp(expected_log_pdf), pdf.eval()) def testCauchyLogPDFMultidimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 loc = constant_op.constant([[3.0, -3.0]] * batch_size) scale = constant_op.constant( @@ -144,7 +144,7 @@ class CauchyTest(test.TestCase): self.assertAllClose(np.exp(expected_log_pdf), pdf_values) def testCauchyCDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 50 loc = self._rng.randn(batch_size) scale = self._rng.rand(batch_size) + 1.0 @@ -162,7 +162,7 @@ class CauchyTest(test.TestCase): self.assertAllClose(expected_cdf, cdf.eval(), atol=0) def testCauchySurvivalFunction(self): - with self.test_session(): + with self.cached_session(): batch_size = 50 loc = self._rng.randn(batch_size) scale = self._rng.rand(batch_size) + 1.0 @@ -181,7 
+181,7 @@ class CauchyTest(test.TestCase): self.assertAllClose(expected_sf, sf.eval(), atol=0) def testCauchyLogCDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 50 loc = self._rng.randn(batch_size) scale = self._rng.rand(batch_size) + 1.0 @@ -221,7 +221,7 @@ class CauchyTest(test.TestCase): self.assertAllFinite(grads[1]) def testCauchyLogSurvivalFunction(self): - with self.test_session(): + with self.cached_session(): batch_size = 50 loc = self._rng.randn(batch_size) scale = self._rng.rand(batch_size) + 1.0 @@ -241,7 +241,7 @@ class CauchyTest(test.TestCase): self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5) def testCauchyEntropy(self): - with self.test_session(): + with self.cached_session(): loc = np.array([1.0, 1.0, 1.0]) scale = np.array([[1.0, 2.0, 3.0]]) cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale) @@ -259,7 +259,7 @@ class CauchyTest(test.TestCase): self.assertAllClose(expected_entropy, entropy.eval()) def testCauchyMode(self): - with self.test_session(): + with self.cached_session(): # Mu will be broadcast to [7, 7, 7]. loc = [7.] scale = [11., 12., 13.] @@ -270,7 +270,7 @@ class CauchyTest(test.TestCase): self.assertAllEqual([7., 7, 7], cauchy.mode().eval()) def testCauchyMean(self): - with self.test_session(): + with self.cached_session(): loc = [1., 2., 3.] scale = [7.] cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale) @@ -279,7 +279,7 @@ class CauchyTest(test.TestCase): self.assertAllEqual([np.nan] * 3, cauchy.mean().eval()) def testCauchyNanMean(self): - with self.test_session(): + with self.cached_session(): loc = [1., 2., 3.] scale = [7.] 
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False) @@ -288,7 +288,7 @@ class CauchyTest(test.TestCase): cauchy.mean().eval() def testCauchyQuantile(self): - with self.test_session(): + with self.cached_session(): batch_size = 50 loc = self._rng.randn(batch_size) scale = self._rng.rand(batch_size) + 1.0 @@ -308,7 +308,7 @@ class CauchyTest(test.TestCase): self.assertAllClose(expected_x, x.eval(), atol=0.) def testCauchyVariance(self): - with self.test_session(): + with self.cached_session(): # scale will be broadcast to [7, 7, 7] loc = [1., 2., 3.] scale = [7.] @@ -318,7 +318,7 @@ class CauchyTest(test.TestCase): self.assertAllEqual([np.nan] * 3, cauchy.variance().eval()) def testCauchyNanVariance(self): - with self.test_session(): + with self.cached_session(): # scale will be broadcast to [7, 7, 7] loc = [1., 2., 3.] scale = [7.] @@ -328,7 +328,7 @@ class CauchyTest(test.TestCase): cauchy.variance().eval() def testCauchyStandardDeviation(self): - with self.test_session(): + with self.cached_session(): # scale will be broadcast to [7, 7, 7] loc = [1., 2., 3.] scale = [7.] @@ -338,7 +338,7 @@ class CauchyTest(test.TestCase): self.assertAllEqual([np.nan] * 3, cauchy.stddev().eval()) def testCauchyNanStandardDeviation(self): - with self.test_session(): + with self.cached_session(): # scale will be broadcast to [7, 7, 7] loc = [1., 2., 3.] scale = [7.] 
@@ -348,7 +348,7 @@ class CauchyTest(test.TestCase): cauchy.stddev().eval() def testCauchySample(self): - with self.test_session(): + with self.cached_session(): loc = constant_op.constant(3.0) scale = constant_op.constant(1.0) loc_v = 3.0 @@ -373,7 +373,7 @@ class CauchyTest(test.TestCase): self.assertAllEqual(expected_shape, sample_values.shape) def testCauchySampleMultiDimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 2 loc = constant_op.constant([[3.0, -3.0]] * batch_size) scale = constant_op.constant([[0.5, 1.0]] * batch_size) @@ -399,13 +399,13 @@ class CauchyTest(test.TestCase): self.assertAllEqual(expected_shape, sample_values.shape) def testCauchyNegativeLocFails(self): - with self.test_session(): + with self.cached_session(): cauchy = cauchy_lib.Cauchy(loc=[1.], scale=[-5.], validate_args=True) with self.assertRaisesOpError("Condition x > 0 did not hold"): cauchy.mode().eval() def testCauchyShape(self): - with self.test_session(): + with self.cached_session(): loc = constant_op.constant([-3.0] * 5) scale = constant_op.constant(11.0) cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale) @@ -420,7 +420,7 @@ class CauchyTest(test.TestCase): scale = array_ops.placeholder(dtype=dtypes.float32) cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale) - with self.test_session() as sess: + with self.cached_session() as sess: # get_batch_shape should return an "<unknown>" tensor. 
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape(None)) self.assertEqual(cauchy.event_shape, ()) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/chi2_test.py b/tensorflow/contrib/distributions/python/kernel_tests/chi2_test.py index 75d48791ec..3b5a6aa90c 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/chi2_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/chi2_test.py @@ -29,7 +29,7 @@ from tensorflow.python.platform import test class Chi2Test(test.TestCase): def testChi2LogPDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 df = constant_op.constant([2.0] * batch_size, dtype=np.float64) df_v = 2.0 @@ -46,7 +46,7 @@ class Chi2Test(test.TestCase): self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf)) def testChi2CDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 df = constant_op.constant([2.0] * batch_size, dtype=np.float64) df_v = 2.0 @@ -60,7 +60,7 @@ class Chi2Test(test.TestCase): self.assertAllClose(cdf.eval(), expected_cdf) def testChi2Mean(self): - with self.test_session(): + with self.cached_session(): df_v = np.array([1., 3, 5], dtype=np.float64) expected_mean = stats.chi2.mean(df_v) chi2 = chi2_lib.Chi2(df=df_v) @@ -68,7 +68,7 @@ class Chi2Test(test.TestCase): self.assertAllClose(chi2.mean().eval(), expected_mean) def testChi2Variance(self): - with self.test_session(): + with self.cached_session(): df_v = np.array([1., 3, 5], np.float64) expected_variances = stats.chi2.var(df_v) chi2 = chi2_lib.Chi2(df=df_v) @@ -76,7 +76,7 @@ class Chi2Test(test.TestCase): self.assertAllClose(chi2.variance().eval(), expected_variances) def testChi2Entropy(self): - with self.test_session(): + with self.cached_session(): df_v = np.array([1., 3, 5], dtype=np.float64) expected_entropy = stats.chi2.entropy(df_v) chi2 = chi2_lib.Chi2(df=df_v) @@ -84,7 +84,7 @@ class Chi2Test(test.TestCase): self.assertAllClose(chi2.entropy().eval(), 
expected_entropy) def testChi2WithAbsDf(self): - with self.test_session(): + with self.cached_session(): df_v = np.array([-1.3, -3.2, 5], dtype=np.float64) chi2 = chi2_lib.Chi2WithAbsDf(df=df_v) self.assertAllClose( diff --git a/tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py b/tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py index 4e8989b6c2..7e63b5ca5f 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py @@ -69,7 +69,7 @@ class ConditionalTransformedDistributionTest( return ds.ConditionalTransformedDistribution def testConditioning(self): - with self.test_session(): + with self.cached_session(): conditional_normal = ds.ConditionalTransformedDistribution( distribution=ds.Normal(loc=0., scale=1.), bijector=_ChooseLocation(loc=[-100., 100.])) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/deterministic_test.py b/tensorflow/contrib/distributions/python/kernel_tests/deterministic_test.py index 200310bc41..36fc7a70c8 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/deterministic_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/deterministic_test.py @@ -29,7 +29,7 @@ rng = np.random.RandomState(0) class DeterministicTest(test.TestCase): def testShape(self): - with self.test_session(): + with self.cached_session(): loc = rng.rand(2, 3, 4) deterministic = deterministic_lib.Deterministic(loc) @@ -42,20 +42,20 @@ class DeterministicTest(test.TestCase): loc = rng.rand(2, 3, 4).astype(np.float32) deterministic = deterministic_lib.Deterministic( loc, atol=-1, validate_args=True) - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("Condition x >= 0"): deterministic.prob(0.).eval() def testProbWithNoBatchDimsIntegerType(self): 
deterministic = deterministic_lib.Deterministic(0) - with self.test_session(): + with self.cached_session(): self.assertAllClose(1, deterministic.prob(0).eval()) self.assertAllClose(0, deterministic.prob(2).eval()) self.assertAllClose([1, 0], deterministic.prob([0, 2]).eval()) def testProbWithNoBatchDims(self): deterministic = deterministic_lib.Deterministic(0.) - with self.test_session(): + with self.cached_session(): self.assertAllClose(1., deterministic.prob(0.).eval()) self.assertAllClose(0., deterministic.prob(2.).eval()) self.assertAllClose([1., 0.], deterministic.prob([0., 2.]).eval()) @@ -65,7 +65,7 @@ class DeterministicTest(test.TestCase): x = [[0., 1.1], [1.99, 3.]] deterministic = deterministic_lib.Deterministic(loc) expected_prob = [[1., 0.], [0., 1.]] - with self.test_session(): + with self.cached_session(): prob = deterministic.prob(x) self.assertAllEqual((2, 2), prob.get_shape()) self.assertAllEqual(expected_prob, prob.eval()) @@ -75,7 +75,7 @@ class DeterministicTest(test.TestCase): x = [[0., 1.1], [1.99, 3.]] deterministic = deterministic_lib.Deterministic(loc, atol=0.05) expected_prob = [[1., 0.], [1., 1.]] - with self.test_session(): + with self.cached_session(): prob = deterministic.prob(x) self.assertAllEqual((2, 2), prob.get_shape()) self.assertAllEqual(expected_prob, prob.eval()) @@ -85,7 +85,7 @@ class DeterministicTest(test.TestCase): x = [[0, 2], [4, 2]] deterministic = deterministic_lib.Deterministic(loc, atol=1) expected_prob = [[1, 1], [0, 1]] - with self.test_session(): + with self.cached_session(): prob = deterministic.prob(x) self.assertAllEqual((2, 2), prob.get_shape()) self.assertAllEqual(expected_prob, prob.eval()) @@ -95,7 +95,7 @@ class DeterministicTest(test.TestCase): x = [[0., 1.1], [100.1, 103.]] deterministic = deterministic_lib.Deterministic(loc, rtol=0.01) expected_prob = [[1., 0.], [1., 0.]] - with self.test_session(): + with self.cached_session(): prob = deterministic.prob(x) self.assertAllEqual((2, 2), 
prob.get_shape()) self.assertAllEqual(expected_prob, prob.eval()) @@ -107,7 +107,7 @@ class DeterministicTest(test.TestCase): # Batch 1 will have rtol = 1 (100% slack allowed) deterministic = deterministic_lib.Deterministic(loc, rtol=[[0], [1]]) expected_prob = [[1, 0, 0], [1, 1, 0]] - with self.test_session(): + with self.cached_session(): prob = deterministic.prob(x) self.assertAllEqual((2, 3), prob.get_shape()) self.assertAllEqual(expected_prob, prob.eval()) @@ -117,7 +117,7 @@ class DeterministicTest(test.TestCase): x = [[-1., -0.1], [-0.01, 1.000001]] deterministic = deterministic_lib.Deterministic(loc) expected_cdf = [[0., 0.], [0., 1.]] - with self.test_session(): + with self.cached_session(): cdf = deterministic.cdf(x) self.assertAllEqual((2, 2), cdf.get_shape()) self.assertAllEqual(expected_cdf, cdf.eval()) @@ -127,7 +127,7 @@ class DeterministicTest(test.TestCase): x = [[-1., -0.1], [-0.01, 1.000001]] deterministic = deterministic_lib.Deterministic(loc, atol=0.05) expected_cdf = [[0., 0.], [1., 1.]] - with self.test_session(): + with self.cached_session(): cdf = deterministic.cdf(x) self.assertAllEqual((2, 2), cdf.get_shape()) self.assertAllEqual(expected_cdf, cdf.eval()) @@ -137,7 +137,7 @@ class DeterministicTest(test.TestCase): x = [[0.9, 1.], [99.9, 97]] deterministic = deterministic_lib.Deterministic(loc, rtol=0.01) expected_cdf = [[0., 1.], [1., 0.]] - with self.test_session(): + with self.cached_session(): cdf = deterministic.cdf(x) self.assertAllEqual((2, 2), cdf.get_shape()) self.assertAllEqual(expected_cdf, cdf.eval()) @@ -145,7 +145,7 @@ class DeterministicTest(test.TestCase): def testSampleNoBatchDims(self): deterministic = deterministic_lib.Deterministic(0.) 
for sample_shape in [(), (4,)]: - with self.test_session(): + with self.cached_session(): sample = deterministic.sample(sample_shape) self.assertAllEqual(sample_shape, sample.get_shape()) self.assertAllClose( @@ -154,7 +154,7 @@ class DeterministicTest(test.TestCase): def testSampleWithBatchDims(self): deterministic = deterministic_lib.Deterministic([0., 0.]) for sample_shape in [(), (4,)]: - with self.test_session(): + with self.cached_session(): sample = deterministic.sample(sample_shape) self.assertAllEqual(sample_shape + (2,), sample.get_shape()) self.assertAllClose( @@ -166,7 +166,7 @@ class DeterministicTest(test.TestCase): deterministic = deterministic_lib.Deterministic(loc) for sample_shape_ in [(), (4,)]: - with self.test_session(): + with self.cached_session(): sample_ = deterministic.sample(sample_shape).eval( feed_dict={loc: [0., 0.], sample_shape: sample_shape_}) @@ -176,7 +176,7 @@ class DeterministicTest(test.TestCase): def testEntropy(self): loc = np.array([-0.1, -3.2, 7.]) deterministic = deterministic_lib.Deterministic(loc=loc) - with self.test_session() as sess: + with self.cached_session() as sess: entropy_ = sess.run(deterministic.entropy()) self.assertAllEqual(np.zeros(3), entropy_) @@ -184,7 +184,7 @@ class DeterministicTest(test.TestCase): class VectorDeterministicTest(test.TestCase): def testShape(self): - with self.test_session(): + with self.cached_session(): loc = rng.rand(2, 3, 4) deterministic = deterministic_lib.VectorDeterministic(loc) @@ -197,7 +197,7 @@ class VectorDeterministicTest(test.TestCase): loc = rng.rand(2, 3, 4).astype(np.float32) deterministic = deterministic_lib.VectorDeterministic( loc, atol=-1, validate_args=True) - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("Condition x >= 0"): deterministic.prob(loc).eval() @@ -205,14 +205,14 @@ class VectorDeterministicTest(test.TestCase): loc = rng.rand(2, 3, 4).astype(np.float32) deterministic = deterministic_lib.VectorDeterministic( 
loc, atol=-1, validate_args=True) - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(ValueError, "must have rank at least 1"): deterministic.prob(0.).eval() def testProbVectorDeterministicWithNoBatchDims(self): # 0 batch of deterministics on R^1. deterministic = deterministic_lib.VectorDeterministic([0.]) - with self.test_session(): + with self.cached_session(): self.assertAllClose(1., deterministic.prob([0.]).eval()) self.assertAllClose(0., deterministic.prob([2.]).eval()) self.assertAllClose([1., 0.], deterministic.prob([[0.], [2.]]).eval()) @@ -223,7 +223,7 @@ class VectorDeterministicTest(test.TestCase): x = [[0., 1.], [1.9, 3.], [3.99, 5.]] deterministic = deterministic_lib.VectorDeterministic(loc) expected_prob = [1., 0., 0.] - with self.test_session(): + with self.cached_session(): prob = deterministic.prob(x) self.assertAllEqual((3,), prob.get_shape()) self.assertAllEqual(expected_prob, prob.eval()) @@ -234,7 +234,7 @@ class VectorDeterministicTest(test.TestCase): x = [[0., 1.], [1.9, 3.], [3.99, 5.]] deterministic = deterministic_lib.VectorDeterministic(loc, atol=0.05) expected_prob = [1., 0., 1.] - with self.test_session(): + with self.cached_session(): prob = deterministic.prob(x) self.assertAllEqual((3,), prob.get_shape()) self.assertAllEqual(expected_prob, prob.eval()) @@ -245,7 +245,7 @@ class VectorDeterministicTest(test.TestCase): x = [[0., 1.], [0.9, 1.], [99.9, 100.1]] deterministic = deterministic_lib.VectorDeterministic(loc, rtol=0.01) expected_prob = [1., 0., 1.] - with self.test_session(): + with self.cached_session(): prob = deterministic.prob(x) self.assertAllEqual((3,), prob.get_shape()) self.assertAllEqual(expected_prob, prob.eval()) @@ -254,7 +254,7 @@ class VectorDeterministicTest(test.TestCase): # 0 batch of deterministics on R^0. 
deterministic = deterministic_lib.VectorDeterministic( [], validate_args=True) - with self.test_session(): + with self.cached_session(): self.assertAllClose(1., deterministic.prob([]).eval()) def testProbVectorDeterministicWithNoBatchDimsOnRZeroRaisesIfXNotInSameRk( @@ -262,14 +262,14 @@ class VectorDeterministicTest(test.TestCase): # 0 batch of deterministics on R^0. deterministic = deterministic_lib.VectorDeterministic( [], validate_args=True) - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("not defined in the same space"): deterministic.prob([1.]).eval() def testSampleNoBatchDims(self): deterministic = deterministic_lib.VectorDeterministic([0.]) for sample_shape in [(), (4,)]: - with self.test_session(): + with self.cached_session(): sample = deterministic.sample(sample_shape) self.assertAllEqual(sample_shape + (1,), sample.get_shape()) self.assertAllClose( @@ -278,7 +278,7 @@ class VectorDeterministicTest(test.TestCase): def testSampleWithBatchDims(self): deterministic = deterministic_lib.VectorDeterministic([[0.], [0.]]) for sample_shape in [(), (4,)]: - with self.test_session(): + with self.cached_session(): sample = deterministic.sample(sample_shape) self.assertAllEqual(sample_shape + (2, 1), sample.get_shape()) self.assertAllClose( @@ -290,7 +290,7 @@ class VectorDeterministicTest(test.TestCase): deterministic = deterministic_lib.VectorDeterministic(loc) for sample_shape_ in [(), (4,)]: - with self.test_session(): + with self.cached_session(): sample_ = deterministic.sample(sample_shape).eval( feed_dict={loc: [[0.], [0.]], sample_shape: sample_shape_}) @@ -300,7 +300,7 @@ class VectorDeterministicTest(test.TestCase): def testEntropy(self): loc = np.array([[8.3, 1.2, 3.3], [-0.1, -3.2, 7.]]) deterministic = deterministic_lib.VectorDeterministic(loc=loc) - with self.test_session() as sess: + with self.cached_session() as sess: entropy_ = sess.run(deterministic.entropy()) self.assertAllEqual(np.zeros(2), entropy_) diff 
--git a/tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py b/tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py index f42feae25d..f073f51a69 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py @@ -47,7 +47,7 @@ class DistributionTest(test.TestCase): ] sample_shapes = [(), (10,), (10, 20, 30)] - with self.test_session(): + with self.cached_session(): for cls in classes: for sample_shape in sample_shapes: param_shapes = cls.param_shapes(sample_shape) @@ -62,7 +62,7 @@ class DistributionTest(test.TestCase): self.assertEqual(dist.parameters, dist_copy.parameters) def testCopyExtraArgs(self): - with self.test_session(): + with self.cached_session(): # Note: we cannot easily test all distributions since each requires # different initialization arguments. We therefore spot test a few. normal = tfd.Normal(loc=1., scale=2., validate_args=True) @@ -72,7 +72,7 @@ class DistributionTest(test.TestCase): self.assertEqual(wishart.parameters, wishart.copy().parameters) def testCopyOverride(self): - with self.test_session(): + with self.cached_session(): normal = tfd.Normal(loc=1., scale=2., validate_args=True) unused_normal_copy = normal.copy(validate_args=False) base_params = normal.parameters.copy() @@ -82,7 +82,7 @@ class DistributionTest(test.TestCase): self.assertEqual(base_params, copy_params) def testIsScalar(self): - with self.test_session(): + with self.cached_session(): mu = 1. sigma = 2. @@ -152,7 +152,7 @@ class DistributionTest(test.TestCase): def testSampleShapeHints(self): fake_distribution = self._GetFakeDistribution() - with self.test_session(): + with self.cached_session(): # Make a new session since we're playing with static shapes. [And below.] 
x = array_ops.placeholder(dtype=dtypes.float32) dist = fake_distribution(batch_shape=[2, 3], event_shape=[5]) @@ -162,28 +162,28 @@ class DistributionTest(test.TestCase): # unknown values, ie, Dimension(None). self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list()) - with self.test_session(): + with self.cached_session(): x = array_ops.placeholder(dtype=dtypes.float32) dist = fake_distribution(batch_shape=[None, 3], event_shape=[5]) sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32) y = dist._set_sample_static_shape(x, sample_shape) self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list()) - with self.test_session(): + with self.cached_session(): x = array_ops.placeholder(dtype=dtypes.float32) dist = fake_distribution(batch_shape=[None, 3], event_shape=[None]) sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32) y = dist._set_sample_static_shape(x, sample_shape) self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list()) - with self.test_session(): + with self.cached_session(): x = array_ops.placeholder(dtype=dtypes.float32) dist = fake_distribution(batch_shape=None, event_shape=None) sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32) y = dist._set_sample_static_shape(x, sample_shape) self.assertTrue(y.get_shape().ndims is None) - with self.test_session(): + with self.cached_session(): x = array_ops.placeholder(dtype=dtypes.float32) dist = fake_distribution(batch_shape=[None, 3], event_shape=None) sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py b/tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py index 181c46d2e5..f7b2efa7bc 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py @@ -100,7 +100,7 @@ class MakeTrilScaleTest(test.TestCase): def 
_testLegalInputs( self, loc=None, shape_hint=None, scale_params=None): for args in _powerset(scale_params.items()): - with self.test_session(): + with self.cached_session(): args = dict(args) scale_args = dict({ @@ -143,19 +143,19 @@ class MakeTrilScaleTest(test.TestCase): }) def testZeroTriU(self): - with self.test_session(): + with self.cached_session(): scale = distribution_util.make_tril_scale(scale_tril=[[1., 1], [1., 1.]]) self.assertAllClose([[1., 0], [1., 1.]], scale.to_dense().eval()) def testValidateArgs(self): - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("diagonal part must be non-zero"): scale = distribution_util.make_tril_scale( scale_tril=[[0., 1], [1., 1.]], validate_args=True) scale.to_dense().eval() def testAssertPositive(self): - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("diagonal part must be positive"): scale = distribution_util.make_tril_scale( scale_tril=[[-1., 1], [1., 1.]], @@ -169,7 +169,7 @@ class MakeDiagScaleTest(test.TestCase): def _testLegalInputs( self, loc=None, shape_hint=None, scale_params=None): for args in _powerset(scale_params.items()): - with self.test_session(): + with self.cached_session(): args = dict(args) scale_args = dict({ @@ -204,14 +204,14 @@ class MakeDiagScaleTest(test.TestCase): }) def testValidateArgs(self): - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("diagonal part must be non-zero"): scale = distribution_util.make_diag_scale( scale_diag=[[0., 1], [1., 1.]], validate_args=True) scale.to_dense().eval() def testAssertPositive(self): - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("diagonal part must be positive"): scale = distribution_util.make_diag_scale( scale_diag=[[-1., 1], [1., 1.]], @@ -241,7 +241,7 @@ class ShapesFromLocAndScaleTest(test.TestCase): loc = constant_op.constant(np.zeros((2, 3))) diag = array_ops.placeholder(dtypes.float64) 
scale = linear_operator_diag.LinearOperatorDiag(diag) - with self.test_session() as sess: + with self.cached_session() as sess: batch_shape, event_shape = sess.run( distribution_util.shapes_from_loc_and_scale(loc, scale), feed_dict={diag: np.ones((5, 1, 3))}) @@ -252,7 +252,7 @@ class ShapesFromLocAndScaleTest(test.TestCase): loc = array_ops.placeholder(dtypes.float64) diag = constant_op.constant(np.ones((5, 2, 3))) scale = linear_operator_diag.LinearOperatorDiag(diag) - with self.test_session(): + with self.cached_session(): batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale( loc, scale) # batch_shape depends on both args, and so is dynamic. Since loc did not @@ -266,7 +266,7 @@ class ShapesFromLocAndScaleTest(test.TestCase): loc = array_ops.placeholder(dtypes.float64) diag = array_ops.placeholder(dtypes.float64) scale = linear_operator_diag.LinearOperatorDiag(diag) - with self.test_session() as sess: + with self.cached_session() as sess: batch_shape, event_shape = sess.run( distribution_util.shapes_from_loc_and_scale(loc, scale), feed_dict={diag: np.ones((5, 2, 3)), loc: np.zeros((2, 3))}) @@ -286,7 +286,7 @@ class ShapesFromLocAndScaleTest(test.TestCase): loc = None diag = array_ops.placeholder(dtypes.float64) scale = linear_operator_diag.LinearOperatorDiag(diag) - with self.test_session() as sess: + with self.cached_session() as sess: batch_shape, event_shape = sess.run( distribution_util.shapes_from_loc_and_scale(loc, scale), feed_dict={diag: np.ones((5, 1, 3))}) @@ -307,7 +307,7 @@ class GetBroadcastShapeTest(test.TestCase): x = array_ops.ones((2, 1, 3)) y = array_ops.placeholder(x.dtype) z = array_ops.ones(()) - with self.test_session() as sess: + with self.cached_session() as sess: bcast_shape = sess.run( distribution_util.get_broadcast_shape(x, y, z), feed_dict={y: np.ones((1, 5, 3)).astype(np.float32)}) @@ -317,7 +317,7 @@ class GetBroadcastShapeTest(test.TestCase): class TridiagTest(test.TestCase): def 
testWorksCorrectlyNoBatches(self): - with self.test_session(): + with self.cached_session(): self.assertAllEqual( [[4., 8., 0., 0.], [1., 5., 9., 0.], @@ -329,7 +329,7 @@ class TridiagTest(test.TestCase): [8., 9., 10.]).eval()) def testWorksCorrectlyBatches(self): - with self.test_session(): + with self.cached_session(): self.assertAllClose( [[[4., 8., 0., 0.], [1., 5., 9., 0.], @@ -349,7 +349,7 @@ class TridiagTest(test.TestCase): rtol=1e-5, atol=0.) def testHandlesNone(self): - with self.test_session(): + with self.cached_session(): self.assertAllClose( [[[4., 0., 0., 0.], [0., 5., 0., 0.], @@ -396,7 +396,7 @@ class MixtureStddevTest(test.TestCase): means_tf, sigmas_tf) - with self.test_session() as sess: + with self.cached_session() as sess: actual_devs = sess.run(mix_dev) self.assertAllClose(actual_devs, expected_devs) @@ -405,7 +405,7 @@ class MixtureStddevTest(test.TestCase): class PadMixtureDimensionsTest(test.TestCase): def test_pad_mixture_dimensions_mixture(self): - with self.test_session() as sess: + with self.cached_session() as sess: gm = mixture.Mixture( cat=categorical.Categorical(probs=[[0.3, 0.7]]), components=[ @@ -422,7 +422,7 @@ class PadMixtureDimensionsTest(test.TestCase): self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1])) def test_pad_mixture_dimensions_mixture_same_family(self): - with self.test_session() as sess: + with self.cached_session() as sess: gm = mixture_same_family.MixtureSameFamily( mixture_distribution=categorical.Categorical(probs=[0.3, 0.7]), components_distribution=mvn_diag.MultivariateNormalDiag( @@ -444,7 +444,7 @@ class _PadTest(object): [4, 5, 6]]) value_ = np.float32(0.25) count_ = np.int32(2) - with self.test_session() as sess: + with self.cached_session() as sess: x = array_ops.placeholder_with_default( x_, shape=x_.shape if self.is_static_shape else None) value = (constant_op.constant(value_) if self.is_static_shape @@ -491,7 +491,7 @@ class _PadTest(object): [4, 5, 6]]) value_ = np.float32(0.25) count_ 
= np.int32(2) - with self.test_session() as sess: + with self.cached_session() as sess: x = array_ops.placeholder_with_default( x_, shape=x_.shape if self.is_static_shape else None) value = (constant_op.constant(value_) if self.is_static_shape diff --git a/tensorflow/contrib/distributions/python/kernel_tests/geometric_test.py b/tensorflow/contrib/distributions/python/kernel_tests/geometric_test.py index 87cdd0485a..a627d85229 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/geometric_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/geometric_test.py @@ -34,7 +34,7 @@ from tensorflow.python.platform import test class GeometricTest(test.TestCase): def testGeometricShape(self): - with self.test_session(): + with self.cached_session(): probs = constant_op.constant([.1] * 5) geom = geometric.Geometric(probs=probs) @@ -45,19 +45,19 @@ class GeometricTest(test.TestCase): def testInvalidP(self): invalid_ps = [-.01, -0.01, -2.] - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("Condition x >= 0"): geom = geometric.Geometric(probs=invalid_ps, validate_args=True) geom.probs.eval() invalid_ps = [1.1, 3., 5.] 
- with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("Condition x <= y"): geom = geometric.Geometric(probs=invalid_ps, validate_args=True) geom.probs.eval() def testGeomLogPmf(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 probs = constant_op.constant([.2] * batch_size) probs_v = .2 @@ -73,7 +73,7 @@ class GeometricTest(test.TestCase): self.assertAllClose(np.exp(expected_log_prob), pmf.eval()) def testGeometricLogPmf_validate_args(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 probs = constant_op.constant([.9] * batch_size) x = array_ops.placeholder(dtypes.float32, shape=[6]) @@ -95,7 +95,7 @@ class GeometricTest(test.TestCase): self.assertEqual([6,], pmf.get_shape()) def testGeometricLogPmfMultidimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 probs = constant_op.constant([[.2, .3, .5]] * batch_size) probs_v = np.array([.2, .3, .5]) @@ -113,7 +113,7 @@ class GeometricTest(test.TestCase): self.assertAllClose(np.exp(expected_log_prob), pmf_values) def testGeometricCDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 probs = constant_op.constant([[.2, .4, .5]] * batch_size) probs_v = np.array([.2, .4, .5]) @@ -127,7 +127,7 @@ class GeometricTest(test.TestCase): self.assertAllClose(expected_cdf, cdf.eval()) def testGeometricEntropy(self): - with self.test_session(): + with self.cached_session(): probs_v = np.array([.1, .3, .25], dtype=np.float32) geom = geometric.Geometric(probs=probs_v) expected_entropy = stats.geom.entropy(probs_v, loc=-1) @@ -135,7 +135,7 @@ class GeometricTest(test.TestCase): self.assertAllClose(expected_entropy, geom.entropy().eval()) def testGeometricMean(self): - with self.test_session(): + with self.cached_session(): probs_v = np.array([.1, .3, .25]) geom = geometric.Geometric(probs=probs_v) expected_means = stats.geom.mean(probs_v, loc=-1) @@ -143,7 +143,7 @@ class 
GeometricTest(test.TestCase): self.assertAllClose(expected_means, geom.mean().eval()) def testGeometricVariance(self): - with self.test_session(): + with self.cached_session(): probs_v = np.array([.1, .3, .25]) geom = geometric.Geometric(probs=probs_v) expected_vars = stats.geom.var(probs_v, loc=-1) @@ -151,7 +151,7 @@ class GeometricTest(test.TestCase): self.assertAllClose(expected_vars, geom.variance().eval()) def testGeometricStddev(self): - with self.test_session(): + with self.cached_session(): probs_v = np.array([.1, .3, .25]) geom = geometric.Geometric(probs=probs_v) expected_stddevs = stats.geom.std(probs_v, loc=-1) @@ -159,14 +159,14 @@ class GeometricTest(test.TestCase): self.assertAllClose(geom.stddev().eval(), expected_stddevs) def testGeometricMode(self): - with self.test_session(): + with self.cached_session(): probs_v = np.array([.1, .3, .25]) geom = geometric.Geometric(probs=probs_v) self.assertEqual([3,], geom.mode().get_shape()) self.assertAllClose([0.] * 3, geom.mode().eval()) def testGeometricSample(self): - with self.test_session(): + with self.cached_session(): probs_v = [.3, .9] probs = constant_op.constant(probs_v) n = constant_op.constant(100000) @@ -186,7 +186,7 @@ class GeometricTest(test.TestCase): rtol=.02) def testGeometricSampleMultiDimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 2 probs_v = [.3, .9] probs = constant_op.constant([probs_v] * batch_size) @@ -215,7 +215,7 @@ class GeometricTest(test.TestCase): rtol=.02) def testGeometricAtBoundary(self): - with self.test_session(): + with self.cached_session(): geom = geometric.Geometric(probs=1., validate_args=True) x = np.array([0., 2., 3., 4., 5., 6., 7.], dtype=np.float32) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/half_normal_test.py b/tensorflow/contrib/distributions/python/kernel_tests/half_normal_test.py index 1df1467b2d..686de9d246 100644 --- 
a/tensorflow/contrib/distributions/python/kernel_tests/half_normal_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/half_normal_test.py @@ -55,7 +55,7 @@ class HalfNormalTest(test.TestCase): self.assertAllEqual(all_true, is_finite) def _testParamShapes(self, sample_shape, expected): - with self.test_session(): + with self.cached_session(): param_shapes = hn_lib.HalfNormal.param_shapes(sample_shape) scale_shape = param_shapes["scale"] self.assertAllEqual(expected, scale_shape.eval()) @@ -87,7 +87,7 @@ class HalfNormalTest(test.TestCase): tensor_shape.TensorShape(sample_shape), sample_shape) def testHalfNormalLogPDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 scale = constant_op.constant([3.0] * batch_size) x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32) @@ -106,7 +106,7 @@ class HalfNormalTest(test.TestCase): self.assertAllClose(np.exp(expected_log_pdf), pdf.eval()) def testHalfNormalLogPDFMultidimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 scale = constant_op.constant([[3.0, 1.0]] * batch_size) x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T @@ -125,7 +125,7 @@ class HalfNormalTest(test.TestCase): self.assertAllClose(np.exp(expected_log_pdf), pdf.eval()) def testHalfNormalCDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 50 scale = self._rng.rand(batch_size) + 1.0 x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64) @@ -144,7 +144,7 @@ class HalfNormalTest(test.TestCase): self.assertAllClose(np.exp(expected_logcdf), cdf.eval(), atol=0) def testHalfNormalSurvivalFunction(self): - with self.test_session(): + with self.cached_session(): batch_size = 50 scale = self._rng.rand(batch_size) + 1.0 x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64) @@ -163,7 +163,7 @@ class HalfNormalTest(test.TestCase): self.assertAllClose(np.exp(expected_logsf), sf.eval(), atol=0) def 
testHalfNormalQuantile(self): - with self.test_session(): + with self.cached_session(): batch_size = 50 scale = self._rng.rand(batch_size) + 1.0 p = np.linspace(0., 1.0, batch_size).astype(np.float64) @@ -197,7 +197,7 @@ class HalfNormalTest(test.TestCase): self.assertAllFinite(grads[0]) def testHalfNormalEntropy(self): - with self.test_session(): + with self.cached_session(): scale = np.array([[1.0, 2.0, 3.0]]) halfnorm = hn_lib.HalfNormal(scale=scale) @@ -210,7 +210,7 @@ class HalfNormalTest(test.TestCase): self.assertAllClose(expected_entropy, entropy.eval()) def testHalfNormalMeanAndMode(self): - with self.test_session(): + with self.cached_session(): scale = np.array([11., 12., 13.]) halfnorm = hn_lib.HalfNormal(scale=scale) @@ -223,7 +223,7 @@ class HalfNormalTest(test.TestCase): self.assertAllEqual([0., 0., 0.], halfnorm.mode().eval()) def testHalfNormalVariance(self): - with self.test_session(): + with self.cached_session(): scale = np.array([7., 7., 7.]) halfnorm = hn_lib.HalfNormal(scale=scale) expected_variance = scale ** 2.0 * (1.0 - 2.0 / np.pi) @@ -232,7 +232,7 @@ class HalfNormalTest(test.TestCase): self.assertAllEqual(expected_variance, halfnorm.variance().eval()) def testHalfNormalStandardDeviation(self): - with self.test_session(): + with self.cached_session(): scale = np.array([7., 7., 7.]) halfnorm = hn_lib.HalfNormal(scale=scale) expected_variance = scale ** 2.0 * (1.0 - 2.0 / np.pi) @@ -241,7 +241,7 @@ class HalfNormalTest(test.TestCase): self.assertAllEqual(np.sqrt(expected_variance), halfnorm.stddev().eval()) def testHalfNormalSample(self): - with self.test_session(): + with self.cached_session(): scale = constant_op.constant(3.0) n = constant_op.constant(100000) halfnorm = hn_lib.HalfNormal(scale=scale) @@ -263,7 +263,7 @@ class HalfNormalTest(test.TestCase): self.assertAllEqual(expected_shape_static, sample.eval().shape) def testHalfNormalSampleMultiDimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 2 
scale = constant_op.constant([[2.0, 3.0]] * batch_size) n = constant_op.constant(100000) @@ -287,13 +287,13 @@ class HalfNormalTest(test.TestCase): self.assertAllEqual(expected_shape_static, sample.eval().shape) def testNegativeSigmaFails(self): - with self.test_session(): + with self.cached_session(): halfnorm = hn_lib.HalfNormal(scale=[-5.], validate_args=True, name="G") with self.assertRaisesOpError("Condition x > 0 did not hold"): halfnorm.mean().eval() def testHalfNormalShape(self): - with self.test_session(): + with self.cached_session(): scale = constant_op.constant([6.0] * 5) halfnorm = hn_lib.HalfNormal(scale=scale) @@ -306,7 +306,7 @@ class HalfNormalTest(test.TestCase): scale = array_ops.placeholder(dtype=dtypes.float32) halfnorm = hn_lib.HalfNormal(scale=scale) - with self.test_session() as sess: + with self.cached_session() as sess: # get_batch_shape should return an "<unknown>" tensor. self.assertEqual(halfnorm.batch_shape, tensor_shape.TensorShape(None)) self.assertEqual(halfnorm.event_shape, ()) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/independent_test.py b/tensorflow/contrib/distributions/python/kernel_tests/independent_test.py index 6a69f9e60b..ecf27289d7 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/independent_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/independent_test.py @@ -52,7 +52,7 @@ class ProductDistributionTest(test.TestCase): def testSampleAndLogProbUnivariate(self): loc = np.float32([-1., 1]) scale = np.float32([0.1, 0.5]) - with self.test_session() as sess: + with self.cached_session() as sess: ind = independent_lib.Independent( distribution=normal_lib.Normal(loc=loc, scale=scale), reinterpreted_batch_ndims=1) @@ -73,7 +73,7 @@ class ProductDistributionTest(test.TestCase): def testSampleAndLogProbMultivariate(self): loc = np.float32([[-1., 1], [1, -1]]) scale = np.float32([1., 0.5]) - with self.test_session() as sess: + with self.cached_session() as sess: ind = 
independent_lib.Independent( distribution=mvn_diag_lib.MultivariateNormalDiag( loc=loc, @@ -98,7 +98,7 @@ class ProductDistributionTest(test.TestCase): loc = np.float32([[-1., 1], [1, -1]]) scale = np.float32([1., 0.5]) n_samp = 1e4 - with self.test_session() as sess: + with self.cached_session() as sess: ind = independent_lib.Independent( distribution=mvn_diag_lib.MultivariateNormalDiag( loc=loc, @@ -231,7 +231,7 @@ class ProductDistributionTest(test.TestCase): def expected_log_prob(x, logits): return (x * logits - np.log1p(np.exp(logits))).sum(-1).sum(-1).sum(-1) - with self.test_session() as sess: + with self.cached_session() as sess: logits_ph = array_ops.placeholder( dtypes.float32, shape=logits.shape if static_shape else None) ind = independent_lib.Independent( diff --git a/tensorflow/contrib/distributions/python/kernel_tests/inverse_gamma_test.py b/tensorflow/contrib/distributions/python/kernel_tests/inverse_gamma_test.py index 6eb96ea9ff..70551d89d9 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/inverse_gamma_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/inverse_gamma_test.py @@ -30,7 +30,7 @@ from tensorflow.python.platform import test class InverseGammaTest(test.TestCase): def testInverseGammaShape(self): - with self.test_session(): + with self.cached_session(): alpha = constant_op.constant([3.0] * 5) beta = constant_op.constant(11.0) inv_gamma = inverse_gamma.InverseGamma(concentration=alpha, rate=beta) @@ -43,7 +43,7 @@ class InverseGammaTest(test.TestCase): [])) def testInverseGammaLogPDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 alpha = constant_op.constant([2.0] * batch_size) beta = constant_op.constant([3.0] * batch_size) @@ -61,7 +61,7 @@ class InverseGammaTest(test.TestCase): self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf)) def testInverseGammaLogPDFMultidimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 alpha = 
constant_op.constant([[2.0, 4.0]] * batch_size) beta = constant_op.constant([[3.0, 4.0]] * batch_size) @@ -81,7 +81,7 @@ class InverseGammaTest(test.TestCase): self.assertAllClose(pdf_values, np.exp(expected_log_pdf)) def testInverseGammaLogPDFMultidimensionalBroadcasting(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 alpha = constant_op.constant([[2.0, 4.0]] * batch_size) beta = constant_op.constant(3.0) @@ -101,7 +101,7 @@ class InverseGammaTest(test.TestCase): self.assertAllClose(pdf_values, np.exp(expected_log_pdf)) def testInverseGammaCDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 alpha_v = 2.0 beta_v = 3.0 @@ -117,7 +117,7 @@ class InverseGammaTest(test.TestCase): self.assertAllClose(cdf.eval(), expected_cdf) def testInverseGammaMode(self): - with self.test_session(): + with self.cached_session(): alpha_v = np.array([5.5, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) inv_gamma = inverse_gamma.InverseGamma(concentration=alpha_v, rate=beta_v) @@ -126,7 +126,7 @@ class InverseGammaTest(test.TestCase): self.assertAllClose(inv_gamma.mode().eval(), expected_modes) def testInverseGammaMeanAllDefined(self): - with self.test_session(): + with self.cached_session(): alpha_v = np.array([5.5, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) inv_gamma = inverse_gamma.InverseGamma(concentration=alpha_v, rate=beta_v) @@ -135,7 +135,7 @@ class InverseGammaTest(test.TestCase): self.assertAllClose(inv_gamma.mean().eval(), expected_means) def testInverseGammaMeanAllowNanStats(self): - with self.test_session(): + with self.cached_session(): # Mean will not be defined for the first entry. alpha_v = np.array([1.0, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) @@ -145,7 +145,7 @@ class InverseGammaTest(test.TestCase): inv_gamma.mean().eval() def testInverseGammaMeanNanStats(self): - with self.test_session(): + with self.cached_session(): # Mode will not be defined for the first two entries. 
alpha_v = np.array([0.5, 1.0, 3.0, 2.5]) beta_v = np.array([1.0, 2.0, 4.0, 5.0]) @@ -158,7 +158,7 @@ class InverseGammaTest(test.TestCase): self.assertAllClose(inv_gamma.mean().eval(), expected_means) def testInverseGammaVarianceAllDefined(self): - with self.test_session(): + with self.cached_session(): alpha_v = np.array([7.0, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) inv_gamma = inverse_gamma.InverseGamma(concentration=alpha_v, rate=beta_v) @@ -167,7 +167,7 @@ class InverseGammaTest(test.TestCase): self.assertAllClose(inv_gamma.variance().eval(), expected_variances) def testInverseGammaVarianceAllowNanStats(self): - with self.test_session(): + with self.cached_session(): alpha_v = np.array([1.5, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) inv_gamma = inverse_gamma.InverseGamma( @@ -176,7 +176,7 @@ class InverseGammaTest(test.TestCase): inv_gamma.variance().eval() def testInverseGammaVarianceNanStats(self): - with self.test_session(): + with self.cached_session(): alpha_v = np.array([1.5, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) inv_gamma = inverse_gamma.InverseGamma( @@ -187,7 +187,7 @@ class InverseGammaTest(test.TestCase): self.assertAllClose(inv_gamma.variance().eval(), expected_variances) def testInverseGammaEntropy(self): - with self.test_session(): + with self.cached_session(): alpha_v = np.array([1.0, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) expected_entropy = stats.invgamma.entropy(alpha_v, scale=beta_v) @@ -292,7 +292,7 @@ class InverseGammaTest(test.TestCase): self.assertNear(1., total, err=err) def testInverseGammaNonPositiveInitializationParamsRaises(self): - with self.test_session(): + with self.cached_session(): alpha_v = constant_op.constant(0.0, name="alpha") beta_v = constant_op.constant(1.0, name="beta") inv_gamma = inverse_gamma.InverseGamma( @@ -307,7 +307,7 @@ class InverseGammaTest(test.TestCase): inv_gamma.mean().eval() def testInverseGammaWithSoftplusConcentrationRate(self): - with self.test_session(): + with 
self.cached_session(): alpha = constant_op.constant([-0.1, -2.9], name="alpha") beta = constant_op.constant([1.0, -4.8], name="beta") inv_gamma = inverse_gamma.InverseGammaWithSoftplusConcentrationRate( diff --git a/tensorflow/contrib/distributions/python/kernel_tests/kumaraswamy_test.py b/tensorflow/contrib/distributions/python/kernel_tests/kumaraswamy_test.py index 2980e2bfe9..e39db51728 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/kumaraswamy_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/kumaraswamy_test.py @@ -77,7 +77,7 @@ def _kumaraswamy_pdf(a, b, x): class KumaraswamyTest(test.TestCase): def testSimpleShapes(self): - with self.test_session(): + with self.cached_session(): a = np.random.rand(3) b = np.random.rand(3) dist = kumaraswamy_lib.Kumaraswamy(a, b) @@ -87,7 +87,7 @@ class KumaraswamyTest(test.TestCase): self.assertEqual(tensor_shape.TensorShape([3]), dist.batch_shape) def testComplexShapes(self): - with self.test_session(): + with self.cached_session(): a = np.random.rand(3, 2, 2) b = np.random.rand(3, 2, 2) dist = kumaraswamy_lib.Kumaraswamy(a, b) @@ -97,7 +97,7 @@ class KumaraswamyTest(test.TestCase): self.assertEqual(tensor_shape.TensorShape([3, 2, 2]), dist.batch_shape) def testComplexShapesBroadcast(self): - with self.test_session(): + with self.cached_session(): a = np.random.rand(3, 2, 2) b = np.random.rand(2, 2) dist = kumaraswamy_lib.Kumaraswamy(a, b) @@ -109,7 +109,7 @@ class KumaraswamyTest(test.TestCase): def testAProperty(self): a = [[1., 2, 3]] b = [[2., 4, 3]] - with self.test_session(): + with self.cached_session(): dist = kumaraswamy_lib.Kumaraswamy(a, b) self.assertEqual([1, 3], dist.concentration1.get_shape()) self.assertAllClose(a, dist.concentration1.eval()) @@ -117,7 +117,7 @@ class KumaraswamyTest(test.TestCase): def testBProperty(self): a = [[1., 2, 3]] b = [[2., 4, 3]] - with self.test_session(): + with self.cached_session(): dist = kumaraswamy_lib.Kumaraswamy(a, b) 
self.assertEqual([1, 3], dist.concentration0.get_shape()) self.assertAllClose(b, dist.concentration0.eval()) @@ -125,7 +125,7 @@ class KumaraswamyTest(test.TestCase): def testPdfXProper(self): a = [[1., 2, 3]] b = [[2., 4, 3]] - with self.test_session(): + with self.cached_session(): dist = kumaraswamy_lib.Kumaraswamy(a, b, validate_args=True) dist.prob([.1, .3, .6]).eval() dist.prob([.2, .3, .5]).eval() @@ -136,7 +136,7 @@ class KumaraswamyTest(test.TestCase): dist.prob([.1, .2, 1.2]).eval() def testPdfTwoBatches(self): - with self.test_session(): + with self.cached_session(): a = [1., 2] b = [1., 2] x = [.5, .5] @@ -147,7 +147,7 @@ class KumaraswamyTest(test.TestCase): self.assertEqual((2,), pdf.get_shape()) def testPdfTwoBatchesNontrivialX(self): - with self.test_session(): + with self.cached_session(): a = [1., 2] b = [1., 2] x = [.3, .7] @@ -158,7 +158,7 @@ class KumaraswamyTest(test.TestCase): self.assertEqual((2,), pdf.get_shape()) def testPdfUniformZeroBatch(self): - with self.test_session(): + with self.cached_session(): # This is equivalent to a uniform distribution a = 1. b = 1. 
@@ -170,7 +170,7 @@ class KumaraswamyTest(test.TestCase): self.assertEqual((5,), pdf.get_shape()) def testPdfAStretchedInBroadcastWhenSameRank(self): - with self.test_session(): + with self.cached_session(): a = [[1., 2]] b = [[1., 2]] x = [[.5, .5], [.3, .7]] @@ -181,7 +181,7 @@ class KumaraswamyTest(test.TestCase): self.assertEqual((2, 2), pdf.get_shape()) def testPdfAStretchedInBroadcastWhenLowerRank(self): - with self.test_session(): + with self.cached_session(): a = [1., 2] b = [1., 2] x = [[.5, .5], [.2, .8]] @@ -191,7 +191,7 @@ class KumaraswamyTest(test.TestCase): self.assertEqual((2, 2), pdf.get_shape()) def testPdfXStretchedInBroadcastWhenSameRank(self): - with self.test_session(): + with self.cached_session(): a = [[1., 2], [2., 3]] b = [[1., 2], [2., 3]] x = [[.5, .5]] @@ -201,7 +201,7 @@ class KumaraswamyTest(test.TestCase): self.assertEqual((2, 2), pdf.get_shape()) def testPdfXStretchedInBroadcastWhenLowerRank(self): - with self.test_session(): + with self.cached_session(): a = [[1., 2], [2., 3]] b = [[1., 2], [2., 3]] x = [.5, .5] @@ -289,7 +289,7 @@ class KumaraswamyTest(test.TestCase): self.assertAllClose(expected_entropy, dist.entropy().eval()) def testKumaraswamySample(self): - with self.test_session(): + with self.cached_session(): a = 1. b = 2. kumaraswamy = kumaraswamy_lib.Kumaraswamy(a, b) @@ -316,7 +316,7 @@ class KumaraswamyTest(test.TestCase): # Test that sampling with the same seed twice gives the same results. def testKumaraswamySampleMultipleTimes(self): - with self.test_session(): + with self.cached_session(): a_val = 1. b_val = 2. 
n_val = 100 @@ -334,7 +334,7 @@ class KumaraswamyTest(test.TestCase): self.assertAllClose(samples1, samples2) def testKumaraswamySampleMultidimensional(self): - with self.test_session(): + with self.cached_session(): a = np.random.rand(3, 2, 2).astype(np.float32) b = np.random.rand(3, 2, 2).astype(np.float32) kumaraswamy = kumaraswamy_lib.Kumaraswamy(a, b) @@ -351,7 +351,7 @@ class KumaraswamyTest(test.TestCase): atol=1e-1) def testKumaraswamyCdf(self): - with self.test_session(): + with self.cached_session(): shape = (30, 40, 50) for dt in (np.float32, np.float64): a = 10. * np.random.random(shape).astype(dt) @@ -366,7 +366,7 @@ class KumaraswamyTest(test.TestCase): _kumaraswamy_cdf(a, b, x), actual, rtol=1e-4, atol=0) def testKumaraswamyLogCdf(self): - with self.test_session(): + with self.cached_session(): shape = (30, 40, 50) for dt in (np.float32, np.float64): a = 10. * np.random.random(shape).astype(dt) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/logistic_test.py b/tensorflow/contrib/distributions/python/kernel_tests/logistic_test.py index 251be9ed4f..12a2d4f8ec 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/logistic_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/logistic_test.py @@ -39,7 +39,7 @@ class LogisticTest(test.TestCase): dist.reparameterization_type == distribution.FULLY_REPARAMETERIZED) def testLogisticLogProb(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 np_loc = np.array([2.0] * batch_size, dtype=np.float32) loc = constant_op.constant(np_loc) @@ -57,7 +57,7 @@ class LogisticTest(test.TestCase): self.assertAllClose(prob.eval(), np.exp(expected_log_prob)) def testLogisticCDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 np_loc = np.array([2.0] * batch_size, dtype=np.float32) loc = constant_op.constant(np_loc) @@ -72,7 +72,7 @@ class LogisticTest(test.TestCase): self.assertAllClose(cdf.eval(), expected_cdf) def 
testLogisticLogCDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 np_loc = np.array([2.0] * batch_size, dtype=np.float32) loc = constant_op.constant(np_loc) @@ -87,7 +87,7 @@ class LogisticTest(test.TestCase): self.assertAllClose(logcdf.eval(), expected_logcdf) def testLogisticSurvivalFunction(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 np_loc = np.array([2.0] * batch_size, dtype=np.float32) loc = constant_op.constant(np_loc) @@ -102,7 +102,7 @@ class LogisticTest(test.TestCase): self.assertAllClose(survival_function.eval(), expected_survival_function) def testLogisticLogSurvivalFunction(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 np_loc = np.array([2.0] * batch_size, dtype=np.float32) loc = constant_op.constant(np_loc) @@ -118,7 +118,7 @@ class LogisticTest(test.TestCase): expected_logsurvival_function) def testLogisticMean(self): - with self.test_session(): + with self.cached_session(): loc = [2.0, 1.5, 1.0] scale = 1.5 expected_mean = stats.logistic.mean(loc, scale) @@ -126,7 +126,7 @@ class LogisticTest(test.TestCase): self.assertAllClose(dist.mean().eval(), expected_mean) def testLogisticVariance(self): - with self.test_session(): + with self.cached_session(): loc = [2.0, 1.5, 1.0] scale = 1.5 expected_variance = stats.logistic.var(loc, scale) @@ -134,7 +134,7 @@ class LogisticTest(test.TestCase): self.assertAllClose(dist.variance().eval(), expected_variance) def testLogisticEntropy(self): - with self.test_session(): + with self.cached_session(): batch_size = 3 np_loc = np.array([2.0] * batch_size, dtype=np.float32) loc = constant_op.constant(np_loc) @@ -144,7 +144,7 @@ class LogisticTest(test.TestCase): self.assertAllClose(dist.entropy().eval(), expected_entropy) def testLogisticSample(self): - with self.test_session(): + with self.cached_session(): loc = [3.0, 4.0, 2.0] scale = 1.0 dist = logistic.Logistic(loc, scale) diff --git 
a/tensorflow/contrib/distributions/python/kernel_tests/mixture_same_family_test.py b/tensorflow/contrib/distributions/python/kernel_tests/mixture_same_family_test.py index ff6092fc26..faff42d243 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/mixture_same_family_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/mixture_same_family_test.py @@ -35,7 +35,7 @@ class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers, test.TestCase): def testSampleAndLogProbUnivariateShapes(self): - with self.test_session(): + with self.cached_session(): gm = mixture_same_family_lib.MixtureSameFamily( mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]), components_distribution=normal_lib.Normal( @@ -46,7 +46,7 @@ class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers, self.assertEqual([4, 5], log_prob_x.shape) def testSampleAndLogProbBatch(self): - with self.test_session(): + with self.cached_session(): gm = mixture_same_family_lib.MixtureSameFamily( mixture_distribution=categorical_lib.Categorical(probs=[[0.3, 0.7]]), components_distribution=normal_lib.Normal( @@ -59,7 +59,7 @@ class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers, def testSampleAndLogProbShapesBroadcastMix(self): mix_probs = np.float32([.3, .7]) bern_probs = np.float32([[.4, .6], [.25, .75]]) - with self.test_session(): + with self.cached_session(): bm = mixture_same_family_lib.MixtureSameFamily( mixture_distribution=categorical_lib.Categorical(probs=mix_probs), components_distribution=bernoulli_lib.Bernoulli(probs=bern_probs)) @@ -72,7 +72,7 @@ class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers, np.ones_like(x_, dtype=np.bool), np.logical_or(x_ == 0., x_ == 1.)) def testSampleAndLogProbMultivariateShapes(self): - with self.test_session(): + with self.cached_session(): gm = mixture_same_family_lib.MixtureSameFamily( mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]), 
components_distribution=mvn_diag_lib.MultivariateNormalDiag( @@ -83,7 +83,7 @@ class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers, self.assertEqual([4, 5], log_prob_x.shape) def testSampleAndLogProbBatchMultivariateShapes(self): - with self.test_session(): + with self.cached_session(): gm = mixture_same_family_lib.MixtureSameFamily( mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]), components_distribution=mvn_diag_lib.MultivariateNormalDiag( @@ -98,7 +98,7 @@ class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers, self.assertEqual([4, 5, 2], log_prob_x.shape) def testSampleConsistentLogProb(self): - with self.test_session() as sess: + with self.cached_session() as sess: gm = mixture_same_family_lib.MixtureSameFamily( mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]), components_distribution=mvn_diag_lib.MultivariateNormalDiag( @@ -111,7 +111,7 @@ class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers, sess.run, gm, radius=1., center=[1., -1], rtol=0.02) def testLogCdf(self): - with self.test_session() as sess: + with self.cached_session() as sess: gm = mixture_same_family_lib.MixtureSameFamily( mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]), components_distribution=normal_lib.Normal( @@ -128,7 +128,7 @@ class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers, rtol=1e-6, atol=0.0) def testSampleConsistentMeanCovariance(self): - with self.test_session() as sess: + with self.cached_session() as sess: gm = mixture_same_family_lib.MixtureSameFamily( mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]), components_distribution=mvn_diag_lib.MultivariateNormalDiag( @@ -136,7 +136,7 @@ class MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers, self.run_test_sample_consistent_mean_covariance(sess.run, gm) def testVarianceConsistentCovariance(self): - with self.test_session() as sess: + with self.cached_session() as sess: gm = 
mixture_same_family_lib.MixtureSameFamily( mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]), components_distribution=mvn_diag_lib.MultivariateNormalDiag( diff --git a/tensorflow/contrib/distributions/python/kernel_tests/mixture_test.py b/tensorflow/contrib/distributions/python/kernel_tests/mixture_test.py index 0206489175..f8dbd34d02 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/mixture_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/mixture_test.py @@ -152,7 +152,7 @@ class MixtureTest(test.TestCase): use_static_graph = False def testShapes(self): - with self.test_session(): + with self.cached_session(): for batch_shape in ([], [1], [2, 3, 4]): dist = make_univariate_mixture(batch_shape, num_components=10, use_static_graph=self.use_static_graph) @@ -200,7 +200,7 @@ class MixtureTest(test.TestCase): use_static_graph=self.use_static_graph) def testBrokenShapesDynamic(self): - with self.test_session(): + with self.cached_session(): d0_param = array_ops.placeholder(dtype=dtypes.float32) d1_param = array_ops.placeholder(dtype=dtypes.float32) d = ds.Mixture( @@ -246,7 +246,7 @@ class MixtureTest(test.TestCase): # mixture are checked for equivalence. def testMeanUnivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: for batch_shape in ((), (2,), (2, 3)): dist = make_univariate_mixture( batch_shape=batch_shape, num_components=2, @@ -268,7 +268,7 @@ class MixtureTest(test.TestCase): self.assertAllClose(true_mean, mean_value) def testMeanMultivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: for batch_shape in ((), (2,), (2, 3)): dist = make_multivariate_mixture( batch_shape=batch_shape, num_components=2, event_shape=(4,), @@ -296,7 +296,7 @@ class MixtureTest(test.TestCase): def testStddevShapeUnivariate(self): num_components = 2 # This is the same shape test which is done in 'testMeanUnivariate'. 
- with self.test_session() as sess: + with self.cached_session() as sess: for batch_shape in ((), (2,), (2, 3)): dist = make_univariate_mixture( batch_shape=batch_shape, num_components=num_components, @@ -337,7 +337,7 @@ class MixtureTest(test.TestCase): num_components = 2 # This is the same shape test which is done in 'testMeanMultivariate'. - with self.test_session() as sess: + with self.cached_session() as sess: for batch_shape in ((), (2,), (2, 3)): dist = make_multivariate_mixture( batch_shape=batch_shape, @@ -392,12 +392,12 @@ class MixtureTest(test.TestCase): ], use_static_graph=self.use_static_graph) mix_dev = mixture_dist.stddev() - with self.test_session() as sess: + with self.cached_session() as sess: actual_stddev = sess.run(mix_dev) self.assertAllClose(actual_stddev, ground_truth_stddev) def testProbScalarUnivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: dist = make_univariate_mixture(batch_shape=[], num_components=2, use_static_graph=self.use_static_graph) for x in [ @@ -423,7 +423,7 @@ class MixtureTest(test.TestCase): self.assertAllClose(total_prob, p_x_value) def testProbScalarMultivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: dist = make_multivariate_mixture( batch_shape=[], num_components=2, event_shape=[3], use_static_graph=self.use_static_graph) @@ -452,7 +452,7 @@ class MixtureTest(test.TestCase): self.assertAllClose(total_prob, p_x_value) def testProbBatchUnivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: dist = make_univariate_mixture(batch_shape=[2, 3], num_components=2, use_static_graph=self.use_static_graph) @@ -479,7 +479,7 @@ class MixtureTest(test.TestCase): self.assertAllClose(total_prob, p_x_value) def testProbBatchMultivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: dist = make_multivariate_mixture( batch_shape=[2, 3], num_components=2, event_shape=[4], 
use_static_graph=self.use_static_graph) @@ -506,7 +506,7 @@ class MixtureTest(test.TestCase): self.assertAllClose(total_prob, p_x_value) def testSampleScalarBatchUnivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: num_components = 3 batch_shape = [] dist = make_univariate_mixture( @@ -539,7 +539,7 @@ class MixtureTest(test.TestCase): mus = [-5.0, 0.0, 5.0, 4.0, 20.0] sigmas = [0.1, 5.0, 3.0, 0.2, 4.0] - with self.test_session(): + with self.cached_session(): n = 100 random_seed.set_random_seed(654321) @@ -567,7 +567,7 @@ class MixtureTest(test.TestCase): self.assertAllClose(samples1, samples2) def testSampleScalarBatchMultivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: num_components = 3 dist = make_multivariate_mixture( batch_shape=[], num_components=num_components, event_shape=[2], @@ -592,7 +592,7 @@ class MixtureTest(test.TestCase): self.assertAllClose(which_dist_samples, sample_values[which_c, :]) def testSampleBatchUnivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: num_components = 3 dist = make_univariate_mixture( batch_shape=[2, 3], num_components=num_components, @@ -620,7 +620,7 @@ class MixtureTest(test.TestCase): sample_values[which_c_s, which_c_b0, which_c_b1]) def _testSampleBatchMultivariate(self, fully_known_batch_shape): - with self.test_session() as sess: + with self.cached_session() as sess: num_components = 3 if fully_known_batch_shape: batch_shape = [2, 3] @@ -672,7 +672,7 @@ class MixtureTest(test.TestCase): self._testSampleBatchMultivariate(fully_known_batch_shape=False) def testEntropyLowerBoundMultivariate(self): - with self.test_session() as sess: + with self.cached_session() as sess: for batch_shape in ((), (2,), (2, 3)): dist = make_multivariate_mixture( batch_shape=batch_shape, num_components=2, event_shape=(4,), @@ -732,7 +732,7 @@ class MixtureTest(test.TestCase): x_cdf_tf = mixture_tf.cdf(x_tensor) x_log_cdf_tf = 
mixture_tf.log_cdf(x_tensor) - with self.test_session() as sess: + with self.cached_session() as sess: for x_feed in xs_to_check: x_cdf_tf_result, x_log_cdf_tf_result = sess.run( [x_cdf_tf, x_log_cdf_tf], feed_dict={x_tensor: x_feed}) @@ -778,7 +778,7 @@ class MixtureTest(test.TestCase): x_cdf_tf = mixture_tf.cdf(x_tensor) x_log_cdf_tf = mixture_tf.log_cdf(x_tensor) - with self.test_session() as sess: + with self.cached_session() as sess: for x_feed in xs_to_check: x_cdf_tf_result, x_log_cdf_tf_result = sess.run( [x_cdf_tf, x_log_cdf_tf], @@ -802,7 +802,7 @@ class MixtureTest(test.TestCase): Mixture's use of dynamic partition requires `random_gamma` correctly returns an empty `Tensor`. """ - with self.test_session(): + with self.cached_session(): gm = ds.Mixture( cat=ds.Categorical(probs=[.3, .7]), components=[ds.Gamma(1., 2.), diff --git a/tensorflow/contrib/distributions/python/kernel_tests/moving_stats_test.py b/tensorflow/contrib/distributions/python/kernel_tests/moving_stats_test.py index 509fc66c05..3c988dad8a 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/moving_stats_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/moving_stats_test.py @@ -36,7 +36,7 @@ class MovingReduceMeanVarianceTest(test.TestCase): shape = [1, 2] true_mean = np.array([[0., 3.]]) true_stddev = np.array([[1.1, 0.5]]) - with self.test_session() as sess: + with self.cached_session() as sess: # Start "x" out with this mean. mean_var = variables.Variable(array_ops.zeros_like(true_mean)) variance_var = variables.Variable(array_ops.ones_like(true_stddev)) @@ -84,7 +84,7 @@ class MovingReduceMeanVarianceTest(test.TestCase): shape = [1, 2] true_mean = np.array([[0., 3.]]) true_stddev = np.array([[1.1, 0.5]]) - with self.test_session() as sess: + with self.cached_session() as sess: # Start "x" out with this mean. 
x = random_ops.random_normal(shape, dtype=np.float64, seed=0) x = true_stddev * x + true_mean @@ -111,7 +111,7 @@ class MovingLogExponentialMovingMeanExpTest(test.TestCase): true_mean = np.array([[0., 3.]]) true_stddev = np.array([[1.1, 0.5]]) decay = 0.99 - with self.test_session() as sess: + with self.cached_session() as sess: # Start "x" out with this mean. x = random_ops.random_normal(shape, dtype=np.float64, seed=0) x = true_stddev * x + true_mean diff --git a/tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_plus_low_rank_test.py b/tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_plus_low_rank_test.py index a924d2e383..88d0d346a4 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_plus_low_rank_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_plus_low_rank_test.py @@ -39,7 +39,7 @@ class MultivariateNormalDiagPlusLowRankTest(test.TestCase): diag = np.array([[1., 2], [3, 4], [5, 6]]) # batch_shape: [1], event_shape: [] identity_multiplier = np.array([5.]) - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiagPlusLowRank( scale_diag=diag, scale_identity_multiplier=identity_multiplier, @@ -61,7 +61,7 @@ class MultivariateNormalDiagPlusLowRankTest(test.TestCase): diag = np.array([[1., 2], [3, 4], [5, 6]]) # batch_shape: [3, 1], event_shape: [] identity_multiplier = np.array([[5.], [4], [3]]) - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiagPlusLowRank( scale_diag=diag, scale_identity_multiplier=identity_multiplier, @@ -75,7 +75,7 @@ class MultivariateNormalDiagPlusLowRankTest(test.TestCase): diag = np.array([[1., 2], [3, 4], [5, 6]]) # batch_shape: [3], event_shape: [] identity_multiplier = np.array([5., 4, 3]) - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiagPlusLowRank( scale_diag=diag, scale_identity_multiplier=identity_multiplier, @@ -94,7 +94,7 @@ class 
MultivariateNormalDiagPlusLowRankTest(test.TestCase): loc = np.array([1., 0, -1]) # batch_shape: [3], event_shape: [] identity_multiplier = np.array([5., 4, 3]) - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiagPlusLowRank( loc=loc, scale_identity_multiplier=identity_multiplier, @@ -116,7 +116,7 @@ class MultivariateNormalDiagPlusLowRankTest(test.TestCase): diag_large = [1.0, 5.0] v = [[2.0], [3.0]] diag_small = [3.0] - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiagPlusLowRank( loc=mu, scale_diag=diag_large, @@ -146,7 +146,7 @@ class MultivariateNormalDiagPlusLowRankTest(test.TestCase): true_variance = np.diag(true_covariance) true_stddev = np.sqrt(true_variance) - with self.test_session() as sess: + with self.cached_session() as sess: dist = ds.MultivariateNormalDiagPlusLowRank( loc=mu, scale_diag=diag_large, @@ -380,7 +380,7 @@ class MultivariateNormalDiagPlusLowRankTest(test.TestCase): cov = np.stack([np.matmul(scale[0], scale[0].T), np.matmul(scale[1], scale[1].T)]) logging.vlog(2, "expected_cov:\n{}".format(cov)) - with self.test_session(): + with self.cached_session(): mvn = ds.MultivariateNormalDiagPlusLowRank( loc=mu, scale_perturb_factor=u, diff --git a/tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_test.py b/tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_test.py index 9635134b08..6a3d171f6c 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_test.py @@ -45,14 +45,14 @@ class MultivariateNormalDiagTest(test.TestCase): def testScalarParams(self): mu = -1. diag = -5. - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(ValueError, "at least 1 dimension"): ds.MultivariateNormalDiag(mu, diag) def testVectorParams(self): mu = [-1.] diag = [-5.] 
- with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True) self.assertAllEqual([3, 1], dist.sample(3).get_shape()) @@ -63,7 +63,7 @@ class MultivariateNormalDiagTest(test.TestCase): # Batch shape = [1], event shape = [3] mu = array_ops.zeros((1, 3)) diag = array_ops.ones((1, 3)) - with self.test_session(): + with self.cached_session(): base_dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True) dist = ds.TransformedDistribution( base_dist, @@ -75,14 +75,14 @@ class MultivariateNormalDiagTest(test.TestCase): def testMean(self): mu = [-1., 1] diag = [1., -5] - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True) self.assertAllEqual(mu, dist.mean().eval()) def testMeanWithBroadcastLoc(self): mu = [-1.] diag = [1., -5] - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True) self.assertAllEqual([-1., -1.], dist.mean().eval()) @@ -91,14 +91,14 @@ class MultivariateNormalDiagTest(test.TestCase): diag = [-1., 5] diag_mat = np.diag(diag) scipy_mvn = stats.multivariate_normal(mean=mu, cov=diag_mat**2) - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True) self.assertAllClose(scipy_mvn.entropy(), dist.entropy().eval(), atol=1e-4) def testSample(self): mu = [-1., 1] diag = [1., -2] - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True) samps = dist.sample(int(1e3), seed=0).eval() cov_mat = array_ops.matrix_diag(diag).eval()**2 @@ -111,7 +111,7 @@ class MultivariateNormalDiagTest(test.TestCase): def testSingularScaleRaises(self): mu = [-1., 1] diag = [1., 0] - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True) with self.assertRaisesOpError("Singular"): dist.sample().eval() @@ -123,7 
+123,7 @@ class MultivariateNormalDiagTest(test.TestCase): # diag corresponds to no batches of 3-variate normals diag = np.ones([3]) - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True) mean = dist.mean() @@ -142,7 +142,7 @@ class MultivariateNormalDiagTest(test.TestCase): atol=0.10, rtol=0.05) def testCovariance(self): - with self.test_session(): + with self.cached_session(): mvn = ds.MultivariateNormalDiag( loc=array_ops.zeros([2, 3], dtype=dtypes.float32)) self.assertAllClose( @@ -178,7 +178,7 @@ class MultivariateNormalDiagTest(test.TestCase): mvn.covariance().eval()) def testVariance(self): - with self.test_session(): + with self.cached_session(): mvn = ds.MultivariateNormalDiag( loc=array_ops.zeros([2, 3], dtype=dtypes.float32)) self.assertAllClose( @@ -203,7 +203,7 @@ class MultivariateNormalDiagTest(test.TestCase): mvn.variance().eval()) def testStddev(self): - with self.test_session(): + with self.cached_session(): mvn = ds.MultivariateNormalDiag( loc=array_ops.zeros([2, 3], dtype=dtypes.float32)) self.assertAllClose( @@ -229,7 +229,7 @@ class MultivariateNormalDiagTest(test.TestCase): def testMultivariateNormalDiagWithSoftplusScale(self): mu = [-1.0, 1.0] diag = [-1.0, -2.0] - with self.test_session(): + with self.cached_session(): dist = ds.MultivariateNormalDiagWithSoftplusScale( mu, diag, validate_args=True) samps = dist.sample(1000, seed=0).eval() @@ -241,7 +241,7 @@ class MultivariateNormalDiagTest(test.TestCase): def testMultivariateNormalDiagNegLogLikelihood(self): num_draws = 50 dims = 3 - with self.test_session() as sess: + with self.cached_session() as sess: x_pl = array_ops.placeholder(dtype=dtypes.float32, shape=[None, dims], name="x") @@ -291,7 +291,7 @@ class MultivariateNormalDiagTest(test.TestCase): def testKLDivIdenticalGradientDefined(self): dims = 3 - with self.test_session() as sess: + with self.cached_session() as sess: loc = array_ops.zeros([dims], 
dtype=dtypes.float32) mvn = ds.MultivariateNormalDiag( loc=loc, diff --git a/tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py b/tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py index b003526392..bbf803f045 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py @@ -40,7 +40,7 @@ class MultivariateNormalFullCovarianceTest(test.TestCase): return math_ops.matmul(chol, chol, adjoint_b=True).eval() def testRaisesIfInitializedWithNonSymmetricMatrix(self): - with self.test_session(): + with self.cached_session(): mu = [1., 2.] sigma = [[1., 0.], [1., 1.]] # Nonsingular, but not symmetric mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True) @@ -48,14 +48,14 @@ class MultivariateNormalFullCovarianceTest(test.TestCase): mvn.covariance().eval() def testNamePropertyIsSetByInitArg(self): - with self.test_session(): + with self.cached_session(): mu = [1., 2.] 
sigma = [[1., 0.], [0., 1.]] mvn = ds.MultivariateNormalFullCovariance(mu, sigma, name="Billy") self.assertEqual(mvn.name, "Billy/") def testDoesNotRaiseIfInitializedWithSymmetricMatrix(self): - with self.test_session(): + with self.cached_session(): mu = rng.rand(10) sigma = self._random_pd_matrix(10, 10) mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True) @@ -63,7 +63,7 @@ class MultivariateNormalFullCovarianceTest(test.TestCase): mvn.covariance().eval() def testLogPDFScalarBatch(self): - with self.test_session(): + with self.cached_session(): mu = rng.rand(2) sigma = self._random_pd_matrix(2, 2) mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True) @@ -82,7 +82,7 @@ class MultivariateNormalFullCovarianceTest(test.TestCase): self.assertAllClose(expected_pdf, pdf.eval()) def testLogPDFScalarBatchCovarianceNotProvided(self): - with self.test_session(): + with self.cached_session(): mu = rng.rand(2) mvn = ds.MultivariateNormalFullCovariance( mu, covariance_matrix=None, validate_args=True) @@ -102,7 +102,7 @@ class MultivariateNormalFullCovarianceTest(test.TestCase): self.assertAllClose(expected_pdf, pdf.eval()) def testShapes(self): - with self.test_session(): + with self.cached_session(): mu = rng.rand(3, 5, 2) covariance = self._random_pd_matrix(3, 5, 2, 2) @@ -133,7 +133,7 @@ class MultivariateNormalFullCovarianceTest(test.TestCase): def testKLBatch(self): batch_shape = [2] event_shape = [3] - with self.test_session(): + with self.cached_session(): mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape) mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape) mvn_a = ds.MultivariateNormalFullCovariance( @@ -159,7 +159,7 @@ class MultivariateNormalFullCovarianceTest(test.TestCase): def testKLBatchBroadcast(self): batch_shape = [2] event_shape = [3] - with self.test_session(): + with self.cached_session(): mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape) # No batch shape. 
mu_b, sigma_b = self._random_mu_and_sigma([], event_shape) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/mvn_tril_test.py b/tensorflow/contrib/distributions/python/kernel_tests/mvn_tril_test.py index b556d06123..776fc2ca9d 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/mvn_tril_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/mvn_tril_test.py @@ -45,7 +45,7 @@ class MultivariateNormalTriLTest(test.TestCase): return chol.eval(), sigma.eval() def testLogPDFScalarBatch(self): - with self.test_session(): + with self.cached_session(): mu = self._rng.rand(2) chol, sigma = self._random_chol(2, 2) chol[1, 1] = -chol[1, 1] @@ -65,7 +65,7 @@ class MultivariateNormalTriLTest(test.TestCase): self.assertAllClose(expected_pdf, pdf.eval()) def testLogPDFXIsHigherRank(self): - with self.test_session(): + with self.cached_session(): mu = self._rng.rand(2) chol, sigma = self._random_chol(2, 2) chol[0, 0] = -chol[0, 0] @@ -85,7 +85,7 @@ class MultivariateNormalTriLTest(test.TestCase): self.assertAllClose(expected_pdf, pdf.eval(), atol=0., rtol=0.03) def testLogPDFXLowerDimension(self): - with self.test_session(): + with self.cached_session(): mu = self._rng.rand(3, 2) chol, sigma = self._random_chol(3, 2, 2) chol[0, 0, 0] = -chol[0, 0, 0] @@ -108,7 +108,7 @@ class MultivariateNormalTriLTest(test.TestCase): self.assertAllClose(expected_pdf, pdf.eval()[1]) def testEntropy(self): - with self.test_session(): + with self.cached_session(): mu = self._rng.rand(2) chol, sigma = self._random_chol(2, 2) chol[0, 0] = -chol[0, 0] @@ -121,7 +121,7 @@ class MultivariateNormalTriLTest(test.TestCase): self.assertAllClose(expected_entropy, entropy.eval()) def testEntropyMultidimensional(self): - with self.test_session(): + with self.cached_session(): mu = self._rng.rand(3, 5, 2) chol, sigma = self._random_chol(3, 5, 2, 2) chol[1, 0, 0, 0] = -chol[1, 0, 0, 0] @@ -136,7 +136,7 @@ class MultivariateNormalTriLTest(test.TestCase): 
self.assertAllClose(expected_entropy, entropy.eval()[1, 1]) def testSample(self): - with self.test_session(): + with self.cached_session(): mu = self._rng.rand(2) chol, sigma = self._random_chol(2, 2) chol[0, 0] = -chol[0, 0] @@ -152,7 +152,7 @@ class MultivariateNormalTriLTest(test.TestCase): self.assertAllClose(np.cov(sample_values, rowvar=0), sigma, atol=0.06) def testSingularScaleRaises(self): - with self.test_session(): + with self.cached_session(): mu = None chol = [[1., 0.], [0., 0.]] mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True) @@ -160,7 +160,7 @@ class MultivariateNormalTriLTest(test.TestCase): mvn.sample().eval() def testSampleWithSampleShape(self): - with self.test_session(): + with self.cached_session(): mu = self._rng.rand(3, 5, 2) chol, sigma = self._random_chol(3, 5, 2, 2) chol[1, 0, 0, 0] = -chol[1, 0, 0, 0] @@ -185,7 +185,7 @@ class MultivariateNormalTriLTest(test.TestCase): self.assertAllClose(expected_log_pdf, x_log_pdf) def testSampleMultiDimensional(self): - with self.test_session(): + with self.cached_session(): mu = self._rng.rand(3, 5, 2) chol, sigma = self._random_chol(3, 5, 2, 2) chol[1, 0, 0, 0] = -chol[1, 0, 0, 0] @@ -205,7 +205,7 @@ class MultivariateNormalTriLTest(test.TestCase): atol=1e-1) def testShapes(self): - with self.test_session(): + with self.cached_session(): mu = self._rng.rand(3, 5, 2) chol, _ = self._random_chol(3, 5, 2, 2) chol[1, 0, 0, 0] = -chol[1, 0, 0, 0] @@ -237,7 +237,7 @@ class MultivariateNormalTriLTest(test.TestCase): def testKLNonBatch(self): batch_shape = [] event_shape = [2] - with self.test_session(): + with self.cached_session(): mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape) mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape) mvn_a = ds.MultivariateNormalTriL( @@ -259,7 +259,7 @@ class MultivariateNormalTriLTest(test.TestCase): def testKLBatch(self): batch_shape = [2] event_shape = [3] - with self.test_session(): + with self.cached_session(): mu_a, 
sigma_a = self._random_mu_and_sigma(batch_shape, event_shape) mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape) mvn_a = ds.MultivariateNormalTriL( @@ -285,7 +285,7 @@ class MultivariateNormalTriLTest(test.TestCase): def testKLBatchBroadcast(self): batch_shape = [2] event_shape = [3] - with self.test_session(): + with self.cached_session(): mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape) # No batch shape. mu_b, sigma_b = self._random_mu_and_sigma([], event_shape) @@ -312,7 +312,7 @@ class MultivariateNormalTriLTest(test.TestCase): def testKLTwoIdenticalDistributionsIsZero(self): batch_shape = [2] event_shape = [3] - with self.test_session(): + with self.cached_session(): mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape) mvn_a = ds.MultivariateNormalTriL( loc=mu_a, @@ -336,7 +336,7 @@ class MultivariateNormalTriLTest(test.TestCase): true_variance = np.diag(true_covariance) true_stddev = np.sqrt(true_variance) - with self.test_session() as sess: + with self.cached_session() as sess: dist = ds.MultivariateNormalTriL( loc=mu, scale_tril=scale_tril, diff --git a/tensorflow/contrib/distributions/python/kernel_tests/negative_binomial_test.py b/tensorflow/contrib/distributions/python/kernel_tests/negative_binomial_test.py index 37edaa42cd..a46b81af35 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/negative_binomial_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/negative_binomial_test.py @@ -34,7 +34,7 @@ from tensorflow.python.platform import test class NegativeBinomialTest(test.TestCase): def testNegativeBinomialShape(self): - with self.test_session(): + with self.cached_session(): probs = [.1] * 5 total_count = [2.0] * 5 negbinom = negative_binomial.NegativeBinomial( @@ -46,7 +46,7 @@ class NegativeBinomialTest(test.TestCase): self.assertEqual(tensor_shape.TensorShape([]), negbinom.event_shape) def testNegativeBinomialShapeBroadcast(self): - with self.test_session(): + with 
self.cached_session(): probs = [[.1, .2, .3]] * 5 total_count = [[2.]] * 5 negbinom = negative_binomial.NegativeBinomial( @@ -60,7 +60,7 @@ class NegativeBinomialTest(test.TestCase): def testLogits(self): logits = [[0., 9., -0.5]] - with self.test_session(): + with self.cached_session(): negbinom = negative_binomial.NegativeBinomial( total_count=3., logits=logits) self.assertEqual([1, 3], negbinom.probs.get_shape()) @@ -69,14 +69,14 @@ class NegativeBinomialTest(test.TestCase): def testInvalidP(self): invalid_ps = [-.01, 0., -2.,] - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("Condition x >= 0"): negbinom = negative_binomial.NegativeBinomial( 5., probs=invalid_ps, validate_args=True) negbinom.probs.eval() invalid_ps = [1.01, 2., 1.001,] - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("probs has components greater than 1."): negbinom = negative_binomial.NegativeBinomial( 5., probs=invalid_ps, validate_args=True) @@ -84,14 +84,14 @@ class NegativeBinomialTest(test.TestCase): def testInvalidNegativeCount(self): invalid_rs = [-.01, 0., -2.,] - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("Condition x > 0"): negbinom = negative_binomial.NegativeBinomial( total_count=invalid_rs, probs=0.1, validate_args=True) negbinom.total_count.eval() def testNegativeBinomialLogCdf(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 probs = [.2] * batch_size probs_v = .2 @@ -109,7 +109,7 @@ class NegativeBinomialTest(test.TestCase): self.assertAllClose(np.exp(expected_log_cdf), cdf.eval()) def testNegativeBinomialLogCdfValidateArgs(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 probs = [.9] * batch_size total_count = 5. 
@@ -119,7 +119,7 @@ class NegativeBinomialTest(test.TestCase): negbinom.log_cdf(-1.).eval() def testNegativeBinomialLogPmf(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 probs = [.2] * batch_size probs_v = .2 @@ -137,7 +137,7 @@ class NegativeBinomialTest(test.TestCase): self.assertAllClose(np.exp(expected_log_pmf), pmf.eval()) def testNegativeBinomialLogPmfValidateArgs(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 probs = [.9] * batch_size total_count = 5. @@ -162,7 +162,7 @@ class NegativeBinomialTest(test.TestCase): self.assertEqual([6], pmf.get_shape()) def testNegativeBinomialLogPmfMultidimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 probs = constant_op.constant([[.2, .3, .5]] * batch_size) probs_v = np.array([.2, .3, .5]) @@ -183,7 +183,7 @@ class NegativeBinomialTest(test.TestCase): self.assertAllClose(np.exp(expected_log_pmf), pmf_values) def testNegativeBinomialMean(self): - with self.test_session(): + with self.cached_session(): total_count = 5. probs = np.array([.1, .3, .25], dtype=np.float32) negbinom = negative_binomial.NegativeBinomial( @@ -193,7 +193,7 @@ class NegativeBinomialTest(test.TestCase): self.assertAllClose(expected_means, negbinom.mean().eval()) def testNegativeBinomialVariance(self): - with self.test_session(): + with self.cached_session(): total_count = 5. probs = np.array([.1, .3, .25], dtype=np.float32) negbinom = negative_binomial.NegativeBinomial( @@ -203,7 +203,7 @@ class NegativeBinomialTest(test.TestCase): self.assertAllClose(expected_vars, negbinom.variance().eval()) def testNegativeBinomialStddev(self): - with self.test_session(): + with self.cached_session(): total_count = 5. 
probs = np.array([.1, .3, .25], dtype=np.float32) negbinom = negative_binomial.NegativeBinomial( @@ -213,7 +213,7 @@ class NegativeBinomialTest(test.TestCase): self.assertAllClose(expected_stds, negbinom.stddev().eval()) def testNegativeBinomialSample(self): - with self.test_session() as sess: + with self.cached_session() as sess: probs = [.3, .9] total_count = [4., 11.] n = int(100e3) @@ -242,7 +242,7 @@ class NegativeBinomialTest(test.TestCase): rtol=.02) def testLogProbOverflow(self): - with self.test_session() as sess: + with self.cached_session() as sess: logits = np.float32([20., 30., 40.]) total_count = np.float32(1.) x = np.float32(0.) @@ -253,7 +253,7 @@ class NegativeBinomialTest(test.TestCase): np.isfinite(log_prob_)) def testLogProbUnderflow(self): - with self.test_session() as sess: + with self.cached_session() as sess: logits = np.float32([-90, -100, -110]) total_count = np.float32(1.) x = np.float32(0.) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/onehot_categorical_test.py b/tensorflow/contrib/distributions/python/kernel_tests/onehot_categorical_test.py index 111f88eeb5..84ee19123c 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/onehot_categorical_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/onehot_categorical_test.py @@ -44,7 +44,7 @@ class OneHotCategoricalTest(test.TestCase): def testP(self): p = [0.2, 0.8] dist = onehot_categorical.OneHotCategorical(probs=p) - with self.test_session(): + with self.cached_session(): self.assertAllClose(p, dist.probs.eval()) self.assertAllEqual([2], dist.logits.get_shape()) @@ -52,14 +52,14 @@ class OneHotCategoricalTest(test.TestCase): p = np.array([0.2, 0.8], dtype=np.float32) logits = np.log(p) - 50. 
dist = onehot_categorical.OneHotCategorical(logits=logits) - with self.test_session(): + with self.cached_session(): self.assertAllEqual([2], dist.probs.get_shape()) self.assertAllEqual([2], dist.logits.get_shape()) self.assertAllClose(dist.probs.eval(), p) self.assertAllClose(dist.logits.eval(), logits) def testShapes(self): - with self.test_session(): + with self.cached_session(): for batch_shape in ([], [1], [2, 3, 4]): dist = make_onehot_categorical(batch_shape, 10) self.assertAllEqual(batch_shape, dist.batch_shape.as_list()) @@ -97,7 +97,7 @@ class OneHotCategoricalTest(test.TestCase): np.array([1]+[0]*4, dtype=np.int64)).dtype) def testUnknownShape(self): - with self.test_session(): + with self.cached_session(): logits = array_ops.placeholder(dtype=dtypes.float32) dist = onehot_categorical.OneHotCategorical(logits) sample = dist.sample() @@ -112,7 +112,7 @@ class OneHotCategoricalTest(test.TestCase): def testEntropyNoBatch(self): logits = np.log([0.2, 0.8]) - 50. dist = onehot_categorical.OneHotCategorical(logits) - with self.test_session(): + with self.cached_session(): self.assertAllClose( dist.entropy().eval(), -(0.2 * np.log(0.2) + 0.8 * np.log(0.8))) @@ -120,7 +120,7 @@ class OneHotCategoricalTest(test.TestCase): def testEntropyWithBatch(self): logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50. 
dist = onehot_categorical.OneHotCategorical(logits) - with self.test_session(): + with self.cached_session(): self.assertAllClose(dist.entropy().eval(), [ -(0.2 * np.log(0.2) + 0.8 * np.log(0.8)), -(0.6 * np.log(0.6) + 0.4 * np.log(0.4)) @@ -128,7 +128,7 @@ class OneHotCategoricalTest(test.TestCase): def testPmf(self): # check that probability of samples correspond to their class probabilities - with self.test_session(): + with self.cached_session(): logits = self._rng.random_sample(size=(8, 2, 10)) prob = np.exp(logits)/np.sum(np.exp(logits), axis=-1, keepdims=True) dist = onehot_categorical.OneHotCategorical(logits=logits) @@ -138,7 +138,7 @@ class OneHotCategoricalTest(test.TestCase): self.assertAllClose(expected_prob, np_prob.flatten()) def testSample(self): - with self.test_session(): + with self.cached_session(): probs = [[[0.2, 0.8], [0.4, 0.6]]] dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.) n = 100 @@ -150,7 +150,7 @@ class OneHotCategoricalTest(test.TestCase): self.assertFalse(np.any(sample_values > 1)) def testSampleWithSampleShape(self): - with self.test_session(): + with self.cached_session(): probs = [[[0.2, 0.8], [0.4, 0.6]]] dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.) samples = dist.sample((100, 100), seed=123) @@ -166,7 +166,7 @@ class OneHotCategoricalTest(test.TestCase): exp_logits = np.exp(logits) return exp_logits / exp_logits.sum(axis=-1, keepdims=True) - with self.test_session() as sess: + with self.cached_session() as sess: for categories in [2, 10]: for batch_size in [1, 2]: p_logits = self._rng.random_sample((batch_size, categories)) @@ -193,7 +193,7 @@ class OneHotCategoricalTest(test.TestCase): self.assertAllClose(kl_sample_, kl_expected, atol=1e-2, rtol=0.) 
def testSampleUnbiasedNonScalarBatch(self): - with self.test_session() as sess: + with self.cached_session() as sess: logits = self._rng.rand(4, 3, 2).astype(np.float32) dist = onehot_categorical.OneHotCategorical(logits=logits) n = int(3e3) @@ -221,7 +221,7 @@ class OneHotCategoricalTest(test.TestCase): actual_covariance_, sample_covariance_, atol=0., rtol=0.10) def testSampleUnbiasedScalarBatch(self): - with self.test_session() as sess: + with self.cached_session() as sess: logits = self._rng.rand(3).astype(np.float32) dist = onehot_categorical.OneHotCategorical(logits=logits) n = int(1e4) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/poisson_lognormal_test.py b/tensorflow/contrib/distributions/python/kernel_tests/poisson_lognormal_test.py index 1035cb00f7..e2d04c9c27 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/poisson_lognormal_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/poisson_lognormal_test.py @@ -29,7 +29,7 @@ class _PoissonLogNormalQuadratureCompoundTest( """Tests the PoissonLogNormalQuadratureCompoundTest distribution.""" def testSampleProbConsistent(self): - with self.test_session() as sess: + with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( -2., @@ -43,7 +43,7 @@ class _PoissonLogNormalQuadratureCompoundTest( sess.run, pln, batch_size=1, rtol=0.1) def testMeanVariance(self): - with self.test_session() as sess: + with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( 0., @@ -57,7 +57,7 @@ class _PoissonLogNormalQuadratureCompoundTest( sess.run, pln, rtol=0.02) def testSampleProbConsistentBroadcastScalar(self): - with self.test_session() as sess: + with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( [0., -0.5], @@ -71,7 +71,7 @@ class 
_PoissonLogNormalQuadratureCompoundTest( sess.run, pln, batch_size=2, rtol=0.1, atol=0.01) def testMeanVarianceBroadcastScalar(self): - with self.test_session() as sess: + with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( [0., -0.5], @@ -85,7 +85,7 @@ class _PoissonLogNormalQuadratureCompoundTest( sess.run, pln, rtol=0.1, atol=0.01) def testSampleProbConsistentBroadcastBoth(self): - with self.test_session() as sess: + with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( [[0.], [-0.5]], @@ -99,7 +99,7 @@ class _PoissonLogNormalQuadratureCompoundTest( sess.run, pln, batch_size=4, rtol=0.1, atol=0.08) def testMeanVarianceBroadcastBoth(self): - with self.test_session() as sess: + with self.cached_session() as sess: pln = poisson_lognormal.PoissonLogNormalQuadratureCompound( loc=array_ops.placeholder_with_default( [[0.], [-0.5]], diff --git a/tensorflow/contrib/distributions/python/kernel_tests/poisson_test.py b/tensorflow/contrib/distributions/python/kernel_tests/poisson_test.py index 19a7472d91..29eba5afca 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/poisson_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/poisson_test.py @@ -35,7 +35,7 @@ class PoissonTest(test.TestCase): return poisson_lib.Poisson(rate=rate, validate_args=validate_args) def testPoissonShape(self): - with self.test_session(): + with self.cached_session(): lam = constant_op.constant([3.0] * 5) poisson = self._make_poisson(rate=lam) @@ -47,13 +47,13 @@ class PoissonTest(test.TestCase): def testInvalidLam(self): invalid_lams = [-.01, 0., -2.] 
for lam in invalid_lams: - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("Condition x > 0"): poisson = self._make_poisson(rate=lam, validate_args=True) poisson.rate.eval() def testPoissonLogPmf(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 lam = constant_op.constant([3.0] * batch_size) lam_v = 3.0 @@ -68,7 +68,7 @@ class PoissonTest(test.TestCase): self.assertAllClose(pmf.eval(), stats.poisson.pmf(x, lam_v)) def testPoissonLogPmfValidateArgs(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 lam = constant_op.constant([3.0] * batch_size) x = array_ops.placeholder(dtypes.float32, shape=[6]) @@ -91,7 +91,7 @@ class PoissonTest(test.TestCase): self.assertEqual(pmf.get_shape(), (6,)) def testPoissonLogPmfMultidimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 lam = constant_op.constant([[2.0, 4.0, 5.0]] * batch_size) lam_v = [2.0, 4.0, 5.0] @@ -107,7 +107,7 @@ class PoissonTest(test.TestCase): self.assertAllClose(pmf.eval(), stats.poisson.pmf(x, lam_v)) def testPoissonCDF(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 lam = constant_op.constant([3.0] * batch_size) lam_v = 3.0 @@ -123,7 +123,7 @@ class PoissonTest(test.TestCase): self.assertAllClose(cdf.eval(), stats.poisson.cdf(x, lam_v)) def testPoissonCDFNonIntegerValues(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 lam = constant_op.constant([3.0] * batch_size) lam_v = 3.0 @@ -142,7 +142,7 @@ class PoissonTest(test.TestCase): poisson_validate.cdf(x).eval() def testPoissonCdfMultidimensional(self): - with self.test_session(): + with self.cached_session(): batch_size = 6 lam = constant_op.constant([[2.0, 4.0, 5.0]] * batch_size) lam_v = [2.0, 4.0, 5.0] @@ -158,7 +158,7 @@ class PoissonTest(test.TestCase): self.assertAllClose(cdf.eval(), stats.poisson.cdf(x, lam_v)) def testPoissonMean(self): - with 
self.test_session(): + with self.cached_session(): lam_v = [1.0, 3.0, 2.5] poisson = self._make_poisson(rate=lam_v) self.assertEqual(poisson.mean().get_shape(), (3,)) @@ -166,7 +166,7 @@ class PoissonTest(test.TestCase): self.assertAllClose(poisson.mean().eval(), lam_v) def testPoissonVariance(self): - with self.test_session(): + with self.cached_session(): lam_v = [1.0, 3.0, 2.5] poisson = self._make_poisson(rate=lam_v) self.assertEqual(poisson.variance().get_shape(), (3,)) @@ -174,7 +174,7 @@ class PoissonTest(test.TestCase): self.assertAllClose(poisson.variance().eval(), lam_v) def testPoissonStd(self): - with self.test_session(): + with self.cached_session(): lam_v = [1.0, 3.0, 2.5] poisson = self._make_poisson(rate=lam_v) self.assertEqual(poisson.stddev().get_shape(), (3,)) @@ -182,14 +182,14 @@ class PoissonTest(test.TestCase): self.assertAllClose(poisson.stddev().eval(), np.sqrt(lam_v)) def testPoissonMode(self): - with self.test_session(): + with self.cached_session(): lam_v = [1.0, 3.0, 2.5, 3.2, 1.1, 0.05] poisson = self._make_poisson(rate=lam_v) self.assertEqual(poisson.mode().get_shape(), (6,)) self.assertAllClose(poisson.mode().eval(), np.floor(lam_v)) def testPoissonMultipleMode(self): - with self.test_session(): + with self.cached_session(): lam_v = [1.0, 3.0, 2.0, 4.0, 5.0, 10.0] poisson = self._make_poisson(rate=lam_v) # For the case where lam is an integer, the modes are: lam and lam - 1. 
@@ -198,7 +198,7 @@ class PoissonTest(test.TestCase): self.assertAllClose(lam_v, poisson.mode().eval()) def testPoissonSample(self): - with self.test_session(): + with self.cached_session(): lam_v = 4.0 lam = constant_op.constant(lam_v) # Choosing `n >= (k/rtol)**2, roughly ensures our sample mean should be @@ -215,7 +215,7 @@ class PoissonTest(test.TestCase): sample_values.var(), stats.poisson.var(lam_v), rtol=.01) def testPoissonSampleMultidimensionalMean(self): - with self.test_session(): + with self.cached_session(): lam_v = np.array([np.arange(1, 51, dtype=np.float32)]) # 1 x 50 poisson = self._make_poisson(rate=lam_v) # Choosing `n >= (k/rtol)**2, roughly ensures our sample mean should be @@ -232,7 +232,7 @@ class PoissonTest(test.TestCase): atol=0) def testPoissonSampleMultidimensionalVariance(self): - with self.test_session(): + with self.cached_session(): lam_v = np.array([np.arange(5, 15, dtype=np.float32)]) # 1 x 10 poisson = self._make_poisson(rate=lam_v) # Choosing `n >= 2 * lam * (k/rtol)**2, roughly ensures our sample diff --git a/tensorflow/contrib/distributions/python/kernel_tests/quantized_distribution_test.py b/tensorflow/contrib/distributions/python/kernel_tests/quantized_distribution_test.py index 71f63378e2..07528cafaf 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/quantized_distribution_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/quantized_distribution_test.py @@ -38,7 +38,7 @@ class QuantizedDistributionTest(test.TestCase): self.assertTrue(np.isfinite(array).all()) def testQuantizationOfUniformWithCutoffsHavingNoEffect(self): - with self.test_session() as sess: + with self.cached_session() as sess: # The Quantized uniform with cutoffs == None divides the real line into: # R = ...(-1, 0](0, 1](1, 2](2, 3](3, 4]... # j = ... 0 1 2 3 4 ... 
@@ -93,7 +93,7 @@ class QuantizedDistributionTest(test.TestCase): self.assertAllClose(3 / 3, cdf_5) def testQuantizationOfUniformWithCutoffsInTheMiddle(self): - with self.test_session() as sess: + with self.cached_session() as sess: # The uniform is supported on [-3, 3] # Consider partitions the real line in intervals # ...(-3, -2](-2, -1](-1, 0](0, 1](1, 2](2, 3] ... @@ -131,7 +131,7 @@ class QuantizedDistributionTest(test.TestCase): def testQuantizationOfBatchOfUniforms(self): batch_shape = (5, 5) - with self.test_session(): + with self.cached_session(): # The uniforms are supported on [0, 10]. The qdist considers the # intervals # ... (0, 1](1, 2]...(9, 10]... @@ -165,7 +165,7 @@ class QuantizedDistributionTest(test.TestCase): def testSamplingFromBatchOfNormals(self): batch_shape = (2,) - with self.test_session(): + with self.cached_session(): normal = distributions.Normal( loc=array_ops.zeros( batch_shape, dtype=dtypes.float32), @@ -199,7 +199,7 @@ class QuantizedDistributionTest(test.TestCase): # pretend that the cdf F is a bijection, and hence F(X) is uniform. # Note that F cannot be bijection since it is constant between the # integers. Hence, F(X) (see below) will not be uniform exactly. - with self.test_session(): + with self.cached_session(): qdist = distributions.QuantizedDistribution( distribution=distributions.Exponential(rate=0.01)) # X ~ QuantizedExponential @@ -222,7 +222,7 @@ class QuantizedDistributionTest(test.TestCase): # it makes sure the bin edges are consistent. # Make an exponential with mean 5. 
- with self.test_session(): + with self.cached_session(): qdist = distributions.QuantizedDistribution( distribution=distributions.Exponential(rate=0.2)) # Standard error should be less than 1 / (2 * sqrt(n_samples)) @@ -243,7 +243,7 @@ class QuantizedDistributionTest(test.TestCase): batch_shape = (3, 3) mu = rng.randn(*batch_shape) sigma = rng.rand(*batch_shape) + 1.0 - with self.test_session(): + with self.cached_session(): qdist = distributions.QuantizedDistribution( distribution=distributions.Normal( loc=mu, scale=sigma)) @@ -260,7 +260,7 @@ class QuantizedDistributionTest(test.TestCase): batch_shape = (3, 3) mu = rng.randn(*batch_shape) sigma = rng.rand(*batch_shape) + 1.0 - with self.test_session(): + with self.cached_session(): qdist = distributions.QuantizedDistribution( distribution=distributions.Normal( loc=mu, scale=sigma)) @@ -275,7 +275,7 @@ class QuantizedDistributionTest(test.TestCase): def testNormalProbWithCutoffs(self): # At integer values, the result should be the same as the standard normal. - with self.test_session(): + with self.cached_session(): qdist = distributions.QuantizedDistribution( distribution=distributions.Normal(loc=0., scale=1.), low=-2., @@ -297,7 +297,7 @@ class QuantizedDistributionTest(test.TestCase): def testNormalLogProbWithCutoffs(self): # At integer values, the result should be the same as the standard normal. 
- with self.test_session(): + with self.cached_session(): qdist = distributions.QuantizedDistribution( distribution=distributions.Normal(loc=0., scale=1.), low=-2., @@ -342,7 +342,7 @@ class QuantizedDistributionTest(test.TestCase): self._assert_all_finite(grads[1].eval()) def testProbAndGradGivesFiniteResultsForCommonEvents(self): - with self.test_session(): + with self.cached_session(): mu = variables.Variable(0.0, name="mu") sigma = variables.Variable(1.0, name="sigma") qdist = distributions.QuantizedDistribution( @@ -360,7 +360,7 @@ class QuantizedDistributionTest(test.TestCase): self._assert_all_finite(grads[1].eval()) def testLowerCutoffMustBeBelowUpperCutoffOrWeRaise(self): - with self.test_session(): + with self.cached_session(): qdist = distributions.QuantizedDistribution( distribution=distributions.Normal(loc=0., scale=1.), low=1., # not strictly less than high. @@ -372,7 +372,7 @@ class QuantizedDistributionTest(test.TestCase): qdist.sample().eval() def testCutoffsMustBeIntegerValuedIfValidateArgsTrue(self): - with self.test_session(): + with self.cached_session(): low = array_ops.placeholder(dtypes.float32) qdist = distributions.QuantizedDistribution( distribution=distributions.Normal(loc=0., scale=1.), @@ -385,7 +385,7 @@ class QuantizedDistributionTest(test.TestCase): qdist.sample().eval(feed_dict={low: 1.5}) def testCutoffsCanBeFloatValuedIfValidateArgsFalse(self): - with self.test_session(): + with self.cached_session(): qdist = distributions.QuantizedDistribution( distribution=distributions.Normal( loc=0., scale=1., validate_args=False), @@ -399,7 +399,7 @@ class QuantizedDistributionTest(test.TestCase): def testDtypeAndShapeInheritedFromBaseDist(self): batch_shape = (2, 3) - with self.test_session(): + with self.cached_session(): qdist = distributions.QuantizedDistribution( distribution=distributions.Normal( loc=array_ops.zeros(batch_shape), diff --git a/tensorflow/contrib/distributions/python/kernel_tests/relaxed_bernoulli_test.py 
b/tensorflow/contrib/distributions/python/kernel_tests/relaxed_bernoulli_test.py index 2cf12bbe50..fec2374928 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/relaxed_bernoulli_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/relaxed_bernoulli_test.py @@ -34,29 +34,29 @@ class RelaxedBernoulliTest(test.TestCase): temperature = 1.0 p = [0.1, 0.4] dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p) - with self.test_session(): + with self.cached_session(): self.assertAllClose(p, dist.probs.eval()) def testLogits(self): temperature = 2.0 logits = [-42., 42.] dist = relaxed_bernoulli.RelaxedBernoulli(temperature, logits=logits) - with self.test_session(): + with self.cached_session(): self.assertAllClose(logits, dist.logits.eval()) - with self.test_session(): + with self.cached_session(): self.assertAllClose(scipy.special.expit(logits), dist.probs.eval()) p = [0.01, 0.99, 0.42] dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p) - with self.test_session(): + with self.cached_session(): self.assertAllClose(scipy.special.logit(p), dist.logits.eval()) def testInvalidP(self): temperature = 1.0 invalid_ps = [1.01, 2.] for p in invalid_ps: - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("probs has components greater than 1"): dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p, @@ -65,7 +65,7 @@ class RelaxedBernoulliTest(test.TestCase): invalid_ps = [-0.01, -3.] 
for p in invalid_ps: - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("Condition x >= 0"): dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p, @@ -74,13 +74,13 @@ class RelaxedBernoulliTest(test.TestCase): valid_ps = [0.0, 0.5, 1.0] for p in valid_ps: - with self.test_session(): + with self.cached_session(): dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p) self.assertEqual(p, dist.probs.eval()) def testShapes(self): - with self.test_session(): + with self.cached_session(): for batch_shape in ([], [1], [2, 3, 4]): temperature = 1.0 p = np.random.random(batch_shape).astype(np.float32) @@ -96,7 +96,7 @@ class RelaxedBernoulliTest(test.TestCase): p = constant_op.constant([0.1, 0.4]) dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p, validate_args=True) - with self.test_session(): + with self.cached_session(): sample = dist.sample() with self.assertRaises(errors_impl.InvalidArgumentError): sample.eval() @@ -117,7 +117,7 @@ class RelaxedBernoulliTest(test.TestCase): self.assertEqual(dist64.dtype, dist64.sample(5).dtype) def testLogProb(self): - with self.test_session(): + with self.cached_session(): t = np.array(1.0, dtype=np.float64) p = np.array(0.1, dtype=np.float64) # P(x=1) dist = relaxed_bernoulli.RelaxedBernoulli(t, probs=p) @@ -131,7 +131,7 @@ class RelaxedBernoulliTest(test.TestCase): self.assertAllClose(expected_log_pdf, log_pdf) def testBoundaryConditions(self): - with self.test_session(): + with self.cached_session(): temperature = 1e-2 dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=1.0) self.assertAllClose(np.nan, dist.log_prob(0.0).eval()) @@ -139,7 +139,7 @@ class RelaxedBernoulliTest(test.TestCase): def testSampleN(self): """mean of quantized samples still approximates the Bernoulli mean.""" - with self.test_session(): + with self.cached_session(): temperature = 1e-2 p = [0.2, 0.6, 0.5] dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p) diff --git 
a/tensorflow/contrib/distributions/python/kernel_tests/relaxed_onehot_categorical_test.py b/tensorflow/contrib/distributions/python/kernel_tests/relaxed_onehot_categorical_test.py index faae9da6ad..ff13c2decc 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/relaxed_onehot_categorical_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/relaxed_onehot_categorical_test.py @@ -46,7 +46,7 @@ class ExpRelaxedOneHotCategoricalTest(test.TestCase): dist = relaxed_onehot_categorical.ExpRelaxedOneHotCategorical(temperature, logits) expected_p = np.exp(logits)/np.sum(np.exp(logits)) - with self.test_session(): + with self.cached_session(): self.assertAllClose(expected_p, dist.probs.eval()) self.assertAllEqual([3], dist.probs.get_shape()) @@ -57,7 +57,7 @@ class ExpRelaxedOneHotCategoricalTest(test.TestCase): p = np.exp(logits)/np.sum(np.exp(logits)) dist = relaxed_onehot_categorical.ExpRelaxedOneHotCategorical(temperature, logits) - with self.test_session(): + with self.cached_session(): x = dist.sample().eval() # analytical ExpConcrete density presented in Maddison et al. 
2016 prod_term = p*np.exp(-temperature * x) @@ -74,14 +74,14 @@ class RelaxedOneHotCategoricalTest(test.TestCase): logits = [2.0, 3.0, -4.0] dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature, logits) - with self.test_session(): + with self.cached_session(): # check p for ExpRelaxed base distribution self.assertAllClose(logits, dist._distribution.logits.eval()) self.assertAllEqual([3], dist._distribution.logits.get_shape()) def testSample(self): temperature = 1.4 - with self.test_session(): + with self.cached_session(): # single logit logits = [.3, .1, .4] dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature, @@ -115,7 +115,7 @@ class RelaxedOneHotCategoricalTest(test.TestCase): expected_pdf = term1*np.power(term2, -k)*term3 return expected_pdf - with self.test_session(): + with self.cached_session(): temperature = .4 logits = np.array([[.3, .1, .4]]).astype(np.float32) dist = relaxed_onehot_categorical.RelaxedOneHotCategorical(temperature, @@ -136,7 +136,7 @@ class RelaxedOneHotCategoricalTest(test.TestCase): self.assertAllClose(expected_pdf.flatten(), pdf, rtol=1e-4) def testShapes(self): - with self.test_session(): + with self.cached_session(): for batch_shape in ([], [1], [2, 3, 4]): dist = make_relaxed_categorical(batch_shape, 10) self.assertAllEqual(batch_shape, dist.batch_shape.as_list()) @@ -153,12 +153,12 @@ class RelaxedOneHotCategoricalTest(test.TestCase): self.assertAllEqual([10], dist.event_shape_tensor().eval()) def testUnknownShape(self): - with self.test_session(): + with self.cached_session(): logits_pl = array_ops.placeholder(dtypes.float32) temperature = 1.0 dist = relaxed_onehot_categorical.ExpRelaxedOneHotCategorical(temperature, logits_pl) - with self.test_session(): + with self.cached_session(): feed_dict = {logits_pl: [.3, .1, .4]} self.assertAllEqual([3], dist.sample().eval(feed_dict=feed_dict).shape) self.assertAllEqual([5, 3], @@ -166,7 +166,7 @@ class RelaxedOneHotCategoricalTest(test.TestCase): def 
testDTypes(self): # check that sampling and log_prob work for a range of dtypes - with self.test_session(): + with self.cached_session(): for dtype in (dtypes.float16, dtypes.float32, dtypes.float64): logits = random_ops.random_uniform(shape=[3, 3], dtype=dtype) dist = relaxed_onehot_categorical.RelaxedOneHotCategorical( diff --git a/tensorflow/contrib/distributions/python/kernel_tests/sample_stats_test.py b/tensorflow/contrib/distributions/python/kernel_tests/sample_stats_test.py index ea04e8c29a..d6020e7866 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/sample_stats_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/sample_stats_test.py @@ -47,7 +47,7 @@ class _AutoCorrelationTest(object): input=x_, shape=x_.shape if self.use_static_shape else None) with spectral_ops_test_util.fft_kernel_label_map(): - with self.test_session() as sess: + with self.cached_session() as sess: # Setting normalize = True means we divide by zero. auto_corr = sample_stats.auto_correlation( x_ph, axis=1, center=False, normalize=False) @@ -65,7 +65,7 @@ class _AutoCorrelationTest(object): input=x_, shape=x_.shape if self.use_static_shape else None) with spectral_ops_test_util.fft_kernel_label_map(): - with self.test_session() as sess: + with self.cached_session() as sess: # Setting normalize = True means we divide by zero. 
auto_corr = sample_stats.auto_correlation( x_ph, axis=1, normalize=False, center=True) @@ -100,7 +100,7 @@ class _AutoCorrelationTest(object): x_ph = array_ops.placeholder_with_default( x, shape=x.shape if self.use_static_shape else None) with spectral_ops_test_util.fft_kernel_label_map(): - with self.test_session(): + with self.cached_session(): auto_corr = sample_stats.auto_correlation( x_ph, axis=axis, max_lags=max_lags, center=center, normalize=normalize) @@ -167,7 +167,7 @@ class _AutoCorrelationTest(object): x_ph = array_ops.placeholder_with_default( x, shape=(l,) if self.use_static_shape else None) with spectral_ops_test_util.fft_kernel_label_map(): - with self.test_session(): + with self.cached_session(): rxx = sample_stats.auto_correlation( x_ph, max_lags=l // 2, center=True, normalize=False) if self.use_static_shape: @@ -188,7 +188,7 @@ class _AutoCorrelationTest(object): x_ph = array_ops.placeholder_with_default( x, shape=(1000 * 10,) if self.use_static_shape else None) with spectral_ops_test_util.fft_kernel_label_map(): - with self.test_session(): + with self.cached_session(): rxx = sample_stats.auto_correlation( x_ph, max_lags=1000 * 10 // 2, center=True, normalize=False) if self.use_static_shape: @@ -209,7 +209,7 @@ class _AutoCorrelationTest(object): x_ph = array_ops.placeholder_with_default( x, shape=(l,) if self.use_static_shape else None) with spectral_ops_test_util.fft_kernel_label_map(): - with self.test_session(): + with self.cached_session(): rxx = sample_stats.auto_correlation( x_ph, max_lags=l // 2, center=True, normalize=True) if self.use_static_shape: @@ -271,7 +271,7 @@ class PercentileTestWithLowerInterpolation(test.TestCase): for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]: expected_percentile = np.percentile( x, q=q, interpolation=self._interpolation, axis=0) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile( x, q=q, interpolation=self._interpolation, axis=[0]) self.assertAllEqual((), 
pct.get_shape()) @@ -282,7 +282,7 @@ class PercentileTestWithLowerInterpolation(test.TestCase): for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]: expected_percentile = np.percentile( x, q=q, interpolation=self._interpolation) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation) self.assertAllEqual((), pct.get_shape()) self.assertAllClose(expected_percentile, pct.eval()) @@ -292,7 +292,7 @@ class PercentileTestWithLowerInterpolation(test.TestCase): for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]: expected_percentile = np.percentile( x, q=q, interpolation=self._interpolation, axis=0) - with self.test_session(): + with self.cached_session(): # Get dim 1 with negative and positive indices. pct_neg_index = sample_stats.percentile( x, q=q, interpolation=self._interpolation, axis=[0]) @@ -308,7 +308,7 @@ class PercentileTestWithLowerInterpolation(test.TestCase): for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]: expected_percentile = np.percentile( x, q=q, interpolation=self._interpolation, axis=0) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile( x, q=q, interpolation=self._interpolation, axis=[0]) self.assertAllEqual((2,), pct.get_shape()) @@ -319,7 +319,7 @@ class PercentileTestWithLowerInterpolation(test.TestCase): for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]: expected_percentile = np.percentile( x, q=q, interpolation=self._interpolation, keepdims=True, axis=0) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile( x, q=q, @@ -334,7 +334,7 @@ class PercentileTestWithLowerInterpolation(test.TestCase): for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]: expected_percentile = np.percentile( x, q=0.77, interpolation=self._interpolation, axis=axis) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile( x, q=0.77, @@ -352,7 +352,7 @@ class 
PercentileTestWithLowerInterpolation(test.TestCase): interpolation=self._interpolation, axis=axis, keepdims=True) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile( x, q=0.77, @@ -368,7 +368,7 @@ class PercentileTestWithLowerInterpolation(test.TestCase): for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]: expected_percentile = np.percentile( x, q=0.77, interpolation=self._interpolation, axis=axis) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile( x_ph, q=0.77, @@ -386,7 +386,7 @@ class PercentileTestWithLowerInterpolation(test.TestCase): interpolation=self._interpolation, axis=axis, keepdims=True) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile( x_ph, q=0.77, @@ -400,7 +400,7 @@ class PercentileTestWithLowerInterpolation(test.TestCase): for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]: expected_percentile = np.percentile( x, q=q, interpolation=self._interpolation) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation) self.assertEqual(dtypes.int32, pct.dtype) self.assertAllEqual((), pct.get_shape()) @@ -423,7 +423,7 @@ class PercentileTestWithNearestInterpolation(test.TestCase): for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]: expected_percentile = np.percentile( x, q=q, interpolation=self._interpolation) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation) self.assertAllEqual((), pct.get_shape()) self.assertAllClose(expected_percentile, pct.eval()) @@ -433,7 +433,7 @@ class PercentileTestWithNearestInterpolation(test.TestCase): for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]: expected_percentile = np.percentile( x, q=q, interpolation=self._interpolation) - with self.test_session(): + with self.cached_session(): pct = sample_stats.percentile(x, q=q, 
interpolation=self._interpolation) self.assertAllEqual((), pct.get_shape()) self.assertAllClose(expected_percentile, pct.eval()) @@ -452,7 +452,7 @@ class PercentileTestWithNearestInterpolation(test.TestCase): x = [1., 5., 3., 2., 4.] q_ph = array_ops.placeholder(dtypes.float32) pct = sample_stats.percentile(x, q=q_ph, validate_args=True) - with self.test_session(): + with self.cached_session(): with self.assertRaisesOpError("rank"): pct.eval(feed_dict={q_ph: [0.5]}) @@ -462,7 +462,7 @@ class PercentileTestWithNearestInterpolation(test.TestCase): # If float is used, it fails with InvalidArgumentError about an index out of # bounds. x = math_ops.linspace(0., 3e7, num=int(3e7)) - with self.test_session(): + with self.cached_session(): minval = sample_stats.percentile(x, q=0, validate_args=True) self.assertAllEqual(0, minval.eval()) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/shape_test.py b/tensorflow/contrib/distributions/python/kernel_tests/shape_test.py index 243b5a0348..a4d2aa381c 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/shape_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/shape_test.py @@ -73,7 +73,7 @@ class MakeBatchReadyTest(test.TestCase): return y, sample_shape, should_be_x_value def _test_dynamic(self, x, batch_ndims, event_ndims, expand_batch_dim=True): - with self.test_session() as sess: + with self.cached_session() as sess: x_pl = array_ops.placeholder(x.dtype) batch_ndims_pl = array_ops.placeholder(dtypes.int32) event_ndims_pl = array_ops.placeholder(dtypes.int32) @@ -91,7 +91,7 @@ class MakeBatchReadyTest(test.TestCase): self.assertAllEqual(x, should_be_x_value_) def _test_static(self, x, batch_ndims, event_ndims, expand_batch_dim): - with self.test_session() as sess: + with self.cached_session() as sess: [y_, sample_shape_, should_be_x_value_] = sess.run( self._build_graph(x, batch_ndims, event_ndims, expand_batch_dim)) expected_y, expected_sample_shape = self._get_expected( @@ -544,7 
+544,7 @@ class DistributionShapeTest(test.TestCase): self.assertAllEqual(expected_item, next(actual_item)) def testDistributionShapeGetNdimsStatic(self): - with self.test_session(): + with self.cached_session(): shaper = _DistributionShape(batch_ndims=0, event_ndims=0) x = 1 self.assertEqual(0, shaper.get_sample_ndims(x).eval()) @@ -572,7 +572,7 @@ class DistributionShapeTest(test.TestCase): self.assertEqual(1, shaper.event_ndims.eval()) def testDistributionShapeGetNdimsDynamic(self): - with self.test_session() as sess: + with self.cached_session() as sess: batch_ndims = array_ops.placeholder(dtypes.int32) event_ndims = array_ops.placeholder(dtypes.int32) shaper = _DistributionShape( @@ -583,7 +583,7 @@ class DistributionShapeTest(test.TestCase): self.assertEqual(2, sess.run(shaper.get_ndims(y), feed_dict=feed_dict)) def testDistributionShapeGetDimsStatic(self): - with self.test_session(): + with self.cached_session(): shaper = _DistributionShape(batch_ndims=0, event_ndims=0) x = 1 self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape), @@ -597,7 +597,7 @@ class DistributionShapeTest(test.TestCase): _constant(shaper.get_dims(x))) def testDistributionShapeGetDimsDynamic(self): - with self.test_session() as sess: + with self.cached_session() as sess: # Works for static {batch,event}_ndims despite unfed input. 
shaper = _DistributionShape(batch_ndims=1, event_ndims=2) y = array_ops.placeholder(dtypes.float32, shape=(10, None, 5, 5)) @@ -615,7 +615,7 @@ class DistributionShapeTest(test.TestCase): ([0], [1], [2, 3]), sess.run(shaper.get_dims(y), feed_dict=feed_dict)) def testDistributionShapeGetShapeStatic(self): - with self.test_session(): + with self.cached_session(): shaper = _DistributionShape(batch_ndims=0, event_ndims=0) self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape), _constant(shaper.get_shape(1.))) @@ -657,7 +657,7 @@ class DistributionShapeTest(test.TestCase): _constant(shaper.get_shape(np.ones((3, 2, 1))))) def testDistributionShapeGetShapeDynamic(self): - with self.test_session() as sess: + with self.cached_session() as sess: # Works for static ndims despite unknown static shape. shaper = _DistributionShape(batch_ndims=1, event_ndims=1) y = array_ops.placeholder(dtypes.int32, shape=(None, None, 2)) diff --git a/tensorflow/contrib/distributions/python/kernel_tests/sinh_arcsinh_test.py b/tensorflow/contrib/distributions/python/kernel_tests/sinh_arcsinh_test.py index 88b48736dd..1811d85b7e 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/sinh_arcsinh_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/sinh_arcsinh_test.py @@ -34,7 +34,7 @@ class SinhArcsinhTest(test.TestCase): b = 10 scale = rng.rand(b) + 0.5 loc = rng.randn(b) - with self.test_session() as sess: + with self.cached_session() as sess: norm = ds.Normal( loc=loc, scale=scale, @@ -58,7 +58,7 @@ class SinhArcsinhTest(test.TestCase): norm_samps.std(axis=0), sasnorm_samps.std(axis=0), atol=0.1) def test_broadcast_params_dynamic(self): - with self.test_session() as sess: + with self.cached_session() as sess: loc = array_ops.placeholder(dtypes.float64) scale = array_ops.placeholder(dtypes.float64) skewness = array_ops.placeholder(dtypes.float64) @@ -78,7 +78,7 @@ class SinhArcsinhTest(test.TestCase): b = 10 scale = rng.rand(b) + 0.5 loc = rng.randn(b) - with 
self.test_session() as sess: + with self.cached_session() as sess: lap = ds.Laplace( loc=loc, scale=scale, @@ -106,7 +106,7 @@ class SinhArcsinhTest(test.TestCase): batch_size = 10 scale = rng.rand(batch_size) + 0.5 loc = 0.1 * rng.randn(batch_size) - with self.test_session() as sess: + with self.cached_session() as sess: norm = ds.Normal( loc=loc, scale=scale, @@ -148,7 +148,7 @@ class SinhArcsinhTest(test.TestCase): batch_size = 10 scale = rng.rand(batch_size) + 0.5 loc = np.float64(0.) - with self.test_session() as sess: + with self.cached_session() as sess: norm = ds.Normal( loc=loc, scale=scale, @@ -190,7 +190,7 @@ class SinhArcsinhTest(test.TestCase): batch_size = 10 scale = rng.rand(batch_size) + 0.5 loc = rng.randn(batch_size) - with self.test_session() as sess: + with self.cached_session() as sess: sasnorm = ds.SinhArcsinh( loc=loc, scale=scale, @@ -201,7 +201,7 @@ class SinhArcsinhTest(test.TestCase): np.testing.assert_array_less(loc, sasnorm_samps.mean(axis=0)) def test_pdf_reflected_for_negative_skewness(self): - with self.test_session() as sess: + with self.cached_session() as sess: sas_pos_skew = ds.SinhArcsinh( loc=0., scale=1., diff --git a/tensorflow/contrib/distributions/python/kernel_tests/transformed_distribution_test.py b/tensorflow/contrib/distributions/python/kernel_tests/transformed_distribution_test.py index bb32430c4a..196cc41335 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/transformed_distribution_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/transformed_distribution_test.py @@ -147,7 +147,7 @@ class TransformedDistributionTest(test.TestCase): abs_normal.log_prob(2.13).eval()) def testQuantile(self): - with self.test_session() as sess: + with self.cached_session() as sess: logit_normal = self._cls()( distribution=ds.Normal(loc=0., scale=1.), bijector=bs.Sigmoid(), @@ -169,7 +169,7 @@ class TransformedDistributionTest(test.TestCase): exp_forward_only._inverse_log_det_jacobian = 
self._make_unimplemented( "inverse_log_det_jacobian ") - with self.test_session() as sess: + with self.cached_session() as sess: mu = 3.0 sigma = 0.02 log_normal = self._cls()( @@ -195,7 +195,7 @@ class TransformedDistributionTest(test.TestCase): log_forward_only = bs.Invert(exp_inverse_only) - with self.test_session() as sess: + with self.cached_session() as sess: # The log bijector isn't defined over the whole real line, so we make # sigma sufficiently small so that the draws are positive. mu = 2. @@ -211,7 +211,7 @@ class TransformedDistributionTest(test.TestCase): self.assertAllClose(expected_log_pdf, log_pdf_val, atol=0.) def testShapeChangingBijector(self): - with self.test_session(): + with self.cached_session(): softmax = bs.SoftmaxCentered() standard_normal = ds.Normal(loc=0., scale=1.) multi_logit_normal = self._cls()( @@ -235,7 +235,7 @@ class TransformedDistributionTest(test.TestCase): def testCastLogDetJacobian(self): """Test log_prob when Jacobian and log_prob dtypes do not match.""" - with self.test_session(): + with self.cached_session(): # Create an identity bijector whose jacobians have dtype int32 int_identity = bs.Inline( forward_fn=array_ops.identity, @@ -257,7 +257,7 @@ class TransformedDistributionTest(test.TestCase): normal.entropy().eval() def testEntropy(self): - with self.test_session(): + with self.cached_session(): shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32) diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32) actual_mvn_entropy = np.concatenate([ @@ -277,7 +277,7 @@ class TransformedDistributionTest(test.TestCase): fake_mvn.entropy().eval()) def testScalarBatchScalarEventIdentityScale(self): - with self.test_session() as sess: + with self.cached_session() as sess: exp2 = self._cls()( ds.Exponential(rate=0.25), bijector=ds.bijectors.AffineScalar(scale=2.) 
@@ -310,7 +310,7 @@ class ScalarToMultiTest(test.TestCase): batch_shape=(), event_shape=(), not_implemented_message=None): - with self.test_session() as sess: + with self.cached_session() as sess: # Overriding shapes must be compatible w/bijector; most bijectors are # batch_shape agnostic and only care about event_ndims. # In the case of `Affine`, if we got it wrong then it would fire an @@ -428,7 +428,7 @@ class ScalarToMultiTest(test.TestCase): batch_shape=[2], not_implemented_message="not implemented") - with self.test_session(): + with self.cached_session(): # Can't override event_shape for scalar batch, non-scalar event. with self.assertRaisesRegexp(ValueError, "base distribution not scalar"): self._cls()( @@ -445,7 +445,7 @@ class ScalarToMultiTest(test.TestCase): event_shape=[3], not_implemented_message="not implemented when overriding event_shape") - with self.test_session(): + with self.cached_session(): # Can't override batch_shape for non-scalar batch, scalar event. with self.assertRaisesRegexp(ValueError, "base distribution not scalar"): self._cls()( @@ -456,7 +456,7 @@ class ScalarToMultiTest(test.TestCase): validate_args=True) def testNonScalarBatchNonScalarEvent(self): - with self.test_session(): + with self.cached_session(): # Can't override event_shape and/or batch_shape for non_scalar batch, # non-scalar event. 
with self.assertRaisesRegexp(ValueError, "base distribution not scalar"): @@ -469,7 +469,7 @@ class ScalarToMultiTest(test.TestCase): validate_args=True) def testMatrixEvent(self): - with self.test_session() as sess: + with self.cached_session() as sess: batch_shape = [2] event_shape = [2, 3, 3] batch_shape_pl = array_ops.placeholder( diff --git a/tensorflow/contrib/distributions/python/kernel_tests/vector_diffeomixture_test.py b/tensorflow/contrib/distributions/python/kernel_tests/vector_diffeomixture_test.py index 04f047aa0c..856579da32 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/vector_diffeomixture_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/vector_diffeomixture_test.py @@ -35,7 +35,7 @@ class VectorDiffeomixtureTest( """Tests the VectorDiffeomixture distribution.""" def testSampleProbConsistentBroadcastMixNoBatch(self): - with self.test_session() as sess: + with self.cached_session() as sess: dims = 4 vdm = vdm_lib.VectorDiffeomixture( mix_loc=[[0.], [1.]], @@ -64,7 +64,7 @@ class VectorDiffeomixtureTest( sess.run, vdm, radius=4., center=2., rtol=0.015) def testSampleProbConsistentBroadcastMixNonStandardBase(self): - with self.test_session() as sess: + with self.cached_session() as sess: dims = 4 vdm = vdm_lib.VectorDiffeomixture( mix_loc=[[0.], [1.]], @@ -93,7 +93,7 @@ class VectorDiffeomixtureTest( sess.run, vdm, radius=4., center=3., rtol=0.01) def testSampleProbConsistentBroadcastMixBatch(self): - with self.test_session() as sess: + with self.cached_session() as sess: dims = 4 vdm = vdm_lib.VectorDiffeomixture( mix_loc=[[0.], [1.]], @@ -128,7 +128,7 @@ class VectorDiffeomixtureTest( dims = 4 loc_1 = rng.randn(2, 3, dims).astype(np.float32) - with self.test_session() as sess: + with self.cached_session() as sess: vdm = vdm_lib.VectorDiffeomixture( mix_loc=(rng.rand(2, 3, 1) - 0.5).astype(np.float32), temperature=[1.], @@ -152,7 +152,7 @@ class VectorDiffeomixtureTest( sess.run, vdm, radius=3., center=loc_1, 
rtol=0.02) def testMeanCovarianceNoBatch(self): - with self.test_session() as sess: + with self.cached_session() as sess: dims = 3 vdm = vdm_lib.VectorDiffeomixture( mix_loc=[[0.], [4.]], @@ -179,7 +179,7 @@ class VectorDiffeomixtureTest( def testTemperatureControlsHowMuchThisLooksLikeDiscreteMixture(self): # As temperature decreases, this should approach a mixture of normals, with # components at -2, 2. - with self.test_session() as sess: + with self.cached_session() as sess: dims = 1 vdm = vdm_lib.VectorDiffeomixture( mix_loc=[0.], @@ -216,7 +216,7 @@ class VectorDiffeomixtureTest( sess.run, vdm, rtol=0.02, cov_rtol=0.08) def testConcentrationLocControlsHowMuchWeightIsOnEachComponent(self): - with self.test_session() as sess: + with self.cached_session() as sess: dims = 1 vdm = vdm_lib.VectorDiffeomixture( mix_loc=[[-1.], [0.], [1.]], @@ -259,7 +259,7 @@ class VectorDiffeomixtureTest( sess.run, vdm, rtol=0.02, cov_rtol=0.08) def testMeanCovarianceNoBatchUncenteredNonStandardBase(self): - with self.test_session() as sess: + with self.cached_session() as sess: dims = 3 vdm = vdm_lib.VectorDiffeomixture( mix_loc=[[0.], [4.]], @@ -284,7 +284,7 @@ class VectorDiffeomixtureTest( sess.run, vdm, num_samples=int(1e6), rtol=0.01, cov_atol=0.025) def testMeanCovarianceBatch(self): - with self.test_session() as sess: + with self.cached_session() as sess: dims = 3 vdm = vdm_lib.VectorDiffeomixture( mix_loc=[[0.], [4.]], @@ -312,7 +312,7 @@ class VectorDiffeomixtureTest( sess.run, vdm, rtol=0.02, cov_rtol=0.07) def testSampleProbConsistentQuadrature(self): - with self.test_session() as sess: + with self.cached_session() as sess: dims = 4 vdm = vdm_lib.VectorDiffeomixture( mix_loc=[0.], diff --git a/tensorflow/contrib/distributions/python/kernel_tests/vector_exponential_diag_test.py b/tensorflow/contrib/distributions/python/kernel_tests/vector_exponential_diag_test.py index fd05bd207f..db8186b79a 100644 --- 
a/tensorflow/contrib/distributions/python/kernel_tests/vector_exponential_diag_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/vector_exponential_diag_test.py @@ -37,42 +37,42 @@ class VectorExponentialDiagTest(test.TestCase): def testScalarParams(self): mu = -1. diag = -5. - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(ValueError, "at least 1 dimension"): ds.VectorExponentialDiag(mu, diag) def testVectorParams(self): mu = [-1.] diag = [-5.] - with self.test_session(): + with self.cached_session(): dist = ds.VectorExponentialDiag(mu, diag, validate_args=True) self.assertAllEqual([3, 1], dist.sample(3).get_shape()) def testMean(self): mu = [-1., 1] diag = [1., -5] - with self.test_session(): + with self.cached_session(): dist = ds.VectorExponentialDiag(mu, diag, validate_args=True) self.assertAllEqual([-1. + 1., 1. - 5.], dist.mean().eval()) def testMode(self): mu = [-1.] diag = [1., -5] - with self.test_session(): + with self.cached_session(): dist = ds.VectorExponentialDiag(mu, diag, validate_args=True) self.assertAllEqual([-1., -1.], dist.mode().eval()) def testMeanWithBroadcastLoc(self): mu = [-1.] diag = [1., -5] - with self.test_session(): + with self.cached_session(): dist = ds.VectorExponentialDiag(mu, diag, validate_args=True) self.assertAllEqual([-1. + 1, -1. 
- 5], dist.mean().eval()) def testSample(self): mu = [-2., 1] diag = [1., -2] - with self.test_session(): + with self.cached_session(): dist = ds.VectorExponentialDiag(mu, diag, validate_args=True) samps = dist.sample(int(1e4), seed=0).eval() cov_mat = array_ops.matrix_diag(diag).eval()**2 @@ -85,7 +85,7 @@ class VectorExponentialDiagTest(test.TestCase): def testSingularScaleRaises(self): mu = [-1., 1] diag = [1., 0] - with self.test_session(): + with self.cached_session(): dist = ds.VectorExponentialDiag(mu, diag, validate_args=True) with self.assertRaisesOpError("Singular"): dist.sample().eval() @@ -97,7 +97,7 @@ class VectorExponentialDiagTest(test.TestCase): # diag corresponds to no batches of 3-variate normals diag = np.ones([3]) - with self.test_session(): + with self.cached_session(): dist = ds.VectorExponentialDiag(mu, diag, validate_args=True) mean = dist.mean() @@ -117,7 +117,7 @@ class VectorExponentialDiagTest(test.TestCase): atol=0.10, rtol=0.05) def testCovariance(self): - with self.test_session(): + with self.cached_session(): vex = ds.VectorExponentialDiag( loc=array_ops.ones([2, 3], dtype=dtypes.float32)) self.assertAllClose( @@ -153,7 +153,7 @@ class VectorExponentialDiagTest(test.TestCase): vex.covariance().eval()) def testVariance(self): - with self.test_session(): + with self.cached_session(): vex = ds.VectorExponentialDiag( loc=array_ops.zeros([2, 3], dtype=dtypes.float32)) self.assertAllClose( @@ -178,7 +178,7 @@ class VectorExponentialDiagTest(test.TestCase): vex.variance().eval()) def testStddev(self): - with self.test_session(): + with self.cached_session(): vex = ds.VectorExponentialDiag( loc=array_ops.zeros([2, 3], dtype=dtypes.float32)) self.assertAllClose( diff --git a/tensorflow/contrib/distributions/python/kernel_tests/vector_laplace_diag_test.py b/tensorflow/contrib/distributions/python/kernel_tests/vector_laplace_diag_test.py index 1226c66113..9ee19b7e93 100644 --- 
a/tensorflow/contrib/distributions/python/kernel_tests/vector_laplace_diag_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/vector_laplace_diag_test.py @@ -38,14 +38,14 @@ class VectorLaplaceDiagTest(test.TestCase): def testScalarParams(self): mu = -1. diag = -5. - with self.test_session(): + with self.cached_session(): with self.assertRaisesRegexp(ValueError, "at least 1 dimension"): ds.VectorLaplaceDiag(mu, diag) def testVectorParams(self): mu = [-1.] diag = [-5.] - with self.test_session(): + with self.cached_session(): dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True) self.assertAllEqual([3, 1], dist.sample(3).get_shape()) @@ -56,7 +56,7 @@ class VectorLaplaceDiagTest(test.TestCase): # Batch shape = [1], event shape = [3] mu = array_ops.zeros((1, 3)) diag = array_ops.ones((1, 3)) - with self.test_session(): + with self.cached_session(): base_dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True) dist = ds.TransformedDistribution( base_dist, @@ -68,21 +68,21 @@ class VectorLaplaceDiagTest(test.TestCase): def testMean(self): mu = [-1., 1] diag = [1., -5] - with self.test_session(): + with self.cached_session(): dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True) self.assertAllEqual(mu, dist.mean().eval()) def testMeanWithBroadcastLoc(self): mu = [-1.] diag = [1., -5] - with self.test_session(): + with self.cached_session(): dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True) self.assertAllEqual([-1., -1.], dist.mean().eval()) def testSample(self): mu = [-1., 1] diag = [1., -2] - with self.test_session(): + with self.cached_session(): dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True) samps = dist.sample(int(1e4), seed=0).eval() cov_mat = 2. 
* array_ops.matrix_diag(diag).eval()**2 @@ -95,7 +95,7 @@ class VectorLaplaceDiagTest(test.TestCase): def testSingularScaleRaises(self): mu = [-1., 1] diag = [1., 0] - with self.test_session(): + with self.cached_session(): dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True) with self.assertRaisesOpError("Singular"): dist.sample().eval() @@ -107,7 +107,7 @@ class VectorLaplaceDiagTest(test.TestCase): # diag corresponds to no batches of 3-variate normals diag = np.ones([3]) - with self.test_session(): + with self.cached_session(): dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True) mean = dist.mean() @@ -126,7 +126,7 @@ class VectorLaplaceDiagTest(test.TestCase): atol=0.10, rtol=0.05) def testCovariance(self): - with self.test_session(): + with self.cached_session(): vla = ds.VectorLaplaceDiag( loc=array_ops.zeros([2, 3], dtype=dtypes.float32)) self.assertAllClose( @@ -162,7 +162,7 @@ class VectorLaplaceDiagTest(test.TestCase): vla.covariance().eval()) def testVariance(self): - with self.test_session(): + with self.cached_session(): vla = ds.VectorLaplaceDiag( loc=array_ops.zeros([2, 3], dtype=dtypes.float32)) self.assertAllClose( @@ -187,7 +187,7 @@ class VectorLaplaceDiagTest(test.TestCase): vla.variance().eval()) def testStddev(self): - with self.test_session(): + with self.cached_session(): vla = ds.VectorLaplaceDiag( loc=array_ops.zeros([2, 3], dtype=dtypes.float32)) self.assertAllClose( diff --git a/tensorflow/contrib/distributions/python/kernel_tests/vector_sinh_arcsinh_diag_test.py b/tensorflow/contrib/distributions/python/kernel_tests/vector_sinh_arcsinh_diag_test.py index 2bc6a926dd..0dd7d23eb0 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/vector_sinh_arcsinh_diag_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/vector_sinh_arcsinh_diag_test.py @@ -35,7 +35,7 @@ class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers, scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.0) loc = 
rng.randn(d) - with self.test_session() as sess: + with self.cached_session() as sess: norm = ds.MultivariateNormalDiag( loc=loc, scale_diag=scale_diag, @@ -65,7 +65,7 @@ class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers, scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.2) loc = rng.randn(d) - with self.test_session() as sess: + with self.cached_session() as sess: vlap = ds.VectorLaplaceDiag( loc=loc, scale_diag=scale_diag, @@ -96,7 +96,7 @@ class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers, scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(0.9) loc = rng.randn(d) - with self.test_session() as sess: + with self.cached_session() as sess: norm = ds.MultivariateNormalDiag( loc=loc, scale_diag=scale_diag, @@ -141,7 +141,7 @@ class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers, scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.0) loc = rng.randn(d) - with self.test_session() as sess: + with self.cached_session() as sess: norm = ds.MultivariateNormalDiag( loc=loc, scale_diag=scale_diag, @@ -186,7 +186,7 @@ class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers, scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.0) loc = rng.randn(d) - with self.test_session() as sess: + with self.cached_session() as sess: sasnorm = ds.VectorSinhArcsinhDiag( loc=loc, scale_diag=scale_diag, @@ -201,7 +201,7 @@ class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers, b, d = 5, 2 scale_diag = rng.rand(b, d) scale_identity_multiplier = np.float64(1.1) - with self.test_session() as sess: + with self.cached_session() as sess: sasnorm = ds.VectorSinhArcsinhDiag( scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, @@ -228,7 +228,7 @@ class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers, d = 3 scale_diag = rng.rand(d) scale_identity_multiplier = np.float64(1.1) - with self.test_session() as sess: + 
with self.cached_session() as sess: sasnorm = ds.VectorSinhArcsinhDiag( scale_diag=scale_diag, scale_identity_multiplier=scale_identity_multiplier, @@ -252,7 +252,7 @@ class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers, rtol=0.1) def test_pdf_reflected_for_negative_skewness(self): - with self.test_session() as sess: + with self.cached_session() as sess: sas_pos_skew = ds.VectorSinhArcsinhDiag( loc=[0.], scale_identity_multiplier=1., diff --git a/tensorflow/contrib/distributions/python/kernel_tests/vector_student_t_test.py b/tensorflow/contrib/distributions/python/kernel_tests/vector_student_t_test.py index b8a3a262ce..aaec1f09d9 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/vector_student_t_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/vector_student_t_test.py @@ -75,7 +75,7 @@ class VectorStudentTTest(test.TestCase): self._rng = np.random.RandomState(42) def testProbStaticScalar(self): - with self.test_session(): + with self.cached_session(): # Scalar batch_shape. df = np.asarray(3., dtype=np.float32) # Scalar batch_shape. 
@@ -116,7 +116,7 @@ class VectorStudentTTest(test.TestCase): expected_mst = _FakeVectorStudentT( df=df, loc=loc, scale_tril=scale_tril) - with self.test_session(): + with self.cached_session(): actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag, validate_args=True) self.assertAllClose(expected_mst.log_prob(x), @@ -145,7 +145,7 @@ class VectorStudentTTest(test.TestCase): expected_mst = _FakeVectorStudentT( df=df, loc=loc, scale_tril=scale_tril) - with self.test_session(): + with self.cached_session(): df_pl = array_ops.placeholder(dtypes.float32, name="df") loc_pl = array_ops.placeholder(dtypes.float32, name="loc") scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag") @@ -180,7 +180,7 @@ class VectorStudentTTest(test.TestCase): loc=loc, scale_tril=scale_tril) - with self.test_session(): + with self.cached_session(): actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag, validate_args=True) self.assertAllClose(expected_mst.log_prob(x), @@ -211,7 +211,7 @@ class VectorStudentTTest(test.TestCase): loc=loc, scale_tril=scale_tril) - with self.test_session(): + with self.cached_session(): df_pl = array_ops.placeholder(dtypes.float32, name="df") loc_pl = array_ops.placeholder(dtypes.float32, name="loc") scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag") @@ -240,7 +240,7 @@ class VectorStudentTTest(test.TestCase): scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :], reps=[len(df), 1, 1])) - with self.test_session(): + with self.cached_session(): actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag, validate_args=True) self.assertAllClose(expected_mst.log_prob(x), @@ -266,7 +266,7 @@ class VectorStudentTTest(test.TestCase): scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :], reps=[len(df), 1, 1])) - with self.test_session(): + with self.cached_session(): df_pl = array_ops.placeholder(dtypes.float32, name="df") loc_pl = array_ops.placeholder(dtypes.float32, name="loc") scale_diag_pl = 
array_ops.placeholder(dtypes.float32, name="scale_diag") diff --git a/tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py b/tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py index dcecce981f..a60056c444 100644 --- a/tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py +++ b/tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py @@ -52,7 +52,7 @@ def wishart_var(df, x): class WishartCholeskyTest(test.TestCase): def testEntropy(self): - with self.test_session(): + with self.cached_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) @@ -64,7 +64,7 @@ class WishartCholeskyTest(test.TestCase): self.assertAllClose(0.78375711047393404, w.entropy().eval()) def testMeanLogDetAndLogNormalizingConstant(self): - with self.test_session(): + with self.cached_session(): def entropy_alt(w): return ( @@ -80,35 +80,35 @@ class WishartCholeskyTest(test.TestCase): self.assertAllClose(w.entropy().eval(), entropy_alt(w)) def testMean(self): - with self.test_session(): + with self.cached_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) self.assertAllEqual(df * scale, w.mean().eval()) def testMode(self): - with self.test_session(): + with self.cached_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) self.assertAllEqual((df - 2. - 1.) 
* scale, w.mode().eval()) def testStd(self): - with self.test_session(): + with self.cached_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) self.assertAllEqual(chol(wishart_var(df, scale)), w.stddev().eval()) def testVariance(self): - with self.test_session(): + with self.cached_session(): scale = make_pd(1., 2) df = 4 w = distributions.WishartCholesky(df, chol(scale)) self.assertAllEqual(wishart_var(df, scale), w.variance().eval()) def testSample(self): - with self.test_session(): + with self.cached_session(): scale = make_pd(1., 2) df = 4 @@ -161,7 +161,7 @@ class WishartCholeskyTest(test.TestCase): # Test that sampling with the same seed twice gives the same results. def testSampleMultipleTimes(self): - with self.test_session(): + with self.cached_session(): df = 4. n_val = 100 @@ -184,7 +184,7 @@ class WishartCholeskyTest(test.TestCase): self.assertAllClose(samples1, samples2) def testProb(self): - with self.test_session(): + with self.cached_session(): # Generate some positive definite (pd) matrices and their Cholesky # factorizations. 
x = np.array( @@ -271,7 +271,7 @@ class WishartCholeskyTest(test.TestCase): w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape()) def testBatchShape(self): - with self.test_session() as sess: + with self.cached_session() as sess: scale = make_pd(1., 2) chol_scale = chol(scale) @@ -295,7 +295,7 @@ class WishartCholeskyTest(test.TestCase): feed_dict={scale_deferred: [chol_scale, chol_scale]})) def testEventShape(self): - with self.test_session() as sess: + with self.cached_session() as sess: scale = make_pd(1., 2) chol_scale = chol(scale) @@ -320,7 +320,7 @@ class WishartCholeskyTest(test.TestCase): feed_dict={scale_deferred: [chol_scale, chol_scale]})) def testValidateArgs(self): - with self.test_session() as sess: + with self.cached_session() as sess: df_deferred = array_ops.placeholder(dtypes.float32) chol_scale_deferred = array_ops.placeholder(dtypes.float32) x = make_pd(1., 3) @@ -374,7 +374,7 @@ class WishartCholeskyTest(test.TestCase): chol_scale_deferred: np.ones((3, 3))}) def testStaticAsserts(self): - with self.test_session(): + with self.cached_session(): x = make_pd(1., 3) chol_scale = chol(x) @@ -404,7 +404,7 @@ class WishartCholeskyTest(test.TestCase): batch_shape + [dims, dims]) wishart = distributions.WishartFull(df=5, scale=scale) x = wishart.sample(sample_shape, seed=42) - with self.test_session() as sess: + with self.cached_session() as sess: x_ = sess.run(x) expected_shape = sample_shape + batch_shape + [dims, dims] self.assertAllEqual(expected_shape, x.shape) |