author     Brian Patton <bjp@google.com>  2018-09-20 12:57:56 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-09-20 13:02:40 -0700
commit     07bb219ee9a6f11139396ac73d4138522300f86b (patch)
tree       a4cc671061550cafa0af348ac0def03816c3be6e /tensorflow/contrib/distributions
parent     4aa639c0cbb47f4707f735e0cc80f4c39506d928 (diff)
Modify docs under contrib/distributions to point to tfp.
PiperOrigin-RevId: 213866466
Diffstat (limited to 'tensorflow/contrib/distributions')
-rw-r--r--  tensorflow/contrib/distributions/python/ops/autoregressive.py  7
-rw-r--r--  tensorflow/contrib/distributions/python/ops/batch_reshape.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py  9
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bijectors/permute.py  5
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py  5
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bijectors/reshape.py  5
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bijectors/scale_tril.py  5
-rw-r--r--  tensorflow/contrib/distributions/python/ops/cauchy.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/deterministic.py  10
-rw-r--r--  tensorflow/contrib/distributions/python/ops/gumbel.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/half_normal.py  7
-rw-r--r--  tensorflow/contrib/distributions/python/ops/independent.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/inverse_gamma.py  4
-rw-r--r--  tensorflow/contrib/distributions/python/ops/logistic.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mixture.py  4
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mixture_same_family.py  7
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mvn_diag.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mvn_diag_plus_low_rank.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mvn_full_covariance.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mvn_linear_operator.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mvn_tril.py  7
-rw-r--r--  tensorflow/contrib/distributions/python/ops/poisson_lognormal.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/quantized_distribution.py  5
-rw-r--r--  tensorflow/contrib/distributions/python/ops/sinh_arcsinh.py  2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/statistical_testing.py  42
-rw-r--r--  tensorflow/contrib/distributions/python/ops/vector_diffeomixture.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/vector_exponential_diag.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/vector_laplace_diag.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py  2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/vector_student_t.py  3
-rw-r--r--  tensorflow/contrib/distributions/python/ops/wishart.py  18
33 files changed, 118 insertions, 74 deletions
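Every file below makes the same mechanical substitution: docstring examples stop aliasing the deprecated `tf.contrib.distributions` namespace and import `tensorflow_probability` instead. A minimal sketch of the new aliasing pattern the updated docs rely on, with usage lifted from examples in the patch (the `Cauchy` and `Permute` calls are illustrative, not a complete example from any one file):

```python
# Aliases used throughout the updated docstrings in this commit.
import tensorflow_probability as tfp

tfd = tfp.distributions  # previously: tfd = tf.contrib.distributions
tfb = tfp.bijectors      # previously: tfb = tfd.bijectors

# Illustrative usage drawn from the updated examples below.
dist = tfd.Cauchy(loc=0., scale=3.)           # a single scalar Cauchy distribution
reverse = tfb.Permute(permutation=[2, 1, 0])  # permutes the rightmost dimension
reverse.forward([-1., 0., 1.])                # ==> [1., 0., -1.]
```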
diff --git a/tensorflow/contrib/distributions/python/ops/autoregressive.py b/tensorflow/contrib/distributions/python/ops/autoregressive.py
index bb9b8043b2..3ba1c3a665 100644
--- a/tensorflow/contrib/distributions/python/ops/autoregressive.py
+++ b/tensorflow/contrib/distributions/python/ops/autoregressive.py
@@ -65,13 +65,14 @@ class Autoregressive(distribution_lib.Distribution):
```
where the ellipses (`...`) represent `n-2` composed calls to `fn`, `fn`
- constructs a `tf.distributions.Distribution`-like instance, and `x0` is a
+ constructs a `tfp.distributions.Distribution`-like instance, and `x0` is a
fixed initializing `Tensor`.
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
def normal_fn(self, event_size):
n = event_size * (event_size + 1) / 2
@@ -127,7 +128,7 @@ class Autoregressive(distribution_lib.Distribution):
Args:
distribution_fn: Python `callable` which constructs a
- `tf.distributions.Distribution`-like instance from a `Tensor` (e.g.,
+ `tfp.distributions.Distribution`-like instance from a `Tensor` (e.g.,
`sample0`). The function must respect the "autoregressive property",
i.e., there exists a permutation of event such that each coordinate is a
diffeomorphic function of on preceding coordinates.
diff --git a/tensorflow/contrib/distributions/python/ops/batch_reshape.py b/tensorflow/contrib/distributions/python/ops/batch_reshape.py
index 519077bc9a..612376efb7 100644
--- a/tensorflow/contrib/distributions/python/ops/batch_reshape.py
+++ b/tensorflow/contrib/distributions/python/ops/batch_reshape.py
@@ -45,7 +45,8 @@ class BatchReshape(distribution_lib.Distribution):
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
dtype = np.float32
dims = 2
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py b/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
index 296e66f2b2..3b3d8ee6f2 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
@@ -61,8 +61,8 @@ class MaskedAutoregressiveFlow(bijector.Bijector):
`shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves
this property by zeroing out weights in its `masked_dense` layers.
- In the `tf.distributions` framework, a "normalizing flow" is implemented as a
- `tf.contrib.distributions.bijectors.Bijector`. The `forward` "autoregression"
+ In the `tfp` framework, a "normalizing flow" is implemented as a
+ `tfp.bijectors.Bijector`. The `forward` "autoregression"
is implemented using a `tf.while_loop` and a deep neural network (DNN) with
masked weights such that the autoregressive property is automatically met in
the `inverse`.
@@ -126,8 +126,9 @@ class MaskedAutoregressiveFlow(bijector.Bijector):
#### Examples
```python
- tfd = tf.contrib.distributions
- tfb = tfd.bijectors
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+ tfb = tfp.bijectors
dims = 5
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/permute.py b/tensorflow/contrib/distributions/python/ops/bijectors/permute.py
index f182a1adcb..178c3c94bf 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/permute.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/permute.py
@@ -41,9 +41,10 @@ class Permute(bijector.Bijector):
"""Permutes the rightmost dimension of a `Tensor`.
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfb = tfp.bijectors
- reverse = tfd.bijectors.Permute(permutation=[2, 1, 0])
+ reverse = tfb.Permute(permutation=[2, 1, 0])
reverse.forward([-1., 0., 1.])
# ==> [1., 0., -1]
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py b/tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py
index 773ae24461..0bcb08cdea 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py
@@ -90,8 +90,9 @@ class RealNVP(bijector.Bijector):
#### Example Use
```python
- tfd = tf.contrib.distributions
- tfb = tfd.bijectors
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+ tfb = tfp.bijectors
# A common choice for a normalizing flow is to use a Gaussian for the base
# distribution. (However, any continuous distribution would work.) E.g.,
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/reshape.py b/tensorflow/contrib/distributions/python/ops/bijectors/reshape.py
index c8282229a3..71ac29038f 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/reshape.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/reshape.py
@@ -80,9 +80,10 @@ class Reshape(bijector.Bijector):
Example usage:
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfb = tfp.bijectors
- r = tfd.bijectors.Reshape(event_shape_out=[1, -1])
+ r = tfb.Reshape(event_shape_out=[1, -1])
r.forward([3., 4.]) # shape [2]
# ==> [[3., 4.]] # shape [1, 2]
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/scale_tril.py b/tensorflow/contrib/distributions/python/ops/bijectors/scale_tril.py
index 6fbe866578..0a6d690b65 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/scale_tril.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/scale_tril.py
@@ -42,7 +42,10 @@ class ScaleTriL(chain.Chain):
#### Examples
```python
- tfb = tf.contrib.distributions.bijectors
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+ tfb = tfp.bijectors
+
b = tfb.ScaleTriL(
diag_bijector=tfb.Exp(),
diag_shift=None)
diff --git a/tensorflow/contrib/distributions/python/ops/cauchy.py b/tensorflow/contrib/distributions/python/ops/cauchy.py
index cb5223b055..c461833b9a 100644
--- a/tensorflow/contrib/distributions/python/ops/cauchy.py
+++ b/tensorflow/contrib/distributions/python/ops/cauchy.py
@@ -63,7 +63,8 @@ class Cauchy(distribution.Distribution):
Examples of initialization of one or a batch of distributions.
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Define a single scalar Cauchy distribution.
dist = tfd.Cauchy(loc=0., scale=3.)
diff --git a/tensorflow/contrib/distributions/python/ops/deterministic.py b/tensorflow/contrib/distributions/python/ops/deterministic.py
index affc64a14f..507c5d3679 100644
--- a/tensorflow/contrib/distributions/python/ops/deterministic.py
+++ b/tensorflow/contrib/distributions/python/ops/deterministic.py
@@ -198,8 +198,11 @@ class Deterministic(_BaseDeterministic):
#### Examples
```python
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+
# Initialize a single Deterministic supported at zero.
- constant = tf.contrib.distributions.Deterministic(0.)
+ constant = tfd.Deterministic(0.)
constant.prob(0.)
==> 1.
constant.prob(2.)
@@ -208,7 +211,7 @@ class Deterministic(_BaseDeterministic):
# Initialize a [2, 2] batch of scalar constants.
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
- constant = tf.contrib.distributions.Deterministic(loc)
+ constant = tfd.Deterministic(loc)
constant.prob(x)
==> [[1., 0.], [0., 1.]]
```
@@ -310,7 +313,8 @@ class VectorDeterministic(_BaseDeterministic):
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single VectorDeterministic supported at [0., 2.] in R^2.
constant = tfd.Deterministic([0., 2.])
diff --git a/tensorflow/contrib/distributions/python/ops/gumbel.py b/tensorflow/contrib/distributions/python/ops/gumbel.py
index acdea4d61d..4b50df5b48 100644
--- a/tensorflow/contrib/distributions/python/ops/gumbel.py
+++ b/tensorflow/contrib/distributions/python/ops/gumbel.py
@@ -63,7 +63,8 @@ class _Gumbel(distribution.Distribution):
Examples of initialization of one or a batch of distributions.
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Define a single scalar Gumbel distribution.
dist = tfd.Gumbel(loc=0., scale=3.)
diff --git a/tensorflow/contrib/distributions/python/ops/half_normal.py b/tensorflow/contrib/distributions/python/ops/half_normal.py
index b02c403106..f121637086 100644
--- a/tensorflow/contrib/distributions/python/ops/half_normal.py
+++ b/tensorflow/contrib/distributions/python/ops/half_normal.py
@@ -66,15 +66,18 @@ class HalfNormal(distribution.Distribution):
Examples of initialization of one or a batch of distributions.
```python
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+
# Define a single scalar HalfNormal distribution.
- dist = tf.contrib.distributions.HalfNormal(scale=3.0)
+ dist = tfd.HalfNormal(scale=3.0)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued HalfNormals.
# The first has scale 11.0, the second 22.0
- dist = tf.contrib.distributions.HalfNormal(scale=[11.0, 22.0])
+ dist = tfd.HalfNormal(scale=[11.0, 22.0])
# Evaluate the pdf of the first distribution on 1.0, and the second on 1.5,
# returning a length two tensor.
diff --git a/tensorflow/contrib/distributions/python/ops/independent.py b/tensorflow/contrib/distributions/python/ops/independent.py
index 0672702b96..e1cfff3c66 100644
--- a/tensorflow/contrib/distributions/python/ops/independent.py
+++ b/tensorflow/contrib/distributions/python/ops/independent.py
@@ -70,7 +70,8 @@ class Independent(distribution_lib.Distribution):
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Make independent distribution from a 2-batch Normal.
ind = tfd.Independent(
diff --git a/tensorflow/contrib/distributions/python/ops/inverse_gamma.py b/tensorflow/contrib/distributions/python/ops/inverse_gamma.py
index 70d050d7a6..452628257e 100644
--- a/tensorflow/contrib/distributions/python/ops/inverse_gamma.py
+++ b/tensorflow/contrib/distributions/python/ops/inverse_gamma.py
@@ -89,7 +89,9 @@ class InverseGamma(distribution.Distribution):
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+
dist = tfd.InverseGamma(concentration=3.0, rate=2.0)
dist2 = tfd.InverseGamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
```
diff --git a/tensorflow/contrib/distributions/python/ops/logistic.py b/tensorflow/contrib/distributions/python/ops/logistic.py
index 02e3bad51e..21c9b5a354 100644
--- a/tensorflow/contrib/distributions/python/ops/logistic.py
+++ b/tensorflow/contrib/distributions/python/ops/logistic.py
@@ -61,7 +61,8 @@ class Logistic(distribution.Distribution):
Examples of initialization of one or a batch of distributions.
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Define a single scalar Logistic distribution.
dist = tfd.Logistic(loc=0., scale=3.)
diff --git a/tensorflow/contrib/distributions/python/ops/mixture.py b/tensorflow/contrib/distributions/python/ops/mixture.py
index 3b7114ef06..52b67f2c54 100644
--- a/tensorflow/contrib/distributions/python/ops/mixture.py
+++ b/tensorflow/contrib/distributions/python/ops/mixture.py
@@ -50,7 +50,9 @@ class Mixture(distribution.Distribution):
```python
# Create a mixture of two Gaussians:
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+
mix = 0.3
bimix_gauss = tfd.Mixture(
cat=tfd.Categorical(probs=[mix, 1.-mix]),
diff --git a/tensorflow/contrib/distributions/python/ops/mixture_same_family.py b/tensorflow/contrib/distributions/python/ops/mixture_same_family.py
index 8ffee940d0..f4d394ff29 100644
--- a/tensorflow/contrib/distributions/python/ops/mixture_same_family.py
+++ b/tensorflow/contrib/distributions/python/ops/mixture_same_family.py
@@ -44,7 +44,8 @@ class MixtureSameFamily(distribution.Distribution):
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
### Create a mixture of two scalar Gaussians:
@@ -113,12 +114,12 @@ class MixtureSameFamily(distribution.Distribution):
"""Construct a `MixtureSameFamily` distribution.
Args:
- mixture_distribution: `tf.distributions.Categorical`-like instance.
+ mixture_distribution: `tfp.distributions.Categorical`-like instance.
Manages the probability of selecting components. The number of
categories must match the rightmost batch dimension of the
`components_distribution`. Must have either scalar `batch_shape` or
`batch_shape` matching `components_distribution.batch_shape[:-1]`.
- components_distribution: `tf.distributions.Distribution`-like instance.
+ components_distribution: `tfp.distributions.Distribution`-like instance.
Right-most batch dimension indexes components.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
diff --git a/tensorflow/contrib/distributions/python/ops/mvn_diag.py b/tensorflow/contrib/distributions/python/ops/mvn_diag.py
index cd0c282ba6..0b5b76be92 100644
--- a/tensorflow/contrib/distributions/python/ops/mvn_diag.py
+++ b/tensorflow/contrib/distributions/python/ops/mvn_diag.py
@@ -85,7 +85,8 @@ class MultivariateNormalDiag(
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 2-variate Gaussian.
mvn = tfd.MultivariateNormalDiag(
diff --git a/tensorflow/contrib/distributions/python/ops/mvn_diag_plus_low_rank.py b/tensorflow/contrib/distributions/python/ops/mvn_diag_plus_low_rank.py
index 74d9d04fc7..80546083d3 100644
--- a/tensorflow/contrib/distributions/python/ops/mvn_diag_plus_low_rank.py
+++ b/tensorflow/contrib/distributions/python/ops/mvn_diag_plus_low_rank.py
@@ -87,7 +87,8 @@ class MultivariateNormalDiagPlusLowRank(
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 3-variate Gaussian with covariance `cov = S @ S.T`,
# `S = diag(d) + U @ diag(m) @ U.T`. The perturbation, `U @ diag(m) @ U.T`, is
diff --git a/tensorflow/contrib/distributions/python/ops/mvn_full_covariance.py b/tensorflow/contrib/distributions/python/ops/mvn_full_covariance.py
index dbc4c1b3dc..bcb4937980 100644
--- a/tensorflow/contrib/distributions/python/ops/mvn_full_covariance.py
+++ b/tensorflow/contrib/distributions/python/ops/mvn_full_covariance.py
@@ -73,7 +73,8 @@ class MultivariateNormalFullCovariance(mvn_tril.MultivariateNormalTriL):
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
diff --git a/tensorflow/contrib/distributions/python/ops/mvn_linear_operator.py b/tensorflow/contrib/distributions/python/ops/mvn_linear_operator.py
index efe5a6d0d9..8fdc99824b 100644
--- a/tensorflow/contrib/distributions/python/ops/mvn_linear_operator.py
+++ b/tensorflow/contrib/distributions/python/ops/mvn_linear_operator.py
@@ -91,7 +91,8 @@ class MultivariateNormalLinearOperator(
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
diff --git a/tensorflow/contrib/distributions/python/ops/mvn_tril.py b/tensorflow/contrib/distributions/python/ops/mvn_tril.py
index c6a23e4336..c21f70fc3b 100644
--- a/tensorflow/contrib/distributions/python/ops/mvn_tril.py
+++ b/tensorflow/contrib/distributions/python/ops/mvn_tril.py
@@ -77,13 +77,14 @@ class MultivariateNormalTriL(
```
Trainable (batch) lower-triangular matrices can be created with
- `tf.contrib.distributions.matrix_diag_transform()` and/or
- `tf.contrib.distributions.fill_triangular()`
+ `tfp.distributions.matrix_diag_transform()` and/or
+ `tfp.distributions.fill_triangular()`
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
diff --git a/tensorflow/contrib/distributions/python/ops/poisson_lognormal.py b/tensorflow/contrib/distributions/python/ops/poisson_lognormal.py
index 7a7ad1be35..85683e3233 100644
--- a/tensorflow/contrib/distributions/python/ops/poisson_lognormal.py
+++ b/tensorflow/contrib/distributions/python/ops/poisson_lognormal.py
@@ -220,7 +220,8 @@ class PoissonLogNormalQuadratureCompound(distribution_lib.Distribution):
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Create two batches of PoissonLogNormalQuadratureCompounds, one with
# prior `loc = 0.` and another with `loc = 1.` In both cases `scale = 1.`
diff --git a/tensorflow/contrib/distributions/python/ops/quantized_distribution.py b/tensorflow/contrib/distributions/python/ops/quantized_distribution.py
index 18a0f754e6..134658deab 100644
--- a/tensorflow/contrib/distributions/python/ops/quantized_distribution.py
+++ b/tensorflow/contrib/distributions/python/ops/quantized_distribution.py
@@ -196,8 +196,9 @@ class QuantizedDistribution(distributions.Distribution):
parameter determining the unnormalized probability of that component.
```python
- tfd = tf.contrib.distributions
- tfb = tfd.bijectors
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+ tfb = tfp.bijectors
net = wavenet(inputs)
loc, unconstrained_scale, logits = tf.split(net,
diff --git a/tensorflow/contrib/distributions/python/ops/sinh_arcsinh.py b/tensorflow/contrib/distributions/python/ops/sinh_arcsinh.py
index a9d0fb4ccf..4b520b912e 100644
--- a/tensorflow/contrib/distributions/python/ops/sinh_arcsinh.py
+++ b/tensorflow/contrib/distributions/python/ops/sinh_arcsinh.py
@@ -124,7 +124,7 @@ class SinhArcsinh(transformed_distribution.TransformedDistribution):
tailweight: Tailweight parameter. Default is `1.0` (unchanged tailweight)
distribution: `tf.Distribution`-like instance. Distribution that is
transformed to produce this distribution.
- Default is `tf.distributions.Normal(0., 1.)`.
+ Default is `tfp.distributions.Normal(0., 1.)`.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
diff --git a/tensorflow/contrib/distributions/python/ops/statistical_testing.py b/tensorflow/contrib/distributions/python/ops/statistical_testing.py
index c25e8c51d7..af22f4843a 100644
--- a/tensorflow/contrib/distributions/python/ops/statistical_testing.py
+++ b/tensorflow/contrib/distributions/python/ops/statistical_testing.py
@@ -30,27 +30,27 @@ is some expected constant. Suppose the support of P is the interval
`[0, 1]`. Then you might do this:
```python
-tfd = tf.contrib.distributions
-
-expected_mean = ...
-num_samples = 5000
-samples = ... draw 5000 samples from P
-
-# Check that the mean looks right
-check1 = tfd.assert_true_mean_equal_by_dkwm(
- samples, low=0., high=1., expected=expected_mean,
- false_fail_rate=1e-6)
-
-# Check that the difference in means detectable with 5000 samples is
-# small enough
-check2 = tf.assert_less(
- tfd.min_discrepancy_of_true_means_detectable_by_dkwm(
- num_samples, low=0., high=1.0,
- false_fail_rate=1e-6, false_pass_rate=1e-6),
- 0.01)
-
-# Be sure to execute both assertion ops
-sess.run([check1, check2])
+ from tensorflow_probability.python.distributions.internal import statistical_testing
+
+ expected_mean = ...
+ num_samples = 5000
+ samples = ... draw 5000 samples from P
+
+ # Check that the mean looks right
+ check1 = statistical_testing.assert_true_mean_equal_by_dkwm(
+ samples, low=0., high=1., expected=expected_mean,
+ false_fail_rate=1e-6)
+
+ # Check that the difference in means detectable with 5000 samples is
+ # small enough
+ check2 = tf.assert_less(
+ statistical_testing.min_discrepancy_of_true_means_detectable_by_dkwm(
+ num_samples, low=0., high=1.0,
+ false_fail_rate=1e-6, false_pass_rate=1e-6),
+ 0.01)
+
+ # Be sure to execute both assertion ops
+ sess.run([check1, check2])
```
The second assertion is an instance of experiment design. It's a
diff --git a/tensorflow/contrib/distributions/python/ops/vector_diffeomixture.py b/tensorflow/contrib/distributions/python/ops/vector_diffeomixture.py
index 3c8aae2797..a3d178357b 100644
--- a/tensorflow/contrib/distributions/python/ops/vector_diffeomixture.py
+++ b/tensorflow/contrib/distributions/python/ops/vector_diffeomixture.py
@@ -300,7 +300,8 @@ class VectorDiffeomixture(distribution_lib.Distribution):
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Create two batches of VectorDiffeomixtures, one with mix_loc=[0.],
# another with mix_loc=[1]. In both cases, `K=2` and the affine
diff --git a/tensorflow/contrib/distributions/python/ops/vector_exponential_diag.py b/tensorflow/contrib/distributions/python/ops/vector_exponential_diag.py
index 73356a3625..36cbd71f8b 100644
--- a/tensorflow/contrib/distributions/python/ops/vector_exponential_diag.py
+++ b/tensorflow/contrib/distributions/python/ops/vector_exponential_diag.py
@@ -90,7 +90,8 @@ class VectorExponentialDiag(
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
diff --git a/tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py b/tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py
index 9a47b48557..fd5bf9ecc7 100644
--- a/tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py
+++ b/tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py
@@ -108,7 +108,8 @@ class VectorExponentialLinearOperator(
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
diff --git a/tensorflow/contrib/distributions/python/ops/vector_laplace_diag.py b/tensorflow/contrib/distributions/python/ops/vector_laplace_diag.py
index e68ddc569c..8cd4e128c7 100644
--- a/tensorflow/contrib/distributions/python/ops/vector_laplace_diag.py
+++ b/tensorflow/contrib/distributions/python/ops/vector_laplace_diag.py
@@ -102,7 +102,8 @@ class VectorLaplaceDiag(
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 2-variate VectorLaplace.
vla = tfd.VectorLaplaceDiag(
diff --git a/tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py b/tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py
index 3923161a33..67d2ccd28d 100644
--- a/tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py
+++ b/tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py
@@ -110,7 +110,8 @@ class VectorLaplaceLinearOperator(
#### Examples
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 3-variate VectorLaplace with some desired covariance.
mu = [1., 2, 3]
diff --git a/tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py b/tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py
index 49ffff24ca..da57d0cb55 100644
--- a/tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py
+++ b/tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py
@@ -152,7 +152,7 @@ class VectorSinhArcsinhDiag(transformed_distribution.TransformedDistribution):
broadcastable with `event_shape`.
distribution: `tf.Distribution`-like instance. Distribution from which `k`
iid samples are used as input to transformation `F`. Default is
- `tf.distributions.Normal(loc=0., scale=1.)`.
+ `tfp.distributions.Normal(loc=0., scale=1.)`.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
diff --git a/tensorflow/contrib/distributions/python/ops/vector_student_t.py b/tensorflow/contrib/distributions/python/ops/vector_student_t.py
index f289b39e51..bad91a0844 100644
--- a/tensorflow/contrib/distributions/python/ops/vector_student_t.py
+++ b/tensorflow/contrib/distributions/python/ops/vector_student_t.py
@@ -92,7 +92,8 @@ class _VectorStudentT(transformed_distribution.TransformedDistribution):
Extra leading dimensions, if provided, allow for batches.
```python
- tfd = tf.contrib.distributions
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
# Initialize a single 3-variate vector Student's t-distribution.
mu = [1., 2, 3]
diff --git a/tensorflow/contrib/distributions/python/ops/wishart.py b/tensorflow/contrib/distributions/python/ops/wishart.py
index 49b9de0ab5..ee2fc58864 100644
--- a/tensorflow/contrib/distributions/python/ops/wishart.py
+++ b/tensorflow/contrib/distributions/python/ops/wishart.py
@@ -480,11 +480,14 @@ class WishartCholesky(_WishartLinearOperator):
#### Examples
```python
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+
# Initialize a single 3x3 Wishart with Cholesky factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
chol_scale = tf.cholesky(...) # Shape is [3, 3].
- dist = tf.contrib.distributions.WishartCholesky(df=df, scale=chol_scale)
+ dist = tfd.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on an observation in R^3, returning a scalar.
x = ... # A 3x3 positive definite matrix.
@@ -498,14 +501,14 @@ class WishartCholesky(_WishartLinearOperator):
# Initialize two 3x3 Wisharts with Cholesky factored scale matrices.
df = [5, 4]
chol_scale = tf.cholesky(...) # Shape is [2, 3, 3].
- dist = tf.contrib.distributions.WishartCholesky(df=df, scale=chol_scale)
+ dist = tfd.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3].
dist.prob(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
- # in tf.contrib.distributions.matrix_diag_transform.
+ # in tfp.distributions.matrix_diag_transform.
```
"""
@@ -604,11 +607,14 @@ class WishartFull(_WishartLinearOperator):
#### Examples
```python
+ import tensorflow_probability as tfp
+ tfd = tfp.distributions
+
# Initialize a single 3x3 Wishart with Full factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
scale = ... # Shape is [3, 3]; positive definite.
- dist = tf.contrib.distributions.WishartFull(df=df, scale=scale)
+ dist = tfd.WishartFull(df=df, scale=scale)
# Evaluate this on an observation in R^3, returning a scalar.
x = ... # A 3x3 positive definite matrix.
@@ -622,14 +628,14 @@ class WishartFull(_WishartLinearOperator):
# Initialize two 3x3 Wisharts with Full factored scale matrices.
df = [5, 4]
scale = ... # Shape is [2, 3, 3].
- dist = tf.contrib.distributions.WishartFull(df=df, scale=scale)
+ dist = tfd.WishartFull(df=df, scale=scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3]; xi is positive definite.
dist.prob(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
- # in tf.contrib.distributions.matrix_diag_transform.
+ # in tfd.matrix_diag_transform.
```
"""