author     A. Unique TensorFlower <gardener@tensorflow.org>  2016-10-11 08:30:39 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>   2016-10-11 09:37:50 -0700
commit     48ebbb8c7247b492cdff12306c4a0381ae099830 (patch)
tree       700f946aaa485e47219b305e8eea409ca26ab6ee
parent     9bff346137aa1c627f2dccb77fdda10863456b45 (diff)
Renames all `DistributionTensor` to `StochasticTensor`
Change: 135804717
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py  34
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py               20
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/variational_inference.py           40
3 files changed, 47 insertions(+), 47 deletions(-)
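At the call-site level the cleanup is mechanical: docstrings, parameter names, and error messages that still said `DistributionTensor` now say `StochasticTensor`. A minimal sketch of the construction the renamed docs describe, following the docstring examples further down in this diff; the import paths and the `sg` alias are assumptions, not part of this commit:

```python
import tensorflow as tf
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor as sg

mu = tf.zeros((2, 3))
sigma = tf.ones((2, 3))

# Formerly spelled `sg.DistributionTensor(...)` in the docs touched below.
with sg.value_type(sg.SampleValue(n=4)):
  st = sg.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)

# st.value() (or tf.identity(st)) draws 4 samples of shape (2, 3) -> (4, 2, 3).
samples = st.value()
```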
diff --git a/tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py b/tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py
index a0d37f81ae..fe9866862e 100644
--- a/tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py
+++ b/tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py
@@ -64,7 +64,7 @@ from tensorflow.python.training import training
from tensorflow.python.util.all_util import make_all
-def score_function(dist_tensor, value, loss, baseline=None,
+def score_function(stochastic_tensor, value, loss, baseline=None,
name="ScoreFunction"):
"""Score function estimator.
@@ -74,7 +74,7 @@ def score_function(dist_tensor, value, loss, baseline=None,
It will add a `stop_gradient` to the advantage `(loss - baseline)`.
Args:
- dist_tensor: `DistributionTensor` p(x).
+ stochastic_tensor: `StochasticTensor` p(x).
value: `Tensor` x. Samples from p(x).
loss: `Tensor`.
baseline: `Tensor` broadcastable to `loss`.
@@ -94,7 +94,7 @@ def score_function(dist_tensor, value, loss, baseline=None,
advantage = loss
advantage = array_ops.stop_gradient(advantage)
- return dist_tensor.distribution.log_prob(value) * advantage
+ return stochastic_tensor.distribution.log_prob(value) * advantage
def get_score_function_with_advantage(advantage_fn=None,
@@ -102,21 +102,21 @@ def get_score_function_with_advantage(advantage_fn=None,
"""Score function estimator with advantage function.
Args:
- advantage_fn: callable that takes the `DistributionTensor` and the
+ advantage_fn: callable that takes the `StochasticTensor` and the
downstream `loss` and returns a `Tensor` advantage
(e.g. `loss - baseline`).
name: name to prepend ops with.
Returns:
- Callable score function estimator that takes the `DistributionTensor`, the
+ Callable score function estimator that takes the `StochasticTensor`, the
sampled `value`, and the downstream `loss`, and uses the provided advantage.
"""
- def score_function_with_advantage(dist_tensor, value, loss):
+ def score_function_with_advantage(stochastic_tensor, value, loss):
with ops.name_scope(name, values=[value, loss]):
- advantage = advantage_fn(dist_tensor, loss)
+ advantage = advantage_fn(stochastic_tensor, loss)
advantage = array_ops.stop_gradient(advantage)
- return dist_tensor.distribution.log_prob(value) * advantage
+ return stochastic_tensor.distribution.log_prob(value) * advantage
return score_function_with_advantage
@@ -129,13 +129,13 @@ def get_score_function_with_constant_baseline(baseline, name="ScoreFunction"):
name: name to prepend ops with.
Returns:
- Callable score function estimator that takes the `DistributionTensor`, the
+ Callable score function estimator that takes the `StochasticTensor`, the
sampled `value`, and the downstream `loss`, and subtracts the provided
`baseline` from the `loss`.
"""
- def score_function_with_constant_baseline(dist_tensor, value, loss):
- return score_function(dist_tensor, value, loss, baseline, name)
+ def score_function_with_constant_baseline(stochastic_tensor, value, loss):
+ return score_function(stochastic_tensor, value, loss, baseline, name)
return score_function_with_constant_baseline
@@ -144,23 +144,23 @@ def get_score_function_with_baseline(baseline_fn=None, name="ScoreFunction"):
"""Score function estimator with baseline function.
Args:
- baseline_fn: callable that takes the `DistributionTensor` and the downstream
+ baseline_fn: callable that takes the `StochasticTensor` and the downstream
`loss` and returns a `Tensor` baseline to be subtracted from the `loss`.
If None, defaults to `get_mean_baseline`, which is an EMA of the loss.
name: name to prepend ops with.
Returns:
- Callable score function estimator that takes the `DistributionTensor`, the
+ Callable score function estimator that takes the `StochasticTensor`, the
sampled `value`, and the downstream `loss`, and subtracts the provided
`baseline` from the `loss`.
"""
if baseline_fn is None:
baseline_fn = get_mean_baseline()
- def score_function_with_baseline(dist_tensor, value, loss):
+ def score_function_with_baseline(stochastic_tensor, value, loss):
with ops.name_scope(name):
- b = baseline_fn(dist_tensor, loss)
- return score_function(dist_tensor, value, loss, b)
+ b = baseline_fn(stochastic_tensor, loss)
+ return score_function(stochastic_tensor, value, loss, b)
return score_function_with_baseline
@@ -178,7 +178,7 @@ def get_mean_baseline(ema_decay=0.99, name=None):
name: name for variable scope of the ExponentialMovingAverage.
Returns:
- Callable baseline function that takes the `DistributionTensor` (unused) and
+ Callable baseline function that takes the `StochasticTensor` (unused) and
the downstream `loss`, and returns an EMA of the loss.
"""
diff --git a/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py b/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py
index 749644c70f..06661059ff 100644
--- a/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py
+++ b/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py
@@ -170,10 +170,10 @@ class SampleValue(_StochasticValueType):
mu = tf.zeros((2,3))
sigma = tf.ones((2, 3))
with sg.value_type(sg.SampleValue(n=4)):
- dt = sg.DistributionTensor(
+ st = sg.StochasticTensor(
distributions.Normal, mu=mu, sigma=sigma)
# draws 4 samples each with shape (2, 3) and concatenates
- assertEqual(dt.value().get_shape(), (4, 2, 3))
+ assertEqual(st.value().get_shape(), (4, 2, 3))
```
"""
@@ -215,15 +215,15 @@ class SampleAndReshapeValue(_StochasticValueType):
sigma = tf.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
with sg.value_type(sg.SampleAndReshapeValue(n=2)):
- dt = sg.DistributionTensor(
+ st = sg.StochasticTensor(
distributions.Normal, mu=mu, sigma=sigma)
# sample(2) creates a (2, 2, 3) tensor, and the two outermost dimensions
# are reshaped into one: the final value is a (4, 3) tensor.
- dt_value = dt.value()
- assertEqual(dt_value.get_shape(), (4, 3))
+ st_value = st.value()
+ assertEqual(st_value.get_shape(), (4, 3))
- dt_value_val = sess.run([dt_value])[0] # or e.g. run([tf.identity(dt)])[0]
+ dt_value_val = sess.run([st_value])[0] # or e.g. run([tf.identity(st)])[0]
assertEqual(dt_value_val.shape, (4, 3))
```
"""
@@ -261,10 +261,10 @@ def value_type(dist_value_type):
```
with sg.value_type(sg.MeanValue(stop_gradients=True)):
- dt = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
+ st = sg.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
```
- In the example above, `dt.value()` (or equivalently, `tf.identity(dt)`) will
+ In the example above, `st.value()` (or equivalently, `tf.identity(st)`) will
be the mean value of the Normal distribution, i.e., `mu` (possibly
broadcasted to the shape of `sigma`). Furthermore, because the `MeanValue`
was marked with `stop_gradients=True`, this value will have been wrapped
@@ -343,8 +343,8 @@ class StochasticTensor(BaseStochasticTensor):
dist_value_type: a `_StochasticValueType`, which will determine what the
`value` of this `StochasticTensor` will be. If not provided, the
value type set with the `value_type` context manager will be used.
- loss_fn: callable that takes `(dt, dt.value(), influenced_loss)`, where
- `dt` is this `StochasticTensor`, and returns a `Tensor` loss. By
+ loss_fn: callable that takes `(st, st.value(), influenced_loss)`, where
+ `st` is this `StochasticTensor`, and returns a `Tensor` loss. By
default, `loss_fn` is the `score_function`, or more precisely, the
integral of the score function, such that when the gradient is taken,
the score function results. See the `stochastic_gradient_estimators`
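Putting the two files together, a sketch of passing one of the estimators from `stochastic_gradient_estimators.py` as the `loss_fn` documented just above; imports and aliases are assumed as before, and this is illustrative rather than code from the commit:

```python
import tensorflow as tf
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor as sg
from tensorflow.contrib.bayesflow.python.ops import (
    stochastic_gradient_estimators as sge)

mu = tf.zeros((2, 3))
sigma = tf.ones((2, 3))

# The surrogate loss for this StochasticTensor is built by the score-function
# estimator with a running-mean (EMA) baseline.
with sg.value_type(sg.SampleAndReshapeValue(n=2)):
  st = sg.StochasticTensor(
      distributions.Normal, mu=mu, sigma=sigma,
      loss_fn=sge.get_score_function_with_baseline())
```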
diff --git a/tensorflow/contrib/bayesflow/python/ops/variational_inference.py b/tensorflow/contrib/bayesflow/python/ops/variational_inference.py
index bbe2513a46..322f519c49 100644
--- a/tensorflow/contrib/bayesflow/python/ops/variational_inference.py
+++ b/tensorflow/contrib/bayesflow/python/ops/variational_inference.py
@@ -35,25 +35,25 @@ VI_PRIORS = "__vi_priors__"
def register_prior(variational, prior):
- """Associate a variational `DistributionTensor` with a `Distribution` prior.
+ """Associate a variational `StochasticTensor` with a `Distribution` prior.
This is a helper function used in conjunction with `elbo` that allows users
to specify the mapping between variational distributions and their priors
without having to pass in `variational_with_prior` explicitly.
Args:
- variational: `DistributionTensor` q(Z). Approximating distribution.
+ variational: `StochasticTensor` q(Z). Approximating distribution.
prior: `Distribution` p(Z). Prior distribution.
Returns:
None
Raises:
- ValueError: if variational is not a `DistributionTensor` or `prior` is not
+ ValueError: if variational is not a `StochasticTensor` or `prior` is not
a `Distribution`.
"""
if not isinstance(variational, st.StochasticTensor):
- raise TypeError("variational must be a DistributionTensor")
+ raise TypeError("variational must be a StochasticTensor")
if not isinstance(prior, distributions.Distribution):
raise TypeError("prior must be a Distribution")
ops.add_to_collection(VI_PRIORS, (variational, prior))
@@ -96,12 +96,12 @@ def elbo(log_likelihood,
Optimization objective for inference of hidden variables by variational
inference.
- This function is meant to be used in conjunction with `DistributionTensor`.
- The user should build out the inference network, using `DistributionTensor`s
+ This function is meant to be used in conjunction with `StochasticTensor`.
+ The user should build out the inference network, using `StochasticTensor`s
as latent variables, and the generative network. `elbo` at minimum needs
- `p(x|Z)` and assumes that all `DistributionTensor`s upstream of `p(x|Z)` are
+ `p(x|Z)` and assumes that all `StochasticTensor`s upstream of `p(x|Z)` are
the variational distributions. Use `register_prior` to register `Distribution`
- priors for each `DistributionTensor`. Alternatively, pass in
+ priors for each `StochasticTensor`. Alternatively, pass in
`variational_with_prior` specifying all variational distributions and their
priors.
@@ -133,8 +133,8 @@ def elbo(log_likelihood,
Args:
log_likelihood: `Tensor` log p(x|Z).
- variational_with_prior: dict from `DistributionTensor` q(Z) to
- `Distribution` p(Z). If `None`, defaults to all `DistributionTensor`
+ variational_with_prior: dict from `StochasticTensor` q(Z) to
+ `Distribution` p(Z). If `None`, defaults to all `StochasticTensor`
objects upstream of `log_likelihood` with priors registered with
`register_prior`.
keep_batch_dim: bool. Whether to keep the batch dimension when summing
@@ -149,10 +149,10 @@ def elbo(log_likelihood,
Raises:
TypeError: if variationals in `variational_with_prior` are not
- `DistributionTensor`s or if priors are not `Distribution`s.
+ `StochasticTensor`s or if priors are not `Distribution`s.
TypeError: if form is not a valid ELBOForms constant.
ValueError: if `variational_with_prior` is None and there are no
- `DistributionTensor`s upstream of `log_likelihood`.
+ `StochasticTensor`s upstream of `log_likelihood`.
ValueError: if any variational does not have a prior passed or registered.
"""
if form is None:
@@ -179,8 +179,8 @@ def elbo_with_log_joint(log_joint,
Args:
log_joint: `Tensor` log p(x, Z).
- variational: list of `DistributionTensor` q(Z). If `None`, defaults to all
- `DistributionTensor` objects upstream of `log_joint`.
+ variational: list of `StochasticTensor` q(Z). If `None`, defaults to all
+ `StochasticTensor` objects upstream of `log_joint`.
keep_batch_dim: bool. Whether to keep the batch dimension when summing
entropy term. When the sample is per data point, this should be True;
otherwise (e.g. in a Bayesian NN), this should be False.
@@ -192,9 +192,9 @@ def elbo_with_log_joint(log_joint,
`Tensor` ELBO of the same type and shape as `log_joint`.
Raises:
- TypeError: if variationals in `variational` are not `DistributionTensor`s.
+ TypeError: if variationals in `variational` are not `StochasticTensor`s.
TypeError: if form is not a valid ELBOForms constant.
- ValueError: if `variational` is None and there are no `DistributionTensor`s
+ ValueError: if `variational` is None and there are no `StochasticTensor`s
upstream of `log_joint`.
ValueError: if form is ELBOForms.analytic_kl.
"""
@@ -223,7 +223,7 @@ def _elbo(form, log_likelihood, log_joint, variational_with_prior,
form: ELBOForms constant. Controls how the ELBO is computed.
log_likelihood: `Tensor` log p(x|Z).
log_joint: `Tensor` log p(x, Z).
- variational_with_prior: `dict<DistributionTensor, Distribution>`, varational
+ variational_with_prior: `dict<StochasticTensor, Distribution>`, varational
distributions to prior distributions.
keep_batch_dim: bool. Whether to keep the batch dimension when reducing
the entropy/KL.
@@ -294,7 +294,7 @@ def _elbo(form, log_likelihood, log_joint, variational_with_prior,
def _find_variational_and_priors(model,
variational_with_prior,
require_prior=True):
- """Find upstream DistributionTensors and match with registered priors."""
+ """Find upstream StochasticTensors and match with registered priors."""
if variational_with_prior is None:
# pylint: disable=protected-access
upstreams = sg._upstream_stochastic_nodes([model])
@@ -307,12 +307,12 @@ def _find_variational_and_priors(model,
variational_with_prior = {}
for q in upstreams:
if require_prior and (q not in prior_map or prior_map[q] is None):
- raise ValueError("No prior specified for DistributionTensor: %s", q)
+ raise ValueError("No prior specified for StochasticTensor: %s", q)
variational_with_prior[q] = prior_map.get(q)
if not all(
[isinstance(q, st.StochasticTensor) for q in variational_with_prior]):
- raise TypeError("variationals must be DistributionTensors")
+ raise TypeError("variationals must be StochasticTensors")
if not all([p is None or isinstance(p, distributions.Distribution)
for p in variational_with_prior.values()]):
raise TypeError("priors must be Distributions")