author     Yifei Feng <yifeif@google.com>          2018-04-23 21:19:14 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-04-23 21:21:38 -0700
commit     22f3a97b8b089202f60bb0c7697feb0c8e0713cc (patch)
tree       d16f95826e4be15bbb3b0f22bed0ca25d3eb5897 /tensorflow/contrib/bayesflow
parent     24b7c9a800ab5086d45a7d83ebcd6218424dc9e3 (diff)
Merge changes from github.
PiperOrigin-RevId: 194031845
Diffstat (limited to 'tensorflow/contrib/bayesflow')
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py  26
1 file changed, 10 insertions(+), 16 deletions(-)
diff --git a/tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py b/tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py
index d193a8459d..032b859d46 100644
--- a/tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py
+++ b/tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py
@@ -44,15 +44,13 @@ def expectation_importance_sampler(f,
n=None,
seed=None,
name='expectation_importance_sampler'):
- r"""Monte Carlo estimate of `\\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\)`.
+ r"""Monte Carlo estimate of \\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\).
- With `\\(p(z) := exp^{log_p(z)}\\)`, this `Op` returns
+ With \\(p(z) := exp^{log_p(z)}\\), this `Op` returns
- ```
\\(n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i ~ q,\\)
\\(\approx E_q[ f(Z) p(Z) / q(Z) ]\\)
\\(= E_p[f(Z)]\\)
- ```
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
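As a quick illustration of the estimator this docstring describes, here is a minimal NumPy/SciPy sketch (not the library code; the densities, integrand, and sample count are invented for the example):

```python
# Minimal sketch of E_p[f(Z)] ~= n^{-1} sum_i f(z_i) p(z_i) / q(z_i), z_i ~ q.
import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
f = lambda z: z ** 2                       # integrand; E_p[Z^2] = 1 + 1 = 2
p = stats.norm(loc=1.0, scale=1.0)         # target density p
q = stats.norm(loc=0.0, scale=2.0)         # proposal q, heavier-tailed than p

z = q.rvs(size=100_000, random_state=rng)  # z_i ~ q
w = p.pdf(z) / q.pdf(z)                    # importance weights p(z_i) / q(z_i)
print(np.mean(f(z) * w))                   # ~= 2.0
```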
@@ -121,14 +119,12 @@ def expectation_importance_sampler_logspace(
name='expectation_importance_sampler_logspace'):
r"""Importance sampling with a positive function, in log-space.
- With `\\(p(z) := exp^{log_p(z)}\\)`, and `\\(f(z) = exp{log_f(z)}\\)`,
+ With \\(p(z) := exp^{log_p(z)}\\), and \\(f(z) = exp{log_f(z)}\\),
this `Op` returns
- ```
\\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\)
\\(\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\)
\\(= Log[E_p[f(Z)]]\\)
- ```
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
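The max-subtraction the docstring mentions is the standard log-sum-exp stabilization. A minimal NumPy sketch with invented inputs:

```python
# log(mean(exp(w))) computed stably, where w_i = log_f(z_i) + log_p(z_i)
# - log_q(z_i) can be extremely large or small.
import numpy as np

def logspace_mean(w):
    """log(mean(exp(w))), computed without overflowing exp."""
    m = np.max(w)                        # subtract the max before exponentiating
    return m + np.log(np.mean(np.exp(w - m)))

w = np.array([1000.0, 1001.0, 999.0])    # exp(w) overflows float64 directly
print(logspace_mean(w))                  # ~= 1000.31, computed stably
```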
@@ -196,13 +192,11 @@ def _logspace_mean(log_values):
def expectation(f, samples, log_prob=None, use_reparametrization=True,
axis=0, keep_dims=False, name=None):
- """Computes the Monte-Carlo approximation of `\\(E_p[f(X)]\\)`.
+ """Computes the Monte-Carlo approximation of \\(E_p[f(X)]\\).
This function computes the Monte-Carlo approximation of an expectation, i.e.,
- ```none
\\(E_p[f(X)] \approx m^{-1} sum_{j=1}^m f(x_j), x_j\ ~iid\ p(X)\\)
- ```
where:
@@ -216,8 +210,8 @@ def expectation(f, samples, log_prob=None, use_reparametrization=True,
parameterless distribution (e.g.,
`Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and
expectation, i.e.,
- `grad[ Avg{ \\(s_i : i=1...n\\) } ] = Avg{ grad[\\(s_i\\)] : i=1...n }` where
- `S_n = Avg{\\(s_i\\)}` and `\\(s_i = f(x_i), x_i ~ p\\)`.
+ grad[ Avg{ \\(s_i : i=1...n\\) } ] = Avg{ grad[\\(s_i\\)] : i=1...n } where
+ S_n = Avg{\\(s_i\\)} and \\(s_i = f(x_i), x_i ~ p\\).
However, if p is not reparameterized, TensorFlow's gradient will be incorrect
since the chain-rule stops at samples of non-reparameterized distributions.
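A hedged sketch of the reparameterization identity quoted above, in plain NumPy with invented values for `m`, `s`, and `f`; it checks that averaging per-sample gradients of `f(s*x + m)` recovers d/dm E[f(Y)]:

```python
# Y ~ Normal(m, s)  <=>  Y = s*X + m,  X ~ Normal(0, 1), so
# d/dm E[f(Y)] ~= Avg{ d/dm f(s*x_i + m) }.
import numpy as np

rng = np.random.RandomState(0)
m, s = 1.5, 0.5
f_prime = lambda y: 2.0 * y          # f(y) = y**2, so E[f(Y)] = m**2 + s**2

x = rng.randn(1_000_000)             # x_i ~ Normal(0, 1), parameter-free
y = s * x + m                        # reparameterized samples of Normal(m, s)
grads = f_prime(y) * 1.0             # chain rule: f'(y) * dy/dm, and dy/dm = 1
print(np.mean(grads))                # ~= d/dm (m**2 + s**2) = 2*m = 3.0
```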
@@ -296,7 +290,7 @@ def expectation(f, samples, log_prob=None, use_reparametrization=True,
Args:
f: Python callable which can return `f(samples)`.
samples: `Tensor` of samples used to form the Monte-Carlo approximation of
- `\\(E_p[f(X)]\\)`. A batch of samples should be indexed by `axis`
+ \\(E_p[f(X)]\\). A batch of samples should be indexed by `axis`
dimensions.
log_prob: Python callable which can return `log_prob(samples)`. Must
correspond to the natural-logarithm of the pdf/pmf of each sample. Only
@@ -317,7 +311,7 @@ def expectation(f, samples, log_prob=None, use_reparametrization=True,
Returns:
approx_expectation: `Tensor` corresponding to the Monte-Carlo approximation
- of `\\(E_p[f(X)]\\)`.
+ of \\(E_p[f(X)]\\).
Raises:
ValueError: if `f` is not a Python `callable`.
@@ -329,7 +323,7 @@ def expectation(f, samples, log_prob=None, use_reparametrization=True,
if not callable(f):
raise ValueError('`f` must be a callable function.')
if use_reparametrization:
- return math_ops.reduce_mean(f(samples), axis=axis, keep_dims=keep_dims)
+ return math_ops.reduce_mean(f(samples), axis=axis, keepdims=keep_dims)
else:
if not callable(log_prob):
raise ValueError('`log_prob` must be a callable function.')
@@ -349,7 +343,7 @@ def expectation(f, samples, log_prob=None, use_reparametrization=True,
# "Is there a floating point value of x, for which x-x == 0 is false?"
# http://stackoverflow.com/q/2686644
fx += stop(fx) * (logpx - stop(logpx)) # Add zeros_like(logpx).
- return math_ops.reduce_mean(fx, axis=axis, keep_dims=keep_dims)
+ return math_ops.reduce_mean(fx, axis=axis, keepdims=keep_dims)
def _sample_mean(values):
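For context on the `fx += stop(fx) * (logpx - stop(logpx))` line in the hunk above: the added term is identically zero in value, but its gradient with respect to the distribution parameters is `fx * d(logpx)`, which is exactly the score-function estimator. A small sketch of the same trick, written against the modern `tf.GradientTape` API (an assumption for brevity; the contrib code is graph-mode TF 1.x) with a made-up `logpx`:

```python
import tensorflow as tf

theta = tf.Variable(1.0)
x = tf.constant([0.3, -1.2, 0.7])        # pretend these were sampled from p

with tf.GradientTape() as tape:
    logpx = -0.5 * (x - theta) ** 2      # stand-in log p(x; theta)
    fx = x ** 2                          # f(x) has no direct theta dependence
    # Value unchanged (the added term is exactly zero), but the gradient
    # gains fx * d(logpx)/d(theta): the score-function estimator.
    fx += tf.stop_gradient(fx) * (logpx - tf.stop_gradient(logpx))
    loss = tf.reduce_mean(fx)

print(tape.gradient(loss, theta))        # ~= mean(f(x) * (x - theta))
```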