diff options
author | 2018-07-02 09:22:21 -0700 | |
---|---|---|
committer | 2018-07-02 09:25:15 -0700 | |
commit | 28b8525b417d5b0a1d0a4905e5e3237ef5b502ef (patch) | |
tree | 4f6fad8ae9386e78afa5e2e74d89cf42b1ab9aba | |
parent | e0c1283d99ca46b883789fa0c116f1b153af7c4c (diff) |
Docstring grammar tweak.
PiperOrigin-RevId: 202961895
-rw-r--r-- | tensorflow/python/ops/distributions/distribution.py | 2 |
1 file changed, 1 insertion, 1 deletion
diff --git a/tensorflow/python/ops/distributions/distribution.py b/tensorflow/python/ops/distributions/distribution.py index 41dcd40188..c03ef967e6 100644 --- a/tensorflow/python/ops/distributions/distribution.py +++ b/tensorflow/python/ops/distributions/distribution.py @@ -212,7 +212,7 @@ class ReparameterizationType(object): reparameterized, and straight-through gradients are either partially unsupported or are not supported at all. In this case, for purposes of e.g. RL or variational inference, it is generally safest to wrap the - sample results in a `stop_gradients` call and instead use policy + sample results in a `stop_gradients` call and use policy gradients / surrogate loss instead. """ |