author    Jianwei Xie <xiejw@google.com>  2018-03-28 14:36:18 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-03-28 14:38:46 -0700
commit    e97c9e91e016efd951dc52e82744f607d948bb2a (patch)
tree      e98e3a2aaec29758533b3c331140b464ff6ce50e /tensorflow/contrib/seq2seq
parent    ef6552b544b3c3bf6808be807b30dd9bd4f19669 (diff)
Merge changes from github.
PiperOrigin-RevId: 190835392
Diffstat (limited to 'tensorflow/contrib/seq2seq')
-rw-r--r--  tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc         2
-rw-r--r--  tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py    8
-rw-r--r--  tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py  6
3 files changed, 8 insertions, 8 deletions
diff --git a/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc b/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc
index dfa12e873a..a9a32b7b25 100644
--- a/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc
+++ b/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc
@@ -74,7 +74,7 @@ class GatherTreeOp : public OpKernel {
ctx,
step_ids_shape.dim_size(1) == max_sequence_lengths.shape().dim_size(0),
errors::InvalidArgument("batch size dimensions step_ids.shape[1] and "
- "max_seqeuence_lengths.shape[0] must match. "
+ "max_sequence_lengths.shape[0] must match. "
"but shapes are: ",
step_ids_shape.DebugString(), " and ",
max_sequence_lengths.shape().DebugString()));
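The corrected error message describes the shape contract GatherTreeOp checks: `step_ids` is `[max_time, batch_size, beam_width]` and `max_sequence_lengths` is `[batch_size]`, so `step_ids.shape[1]` must equal `max_sequence_lengths.shape[0]`. A minimal sketch of that contract, assuming the TF 1.x `tf.contrib.seq2seq.gather_tree` Python wrapper (not part of this commit):

```python
# Hedged sketch of the shape requirement checked by GatherTreeOp.
# Assumes the TF 1.x contrib API; argument names may differ across versions.
import numpy as np
import tensorflow as tf

max_time, batch_size, beam_width = 3, 2, 2
# step_ids: [max_time, batch_size, beam_width]
step_ids = tf.constant(
    np.random.randint(1, 9, size=(max_time, batch_size, beam_width)),
    dtype=tf.int32)
parent_ids = tf.zeros_like(step_ids)  # every beam traces back to beam 0
# max_sequence_lengths: [batch_size] -- must match step_ids.shape[1]
max_sequence_lengths = tf.constant([max_time] * batch_size, dtype=tf.int32)

beams = tf.contrib.seq2seq.gather_tree(
    step_ids=step_ids,
    parent_ids=parent_ids,
    max_sequence_lengths=max_sequence_lengths,
    end_token=0)

with tf.Session() as sess:
    print(sess.run(beams))  # [max_time, batch_size, beam_width]
```

If the batch dimensions disagree, the op raises the InvalidArgument error whose message is fixed in the hunk above.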
diff --git a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
index 9ff8a343f1..be53779826 100644
--- a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
+++ b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
@@ -736,7 +736,7 @@ class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
- distribution,which is needed in order for the monotonic attention
+ distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
@@ -763,7 +763,7 @@ class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
- This type of attention encorces a monotonic constraint on the attention
+ This type of attention enforces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
@@ -867,7 +867,7 @@ class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
- This type of attention encorces a monotonic constraint on the attention
+ This type of attention enforces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
@@ -1133,7 +1133,7 @@ class AttentionWrapper(rnn_cell_impl.RNNCell):
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
- the output of `cell`. This is the beahvior of Bhadanau-style
+ the output of `cell`. This is the behavior of Bhadanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
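The `output_attention` docstring touched above distinguishes Luong-style wrappers (emit the attention vector at each step) from Bahdanau-style wrappers (emit the cell output while attention rides along in the state). A hedged usage sketch, assuming the TF 1.x contrib API; the placeholder shapes are illustrative only:

```python
# Hedged sketch: output_attention=False makes the wrapper emit the underlying
# cell's output (Bahdanau-style); the attention tensor is still propagated via
# the wrapper state to the next time step. TF 1.x contrib API assumed.
import tensorflow as tf

memory = tf.placeholder(tf.float32, [None, 20, 128])  # [batch, max_time, depth]
cell = tf.nn.rnn_cell.LSTMCell(256)
mechanism = tf.contrib.seq2seq.BahdanauAttention(num_units=128, memory=memory)

attn_cell = tf.contrib.seq2seq.AttentionWrapper(
    cell, mechanism,
    attention_layer_size=128,
    output_attention=False)  # True (default) would emit the attention value instead
```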
diff --git a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
index a26107b0d7..184144f64a 100644
--- a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
+++ b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
@@ -821,9 +821,9 @@ def _get_scores(log_probs, sequence_lengths, length_penalty_weight):
Returns:
The scores normalized by the length_penalty.
"""
- length_penality_ = _length_penalty(
+ length_penalty_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
- return log_probs / length_penality_
+ return log_probs / length_penalty_
def _length_penalty(sequence_lengths, penalty_factor):
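For reference, the normalization `_get_scores` performs can be sketched in NumPy. This assumes the GNMT-style penalty `((5 + len) / 6) ** alpha` used by `_length_penalty` in this file; it is an illustration, not the TF implementation:

```python
# NumPy sketch of score normalization by length penalty (assumed GNMT-style).
import numpy as np

def length_penalty(sequence_lengths, penalty_factor):
    # penalty_factor == 0 disables normalization (penalty of 1.0).
    if penalty_factor == 0:
        return np.ones_like(sequence_lengths, dtype=np.float32)
    return ((5.0 + sequence_lengths.astype(np.float32)) ** penalty_factor
            / (5.0 + 1.0) ** penalty_factor)

log_probs = np.array([[-4.2, -6.0]])    # [batch_size, beam_width] cumulative log probs
sequence_lengths = np.array([[7, 12]])  # length of each beam
scores = log_probs / length_penalty(sequence_lengths, penalty_factor=0.6)
print(scores)  # the longer beam is divided by a larger penalty
```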
@@ -860,7 +860,7 @@ def _mask_probs(probs, eos_token, finished):
unfinished beams remain unchanged.
Args:
- probs: Log probabiltiies of shape `[batch_size, beam_width, vocab_size]`
+ probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
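Per the `_mask_probs` docstring corrected above, finished beams have all probability mass reassigned to the EOS token while unfinished beams are untouched. A NumPy sketch of that behavior (not the TF implementation; `mask_probs` here is a hypothetical stand-in):

```python
# Hedged NumPy sketch of the masking behavior described in _mask_probs' docstring.
import numpy as np

def mask_probs(log_probs, eos_token, finished):
    # log_probs: [batch_size, beam_width, vocab_size] log probabilities
    # finished:  [batch_size, beam_width] boolean
    vocab_size = log_probs.shape[-1]
    # Row for finished beams: 0 (= log 1) at the EOS id, -inf everywhere else.
    finished_row = np.full(vocab_size, -np.inf, dtype=log_probs.dtype)
    finished_row[eos_token] = 0.0
    return np.where(finished[..., None], finished_row, log_probs)

log_probs = np.log(np.full((1, 2, 4), 0.25))  # uniform over a 4-token vocab
finished = np.array([[True, False]])
print(mask_probs(log_probs, eos_token=3, finished=finished))
# Beam 0 collapses onto the EOS token; beam 1 is unchanged.
```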