path: root/tensorflow/contrib/seq2seq
author     Anna R <annarev@google.com>  2018-03-28 16:52:39 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-03-28 16:55:15 -0700
commit  108178da2a20ea2d3899417ee932d46ba1a5c652 (patch)
tree    313bd8cec176f8c9ef67b25c6484a650d1f2092a /tensorflow/contrib/seq2seq
parent  390e19ab990f5656e09d98624c92b3c80e52937d (diff)
Automated g4 rollback of changelist 190835392
PiperOrigin-RevId: 190858242
Diffstat (limited to 'tensorflow/contrib/seq2seq')
-rw-r--r--  tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc         | 2
-rw-r--r--  tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py    | 8
-rw-r--r--  tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py  | 6
3 files changed, 8 insertions, 8 deletions
diff --git a/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc b/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc
index a9a32b7b25..dfa12e873a 100644
--- a/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc
+++ b/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc
@@ -74,7 +74,7 @@ class GatherTreeOp : public OpKernel {
ctx,
step_ids_shape.dim_size(1) == max_sequence_lengths.shape().dim_size(0),
errors::InvalidArgument("batch size dimensions step_ids.shape[1] and "
- "max_sequence_lengths.shape[0] must match. "
+ "max_seqeuence_lengths.shape[0] must match. "
"but shapes are: ",
step_ids_shape.DebugString(), " and ",
max_sequence_lengths.shape().DebugString()));
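For reference, the check above requires the batch dimension of step_ids (shaped [max_time, batch_size, beam_width]) to agree with max_sequence_lengths (shaped [batch_size]). A minimal Python sketch of that constraint, with illustrative shapes that are assumptions rather than anything taken from this commit:

    import numpy as np

    # Assumed example shapes: step_ids is [max_time, batch_size, beam_width],
    # max_sequence_lengths is [batch_size].
    max_time, batch_size, beam_width = 7, 4, 3
    step_ids = np.zeros((max_time, batch_size, beam_width), dtype=np.int32)
    max_sequence_lengths = np.full((batch_size,), max_time, dtype=np.int32)

    # Mirrors the OP_REQUIRES check: the batch-size dimensions must match.
    assert step_ids.shape[1] == max_sequence_lengths.shape[0], (
        "batch size dimensions step_ids.shape[1] and "
        "max_sequence_lengths.shape[0] must match")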
diff --git a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
index be53779826..9ff8a343f1 100644
--- a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
+++ b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
@@ -736,7 +736,7 @@ class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
- distribution, which is needed in order for the monotonic attention
+ distribution,which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
@@ -763,7 +763,7 @@ class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
- This type of attention enforces a monotonic constraint on the attention
+ This type of attention encorces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
@@ -867,7 +867,7 @@ class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
- This type of attention enforces a monotonic constraint on the attention
+ This type of attention encorces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
@@ -1133,7 +1133,7 @@ class AttentionWrapper(rnn_cell_impl.RNNCell):
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
- the output of `cell`. This is the behavior of Bhadanau-style
+ the output of `cell`. This is the beahvior of Bhadanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
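The output_attention flag and the monotonic mechanisms documented in the hunks above can be combined roughly as follows; a minimal TF 1.x usage sketch (unit sizes, shapes, and the placeholder inputs are assumptions, not part of this change):

    import tensorflow as tf

    # Assumed encoder outputs [batch, max_time, depth] and source lengths.
    encoder_outputs = tf.placeholder(tf.float32, [None, 50, 256])
    source_sequence_length = tf.placeholder(tf.int32, [None])

    attention_mechanism = tf.contrib.seq2seq.BahdanauMonotonicAttention(
        num_units=256,
        memory=encoder_outputs,
        memory_sequence_length=source_sequence_length)

    decoder_cell = tf.contrib.seq2seq.AttentionWrapper(
        tf.nn.rnn_cell.LSTMCell(256),
        attention_mechanism,
        attention_layer_size=256,
        # False: emit the cell output each step (Bahdanau-style); the default
        # True emits the attention value (Luong-style), per the docstring above.
        output_attention=False)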
diff --git a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
index 184144f64a..a26107b0d7 100644
--- a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
+++ b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
@@ -821,9 +821,9 @@ def _get_scores(log_probs, sequence_lengths, length_penalty_weight):
Returns:
The scores normalized by the length_penalty.
"""
- length_penalty_ = _length_penalty(
+ length_penality_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
- return log_probs / length_penalty_
+ return log_probs / length_penality_
def _length_penalty(sequence_lengths, penalty_factor):
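For orientation, _get_scores divides the accumulated log-probabilities by the value returned from _length_penalty. A rough standalone sketch of that normalization, assuming the (5 + length)/6 form of Wu et al. (2016) that _length_penalty follows (the constants here are an assumption; the function in this file is authoritative):

    import numpy as np

    def length_penalty(sequence_lengths, penalty_factor):
        # With a zero penalty factor, scores are the raw log-probabilities.
        if penalty_factor == 0.0:
            return 1.0
        return ((5.0 + sequence_lengths.astype(np.float32)) / 6.0) ** penalty_factor

    def get_scores(log_probs, sequence_lengths, length_penalty_weight):
        # Dividing by a length-dependent penalty keeps beam search from
        # systematically preferring shorter hypotheses.
        return log_probs / length_penalty(sequence_lengths, length_penalty_weight)

    print(get_scores(np.array([-3.2, -9.0]), np.array([4, 12]), 0.6))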
@@ -860,7 +860,7 @@ def _mask_probs(probs, eos_token, finished):
unfinished beams remain unchanged.
Args:
- probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
+ probs: Log probabiltiies of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that