author    brett koonce <koonce@hello.com>  2018-03-27 09:24:40 -0700
committer Rasmus Munk Larsen <rmlarsen@google.com>  2018-03-27 09:24:40 -0700
commit    2ad47da4fb9896290eb9bc87fe809a4138269f2c (patch)
tree      9170733c0a5ffeef67a8c420ed553b3dd935ca40
parent    e5dcaf921cf9feefd42b2ab176590c696b3b0285 (diff)
Seq2seq minorsp (#18010)
* contrib/seq2seq: minor spelling tweaks
* contrib/timeseries: minor spelling tweaks
* contrib/slim: minor spelling tweaks
 tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc | 2 +-
 tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py | 8 ++++----
 tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py | 6 +++---
 tensorflow/contrib/slim/python/slim/data/parallel_reader.py | 4 ++--
 tensorflow/contrib/slim/python/slim/data/prefetch_queue.py | 4 ++--
 tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py | 2 +-
 tensorflow/contrib/timeseries/python/timeseries/ar_model.py | 2 +-
 tensorflow/contrib/timeseries/python/timeseries/math_utils.py | 2 +-
 tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py | 4 ++--
 9 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc b/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc
index dfa12e873a..a9a32b7b25 100644
--- a/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc
+++ b/tensorflow/contrib/seq2seq/kernels/beam_search_ops.cc
@@ -74,7 +74,7 @@ class GatherTreeOp : public OpKernel {
ctx,
step_ids_shape.dim_size(1) == max_sequence_lengths.shape().dim_size(0),
errors::InvalidArgument("batch size dimensions step_ids.shape[1] and "
- "max_seqeuence_lengths.shape[0] must match. "
+ "max_sequence_lengths.shape[0] must match. "
"but shapes are: ",
step_ids_shape.DebugString(), " and ",
max_sequence_lengths.shape().DebugString()));
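This check guards the op's assumption that `step_ids` and `max_sequence_lengths` describe the same batch. For context, a minimal NumPy sketch of what `GatherTreeOp` computes — walking each beam backwards through its parent pointers — simplified from the kernel; the traversal details are a conceptual reconstruction, not the exact implementation:

```python
import numpy as np

def gather_tree(step_ids, parent_ids, max_sequence_lengths, end_token):
    # step_ids, parent_ids: [max_time, batch_size, beam_width];
    # max_sequence_lengths: [batch_size] -- hence the shape check above.
    max_time, batch_size, beam_width = step_ids.shape
    beams = np.full_like(step_ids, end_token)
    for b in range(batch_size):
        for w in range(beam_width):
            parent = w
            # Reconstruct the beam by following parent ids backwards.
            for t in range(max_sequence_lengths[b] - 1, -1, -1):
                beams[t, b, w] = step_ids[t, b, parent]
                parent = parent_ids[t, b, parent]
    return beams
```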
diff --git a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
index 9ff8a343f1..be53779826 100644
--- a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
+++ b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
@@ -736,7 +736,7 @@ class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a Dirac
- distribution,which is needed in order for the monotonic attention
+ distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
@@ -763,7 +763,7 @@ class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
- This type of attention encorces a monotonic constraint on the attention
+ This type of attention enforces a monotonic constraint on the attention
distributions; that is, once the model attends to a given point in the memory
it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
@@ -867,7 +867,7 @@ class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
- This type of attention encorces a monotonic constraint on the attention
+ This type of attention enforces a monotonic constraint on the attention
distributions; that is, once the model attends to a given point in the memory
it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
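Both monotonic mechanisms share the same idea: attention may only stay put or move right. A NumPy sketch of the expected attention under this hard monotonic process, in the spirit of what `_monotonic_probability_fn` substitutes for softmax (following Raffel et al. 2017; this is a conceptual illustration, not the module's exact code):

```python
import numpy as np

def expected_monotonic_attention(p_choose, prev_attention):
    # p_choose[j]: probability of stopping at memory index j;
    # prev_attention[i]: probability index i was attended last step.
    # Scanning forward from i, index j is chosen only if i..j-1 were
    # all skipped and j accepts, so attention never moves left.
    max_time = len(p_choose)
    attention = np.zeros(max_time)
    for i in range(max_time):
        skip = prev_attention[i]
        for j in range(i, max_time):
            attention[j] += skip * p_choose[j]
            skip *= 1.0 - p_choose[j]
    return attention
```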
@@ -1133,7 +1133,7 @@ class AttentionWrapper(rnn_cell_impl.RNNCell):
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
- the output of `cell`. This is the beahvior of Bhadanau-style
+ the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
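The flag simply selects which tensor becomes the wrapper's output at each step. A schematic sketch of the step logic described above (the call signatures and state layout here are simplified assumptions, not the wrapper's actual `call`):

```python
def attention_wrapper_step(cell, attention_mechanism, inputs, state,
                           output_attention=True):
    # Run the wrapped cell, then compute attention over the memory.
    cell_output, next_cell_state = cell(inputs, state.cell_state)
    attention, alignments = attention_mechanism(cell_output,
                                                state.alignments)
    # Luong style emits the attention; Bahdanau style emits the cell
    # output. Either way, `attention` is carried in the next state.
    output = attention if output_attention else cell_output
    next_state = (next_cell_state, attention, alignments)  # simplified
    return output, next_state
```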
diff --git a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
index a26107b0d7..184144f64a 100644
--- a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
+++ b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
@@ -821,9 +821,9 @@ def _get_scores(log_probs, sequence_lengths, length_penalty_weight):
Returns:
The scores normalized by the length_penalty.
"""
- length_penality_ = _length_penalty(
+ length_penalty_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
- return log_probs / length_penality_
+ return log_probs / length_penalty_
def _length_penalty(sequence_lengths, penalty_factor):
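For reference, the normalization being renamed here divides log probabilities by a length penalty; a minimal sketch using the GNMT-style formula ((5 + length) / 6) ** alpha commonly associated with this decoder (treat the exact formula as an assumption):

```python
import numpy as np

def length_penalty(sequence_lengths, penalty_factor):
    # GNMT-style penalty (Wu et al. 2016); assumed formula.
    # penalty_factor == 0 disables length normalization entirely.
    if penalty_factor == 0.0:
        return 1.0
    lengths = np.asarray(sequence_lengths, dtype=np.float64)
    return ((5.0 + lengths) / 6.0) ** penalty_factor

def get_scores(log_probs, sequence_lengths, penalty_factor):
    # A higher penalty_factor favors longer beams.
    return log_probs / length_penalty(sequence_lengths, penalty_factor)
```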
@@ -860,7 +860,7 @@ def _mask_probs(probs, eos_token, finished):
unfinished beams remain unchanged.
Args:
- probs: Log probabiltiies of shape `[batch_size, beam_width, vocab_size]`
+ probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
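A NumPy sketch of the masking this docstring describes: finished beams get all their probability mass on the EOS token (log prob 0) and effectively none elsewhere, while unfinished beams pass through unchanged (a simplified reconstruction, not the module's exact code):

```python
import numpy as np

def mask_probs(log_probs, eos_token, finished):
    # log_probs: [batch_size, beam_width, vocab_size];
    # finished:  [batch_size, beam_width] boolean.
    masked = np.where(finished[..., np.newaxis],
                      np.full_like(log_probs, -np.inf), log_probs)
    # Give finished beams all their mass on EOS (log prob 0).
    masked[..., eos_token] = np.where(finished, 0.0,
                                      log_probs[..., eos_token])
    return masked
```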
diff --git a/tensorflow/contrib/slim/python/slim/data/parallel_reader.py b/tensorflow/contrib/slim/python/slim/data/parallel_reader.py
index b3343aef47..99ad487630 100644
--- a/tensorflow/contrib/slim/python/slim/data/parallel_reader.py
+++ b/tensorflow/contrib/slim/python/slim/data/parallel_reader.py
@@ -115,8 +115,8 @@ class ParallelReader(io_ops.ReaderBase):
reader needs to start reading from a new file since it has finished with
the previous file).
- A queue runner for enqueing in the `common_queue` is automatically added to
- the TF QueueRunners collection.
+ A queue runner for enqueuing in the `common_queue` is automatically added
+ to the TF QueueRunners collection.
Args:
queue: A Queue or a mutable string Tensor representing a handle
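For context, registering a queue runner in the TF 1.x API looks like the following (a generic sketch of the pattern the docstring refers to, not `ParallelReader`'s internals):

```python
import tensorflow as tf  # TF 1.x API, as used by tf.contrib.slim

def register_common_queue_runner(common_queue, enqueue_ops):
    # Adds a runner to the QueueRunners collection so that
    # tf.train.start_queue_runners() spins up threads that keep
    # `common_queue` filled.
    runner = tf.train.QueueRunner(common_queue, enqueue_ops)
    tf.train.add_queue_runner(runner)
    return runner
```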
diff --git a/tensorflow/contrib/slim/python/slim/data/prefetch_queue.py b/tensorflow/contrib/slim/python/slim/data/prefetch_queue.py
index 37e9c4754c..62bd200361 100644
--- a/tensorflow/contrib/slim/python/slim/data/prefetch_queue.py
+++ b/tensorflow/contrib/slim/python/slim/data/prefetch_queue.py
@@ -36,9 +36,9 @@ def prefetch_queue(tensors,
dynamic_pad=False,
shared_name=None,
name=None):
- """Creates a queue to prefetech tensors from `tensors`.
+ """Creates a queue to prefetch tensors from `tensors`.
- A queue runner for enqueing tensors into the prefetch_queue is automatically
+ A queue runner for enqueuing tensors into the prefetch_queue is automatically
added to the TF QueueRunners collection.
Example:
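The example body is cut off by the diff context; a usage sketch consistent with the docstring (the placeholder tensors and import path are assumptions):

```python
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import prefetch_queue

# Hypothetical single-example tensors standing in for a real pipeline.
image = tf.zeros([224, 224, 3])
label = tf.zeros([], dtype=tf.int64)

images, labels = tf.train.batch([image, label], batch_size=32)
# The prefetch queue's runner is registered automatically, as the
# docstring above notes.
batch_queue = prefetch_queue.prefetch_queue([images, labels], capacity=4)
images, labels = batch_queue.dequeue()
```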
diff --git a/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py b/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
index b3b61e1dfe..f2d31dc8db 100644
--- a/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
+++ b/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
@@ -124,7 +124,7 @@ class BoundingBox(ItemHandler):
super(BoundingBox, self).__init__(self._full_keys)
def tensors_to_item(self, keys_to_tensors):
- """Maps the given dictionary of tensors to a contatenated list of bboxes.
+ """Maps the given dictionary of tensors to a concatenated list of bboxes.
Args:
keys_to_tensors: a mapping of TF-Example keys to parsed tensors.
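A sketch of the concatenation being described: four per-side tensors are joined into one [num_boxes, 4] bbox tensor (a NumPy reconstruction of the idea; the ymin/xmin/ymax/xmax key names are the conventional ones and assumed here):

```python
import numpy as np

def bboxes_from_sides(keys_to_tensors,
                      keys=('ymin', 'xmin', 'ymax', 'xmax')):
    # Each side tensor has shape [num_boxes]; stacking along the last
    # axis yields the concatenated [num_boxes, 4] list of bboxes.
    sides = [np.asarray(keys_to_tensors[k]) for k in keys]
    return np.stack(sides, axis=-1)
```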
diff --git a/tensorflow/contrib/timeseries/python/timeseries/ar_model.py b/tensorflow/contrib/timeseries/python/timeseries/ar_model.py
index ff140efd48..4f6527a546 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/ar_model.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/ar_model.py
@@ -70,7 +70,7 @@ class ARModel(model.TimeSeriesModel):
input_window_size: Number of past time steps of data to look at when doing
the regression.
output_window_size: Number of future time steps to predict. Note that
- setting it to > 1 empiricaly seems to give a better fit.
+ setting it to > 1 empirically seems to give a better fit.
num_features: number of input features per time step.
num_time_buckets: Number of buckets into which to divide (time %
periodicity) for generating time based features.
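The two window sizes slice each training example into a lookback block and a prediction block; a sketch of that slicing (an illustrative helper, not part of `ARModel`):

```python
import numpy as np

def make_windows(series, input_window_size, output_window_size):
    # series: [num_steps, num_features]. Yields (past, future) pairs;
    # the model regresses `future` on `past`, and output windows
    # longer than 1 empirically tend to fit better (per the docstring).
    total = input_window_size + output_window_size
    for start in range(len(series) - total + 1):
        past = series[start:start + input_window_size]
        future = series[start + input_window_size:start + total]
        yield past, future
```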
diff --git a/tensorflow/contrib/timeseries/python/timeseries/math_utils.py b/tensorflow/contrib/timeseries/python/timeseries/math_utils.py
index 23452a81c3..26793c80bf 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/math_utils.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/math_utils.py
@@ -185,7 +185,7 @@ def batch_matrix_pow(matrices, powers):
{ matmul(A, power(matmul(A, A), (p - 1) / 2)) for odd p
power(A, 0) = I
- The power(A, 0) = I case is handeled by starting with accumulator set to the
+ The power(A, 0) = I case is handled by starting with accumulator set to the
identity matrix; matrices with zero residual powers are passed through
unchanged.
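The recurrence in this docstring is exponentiation by squaring; a plain NumPy version for a single matrix (the batched TF implementation applies the same accumulator logic per batch element):

```python
import numpy as np

def matrix_pow(matrix, power):
    # Exponentiation by squaring. Starting the accumulator at the
    # identity is exactly how the power(A, 0) = I case is handled.
    result = np.eye(matrix.shape[0])
    while power > 0:
        if power % 2 == 1:
            result = result @ matrix
        matrix = matrix @ matrix
        power //= 2
    return result
```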
diff --git a/tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py b/tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py
index 1afc58cfb2..6746dd7b43 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py
@@ -107,7 +107,7 @@ class VARMA(state_space_model.StateSpaceModel):
Returns:
the state transition matrix. It has shape
- [self.state_dimendion, self.state_dimension].
+ [self.state_dimension, self.state_dimension].
"""
# Pad any unused AR blocks with zeros. The extra state is necessary if
# ma_order >= ar_order.
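For intuition, the AR part of such a transition matrix is the usual block companion form, with unused blocks zero-padded as the comment says; a sketch (simplified: it ignores the extra MA state the real model must also accommodate):

```python
import numpy as np

def ar_transition_matrix(ar_coefs, num_blocks):
    # ar_coefs: list of [num_features, num_features] lag matrices.
    # Unused AR blocks are padded with zeros, as in the comment above.
    num_features = ar_coefs[0].shape[0]
    dim = num_blocks * num_features
    transition = np.zeros((dim, dim))
    for i, coef in enumerate(ar_coefs):
        cols = slice(i * num_features, (i + 1) * num_features)
        transition[:num_features, cols] = coef
    # Shift every block down by one lag each step.
    transition[num_features:, :dim - num_features] = np.eye(dim - num_features)
    return transition
```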
@@ -127,7 +127,7 @@ class VARMA(state_space_model.StateSpaceModel):
Returns:
the state noise transform matrix. It has shape
- [self.state_dimendion, self.num_features].
+ [self.state_dimension, self.num_features].
"""
# Noise is broadcast, through the moving average coefficients, to
# un-observed parts of the latent state.
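And a correspondingly hedged sketch of a [state_dimension, num_features] noise transform: identity into the newest state block, MA coefficients routing the same innovation into the lagged-noise blocks. This is a hypothetical construction matching the shape and broadcast described above, not VARMA's actual code:

```python
import numpy as np

def noise_transform(ma_coefs, state_dimension, num_features):
    # Hypothetical layout: one innovation vector per step enters the
    # newest state block directly and, scaled by the MA lag matrices,
    # the blocks representing lagged noise terms; remaining (padded)
    # blocks receive no noise.
    blocks = [np.eye(num_features)] + list(ma_coefs)
    transform = np.zeros((state_dimension, num_features))
    for i, block in enumerate(blocks):
        rows = slice(i * num_features, (i + 1) * num_features)
        transform[rows, :] = block
    return transform
```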