path: root/tensorflow/contrib/training
author    Brett Koonce <koonce@hello.com>  2018-03-18 13:41:12 -0700
committer Brett Koonce <koonce@hello.com>  2018-04-07 14:30:54 -0700
commit    7c95ee3ca48f4e50818f12daf749cbe050a8643f (patch)
tree      e1a3184216c6a092aa001592f8d15824d3606fef /tensorflow/contrib/training
parent    b874783ccdf4cc36cb3546e6b6a998cb8f3470bb (diff)
contrib: minor spelling tweaks
packages: data training tensor_forest
Diffstat (limited to 'tensorflow/contrib/training')
-rw-r--r--  tensorflow/contrib/training/python/training/resample.py                       | 2
-rw-r--r--  tensorflow/contrib/training/python/training/sampling_ops.py                   | 6
-rw-r--r--  tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py  | 4
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/tensorflow/contrib/training/python/training/resample.py b/tensorflow/contrib/training/python/training/resample.py
index b16159bc16..7b8332b1d6 100644
--- a/tensorflow/contrib/training/python/training/resample.py
+++ b/tensorflow/contrib/training/python/training/resample.py
@@ -77,7 +77,7 @@ def resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):
Args:
inputs: A list of tensors, each of which has a shape of `[batch_size, ...]`
- rates: A tensor of shape `[batch_size]` contiaining the resampling rates
+ rates: A tensor of shape `[batch_size]` containing the resampling rates
for each input.
scope: Scope for the op.
seed: Random seed to use.
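For context on the function this hunk documents, here is a minimal usage sketch of `resample_at_rate`, based only on the signature and Args shown above; the public alias `tf.contrib.training.resample_at_rate` and the assumption that it returns a list of resampled tensors aligned across `inputs` are not stated in this diff.
```
# Minimal sketch (TF 1.x contrib). Assumption: the call returns a list of
# resampled tensors that stay aligned across the given inputs.
import tensorflow as tf

features = tf.random_normal([32, 10])                       # [batch_size, ...]
labels = tf.random_uniform([32], maxval=5, dtype=tf.int64)  # [batch_size]
rates = tf.fill([32], 2.0)                                  # per-example resampling rates

resampled = tf.contrib.training.resample_at_rate(
    [features, labels], rates, seed=42)
resampled_features, resampled_labels = resampled
```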
diff --git a/tensorflow/contrib/training/python/training/sampling_ops.py b/tensorflow/contrib/training/python/training/sampling_ops.py
index ba888f87dc..7140f2a46d 100644
--- a/tensorflow/contrib/training/python/training/sampling_ops.py
+++ b/tensorflow/contrib/training/python/training/sampling_ops.py
@@ -123,7 +123,7 @@ def rejection_sample(tensors,
batch_size=batch_size,
num_threads=queue_threads)
- # Queues return a single tensor if the list of enqued tensors is one. Since
+ # Queues return a single tensor if the list of enqueued tensors is one. Since
# we want the type to always be the same, always return a list.
if isinstance(minibatch, ops.Tensor):
minibatch = [minibatch]
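The comment fixed here concerns `rejection_sample` returning a list even when only a single tensor is enqueued. A hedged usage sketch follows; the `tensors, accept_prob_fn, batch_size` calling convention and the alias `tf.contrib.training.rejection_sample` are assumptions about the API, not something this diff shows.
```
# Hedged sketch: rejection-sample a stream of (data, label) pairs.
# Assumption: accept_prob_fn receives the list of per-example tensors and
# must return a scalar acceptance probability in [0, 1].
import tensorflow as tf

data = tf.random_normal([10])
label = tf.constant(1, dtype=tf.int64)

accept_prob_fn = lambda tensors: (tf.tanh(tensors[0][0]) + 1.0) / 2.0

# As the fixed comment notes, the result is always a list, even for one tensor.
data_batch, label_batch = tf.contrib.training.rejection_sample(
    [data, label], accept_prob_fn, batch_size=16)
```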
@@ -312,7 +312,7 @@ def _verify_input(tensor_list, labels, probs_list):
"""Verify that batched inputs are well-formed."""
checked_probs_list = []
for probs in probs_list:
- # Since number of classes shouldn't change at runtime, probalities shape
+ # Since number of classes shouldn't change at runtime, probabilities shape
# should be fully defined.
probs.get_shape().assert_is_fully_defined()
@@ -407,7 +407,7 @@ def _calculate_acceptance_probabilities(init_probs, target_probs):
```
- A solution for a_i in terms of the other variabes is the following:
+ A solution for a_i in terms of the other variables is the following:
```a_i = (t_i / p_i) / max_i[t_i / p_i]```
"""
# Make list of t_i / p_i.
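The formula whose wording is fixed in this hunk, ```a_i = (t_i / p_i) / max_i[t_i / p_i]```, can be checked with a small numeric sketch; the class distributions below are hypothetical and used only for illustration.
```
# Numeric illustration of a_i = (t_i / p_i) / max_i[t_i / p_i] with
# made-up initial (p_i) and target (t_i) class distributions.
import numpy as np

init_probs = np.array([0.6, 0.3, 0.1])    # p_i: observed class proportions
target_probs = np.array([0.4, 0.4, 0.2])  # t_i: desired class proportions

ratios = target_probs / init_probs        # t_i / p_i -> [0.67, 1.33, 2.0]
accept_probs = ratios / ratios.max()      # largest ratio is accepted with prob 1

print(accept_probs)                       # [0.3333..., 0.6667..., 1.0]
```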
diff --git a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
index 99d486b183..39d75a0806 100644
--- a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
+++ b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
@@ -876,7 +876,7 @@ class SequenceQueueingStateSaver(object):
]):
self._length = array_ops.identity(self._length)
- # Only create barrier; enqueu and dequeue operations happen when you
+ # Only create barrier; enqueue and dequeue operations happen when you
# access prefetch_op and next_batch.
self._create_barrier()
self._scope = scope
@@ -1637,7 +1637,7 @@ def _move_sparse_tensor_out_context(input_context, input_sequences, num_unroll):
For `key, value` pairs in `input_context` with `SparseTensor` `value` removes
them from `input_context` and transforms the `value` into a sequence and
- then adding `key`, transformed `value` into `input_seuqences`.
+ then adding `key`, transformed `value` into `input_sequences`.
The transformation is done by adding a new first dimension of `value_length`
equal to that of the other values in input_sequences` and tiling the `value`
every `num_unroll` steps.