-rw-r--r--  tensorflow/compiler/xla/tests/BUILD | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/binomial.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/vector_student_t.py | 2
-rw-r--r--  tensorflow/contrib/layers/python/layers/feature_column.py | 4
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers.py | 2
-rw-r--r--  tensorflow/contrib/training/python/training/bucket_ops.py | 2
-rw-r--r--  tensorflow/contrib/training/python/training/evaluation_test.py | 4
-rw-r--r--  tensorflow/core/framework/op_kernel.h | 2
-rw-r--r--  tensorflow/core/kernels/fused_batch_norm_op.cc | 2
-rw-r--r--  tensorflow/core/kernels/parameterized_truncated_normal_op_gpu.cu.cc | 2
-rw-r--r--  tensorflow/core/lib/gtl/flatrep.h | 2
-rw-r--r--  tensorflow/python/layers/convolutional.py | 10
-rw-r--r--  tensorflow/python/ops/distributions/dirichlet_multinomial.py | 2
-rw-r--r--  tensorflow/python/ops/distributions/multinomial.py | 2
-rw-r--r--  tensorflow/python/ops/distributions/util.py | 2
-rw-r--r--  tensorflow/python/ops/summary_op_util.py | 2
-rw-r--r--  tensorflow/tools/docs/generate_lib.py | 2
17 files changed, 23 insertions, 23 deletions
diff --git a/tensorflow/compiler/xla/tests/BUILD b/tensorflow/compiler/xla/tests/BUILD
index a11ac0bec6..a1a41657b6 100644
--- a/tensorflow/compiler/xla/tests/BUILD
+++ b/tensorflow/compiler/xla/tests/BUILD
@@ -509,7 +509,7 @@ xla_test(
)
# Tests the dot operation in some cases that can be performed via a
-# runtime call on some backends - e.g. a runtime call to to Eigen.
+# runtime call on some backends - e.g. a runtime call to Eigen.
xla_test(
name = "dot_operation_runtime_test",
srcs = ["dot_operation_test.cc"],
diff --git a/tensorflow/contrib/distributions/python/ops/binomial.py b/tensorflow/contrib/distributions/python/ops/binomial.py
index 9304a56491..d23af50167 100644
--- a/tensorflow/contrib/distributions/python/ops/binomial.py
+++ b/tensorflow/contrib/distributions/python/ops/binomial.py
@@ -196,7 +196,7 @@ class Binomial(distribution.Distribution):
@property
def probs(self):
- """Probability of of drawing a `1`."""
+ """Probability of drawing a `1`."""
return self._probs
def _batch_shape_tensor(self):
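
The `probs` property documented in this hunk is the per-trial success probability. A minimal usage sketch, assuming the TF 1.x-era `tf.contrib.distributions` API (illustrative only, not part of this commit):

import tensorflow as tf

# A Binomial with 5 trials and success probability 0.5.
dist = tf.contrib.distributions.Binomial(total_count=5., probs=0.5)

with tf.Session() as sess:
    print(sess.run(dist.probs))     # 0.5, the probability of drawing a `1`
    print(sess.run(dist.prob(3.)))  # probability of exactly 3 successes in 5 trials
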
diff --git a/tensorflow/contrib/distributions/python/ops/vector_student_t.py b/tensorflow/contrib/distributions/python/ops/vector_student_t.py
index ae804b6172..507493560b 100644
--- a/tensorflow/contrib/distributions/python/ops/vector_student_t.py
+++ b/tensorflow/contrib/distributions/python/ops/vector_student_t.py
@@ -160,7 +160,7 @@ class _VectorStudentT(transformed_distribution.TransformedDistribution):
#### Examples
A single instance of a "Vector Student's t-distribution" is defined by a mean
- vector of of length `k` and a scale matrix of shape `k x k`.
+ vector of length `k` and a scale matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
diff --git a/tensorflow/contrib/layers/python/layers/feature_column.py b/tensorflow/contrib/layers/python/layers/feature_column.py
index 68159fe9b9..4cbd198c02 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column.py
@@ -165,7 +165,7 @@ class _LinearEmbeddingLookupArguments(
"combiner"])):
"""Represents the information needed from a column for embedding lookup.
- Used to to compute DNN inputs and weighted sum.
+ Used to compute DNN inputs and weighted sum.
"""
pass
@@ -184,7 +184,7 @@ class _DeepEmbeddingLookupArguments(
"trainable"])):
"""Represents the information needed from a column for embedding lookup.
- Used to to compute DNN inputs and weighted sum.
+ Used to compute DNN inputs and weighted sum.
"""
pass
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index ed4b723ca7..8b3ccea995 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -938,7 +938,7 @@ def convolution(inputs,
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
- dimensions of of the filters. Can be a single integer to specify the same
+ dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
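
The `kernel_size` argument described in this docstring accepts either a single integer or a sequence. A minimal sketch, assuming the TF 1.x `tf.contrib.layers.convolution2d` wrapper around this function (illustrative only):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3])

# kernel_size as a single integer: 3 is shorthand for a 3x3 filter.
net = tf.contrib.layers.convolution2d(images, num_outputs=16, kernel_size=3)

# kernel_size as a sequence: a different extent per spatial dimension.
net = tf.contrib.layers.convolution2d(net, num_outputs=32, kernel_size=[3, 5], stride=2)
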
diff --git a/tensorflow/contrib/training/python/training/bucket_ops.py b/tensorflow/contrib/training/python/training/bucket_ops.py
index 7e293da551..5523cc375f 100644
--- a/tensorflow/contrib/training/python/training/bucket_ops.py
+++ b/tensorflow/contrib/training/python/training/bucket_ops.py
@@ -88,7 +88,7 @@ def bucket(tensors,
This function is implemented using several queues. A `QueueRunner` for the
queues is added to the current `Graph`'s `QUEUE_RUNNER` collection.
- As the returned tensors are the result of of a dequeue operation, evaluating
+ As the returned tensors are the result of a dequeue operation, evaluating
them will throw a `tf.errors.OutOfRangeError` when the input queue is
exhausted. If these tensors are feeding another input queue, its queue runner
will catch this exception, however, if they are used in your main thread
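
The `tf.errors.OutOfRangeError` behavior described here follows the usual TF 1.x queue-runner pattern. A minimal sketch of catching it in the main thread; `batched_tensors` is a hypothetical stand-in for the tensors returned by `bucket(...)`:

import tensorflow as tf

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            sess.run(batched_tensors)  # hypothetical output of bucket(...)
    except tf.errors.OutOfRangeError:
        print('Input queue exhausted.')
    finally:
        coord.request_stop()
        coord.join(threads)
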
diff --git a/tensorflow/contrib/training/python/training/evaluation_test.py b/tensorflow/contrib/training/python/training/evaluation_test.py
index babd2239b6..b07039916c 100644
--- a/tensorflow/contrib/training/python/training/evaluation_test.py
+++ b/tensorflow/contrib/training/python/training/evaluation_test.py
@@ -329,7 +329,7 @@ class EvaluateRepeatedlyTest(test.TestCase):
if not gfile.Exists(checkpoint_dir):
gfile.MakeDirs(checkpoint_dir)
- # We need a variable that that the saver will try to restore.
+ # We need a variable that the saver will try to restore.
variables.get_or_create_global_step()
# Run with placeholders. If we actually try to evaluate this, we'd fail
@@ -394,7 +394,7 @@ class EvaluateRepeatedlyTest(test.TestCase):
'evaluate_with_eval_feed_dict')
self._train_model(checkpoint_dir, num_steps=1)
- # We need a variable that that the saver will try to restore.
+ # We need a variable that the saver will try to restore.
variables.get_or_create_global_step()
# Create a variable and an eval op that increments it with a placeholder.
diff --git a/tensorflow/core/framework/op_kernel.h b/tensorflow/core/framework/op_kernel.h
index d85fbc2256..63caa054d7 100644
--- a/tensorflow/core/framework/op_kernel.h
+++ b/tensorflow/core/framework/op_kernel.h
@@ -722,7 +722,7 @@ class OpKernelContext {
StringPiece output_name, const TensorShape& output_shape,
Tensor** output) TF_MUST_USE_RESULT;
- // Tries to reuse one of of the inputs given in input_indices as a temporary.
+ // Tries to reuse one of the inputs given in input_indices as a temporary.
// If none of the given inputs can be forwarded, calls
// allocate_temp() to allocate a new temporary buffer.
Status forward_input_or_allocate_temp(
diff --git a/tensorflow/core/kernels/fused_batch_norm_op.cc b/tensorflow/core/kernels/fused_batch_norm_op.cc
index 37758e82eb..81551ee26f 100644
--- a/tensorflow/core/kernels/fused_batch_norm_op.cc
+++ b/tensorflow/core/kernels/fused_batch_norm_op.cc
@@ -149,7 +149,7 @@ struct FusedBatchNormGrad<CPUDevice, T> {
typename TTypes<T>::Vec scale_backprop(scale_backprop_output->vec<T>());
typename TTypes<T>::Vec offset_backprop(offset_backprop_output->vec<T>());
- // Note: the following formulas are used to to compute the gradients for
+ // Note: the following formulas are used to compute the gradients for
// back propagation.
// x_backprop = scale * rsqrt(variance + epsilon) *
// [y_backprop - mean(y_backprop) - (x - mean(x)) *
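
The formulas referenced in that comment are the standard training-mode batch-norm gradients. A minimal NumPy sketch of the same math, assuming a [batch, channels] layout so reductions run over axis 0 (an illustration of the formulas, not the kernel code):

import numpy as np

def batch_norm_grad(y_backprop, x, scale, variance, epsilon=1e-3):
    # Per-channel statistics over the batch axis.
    mean_x = x.mean(axis=0)
    inv_std = 1.0 / np.sqrt(variance + epsilon)
    x_centered = x - mean_x
    mean_dy = y_backprop.mean(axis=0)
    mean_dy_xc = (y_backprop * x_centered).mean(axis=0)
    # x_backprop = scale * rsqrt(var + eps) *
    #     [dy - mean(dy) - (x - mean(x)) * mean(dy * (x - mean(x))) / (var + eps)]
    x_backprop = scale * inv_std * (
        y_backprop - mean_dy - x_centered * mean_dy_xc / (variance + epsilon))
    scale_backprop = np.sum(y_backprop * x_centered * inv_std, axis=0)
    offset_backprop = np.sum(y_backprop, axis=0)
    return x_backprop, scale_backprop, offset_backprop
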
diff --git a/tensorflow/core/kernels/parameterized_truncated_normal_op_gpu.cu.cc b/tensorflow/core/kernels/parameterized_truncated_normal_op_gpu.cu.cc
index 8b85bd4ebe..933de65c15 100644
--- a/tensorflow/core/kernels/parameterized_truncated_normal_op_gpu.cu.cc
+++ b/tensorflow/core/kernels/parameterized_truncated_normal_op_gpu.cu.cc
@@ -32,7 +32,7 @@ limitations under the License.
#ifdef COMPILER_MSVC
// msvc does not support unroll. One could try the loop pragma but we need to
// take a closer look if this generates better code in this case. For now let
-// the compiler take care of of it.
+// the compiler take care of it.
#define UNROLL
#else
#define UNROLL _Pragma("unroll")
diff --git a/tensorflow/core/lib/gtl/flatrep.h b/tensorflow/core/lib/gtl/flatrep.h
index f5e318be1f..bb405b327a 100644
--- a/tensorflow/core/lib/gtl/flatrep.h
+++ b/tensorflow/core/lib/gtl/flatrep.h
@@ -29,7 +29,7 @@ namespace internal {
//
// The representation is an open-addressed hash table. Conceptually,
// the representation is a flat array of entries. However we
-// structure it as an array of of buckets where each bucket holds
+// structure it as an array of buckets where each bucket holds
// kWidth entries along with metadata for the kWidth entries. The
// metadata marker is
//
diff --git a/tensorflow/python/layers/convolutional.py b/tensorflow/python/layers/convolutional.py
index 63c7280b3d..54c775b37d 100644
--- a/tensorflow/python/layers/convolutional.py
+++ b/tensorflow/python/layers/convolutional.py
@@ -741,7 +741,7 @@ class SeparableConv2D(Conv2D):
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 integers specifying the spatial
- dimensions of of the filters. Can be a single integer to specify the same
+ dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
@@ -950,7 +950,7 @@ def separable_conv2d(inputs,
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 integers specifying the spatial
- dimensions of of the filters. Can be a single integer to specify the same
+ dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
@@ -1033,7 +1033,7 @@ class Conv2DTranspose(Conv2D):
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 positive integers specifying the spatial
- dimensions of of the filters. Can be a single integer to specify the same
+ dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
@@ -1233,7 +1233,7 @@ def conv2d_transpose(inputs,
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 positive integers specifying the spatial
- dimensions of of the filters. Can be a single integer to specify the same
+ dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
@@ -1492,7 +1492,7 @@ def conv3d_transpose(inputs,
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 3 positive integers specifying the spatial
- dimensions of of the filters. Can be a single integer to specify the same
+ dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 3 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
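
As in the docstrings above, `kernel_size` in the `tf.layers` wrappers can be a single integer or a tuple. A minimal sketch, assuming the TF 1.x `tf.layers` API (illustrative only):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16, 16, 8])

# Depthwise-then-pointwise convolution; kernel_size=3 is shorthand for (3, 3).
y = tf.layers.separable_conv2d(x, filters=16, kernel_size=3, padding='same')

# Transposed convolution that upsamples by 2 in each spatial dimension.
z = tf.layers.conv2d_transpose(y, filters=8, kernel_size=(3, 3), strides=2, padding='same')
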
diff --git a/tensorflow/python/ops/distributions/dirichlet_multinomial.py b/tensorflow/python/ops/distributions/dirichlet_multinomial.py
index 662a765558..61e22f4753 100644
--- a/tensorflow/python/ops/distributions/dirichlet_multinomial.py
+++ b/tensorflow/python/ops/distributions/dirichlet_multinomial.py
@@ -95,7 +95,7 @@ class DirichletMultinomial(distribution.Distribution):
The last `concentration` dimension parametrizes a single Dirichlet-Multinomial
distribution. When calling distribution functions (e.g., `dist.prob(counts)`),
`concentration`, `total_count` and `counts` are broadcast to the same shape.
- The last dimension of of `counts` corresponds single Dirichlet-Multinomial
+ The last dimension of `counts` corresponds single Dirichlet-Multinomial
distributions.
Distribution parameters are automatically broadcast in all functions; see
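
The broadcasting described in that docstring can be exercised with a small sketch using the module patched above (TF 1.x-era API, illustrative only):

import tensorflow as tf
from tensorflow.python.ops.distributions import dirichlet_multinomial

# A single Dirichlet-Multinomial over 3 categories; the last dimension of
# `counts` indexes those categories and is broadcast against `concentration`.
dist = dirichlet_multinomial.DirichletMultinomial(
    total_count=2., concentration=[1., 1., 1.])

counts = [1., 1., 0.]
with tf.Session() as sess:
    print(sess.run(dist.prob(counts)))
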
diff --git a/tensorflow/python/ops/distributions/multinomial.py b/tensorflow/python/ops/distributions/multinomial.py
index a5bea7b4ba..0236c31f6d 100644
--- a/tensorflow/python/ops/distributions/multinomial.py
+++ b/tensorflow/python/ops/distributions/multinomial.py
@@ -193,7 +193,7 @@ class Multinomial(distribution.Distribution):
@property
def probs(self):
- """Probability of of drawing a `1` in that coordinate."""
+ """Probability of drawing a `1` in that coordinate."""
return self._probs
def _batch_shape_tensor(self):
diff --git a/tensorflow/python/ops/distributions/util.py b/tensorflow/python/ops/distributions/util.py
index 05c6f4da57..4135bf9b2d 100644
--- a/tensorflow/python/ops/distributions/util.py
+++ b/tensorflow/python/ops/distributions/util.py
@@ -37,7 +37,7 @@ from tensorflow.python.ops import nn
def assert_close(
x, y, data=None, summarize=None, message=None, name="assert_close"):
- """Assert that that x and y are within machine epsilon of each other.
+ """Assert that x and y are within machine epsilon of each other.
Args:
x: Floating-point `Tensor`
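
`assert_close` is an internal helper with the signature shown above. A minimal sketch of how such an assertion op is typically wired in, assuming it behaves like the other `tf.Assert`-style checks (illustrative only):

import tensorflow as tf
from tensorflow.python.ops.distributions import util as distribution_util

x = tf.constant([1.0, 2.0])
y = tf.constant([1.0, 2.0])

# The returned op fails at run time if x and y differ by more than machine epsilon.
assert_op = distribution_util.assert_close(x, y, message='x and y diverged')
with tf.control_dependencies([assert_op]):
    total = tf.reduce_sum(x)
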
diff --git a/tensorflow/python/ops/summary_op_util.py b/tensorflow/python/ops/summary_op_util.py
index a3f6616902..06ea63704d 100644
--- a/tensorflow/python/ops/summary_op_util.py
+++ b/tensorflow/python/ops/summary_op_util.py
@@ -78,7 +78,7 @@ def summary_scope(name, family=None, default_name=None, values=None):
If `family` is set, then the tag name will be '<family>/<scope_name>', where
`scope_name` is `<outer_scope>/<family>/<name>`. This ensures that `family`
is always the prefix of the tag (and unmodified), while ensuring the scope
- respects the outer scope from this this summary was created.
+ respects the outer scope from this summary was created.
Args:
name: A name for the generated summary node.
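
The `family` behavior described in that docstring is surfaced through the `family` argument of the public `tf.summary.*` ops. A minimal sketch, assuming the TF 1.x `tf.summary.scalar` signature (illustrative only):

import tensorflow as tf

loss = tf.constant(0.25)
with tf.name_scope('train'):
    # With `family` set, the summary tag is prefixed with 'metrics/' while the op
    # still lives under the enclosing 'train' scope, as described above.
    tf.summary.scalar('loss', loss, family='metrics')
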
diff --git a/tensorflow/tools/docs/generate_lib.py b/tensorflow/tools/docs/generate_lib.py
index 67a4ad0ec9..587aaeb683 100644
--- a/tensorflow/tools/docs/generate_lib.py
+++ b/tensorflow/tools/docs/generate_lib.py
@@ -445,7 +445,7 @@ class DocGenerator(object):
'--base_dir',
type=str,
default=default_base_dir,
- help='Base directory to to strip from file names referenced in docs.')
+ help='Base directory to strip from file names referenced in docs.')
def parse_known_args(self):
flags, _ = self.argument_parser.parse_known_args()