-rw-r--r--  tensorflow/contrib/autograph/operators/control_flow.py  2
-rw-r--r--  tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py  2
-rw-r--r--  tensorflow/contrib/bigtable/python/ops/bigtable_api.py  4
-rw-r--r--  tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer.py  2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/sample_stats.py  4
-rw-r--r--  tensorflow/contrib/lite/toco/model.h  2
-rw-r--r--  tensorflow/contrib/model_pruning/python/layers/layers.py  2
-rw-r--r--  tensorflow/contrib/quantize/python/quant_ops_test.py  4
-rw-r--r--  tensorflow/core/framework/function.h  2
-rw-r--r--  tensorflow/python/ops/image_ops_test.py  4
-rw-r--r--  tensorflow/python/ops/parallel_for/pfor.py  2
-rw-r--r--  tensorflow/python/training/checkpoint_utils.py  2
-rw-r--r--  tensorflow/python/training/ftrl.py  2
13 files changed, 17 insertions, 17 deletions
diff --git a/tensorflow/contrib/autograph/operators/control_flow.py b/tensorflow/contrib/autograph/operators/control_flow.py
index 988df70157..5126592e1e 100644
--- a/tensorflow/contrib/autograph/operators/control_flow.py
+++ b/tensorflow/contrib/autograph/operators/control_flow.py
@@ -141,7 +141,7 @@ def _dataset_for_stmt(ds, extra_test, body, init_state):
while_body,
init_state=(epoch_number, iterate) + init_state,
extra_deps=())
- # Dropping the epoch number and iterate because they are not not syntactically
+ # Dropping the epoch number and iterate because they are not syntactically
# visible.
results = results[2:]
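The comment fixed here refers to loop state that the helper threads through `while_stmt` but that the user's code never names. The following plain-Python sketch (hypothetical helper names, not AutoGraph's actual API) shows the same pattern of prepending bookkeeping variables to the loop state and slicing them off afterwards:

```python
# A plain-Python sketch (hypothetical names, not AutoGraph's API) of the
# pattern in this hunk: a generic while helper threads extra bookkeeping
# variables (here a loop index) through the state tuple, and the caller
# drops them because the user's code never sees them.
def while_stmt(test, body, init_state):
    state = init_state
    while test(*state):
        state = body(*state)
    return state

def for_stmt(items, body, init_state):
    # Prepend an index to the user state, mirroring (epoch_number, iterate).
    def test(i, *s):
        return i < len(items)
    def loop_body(i, *s):
        return (i + 1,) + body(items[i], *s)
    results = while_stmt(test, loop_body, (0,) + init_state)
    # Drop the index because it is not syntactically visible to the caller.
    return results[1:]

# Sum a list with the helper: init_state is a one-tuple holding the total.
print(for_stmt([1, 2, 3], lambda x, total: (total + x,), (0,)))  # (6,)
```

Here the index plays the role of `(epoch_number, iterate)` in the hunk above: it is carried by the loop machinery and removed from the returned results.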
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py
index 9a84f1231c..7f2b379d3d 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py
@@ -39,7 +39,7 @@ from tensorflow.contrib.autograph.pyct.static_analysis import annos
class Definition(object):
"""Definition objects describe a unique definition of a variable.
- Subclasses of this may be used by passing an appropriate factory fuction to
+ Subclasses of this may be used by passing an appropriate factory function to
resolve.
Attributes:
diff --git a/tensorflow/contrib/bigtable/python/ops/bigtable_api.py b/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
index fd30aa8bbb..53d58a8efe 100644
--- a/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
+++ b/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
@@ -331,7 +331,7 @@ class BigtableTable(object):
"""Retrieves row (including values) from the Bigtable service at high speed.
Rows with row-key prefixed by `prefix` will be retrieved. This method is
- similar to `scan_prefix`, but by constrast performs multiple sub-scans in
+ similar to `scan_prefix`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.
Note: The dataset produced by this method is not deterministic!
@@ -390,7 +390,7 @@ class BigtableTable(object):
"""Retrieves rows (including values) from the Bigtable service.
Rows with row-keys between `start` and `end` will be retrieved. This method
- is similar to `scan_range`, but by constrast performs multiple sub-scans in
+ is similar to `scan_range`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.
Note: The dataset produced by this method is not deterministic!
diff --git a/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer.py b/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer.py
index 3791dae8d7..ff846b191a 100644
--- a/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer.py
+++ b/tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer.py
@@ -150,7 +150,7 @@ def _project_stochastic_matrix_wrt_euclidean_norm(matrix):
"matrix must be two dimensional (instead is %d-dimensional)" %
matrix_shape.ndims)
if matrix_shape[0] != matrix_shape[1]:
- raise ValueError("matrix must be be square (instead has shape (%d,%d))" %
+ raise ValueError("matrix must be square (instead has shape (%d,%d))" %
(matrix_shape[0], matrix_shape[1]))
dimension = matrix_shape[0].value
if dimension is None:
diff --git a/tensorflow/contrib/distributions/python/ops/sample_stats.py b/tensorflow/contrib/distributions/python/ops/sample_stats.py
index f5aaa5cf34..aa680a92be 100644
--- a/tensorflow/contrib/distributions/python/ops/sample_stats.py
+++ b/tensorflow/contrib/distributions/python/ops/sample_stats.py
@@ -134,7 +134,7 @@ def auto_correlation(
x_len = util.prefer_static_shape(x_rotated)[-1]
# TODO(langmore) Investigate whether this zero padding helps or hurts. At
- # the moment is is necessary so that all FFT implementations work.
+ # the moment is necessary so that all FFT implementations work.
# Zero pad to the next power of 2 greater than 2 * x_len, which equals
# 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2).
x_len_float64 = math_ops.cast(x_len, np.float64)
@@ -198,7 +198,7 @@ def auto_correlation(
# Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The
# other terms were zeros arising only due to zero padding.
# `denominator = (N / 2 - m)` (defined below) is the proper term to
- # divide by by to make this an unbiased estimate of the expectation
+ # divide by to make this an unbiased estimate of the expectation
# E[X[n] Conj(X[n - m])].
x_len = math_ops.cast(x_len, dtype.real_dtype)
max_lags = math_ops.cast(max_lags, dtype.real_dtype)
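The two comments fixed in this file describe real numerical details of `auto_correlation`: padding to the next power of 2 so every FFT implementation accepts the length, and dividing lag `m` by the number of nonzero terms so the estimate of `E[X[n] Conj(X[n - m])]` is unbiased. A standalone NumPy sketch of that computation (my own illustration, not the TensorFlow code; it centers the series and handles only the 1-D case) might look like:

```python
# Minimal NumPy sketch (an assumption, not the TensorFlow implementation)
# illustrating the two comments above: zero padding to the next power of 2
# so every FFT backend handles the length, and dividing lag m by the number
# of nonzero terms so the estimate of E[X[t] Conj(X[t - m])] is unbiased.
import numpy as np

def auto_correlation_fft(x, max_lags=None):
    """Estimate R[m] = E[X[t] Conj(X[t - m])] for m = 0..max_lags."""
    x = np.asarray(x, dtype=np.complex128)
    n = x.shape[-1]
    x = x - x.mean()  # work with the centered series
    if max_lags is None:
        max_lags = n - 1
    # Pad to 2**ceil(log2(2 * n)), a power of 2 that is >= 2 * n, which
    # avoids circular wrap-around and keeps FFT sizes friendly.
    fft_len = 2 ** int(np.ceil(np.log2(2 * n)))
    f = np.fft.fft(x, n=fft_len)
    # Wiener-Khinchin: inverse FFT of the power spectrum gives the raw sums.
    raw = np.fft.ifft(f * np.conj(f))[: max_lags + 1]
    # raw[m] is a sum of (n - m) nonzero terms; divide by (n - m) to get an
    # unbiased estimate at each lag rather than dividing everything by n.
    denominator = n - np.arange(max_lags + 1)
    return raw / denominator

# Example: a short random series.
rng = np.random.default_rng(0)
x = rng.standard_normal(128)
print(auto_correlation_fft(x, max_lags=4).real)
```

Dividing by `n - m` rather than a constant is the unbiasing step the second comment describes; the same count appears above as `N / 2 - m`, expressed there in terms of the already padded length.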
diff --git a/tensorflow/contrib/lite/toco/model.h b/tensorflow/contrib/lite/toco/model.h
index d629787939..58927f550b 100644
--- a/tensorflow/contrib/lite/toco/model.h
+++ b/tensorflow/contrib/lite/toco/model.h
@@ -2009,7 +2009,7 @@ class Model {
std::size_t transient_data_size = 0;
// For code-generation only: required alignment of the transient_data buffer
std::size_t transient_data_alignment = 0;
- // Arithmatic operations performed in the model.
+ // Arithmetic operations performed in the model.
int64 ops_count = 0;
private:
diff --git a/tensorflow/contrib/model_pruning/python/layers/layers.py b/tensorflow/contrib/model_pruning/python/layers/layers.py
index 466daf204a..d453e350f0 100644
--- a/tensorflow/contrib/model_pruning/python/layers/layers.py
+++ b/tensorflow/contrib/model_pruning/python/layers/layers.py
@@ -139,7 +139,7 @@ def masked_convolution(inputs,
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
- dimensions of of the filters. Can be a single integer to specify the same
+ dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
diff --git a/tensorflow/contrib/quantize/python/quant_ops_test.py b/tensorflow/contrib/quantize/python/quant_ops_test.py
index c2a8def480..a45840009b 100644
--- a/tensorflow/contrib/quantize/python/quant_ops_test.py
+++ b/tensorflow/contrib/quantize/python/quant_ops_test.py
@@ -75,7 +75,7 @@ class QuantOpsTest(googletest.TestCase):
self.assertGreater(max_value, 0.0)
self.assertLess(max_value, 1.0)
- def testVariablesNotParitioned_LastValue(self):
+ def testVariablesNotPartitioned_LastValue(self):
# Variables added should not use a default partiioner since they are
# scalar. There would be a tensorflow error thrown if the partitioner was
# respected by the rewrite.
@@ -90,7 +90,7 @@ class QuantOpsTest(googletest.TestCase):
is_training=True,
vars_collection=_MIN_MAX_VARS)
- def testVariablesNotParitioned_MovingAvg(self):
+ def testVariablesNotPartitioned_MovingAvg(self):
# Variables added should not use a default partiioner since they are
# scalar. There would be a tensorflow error thrown if the partitioner was
# respected by the rewrite.
diff --git a/tensorflow/core/framework/function.h b/tensorflow/core/framework/function.h
index 5da9af7db3..31a816ac5f 100644
--- a/tensorflow/core/framework/function.h
+++ b/tensorflow/core/framework/function.h
@@ -456,7 +456,7 @@ class FunctionLibraryRuntime {
// This interface is EXPERIMENTAL and subject to change.
//
- // Instatiates the function using an executor of the given type. If empty,
+ // Instantiates the function using an executor of the given type. If empty,
// the default TensorFlow executor will be used.
string executor_type;
};
diff --git a/tensorflow/python/ops/image_ops_test.py b/tensorflow/python/ops/image_ops_test.py
index cf9761803b..187f3e6e2d 100644
--- a/tensorflow/python/ops/image_ops_test.py
+++ b/tensorflow/python/ops/image_ops_test.py
@@ -1956,7 +1956,7 @@ class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
- # The orignal error message does not contain back slashes. However, they
+ # The original error message does not contain back slashes. However, they
# are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also needs to be changed.
self._assertRaises(
@@ -2985,7 +2985,7 @@ class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
- # The orignal error message does not contain back slashes. However, they
+ # The original error message does not contain back slashes. However, they
# are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also needs to be changed.
self._assertRaises(
diff --git a/tensorflow/python/ops/parallel_for/pfor.py b/tensorflow/python/ops/parallel_for/pfor.py
index 77ec3bc0d4..2e4b2fd64e 100644
--- a/tensorflow/python/ops/parallel_for/pfor.py
+++ b/tensorflow/python/ops/parallel_for/pfor.py
@@ -2117,7 +2117,7 @@ def _convert_print(pfor_input):
# 2a Elements written to the array are "stacked"
# To simulate multiple TensorArrays, we may increase the dimension of each
# element of the array. i.e. the i_th row of the j_th entry of the converted
-# TensorArray corresponds to to the j_th entry of the TensorArray in the i_th
+# TensorArray corresponds to the j_th entry of the TensorArray in the i_th
# pfor iteration.
#
# 2b Elements written to the array are "unstacked"
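The "stacked" case (2a) in this comment block can be visualized with a small NumPy sketch (my own illustration with made-up names, not pfor's implementation): each entry of the converted array gains a leading per-iteration dimension, so row `i` of entry `j` holds what iteration `i` wrote at position `j`.

```python
# NumPy sketch of the "stacked" layout described in case 2a above.
import numpy as np

num_iters = 3      # pfor iterations being simulated
num_entries = 2    # writes into each per-iteration TensorArray
elem_shape = (4,)  # shape of each written element

# per_iter[i][j]: element written at position j during iteration i.
per_iter = [[np.full(elem_shape, 10 * i + j, dtype=np.float32)
             for j in range(num_entries)]
            for i in range(num_iters)]

# Converted ("stacked") array: entry j has shape (num_iters,) + elem_shape.
stacked = [np.stack([per_iter[i][j] for i in range(num_iters)], axis=0)
           for j in range(num_entries)]

# Row i of stacked entry j equals iteration i's entry j.
assert np.array_equal(stacked[1][2], per_iter[2][1])
print(stacked[0].shape)  # (3, 4)
```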
diff --git a/tensorflow/python/training/checkpoint_utils.py b/tensorflow/python/training/checkpoint_utils.py
index 883f4fd910..4752b9dce6 100644
--- a/tensorflow/python/training/checkpoint_utils.py
+++ b/tensorflow/python/training/checkpoint_utils.py
@@ -316,7 +316,7 @@ def _set_checkpoint_initializer(variable,
# pylint:disable=protected-access
# We need special handling for `DistributedVariable`s as they contain
- # mutliple actual variables. `assign` on a `DistributedVariable` returns a
+ # multiple actual variables. `assign` on a `DistributedVariable` returns a
# combined `init_op` which contains initializers for all the contained
# variables. We then set each underlying variable's `_initializer_op` using
# the corresponding `init_op`.
diff --git a/tensorflow/python/training/ftrl.py b/tensorflow/python/training/ftrl.py
index 4fa081fab7..832c10d454 100644
--- a/tensorflow/python/training/ftrl.py
+++ b/tensorflow/python/training/ftrl.py
@@ -86,7 +86,7 @@ class FtrlOptimizer(optimizer.Optimizer):
if initial_accumulator_value < 0.0:
raise ValueError(
- "initial_accumulator_value %f needs to be be positive or zero" %
+ "initial_accumulator_value %f needs to be positive or zero" %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError("learning_rate_power %f needs to be negative or zero" %