author    Nicholas Nadeau, P.Eng., AVS <nnadeau@users.noreply.github.com>  2018-05-03 13:47:06 -0400
committer Shanqing Cai <cais@google.com>  2018-05-03 13:47:06 -0400
commit  487fa7b1a48c151362ab1b16cdda6bbc78f5d6dc (patch)
tree    938a76a47b46d102f4833677db475aaa99e936d1 /tensorflow/contrib
parent  4984a60e7147edef532ca1b15050471e81e45841 (diff)
Fixed Typos (#18806)
* fixed typos
Diffstat (limited to 'tensorflow/contrib')
-rw-r--r--  tensorflow/contrib/autograph/impl/config.py | 2
-rw-r--r--  tensorflow/contrib/autograph/operators/control_flow.py | 2
-rw-r--r--  tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bijectors/cholesky_outer_product.py | 2
-rw-r--r--  tensorflow/contrib/eager/README.md | 2
-rw-r--r--  tensorflow/contrib/ffmpeg/ffmpeg_lib.h | 2
-rw-r--r--  tensorflow/contrib/framework/python/ops/critical_section_ops.py | 2
-rw-r--r--  tensorflow/contrib/gan/python/features/python/conditioning_utils.py | 2
-rw-r--r--  tensorflow/contrib/graph_editor/transform.py | 2
-rwxr-xr-x  tensorflow/contrib/image/__init__.py | 2
-rw-r--r--  tensorflow/contrib/kfac/examples/convnet.py | 2
-rw-r--r--  tensorflow/contrib/kfac/python/ops/optimizer.py | 4
-rw-r--r--  tensorflow/contrib/kfac/python/ops/placement.py | 2
-rw-r--r--  tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h | 2
-rw-r--r--  tensorflow/contrib/lite/schema/schema.fbs | 2
-rw-r--r--  tensorflow/contrib/lite/schema/schema_v0.fbs | 2
-rw-r--r--  tensorflow/contrib/lite/schema/schema_v1.fbs | 2
-rw-r--r--  tensorflow/contrib/lite/schema/schema_v2.fbs | 2
-rw-r--r--  tensorflow/contrib/lite/schema/schema_v3.fbs | 4
-rw-r--r--  tensorflow/contrib/lite/testing/generate_examples.py | 4
-rw-r--r--  tensorflow/contrib/lite/testing/tflite_driver.cc | 4
-rw-r--r--  tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md | 4
-rw-r--r--  tensorflow/contrib/lite/toco/import_tensorflow.cc | 2
-rw-r--r--  tensorflow/contrib/lite/toco/tflite/operator.h | 4
-rw-r--r--  tensorflow/contrib/lite/toco/tflite/types_test.cc | 2
-rw-r--r--  tensorflow/contrib/opt/python/training/elastic_average_optimizer_test.py | 2
-rw-r--r--  tensorflow/contrib/opt/python/training/model_average_optimizer_test.py | 2
-rw-r--r--  tensorflow/contrib/tensorrt/convert/convert_nodes.cc | 2
-rw-r--r--  tensorflow/contrib/verbs/README.md | 2
29 files changed, 35 insertions, 35 deletions
diff --git a/tensorflow/contrib/autograph/impl/config.py b/tensorflow/contrib/autograph/impl/config.py
index 2600088595..878bb7e12f 100644
--- a/tensorflow/contrib/autograph/impl/config.py
+++ b/tensorflow/contrib/autograph/impl/config.py
@@ -33,7 +33,7 @@ DEFAULT_UNCOMPILED_MODULES = set((
(utils.__name__,),
# All of tensorflow's subpackages. Unlike the root tf module, they don't
- # have well-known names. Not refering to the module directly to avoid
+ # have well-known names. Not referring to the module directly to avoid
# circular imports.
(
utils.__name__[:-len('.contrib.autograph.utils')],),
diff --git a/tensorflow/contrib/autograph/operators/control_flow.py b/tensorflow/contrib/autograph/operators/control_flow.py
index 9f7202821f..671c9ccc13 100644
--- a/tensorflow/contrib/autograph/operators/control_flow.py
+++ b/tensorflow/contrib/autograph/operators/control_flow.py
@@ -174,7 +174,7 @@ def while_stmt(test, body, init_state, extra_deps, opts=None):
Tuple containing the final state.
"""
# TODO(mdan): Consider adding a generic mechanism for dynamic dispatch.
- # That could be somethins as simple as a collection of dispatch rules, with
+ # That could be something as simple as a collection of dispatch rules, with
# some prioritization.
if any(tensor_util.is_tensor(v) for v in init_state + extra_deps):
return _tf_while_stmt(test, body, init_state, opts)
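As a gloss on the TODO in this hunk, a minimal sketch of such a prioritized rule table — hypothetical names only; nothing below is autograph API:

    # Sketch of the dispatch-rule mechanism the TODO suggests: (predicate,
    # handler) pairs checked in priority order; first match wins.
    DISPATCH_RULES = [
        (lambda state: any(getattr(v, "is_tensor", False) for v in state),
         lambda state: "tf_while_loop"),   # graph-mode path
        (lambda state: True,               # catch-all: plain Python loop
         lambda state: "python_while"),
    ]

    def dispatch_while(state):
      for predicate, handler in DISPATCH_RULES:
        if predicate(state):
          return handler(state)

    assert dispatch_while([1, 2, 3]) == "python_while"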
diff --git a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
index 08c1dcdd02..e53d86ec61 100644
--- a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
+++ b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
@@ -369,7 +369,7 @@ class GradientBoostedDecisionTreeModel(object):
Returns:
a dictionary of prediction results -
ENSEMBLE_STAMP, PREDICTION, PARTITION_IDS,
- NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPED.
+ NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPTED.
"""
ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle,
ensemble_stamp)
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/cholesky_outer_product.py b/tensorflow/contrib/distributions/python/ops/bijectors/cholesky_outer_product.py
index ecdb8967f4..268c8d0342 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/cholesky_outer_product.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/cholesky_outer_product.py
@@ -53,7 +53,7 @@ class CholeskyOuterProduct(bijector.Bijector):
its spectrum), and that the product of two positive-diagonal lower-triangular
matrices is another positive-diagonal lower-triangular matrix.
- A simple inductive argument (proceding one column of L_3 at a time) shows
+ A simple inductive argument (proceeding one column of L_3 at a time) shows
that, if `I = L_3 @ L_3.T`, with L_3 being lower-triangular with positive-
diagonal, then `L_3 = I`. Thus, `L_1 = L_2`, proving injectivity of g.
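Spelled out under the stated assumptions (both factors lower-triangular with positive diagonal, a class closed under products and inverses), the reduction and the base step of that induction read:

    \text{Let } L_3 = L_2^{-1} L_1. \text{ Then }
    L_3 L_3^\top = L_2^{-1}\,(L_1 L_1^\top)\,L_2^{-\top}
                 = L_2^{-1}\,(L_2 L_2^\top)\,L_2^{-\top} = I.
    % Base step: (L_3 L_3^\top)_{11} = (L_3)_{11}^2 = 1 with (L_3)_{11} > 0
    % gives (L_3)_{11} = 1; then (L_3 L_3^\top)_{i1} = (L_3)_{i1} = 0 for i > 1.
    % Recursing on the trailing (n-1) x (n-1) block yields L_3 = I.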
diff --git a/tensorflow/contrib/eager/README.md b/tensorflow/contrib/eager/README.md
index 762685db14..4384431e7b 100644
--- a/tensorflow/contrib/eager/README.md
+++ b/tensorflow/contrib/eager/README.md
@@ -1,6 +1,6 @@
# Eager Execution
-Eager execution provides an imperative interface to TensorFlow (similiar to
+Eager execution provides an imperative interface to TensorFlow (similar to
[NumPy](http://www.numpy.org)). When you enable eager execution, TensorFlow
operations execute immediately; you do not execute a pre-constructed graph with
[`Session.run()`](https://www.tensorflow.org/api_docs/python/tf/Session).
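The paragraph above is the behavioral contract; a minimal sketch against the TF 1.x API of the time:

    import tensorflow as tf

    tf.enable_eager_execution()  # must run once, at program startup

    # Ops execute immediately and return concrete values -- no Session.
    x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    y = tf.matmul(x, x)
    print(y)          # tf.Tensor([[ 7. 10.] [15. 22.]], shape=(2, 2), ...)
    print(y.numpy())  # plain NumPy array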
diff --git a/tensorflow/contrib/ffmpeg/ffmpeg_lib.h b/tensorflow/contrib/ffmpeg/ffmpeg_lib.h
index a8d5a0dd83..bf2aa75545 100644
--- a/tensorflow/contrib/ffmpeg/ffmpeg_lib.h
+++ b/tensorflow/contrib/ffmpeg/ffmpeg_lib.h
@@ -53,7 +53,7 @@ Status CreateAudioFile(const string& audio_format_id, int32 bits_per_second,
int32 samples_per_second, int32 channel_count,
const std::vector<float>& samples, string* output_data);
-// Reads an video file using ffmpeg adn converts it into a RGB24 in uint8
+// Reads a video file using ffmpeg and converts it into a RGB24 in uint8
// [frames, height, width, 3]. The w, h, and frames are obtained from ffmpeg.
Status ReadVideoFile(const string& filename, std::vector<uint8>* output_data,
uint32* width, uint32* height, uint32* frames);
diff --git a/tensorflow/contrib/framework/python/ops/critical_section_ops.py b/tensorflow/contrib/framework/python/ops/critical_section_ops.py
index bd764ed57a..72835c3ad8 100644
--- a/tensorflow/contrib/framework/python/ops/critical_section_ops.py
+++ b/tensorflow/contrib/framework/python/ops/critical_section_ops.py
@@ -202,7 +202,7 @@ class CriticalSection(object):
or lazy way that may cause a deadlock.
ValueError: If `exclusive_resource_access` is not provided (is `True`) and
another `CriticalSection` has an execution requesting the same
- resources as in `*args`, `**kwargs`, and any additionaly captured
+ resources as in `*args`, `**kwargs`, and any additionally captured
inputs in `fn`. Note, even if `exclusive_resource_access` is `True`,
if another execution in another `CriticalSection` was created without
`exclusive_resource_access=True`, a `ValueError` will be raised.
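For context on the error condition this hunk documents, a minimal usage sketch, assuming the TF 1.x contrib API in which `execute` takes a callable and `exclusive_resource_access` defaults to `True` (check the class docstring for the exact signature):

    import tensorflow as tf

    cs = tf.contrib.framework.CriticalSection()
    v = tf.get_variable("counter", initializer=0.0, use_resource=True)

    def add_one():
      return v.assign_add(1.0)

    # Serialized access to `v`: only one execution touches it at a time.
    # exclusive_resource_access is assumed to default to True, the setting
    # that can raise the ValueError described above if another
    # CriticalSection claims the same resource.
    increment = cs.execute(add_one)

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      print(sess.run(increment))  # 1.0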
diff --git a/tensorflow/contrib/gan/python/features/python/conditioning_utils.py b/tensorflow/contrib/gan/python/features/python/conditioning_utils.py
index df71187fbd..a9b8faa712 100644
--- a/tensorflow/contrib/gan/python/features/python/conditioning_utils.py
+++ b/tensorflow/contrib/gan/python/features/python/conditioning_utils.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Miscellanous utilities for TFGAN code and examples."""
+"""Miscellaneous utilities for TFGAN code and examples."""
from __future__ import absolute_import
from __future__ import division
diff --git a/tensorflow/contrib/graph_editor/transform.py b/tensorflow/contrib/graph_editor/transform.py
index a320a3f232..592d37b432 100644
--- a/tensorflow/contrib/graph_editor/transform.py
+++ b/tensorflow/contrib/graph_editor/transform.py
@@ -677,7 +677,7 @@ def copy_with_input_replacements(sgv, replacement_ts,
def _add_control_flow_ops(ops, control_ios):
- """Complete `ops` so that the tranformed graph is valid.
+ """Complete `ops` so that the transformed graph is valid.
Partially copying a graph can lead to a malformed graph. For instance,
copying half of a while construct is likely to result in an invalid graph.
diff --git a/tensorflow/contrib/image/__init__.py b/tensorflow/contrib/image/__init__.py
index 8f406ace1d..f230d93da4 100755
--- a/tensorflow/contrib/image/__init__.py
+++ b/tensorflow/contrib/image/__init__.py
@@ -17,7 +17,7 @@
### API
This module provides functions for image manipulation; currently, chrominance
-transformas (including changing saturation and hue) in YIQ space and
+transforms (including changing saturation and hue) in YIQ space and
projective transforms (including rotation) are supported.
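A short sketch of the two op families this docstring advertises, assuming the contemporaneous contrib signatures (`rotate`, `adjust_hsv_in_yiq`):

    import math
    import tensorflow as tf

    images = tf.zeros([1, 64, 64, 3])  # batch of RGB images

    # Projective transform: rotate every image by 45 degrees (radians).
    rotated = tf.contrib.image.rotate(images, angles=math.pi / 4)

    # Chrominance transform in YIQ space: shift hue, scale saturation.
    recolored = tf.contrib.image.adjust_hsv_in_yiq(
        images[0], delta_hue=0.1, scale_saturation=0.5, scale_value=1.0)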
## Image Transformation `Ops`
diff --git a/tensorflow/contrib/kfac/examples/convnet.py b/tensorflow/contrib/kfac/examples/convnet.py
index b261f41bf9..d6b1a61b71 100644
--- a/tensorflow/contrib/kfac/examples/convnet.py
+++ b/tensorflow/contrib/kfac/examples/convnet.py
@@ -325,7 +325,7 @@ def distributed_grads_only_and_ops_chief_worker(
All workers perform gradient computation. Chief worker applies gradient after
averaging the gradients obtained from all the workers. All workers block
- execution untill the update is applied. Chief worker runs covariance and
+ execution until the update is applied. Chief worker runs covariance and
inverse update ops. Covariance and inverse matrices are placed on parameter
servers in a round robin manner. For further details on synchronous
distributed optimization check `tf.train.SyncReplicasOptimizer`.
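A condensed sketch of that synchronous pattern, built directly on the core `tf.train.SyncReplicasOptimizer` the docstring points to (the KFAC example layers covariance/inverse placement on top of this):

    import tensorflow as tf

    num_workers = 4
    w = tf.get_variable("w", initializer=1.0)
    loss = tf.square(w)

    opt = tf.train.SyncReplicasOptimizer(
        tf.train.GradientDescentOptimizer(0.1),
        replicas_to_aggregate=num_workers,  # chief applies the averaged grad
        total_num_replicas=num_workers)
    train_op = opt.minimize(
        loss, global_step=tf.train.get_or_create_global_step())

    is_chief = True  # would be (task_index == 0) in a real cluster
    hook = opt.make_session_run_hook(is_chief)  # workers block on the update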
diff --git a/tensorflow/contrib/kfac/python/ops/optimizer.py b/tensorflow/contrib/kfac/python/ops/optimizer.py
index 7203804af3..b7f63d8d94 100644
--- a/tensorflow/contrib/kfac/python/ops/optimizer.py
+++ b/tensorflow/contrib/kfac/python/ops/optimizer.py
@@ -66,7 +66,7 @@ class KfacOptimizer(gradient_descent.GradientDescentOptimizer):
the local approximation with the Fisher information matrix, and to
regularize the update direction by making it closer to the gradient.
If damping is adapted during training then this value is used for
- initializing damping varaible.
+ initializing damping variable.
(Higher damping means the update looks more like a standard gradient
update - see Tikhonov regularization.)
layer_collection: The layer collection object, which holds the fisher
@@ -195,7 +195,7 @@ class KfacOptimizer(gradient_descent.GradientDescentOptimizer):
min_damping: `float`(Optional), Minimum value the damping parameter
can take. Default value 1e-5.
damping_adaptation_decay: `float`(Optional), The `damping` parameter is
- multipled by the `damping_adaptation_decay` every
+ multiplied by the `damping_adaptation_decay` every
`damping_adaptation_interval` number of iterations. Default value 0.99.
damping_adaptation_interval: `int`(Optional), Number of steps in between
updating the `damping` parameter. Default value 5.
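Taken literally, those three knobs define the schedule below; note the real optimizer can also increase damping when the quadratic model mispredicts, Levenberg-Marquardt style, so this shows only the decay path:

    # Plain-Python restatement of the documented decay schedule.
    damping = 1e-2
    damping_adaptation_decay = 0.99
    damping_adaptation_interval = 5
    min_damping = 1e-5

    for step in range(1, 101):
      if step % damping_adaptation_interval == 0:
        damping = max(min_damping, damping * damping_adaptation_decay)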
diff --git a/tensorflow/contrib/kfac/python/ops/placement.py b/tensorflow/contrib/kfac/python/ops/placement.py
index 8a20ebe198..c4454325ae 100644
--- a/tensorflow/contrib/kfac/python/ops/placement.py
+++ b/tensorflow/contrib/kfac/python/ops/placement.py
@@ -51,7 +51,7 @@ class RoundRobinPlacementMixin(object):
self._inv_devices = inv_devices
def make_vars_and_create_op_thunks(self, scope=None):
- """Make vars and create op thunks w/ a round-robin device placement strat.
+ """Make vars and create op thunks w/ a round-robin device placement start.
For each factor, all of that factor's cov variables and their associated
update ops will be placed on a particular device. A new device is chosen
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 445687cd15..e2e1cf4478 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -1814,7 +1814,7 @@ inline void LstmCell(const float* input_data, const Dims<4>& input_dims,
// requiring a power-of-two representation interval. Thus, we should right
// away quantize this array to a power-of-two interval; otherwise,
// implementation will need to rescale that, losing any benefit that a tighter
-// representation interval might otherwise yield, while introducting some
+// representation interval might otherwise yield, while introducing some
// numerical error and computational overhead.
//
// Now, Logistic and Tanh
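As an illustrative (non-TFLite) helper, snapping a symmetric representation interval up to a power of two, as the comment above prescribes for this array, looks like:

    import math

    def power_of_two_interval(amax):
      """Smallest power-of-two bound 2**k with 2**k >= amax."""
      return 2.0 ** math.ceil(math.log2(amax))

    interval = power_of_two_interval(3.7)  # -> 4.0
    scale = interval / 2 ** 15             # value of one int16 LSB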
diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs
index b16baf02dc..ff56c31720 100644
--- a/tensorflow/contrib/lite/schema/schema.fbs
+++ b/tensorflow/contrib/lite/schema/schema.fbs
@@ -65,7 +65,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
diff --git a/tensorflow/contrib/lite/schema/schema_v0.fbs b/tensorflow/contrib/lite/schema/schema_v0.fbs
index 852ea988f3..891d8366cc 100644
--- a/tensorflow/contrib/lite/schema/schema_v0.fbs
+++ b/tensorflow/contrib/lite/schema/schema_v0.fbs
@@ -48,7 +48,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
diff --git a/tensorflow/contrib/lite/schema/schema_v1.fbs b/tensorflow/contrib/lite/schema/schema_v1.fbs
index 06cd9408ed..b438b569e6 100644
--- a/tensorflow/contrib/lite/schema/schema_v1.fbs
+++ b/tensorflow/contrib/lite/schema/schema_v1.fbs
@@ -53,7 +53,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
diff --git a/tensorflow/contrib/lite/schema/schema_v2.fbs b/tensorflow/contrib/lite/schema/schema_v2.fbs
index 96731c8aae..b90408ff6d 100644
--- a/tensorflow/contrib/lite/schema/schema_v2.fbs
+++ b/tensorflow/contrib/lite/schema/schema_v2.fbs
@@ -54,7 +54,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
diff --git a/tensorflow/contrib/lite/schema/schema_v3.fbs b/tensorflow/contrib/lite/schema/schema_v3.fbs
index cedefe08f3..020da38493 100644
--- a/tensorflow/contrib/lite/schema/schema_v3.fbs
+++ b/tensorflow/contrib/lite/schema/schema_v3.fbs
@@ -53,7 +53,7 @@ table Tensor {
type:TensorType;
// An index that refers to the buffers table at the root of the model. Or,
// if there is no data buffer associated (i.e. intermediate results), then
- // this is 0 (which refers to an always existant empty buffer).
+ // this is 0 (which refers to an always existent empty buffer).
//
// The data_buffer itself is an opaque container, with the assumption that the
// target device is little-endian. In addition, all builtin operators assume
@@ -64,7 +64,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
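Restating the buffer-index convention from the first hunk above as reader-side pseudocode — `model` stands for a parsed schema_v3 Model, and the attribute names are hypothetical:

    def tensor_bytes(model, tensor):
      if tensor.buffer == 0:
        return None  # index 0 is the always-present empty buffer
      return model.buffers[tensor.buffer].data  # opaque little-endian bytes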
diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py
index e4851d6077..fd09332165 100644
--- a/tensorflow/contrib/lite/testing/generate_examples.py
+++ b/tensorflow/contrib/lite/testing/generate_examples.py
@@ -1758,7 +1758,7 @@ def make_strided_slice_tests(zip_path):
"shrink_axis_mask": [None, 1, 8, 11, 15, -1],
"constant_indices": [False, True],
},
- # TODO(b/73170889) Restore test paramaters removed in cl/191608113.
+ # TODO(b/73170889) Restore test parameters removed in cl/191608113.
# 2-D
{
"dtype": [tf.float32, tf.int32, tf.int64],
@@ -1899,7 +1899,7 @@ def make_lstm_tests(zip_path):
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
- """Feed inputs, assign vairables, and freeze graph."""
+ """Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
diff --git a/tensorflow/contrib/lite/testing/tflite_driver.cc b/tensorflow/contrib/lite/testing/tflite_driver.cc
index 58fe5bd6e4..75ac24719a 100644
--- a/tensorflow/contrib/lite/testing/tflite_driver.cc
+++ b/tensorflow/contrib/lite/testing/tflite_driver.cc
@@ -226,8 +226,8 @@ void TfLiteDriver::SetExpectation(int id, const string& csv_values) {
if (!IsValid()) return;
auto* tensor = interpreter_->tensor(id);
if (expected_output_.count(id) != 0) {
- fprintf(stderr, "Overriden expectation for tensor %d\n", id);
- Invalidate("Overriden expectation");
+ fprintf(stderr, "Overridden expectation for tensor %d\n", id);
+ Invalidate("Overridden expectation");
}
expected_output_[id].reset(new Expectation);
switch (tensor->type) {
diff --git a/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md b/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md
index 495014c6fc..7680cdd344 100644
--- a/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md
+++ b/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md
@@ -115,7 +115,7 @@ bazel run --config=opt \
In order to evaluate the possible benefit of generating a quantized graph, TOCO
allows "dummy-quantization" on float graphs. The flags `--default_ranges_min`
-and `--default_ranges_max` accept plausable values for the min-max ranges of the
+and `--default_ranges_max` accept plausible values for the min-max ranges of the
values in all arrays that do not have min-max information. "Dummy-quantization"
will produce lower accuracy but will emulate the performance of a correctly
quantized model.
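A hypothetical restatement of that range fallback (not toco's implementation): arrays lacking min-max information inherit the command-line defaults, which is why accuracy drops while performance stays realistic.

    def effective_range(minmax, default_min, default_max):
      # Use recorded min-max when present; otherwise the dummy defaults.
      return minmax if minmax is not None else (default_min, default_max)

    assert effective_range(None, 0.0, 6.0) == (0.0, 6.0)
    assert effective_range((-1.0, 1.0), 0.0, 6.0) == (-1.0, 1.0)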
@@ -338,7 +338,7 @@ below outline the use cases for each.
### Using `--output_format=GRAPHVIZ_DOT`
The first way to get a graphviz rendering is to pass `GRAPHVIZ_DOT` into
-`--output_format`. This results in a plausable visualization of the graph. This
+`--output_format`. This results in a plausible visualization of the graph. This
reduces the requirements that normally exist during conversion between other
input and output formats. For example, this may be useful if conversion from
TENSORFLOW_GRAPHDEF to TFLITE is failing.
diff --git a/tensorflow/contrib/lite/toco/import_tensorflow.cc b/tensorflow/contrib/lite/toco/import_tensorflow.cc
index 453ff29b0d..8efe6ab7b9 100644
--- a/tensorflow/contrib/lite/toco/import_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/import_tensorflow.cc
@@ -144,7 +144,7 @@ ArrayDataType ConvertDataType(tensorflow::DataType dtype) {
else if (dtype == DT_STRING)
return ArrayDataType::kString;
else
- LOG(INFO) << "Unsupported data type in placehoder op: " << dtype;
+ LOG(INFO) << "Unsupported data type in placeholder op: " << dtype;
return ArrayDataType::kNone;
}
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.h b/tensorflow/contrib/lite/toco/tflite/operator.h
index 88af3d6ab6..85f7bdafe0 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.h
+++ b/tensorflow/contrib/lite/toco/tflite/operator.h
@@ -25,10 +25,10 @@ namespace tflite {
class BaseOperator;
-// Return a map contained all knwo TF Lite Operators, keyed by their names.
+// Return a map containing all known TF Lite Operators, keyed by their names.
std::map<string, std::unique_ptr<BaseOperator>> BuildOperatorByNameMap();
-// Return a map contained all knwo TF Lite Operators, keyed by the type of
+// Return a map containing all known TF Lite Operators, keyed by the type of
// their tf.mini counterparts.
std::map<OperatorType, std::unique_ptr<BaseOperator>> BuildOperatorByTypeMap();
diff --git a/tensorflow/contrib/lite/toco/tflite/types_test.cc b/tensorflow/contrib/lite/toco/tflite/types_test.cc
index 29fb0b2af2..efb849f422 100644
--- a/tensorflow/contrib/lite/toco/tflite/types_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/types_test.cc
@@ -44,7 +44,7 @@ template <ArrayDataType T>
Array ToFlatBufferAndBack(std::initializer_list<::toco::DataType<T>> items) {
// NOTE: This test does not construct the full buffers list. Since
// Deserialize normally takes a buffer, we need to synthesize one and provide
- // an index that is non-zero so the buffer is not assumed to be emtpy.
+ // an index that is non-zero so the buffer is not assumed to be empty.
Array src;
src.data_type = T;
src.GetMutableBuffer<T>().data = items;
diff --git a/tensorflow/contrib/opt/python/training/elastic_average_optimizer_test.py b/tensorflow/contrib/opt/python/training/elastic_average_optimizer_test.py
index 37539b9599..5ed8057b86 100644
--- a/tensorflow/contrib/opt/python/training/elastic_average_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/elastic_average_optimizer_test.py
@@ -58,7 +58,7 @@ def create_local_cluster(num_workers, num_ps, protocol="grpc"):
# Creates the workers and return their sessions, graphs, train_ops.
-# Cheif worker will update at last
+# Chief worker will update at last
def _get_workers(num_workers, period, workers, moving_rate):
sessions = []
graphs = []
diff --git a/tensorflow/contrib/opt/python/training/model_average_optimizer_test.py b/tensorflow/contrib/opt/python/training/model_average_optimizer_test.py
index bfb3350b59..3acd940268 100644
--- a/tensorflow/contrib/opt/python/training/model_average_optimizer_test.py
+++ b/tensorflow/contrib/opt/python/training/model_average_optimizer_test.py
@@ -57,7 +57,7 @@ def create_local_cluster(num_workers, num_ps, protocol="grpc"):
# Creates the workers and return their sessions, graphs, train_ops.
-# Cheif worker will update at last
+# Chief worker will update at last
def _get_workers(num_workers, steps, workers):
sessions = []
graphs = []
diff --git a/tensorflow/contrib/tensorrt/convert/convert_nodes.cc b/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
index 4d3710a514..3767596f8c 100644
--- a/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
+++ b/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
@@ -2145,7 +2145,7 @@ tensorflow::Status ConvertCalibrationNodeToEngineNode(
if (!status.ok() || !calib_res->calibrator_) {
return tensorflow::errors::FailedPrecondition(
"You must run calibration"
- " and inference conversion in the same proces");
+ " and inference conversion in the same process");
}
calib_res->calibrator_->setDone();
diff --git a/tensorflow/contrib/verbs/README.md b/tensorflow/contrib/verbs/README.md
index 4b6104a8b4..3137bfd03e 100644
--- a/tensorflow/contrib/verbs/README.md
+++ b/tensorflow/contrib/verbs/README.md
@@ -159,7 +159,7 @@ When the receiver receives the RDMA write, it will locate the relevant **RdmaTen
* step_id - Step ID.
* request_index - Request index.
* remote_addr/rkey - Address/rkey of the reallocated result/proxy tensor.
-* **RDMA_MESSAGE_ERROR_STATUS** - (sender ==> receiver) Notify the receiver that an error had occured on the sender side, so it can propagate it to the upper levels.
+* **RDMA_MESSAGE_ERROR_STATUS** - (sender ==> receiver) Notify the receiver that an error had occurred on the sender side, so it can propagate it to the upper levels.
* type - The message type.
* name (name_size) - Name of the requested tensor.
* step_id - Step ID.