author    Taehoon Lee <taehoonlee@snu.ac.kr>    2017-06-13 13:29:58 +0900
committer Martin Wicke <martin.wicke@gmail.com>    2017-06-13 18:47:06 -0700
commit    bdff1828d9e40b6e2d8d989f6e9588af2decc6a7 (patch)
tree      b7ee0aea6383fad10b29a0ff5fa72afec577c90f /tensorflow
parent    8ae96864e0f187f7a137c769f41950982e747e50 (diff)
Fix typos
Diffstat (limited to 'tensorflow')
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bijectors/affine_impl.py | 4
-rw-r--r--  tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py | 2
-rw-r--r--  tensorflow/core/framework/tensor.h | 2
-rw-r--r--  tensorflow/core/grappler/costs/virtual_scheduler_test.cc | 2
-rw-r--r--  tensorflow/core/protobuf/worker.proto | 2
-rw-r--r--  tensorflow/python/ops/data_flow_ops.py | 2
-rw-r--r--  tensorflow/python/ops/distributions/special_math.py | 2
-rw-r--r--  tensorflow/python/ops/rnn_cell_impl.py | 2
-rw-r--r--  tensorflow/python/ops/tensor_array_ops.py | 2
-rw-r--r--  tensorflow/tools/graph_transforms/quantize_weights_test.cc | 4
10 files changed, 12 insertions, 12 deletions
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/affine_impl.py b/tensorflow/contrib/distributions/python/ops/bijectors/affine_impl.py
index d44e258bd2..42865ed404 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/affine_impl.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/affine_impl.py
@@ -120,7 +120,7 @@ class _TriLPlusVDVTLightweightOperatorPD(object):
Doesn't actually do the sqrt! Named as such to agree with API.
- To compute (M + V D V.T), we use the the Woodbury matrix identity:
+ To compute (M + V D V.T), we use the Woodbury matrix identity:
inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
where,
C = inv(D) + V.T inv(M) V.
@@ -166,7 +166,7 @@ class _TriLPlusVDVTLightweightOperatorPD(object):
def _woodbury_sandwiched_term(self):
"""Computes the sandwiched term in the Woodbury identity.
- Computes the "`C`" in the the identity:
+ Computes the "`C`" in the identity:
inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
where,
C = inv(D) + V.T inv(M) V.
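
Note: the Woodbury identity quoted in this docstring is easy to verify numerically. A minimal NumPy sketch, not part of this commit (sizes and values are arbitrary):

    import numpy as np

    rng = np.random.RandomState(0)
    n, k = 5, 2
    M = np.eye(n) + 0.1 * rng.randn(n, n)   # any invertible n x n matrix
    V = rng.randn(n, k)
    D = np.diag(rng.rand(k) + 0.5)          # invertible k x k diagonal

    M_inv = np.linalg.inv(M)
    C = np.linalg.inv(D) + V.T.dot(M_inv).dot(V)   # the sandwiched term "C"
    lhs = np.linalg.inv(M + V.dot(D).dot(V.T))
    rhs = M_inv - M_inv.dot(V).dot(np.linalg.inv(C)).dot(V.T).dot(M_inv)
    assert np.allclose(lhs, rhs)            # identity holds to float precision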
diff --git a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
index 642c7f1b54..b659988a56 100644
--- a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
+++ b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
@@ -327,7 +327,7 @@ class LuongAttention(_BaseAttentionMechanism):
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
- "Perhaps you need to set num_units to the the keys' dimension (%s)?"
+ "Perhaps you need to set num_units to the keys' dimension (%s)?"
% (query, depth, self.keys, key_units, key_units))
dtype = query.dtype
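
Note: this error fires because Luong-style attention dots the query directly against the keys with no query projection, so the decoder cell's output depth must agree with num_units. A hedged TF 1.x sketch of a compatible setup (names and sizes are illustrative):

    import tensorflow as tf

    memory = tf.placeholder(tf.float32, [None, 50, 256])  # encoder outputs
    cell = tf.contrib.rnn.BasicLSTMCell(128)              # query depth is 128
    attention = tf.contrib.seq2seq.LuongAttention(
        num_units=128,   # must match the decoder cell's output depth
        memory=memory)
    attn_cell = tf.contrib.seq2seq.AttentionWrapper(cell, attention)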
diff --git a/tensorflow/core/framework/tensor.h b/tensorflow/core/framework/tensor.h
index 49eecc0b08..a164fe61b5 100644
--- a/tensorflow/core/framework/tensor.h
+++ b/tensorflow/core/framework/tensor.h
@@ -307,7 +307,7 @@ class Tensor {
/// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing the
/// first 'begin' Tensor dimensions into the first dimension of the result and
/// the Tensor dimensions of the last dims() - 'begin' - NDIMS into the last
- /// dimension of the result. If 'begin' < 0 then the the |'begin'| leading
+ /// dimension of the result. If 'begin' < 0 then the |'begin'| leading
/// dimensions of size 1 will be added. If 'begin' + NDIMS > dims() then
/// 'begin' + NDIMS - dims() trailing dimensions of size 1 will be added.
template <typename T, size_t NDIMS = 3>
diff --git a/tensorflow/core/grappler/costs/virtual_scheduler_test.cc b/tensorflow/core/grappler/costs/virtual_scheduler_test.cc
index 484a8860d8..9e48c411dc 100644
--- a/tensorflow/core/grappler/costs/virtual_scheduler_test.cc
+++ b/tensorflow/core/grappler/costs/virtual_scheduler_test.cc
@@ -441,7 +441,7 @@ TEST_F(VirtualSchedulerTest, ComplexDependency) {
1 /* control dependency */);
EXPECT_EQ(expected_size, cpu_state.memory_usage);
- // Nodes currrently in memory: bn's port -1, 0, and 2, and x's port 0.
+ // Nodes currently in memory: bn's port -1, 0, and 2, and x's port 0.
std::set<std::pair<string, int>> nodes_in_memory;
std::transform(
cpu_state.nodes_in_memory.begin(), cpu_state.nodes_in_memory.end(),
diff --git a/tensorflow/core/protobuf/worker.proto b/tensorflow/core/protobuf/worker.proto
index cf05aece39..e476a84a13 100644
--- a/tensorflow/core/protobuf/worker.proto
+++ b/tensorflow/core/protobuf/worker.proto
@@ -171,7 +171,7 @@ message ExecutorOpts {
};
message RunGraphRequest {
- // session_handle is the the master-generated unique id for this session.
+ // session_handle is the master-generated unique id for this session.
// If session_handle is non-empty, it must be the same as used when
// registering the graph. If it is empty, a single global namespace is used to
// search for the graph_handle.
diff --git a/tensorflow/python/ops/data_flow_ops.py b/tensorflow/python/ops/data_flow_ops.py
index 4eead79531..e05b1ff557 100644
--- a/tensorflow/python/ops/data_flow_ops.py
+++ b/tensorflow/python/ops/data_flow_ops.py
@@ -1582,7 +1582,7 @@ class StagingArea(BaseStagingArea):
This is mostly useful for limiting the number of tensors on
devices such as GPUs.
- All get() and peek() commands block if the the requested data
+ All get() and peek() commands block if the requested data
is not present in the Staging Area.
"""
diff --git a/tensorflow/python/ops/distributions/special_math.py b/tensorflow/python/ops/distributions/special_math.py
index f96eafed71..3a804c941a 100644
--- a/tensorflow/python/ops/distributions/special_math.py
+++ b/tensorflow/python/ops/distributions/special_math.py
@@ -324,7 +324,7 @@ def log_ndtr(x, series_order=3, name="log_ndtr"):
def _log_ndtr_lower(x, series_order):
- """Asymptotic expansion version of `Log[cdf(x)]`, apppropriate for `x<<-1`."""
+ """Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
x_2 = math_ops.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * math.log(2. * math.pi)
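
Note: the leading term computed in log_scale above is already quite accurate deep in the lower tail. A quick NumPy/SciPy check (not part of this commit):

    import numpy as np
    from scipy.special import log_ndtr

    x = -10.0
    approx = -0.5 * x**2 - np.log(-x) - 0.5 * np.log(2.0 * np.pi)
    print(approx)        # ~ -53.2215
    print(log_ndtr(x))   # ~ -53.2312; the series terms close the small gap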
diff --git a/tensorflow/python/ops/rnn_cell_impl.py b/tensorflow/python/ops/rnn_cell_impl.py
index 49a4aba473..cc6528d1f5 100644
--- a/tensorflow/python/ops/rnn_cell_impl.py
+++ b/tensorflow/python/ops/rnn_cell_impl.py
@@ -606,7 +606,7 @@ class DropoutWrapper(RNNCell):
"""Create a cell with added input, state, and/or output dropout.
If `variational_recurrent` is set to `True` (**NOT** the default behavior),
- then the the same dropout mask is applied at every step, as described in:
+ then the same dropout mask is applied at every step, as described in:
Y. Gal, Z Ghahramani. "A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks". https://arxiv.org/abs/1512.05287
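
Note: in practice variational_recurrent is set when constructing the wrapper. A hedged TF 1.x sketch (sizes are illustrative; input_size and dtype are required so the fixed mask can be sampled up front):

    import tensorflow as tf

    cell = tf.contrib.rnn.BasicLSTMCell(64)
    cell = tf.contrib.rnn.DropoutWrapper(
        cell,
        input_keep_prob=0.8,
        variational_recurrent=True,  # reuse one dropout mask across all steps
        input_size=32,               # input depth, needed to build the mask
        dtype=tf.float32)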
diff --git a/tensorflow/python/ops/tensor_array_ops.py b/tensorflow/python/ops/tensor_array_ops.py
index 7a6abc8e61..20ae082ee1 100644
--- a/tensorflow/python/ops/tensor_array_ops.py
+++ b/tensorflow/python/ops/tensor_array_ops.py
@@ -87,7 +87,7 @@ class TensorArray(object):
the shape constraints of each of the elements of the TensorArray.
Need not be fully defined.
colocate_with_first_write_call: If `True`, the TensorArray will be
- colocated on the same device as the the Tensor used on its first write
+ colocated on the same device as the Tensor used on its first write
(write operations include `write`, `unstack`, and `split`). If `False`,
the TensorArray will be placed on the device determined by the
device context available during its initialization.
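
Note: a small TF 1.x sketch of the placement rule described above (illustrative only):

    import tensorflow as tf

    ta = tf.TensorArray(dtype=tf.float32, size=2,
                        colocate_with_first_write_call=True)
    with tf.device("/cpu:0"):
        v = tf.constant([1.0, 2.0])
    ta = ta.write(0, v)   # the array is colocated with `v`, on /cpu:0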
diff --git a/tensorflow/tools/graph_transforms/quantize_weights_test.cc b/tensorflow/tools/graph_transforms/quantize_weights_test.cc
index e1a105bdd3..63c5b5a64d 100644
--- a/tensorflow/tools/graph_transforms/quantize_weights_test.cc
+++ b/tensorflow/tools/graph_transforms/quantize_weights_test.cc
@@ -90,13 +90,13 @@ class QuantizeWeightsTest : public ::testing::Test {
EXPECT_EQ("Const", q_weights_const->op());
EXPECT_EQ(DT_QUINT8, q_weights_const->attr().at("dtype").type());
- // Run the the original graph.
+ // Run the original graph.
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
- // Run the the quantized graph.
+ // Run the quantized graph.
std::unique_ptr<Session> quantized_session(NewSession(SessionOptions()));
TF_ASSERT_OK(quantized_session->Create(quantized_graph_def));
std::vector<Tensor> quantized_outputs;