about · summary · refs · log · tree · commit · diff · homepage
diff options
context:
space:
mode:
-rw-r--r--tensorflow/__init__.py2
-rw-r--r--tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py6
-rw-r--r--tensorflow/contrib/slim/python/slim/evaluation_test.py2
-rw-r--r--tensorflow/contrib/specs/python/__init__.py4
-rw-r--r--tensorflow/examples/tutorials/word2vec/word2vec_basic.py2
-rw-r--r--tensorflow/python/__init__.py2
-rw-r--r--tensorflow/python/framework/dtypes.py6
-rw-r--r--tensorflow/python/framework/framework_lib.py2
-rw-r--r--tensorflow/python/framework/op_def_library_test.py4
-rw-r--r--tensorflow/python/framework/ops.py2
-rw-r--r--tensorflow/python/kernel_tests/decode_jpeg_op_test.py2
-rw-r--r--tensorflow/python/kernel_tests/pool_test.py2
-rw-r--r--tensorflow/python/kernel_tests/softmax_op_test.py6
-rw-r--r--tensorflow/python/ops/control_flow_ops.py4
-rw-r--r--tensorflow/python/ops/math_ops.py8
-rw-r--r--tensorflow/python/ops/nn_grad.py2
-rw-r--r--tensorflow/python/ops/nn_ops.py28
-rw-r--r--tensorflow/python/ops/standard_ops.py3
-rw-r--r--tensorflow/python/training/training.py2
-rwxr-xr-xtensorflow/tools/ci_build/ci_sanity.sh3
-rw-r--r--tensorflow/tools/compatibility/tf_upgrade_test.py2
21 files changed, 46 insertions, 48 deletions
diff --git a/tensorflow/__init__.py b/tensorflow/__init__.py
index 083634bd79..78ad6aec19 100644
--- a/tensorflow/__init__.py
+++ b/tensorflow/__init__.py
@@ -21,7 +21,7 @@ from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
-from tensorflow.python import *
+from tensorflow.python import * # pylint: disable=redefined-builtin
# pylint: enable=wildcard-import
from tensorflow.python.util.lazy_loader import LazyLoader
diff --git a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
index 7526f3ae0d..3f5fdc18bb 100644
--- a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
+++ b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
@@ -211,9 +211,8 @@ class SdcaModel(object):
sums.append(
math_ops.reduce_sum(
math_ops.abs(math_ops.cast(weights, dtypes.float64))))
- sum = math_ops.add_n(sums)
# SDCA L1 regularization cost is: l1 * sum(|weights|)
- return self._options['symmetric_l1_regularization'] * sum
+ return self._options['symmetric_l1_regularization'] * math_ops.add_n(sums)
def _l2_loss(self, l2):
"""Computes the (un-normalized) l2 loss of the model."""
@@ -225,9 +224,8 @@ class SdcaModel(object):
sums.append(
math_ops.reduce_sum(
math_ops.square(math_ops.cast(weights, dtypes.float64))))
- sum = math_ops.add_n(sums)
# SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
- return l2 * sum / 2.0
+ return l2 * math_ops.add_n(sums) / 2.0
def _convert_n_to_tensor(self, input_list, as_ref=False):
"""Converts input list to a set of tensors."""
diff --git a/tensorflow/contrib/slim/python/slim/evaluation_test.py b/tensorflow/contrib/slim/python/slim/evaluation_test.py
index 8a267ddac7..7ab6805fac 100644
--- a/tensorflow/contrib/slim/python/slim/evaluation_test.py
+++ b/tensorflow/contrib/slim/python/slim/evaluation_test.py
@@ -41,7 +41,7 @@ from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
-from tensorflow.python.training import input
+from tensorflow.python.training import input # pylint: disable=redefined-builtin
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
diff --git a/tensorflow/contrib/specs/python/__init__.py b/tensorflow/contrib/specs/python/__init__.py
index 52db61e421..b6cc754023 100644
--- a/tensorflow/contrib/specs/python/__init__.py
+++ b/tensorflow/contrib/specs/python/__init__.py
@@ -18,10 +18,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-# pylint: disable=wildcard-import,g-importing-member
+# pylint: disable=wildcard-import,g-importing-member,redefined-builtin
from tensorflow.contrib.specs.python.params_ops import *
from tensorflow.contrib.specs.python.specs import *
from tensorflow.contrib.specs.python.specs_lib import *
from tensorflow.contrib.specs.python.specs_ops import *
from tensorflow.contrib.specs.python.summaries import *
-# pylint: enable=wildcard-import
+# pylint: enable=wildcard-import,redefined-builtin
diff --git a/tensorflow/examples/tutorials/word2vec/word2vec_basic.py b/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
index f6906b0f79..14ae7fbf35 100644
--- a/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
+++ b/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
@@ -131,7 +131,7 @@ def generate_batch(batch_size, num_skips, skip_window):
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
- buffer = collections.deque(maxlen=span)
+ buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
diff --git a/tensorflow/python/__init__.py b/tensorflow/python/__init__.py
index 6d405d04b1..02ed5517ca 100644
--- a/tensorflow/python/__init__.py
+++ b/tensorflow/python/__init__.py
@@ -60,7 +60,7 @@ from tensorflow.core.protobuf.tensorflow_server_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Framework
-from tensorflow.python.framework.framework_lib import *
+from tensorflow.python.framework.framework_lib import * # pylint: disable=redefined-builtin
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
diff --git a/tensorflow/python/framework/dtypes.py b/tensorflow/python/framework/dtypes.py
index c825114483..99ae8b24f1 100644
--- a/tensorflow/python/framework/dtypes.py
+++ b/tensorflow/python/framework/dtypes.py
@@ -238,9 +238,9 @@ class DType(object):
min, max : tuple
Lower and upper intensity limits.
"""
- min, max = dtype_range[self.as_numpy_dtype]
+ min, max = dtype_range[self.as_numpy_dtype] # pylint: disable=redefined-builtin
if clip_negative:
- min = 0
+ min = 0 # pylint: disable=redefined-builtin
return min, max
def is_compatible_with(self, other):
@@ -356,7 +356,7 @@ complex128 = DType(types_pb2.DT_COMPLEX128)
tf_export("complex128").export_constant(__name__, "complex128")
int64 = DType(types_pb2.DT_INT64)
tf_export("int64").export_constant(__name__, "int64")
-bool = DType(types_pb2.DT_BOOL)
+bool = DType(types_pb2.DT_BOOL) # pylint: disable=redefined-builtin
tf_export("bool").export_constant(__name__, "bool")
qint8 = DType(types_pb2.DT_QINT8)
tf_export("qint8").export_constant(__name__, "qint8")
diff --git a/tensorflow/python/framework/framework_lib.py b/tensorflow/python/framework/framework_lib.py
index d16fe979e6..3172f3c2c3 100644
--- a/tensorflow/python/framework/framework_lib.py
+++ b/tensorflow/python/framework/framework_lib.py
@@ -118,7 +118,7 @@ from tensorflow.python.framework.ops import register_tensor_conversion_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
-from tensorflow.python.framework.dtypes import *
+from tensorflow.python.framework.dtypes import * # pylint: disable=redefined-builtin
# Load a TensorFlow plugin
from tensorflow.python.framework.load_library import *
diff --git a/tensorflow/python/framework/op_def_library_test.py b/tensorflow/python/framework/op_def_library_test.py
index 817007ce6c..84ca062ade 100644
--- a/tensorflow/python/framework/op_def_library_test.py
+++ b/tensorflow/python/framework/op_def_library_test.py
@@ -42,7 +42,7 @@ class OpDefLibraryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = test_ops._op_def_lib
- def _add_op(self, ascii):
+ def _add_op(self, ascii): # pylint: disable=redefined-builtin
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
@@ -1336,7 +1336,7 @@ class OpDefLibraryGraphTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = test_ops._op_def_lib
- def _add_op(self, ascii):
+ def _add_op(self, ascii): # pylint: disable=redefined-builtin
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index 4d7dcdbee1..77e83554c9 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -3649,7 +3649,7 @@ class Graph(object):
def _last_id(self):
return self._next_id_counter
- def _get_op_def(self, type):
+ def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
if self._c_graph:
with c_api_util.tf_buffer() as buf:
diff --git a/tensorflow/python/kernel_tests/decode_jpeg_op_test.py b/tensorflow/python/kernel_tests/decode_jpeg_op_test.py
index 89fd26c544..510daf79dc 100644
--- a/tensorflow/python/kernel_tests/decode_jpeg_op_test.py
+++ b/tensorflow/python/kernel_tests/decode_jpeg_op_test.py
@@ -21,7 +21,7 @@ from __future__ import print_function
import os
import time
-from six.moves import xrange
+from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
diff --git a/tensorflow/python/kernel_tests/pool_test.py b/tensorflow/python/kernel_tests/pool_test.py
index 6384897633..6ede654aad 100644
--- a/tensorflow/python/kernel_tests/pool_test.py
+++ b/tensorflow/python/kernel_tests/pool_test.py
@@ -96,7 +96,7 @@ def pool_direct_single_axis(
def pool_direct(
- input,
+ input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding, # pylint: disable=redefined-builtin
diff --git a/tensorflow/python/kernel_tests/softmax_op_test.py b/tensorflow/python/kernel_tests/softmax_op_test.py
index ac08f2aec0..4d89831aae 100644
--- a/tensorflow/python/kernel_tests/softmax_op_test.py
+++ b/tensorflow/python/kernel_tests/softmax_op_test.py
@@ -99,10 +99,10 @@ class SoftmaxTest(test.TestCase):
def _testOverflow(self, use_gpu=False):
if use_gpu:
- type = np.float32
+ type = np.float32 # pylint: disable=redefined-builtin
else:
- type = np.float64
- max = np.finfo(type).max
+ type = np.float64 # pylint: disable=redefined-builtin
+ max = np.finfo(type).max # pylint: disable=redefined-builtin
features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type)
with self.test_session(use_gpu=use_gpu):
tf_log_softmax = nn_ops.log_softmax(features)
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index 3a6fdaafb9..c33f351289 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -313,7 +313,7 @@ def _Enter(data,
return sparse_tensor.SparseTensor(indices, values, dense_shape)
-def exit(data, name=None):
+def exit(data, name=None): # pylint: disable=redefined-builtin
"""Exits the current frame to its parent frame.
Exit makes its input `data` available to the parent frame.
@@ -3343,7 +3343,7 @@ def group(*inputs, **kwargs):
@tf_export("tuple")
-def tuple(tensors, name=None, control_inputs=None):
+def tuple(tensors, name=None, control_inputs=None): # pylint: disable=redefined-builtin
"""Group tensors together.
This creates a tuple of tensors with the same values as the `tensors`
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index 448cc905d6..57b260ae91 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -237,7 +237,7 @@ def argmin(input,
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("abs")
-def abs(x, name=None):
+def abs(x, name=None): # pylint: disable=redefined-builtin
r"""Computes the absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
@@ -542,7 +542,7 @@ def scalar_mul(scalar, x):
@tf_export("pow")
-def pow(x, y, name=None):
+def pow(x, y, name=None): # pylint: disable=redefined-builtin
r"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
@@ -712,7 +712,7 @@ def angle(input, name=None):
@tf_export("round")
-def round(x, name=None):
+def round(x, name=None): # pylint: disable=redefined-builtin
"""Rounds the values of a tensor to the nearest integer, element-wise.
Rounds half to even. Also known as bankers rounding. If you want to round
@@ -1207,7 +1207,7 @@ ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
@tf_export("range")
-def range(start, limit=None, delta=1, dtype=None, name="range"):
+def range(start, limit=None, delta=1, dtype=None, name="range"): # pylint: disable=redefined-builtin
"""Creates a sequence of numbers.
Creates a sequence of numbers that begins at `start` and extends by
diff --git a/tensorflow/python/ops/nn_grad.py b/tensorflow/python/ops/nn_grad.py
index 5e6cafd6aa..2a883eb0d5 100644
--- a/tensorflow/python/ops/nn_grad.py
+++ b/tensorflow/python/ops/nn_grad.py
@@ -1010,7 +1010,7 @@ def _NthElementGrad(op, grad):
A list of two tensors, the first being the gradient w.r.t. the input,
the second being the gradient w.r.t. the N (None).
"""
- input = op.inputs[0]
+ input = op.inputs[0] # pylint: disable=redefined-builtin
output = op.outputs[0]
# Compute the number of elements which equal to output in each reduction
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index a691e281ee..47f48a7e16 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -48,8 +48,8 @@ local_response_normalization = gen_nn_ops.lrn
def _non_atrous_convolution(
- input,
- filter,
+ input, # pylint: disable=redefined-builtin
+ filter, # pylint: disable=redefined-builtin
padding,
data_format=None, # pylint: disable=redefined-builtin
strides=None,
@@ -94,9 +94,9 @@ def _non_atrous_convolution(
"""
with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
- input = ops.convert_to_tensor(input, name="input")
+ input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
- filter = ops.convert_to_tensor(filter, name="filter")
+ filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.get_shape()
op = _NonAtrousConvolution(
input_shape,
@@ -348,7 +348,7 @@ def with_space_to_batch(
ValueError: if `spatial_dims` are invalid.
"""
- input = ops.convert_to_tensor(input, name="input")
+ input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
def build_op(num_spatial_dims, padding):
@@ -645,7 +645,7 @@ def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
@tf_export("nn.convolution")
def convolution(
- input,
+ input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
@@ -766,9 +766,9 @@ def convolution(
"""
# pylint: enable=line-too-long
with ops.name_scope(name, "convolution", [input, filter]) as name:
- input = ops.convert_to_tensor(input, name="input")
+ input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
- filter = ops.convert_to_tensor(filter, name="filter")
+ filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.get_shape()
op = Convolution(
input_shape,
@@ -962,7 +962,7 @@ def pool(
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
- input = ops.convert_to_tensor(input, name="input")
+ input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
@@ -1223,7 +1223,7 @@ def conv2d_transpose(
if data_format not in ("NCHW", "NHWC"):
raise ValueError("data_format has to be either NCHW or NHWC.")
value = ops.convert_to_tensor(value, name="value")
- filter = ops.convert_to_tensor(filter, name="filter")
+ filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 3 if data_format == "NHWC" else 1
if not value.get_shape()[axis].is_compatible_with(filter.get_shape()[3]):
raise ValueError("input channels does not match filter's input channels, "
@@ -1447,7 +1447,7 @@ def conv3d_transpose(
with ops.name_scope(name, "conv3d_transpose",
[value, filter, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
- filter = ops.convert_to_tensor(filter, name="filter")
+ filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 1 if data_format == "NCDHW" else 4
if not value.get_shape()[axis].is_compatible_with(filter.get_shape()[4]):
raise ValueError("input channels does not match filter's input channels, "
@@ -2279,7 +2279,7 @@ def dropout(x, keep_prob, noise_shape=None, seed=None, name=None): # pylint: di
@tf_export("nn.top_k")
-def top_k(input, k=1, sorted=True, name=None):
+def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank=1), finds the `k` largest entries in the vector
@@ -2308,7 +2308,7 @@ def top_k(input, k=1, sorted=True, name=None):
return gen_nn_ops._top_kv2(input, k=k, sorted=sorted, name=name)
-def nth_element(input, n, reverse=False, name=None):
+def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin
r"""Finds values of the `n`-th order statistic for the last dmension.
If the input is a vector (rank-1), finds the entries which is the nth-smallest
@@ -2505,7 +2505,7 @@ def conv1d_transpose(
spatial_start_dim = 2
strides = [1, 1, 1, stride]
value = array_ops.expand_dims(value, spatial_start_dim)
- filter = array_ops.expand_dims(filter, 0)
+ filter = array_ops.expand_dims(filter, 0) # pylint: disable=redefined-builtin
result = gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape_,
diff --git a/tensorflow/python/ops/standard_ops.py b/tensorflow/python/ops/standard_ops.py
index 009d1dc3b9..a2164f78b8 100644
--- a/tensorflow/python/ops/standard_ops.py
+++ b/tensorflow/python/ops/standard_ops.py
@@ -47,8 +47,7 @@ from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
-# pylint: disable=redefined-builtin
-from tensorflow.python.ops.control_flow_ops import tuple
+from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.data_flow_ops import *
diff --git a/tensorflow/python/training/training.py b/tensorflow/python/training/training.py
index cdba18af8c..78c8ce9208 100644
--- a/tensorflow/python/training/training.py
+++ b/tensorflow/python/training/training.py
@@ -135,7 +135,7 @@ from tensorflow.python.training.queue_runner import *
# For the module level doc.
from tensorflow.python.training import input as _input
-from tensorflow.python.training.input import *
+from tensorflow.python.training.input import * # pylint: disable=redefined-builtin
# pylint: enable=wildcard-import
from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
diff --git a/tensorflow/tools/ci_build/ci_sanity.sh b/tensorflow/tools/ci_build/ci_sanity.sh
index 310c1b6248..f980ced2e4 100755
--- a/tensorflow/tools/ci_build/ci_sanity.sh
+++ b/tensorflow/tools/ci_build/ci_sanity.sh
@@ -186,7 +186,8 @@ do_pylint() {
# C0301 line-too-long
# C0326 bad-whitespace
# W0611 unused-import
- grep -E '(\[E|\[W0311|\[W0312|\[C0330|\[C0301|\[C0326|\[W0611)' ${OUTPUT_FILE} > ${ERRORS_FILE}
+ # W0622 redefined-builtin
+ grep -E '(\[E|\[W0311|\[W0312|\[C0330|\[C0301|\[C0326|\[W0611|\[W0622)' ${OUTPUT_FILE} > ${ERRORS_FILE}
N_ERRORS=0
while read -r LINE; do
diff --git a/tensorflow/tools/compatibility/tf_upgrade_test.py b/tensorflow/tools/compatibility/tf_upgrade_test.py
index a495f9883b..3d02eacba6 100644
--- a/tensorflow/tools/compatibility/tf_upgrade_test.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_test.py
@@ -114,7 +114,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
self.assertEqual(errors, ["test.py:1: tf.reverse requires manual check."])
def testListComprehension(self):
- def _test(input, output):
+ def _test(input, output): # pylint: disable=redefined-builtin
_, unused_report, errors, new_text = self._upgrade(input)
self.assertEqual(new_text, output)
_test("tf.concat(0, \t[x for x in y])\n",