path: root/tensorflow/contrib/quantize
author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-03-15 15:44:56 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-03-15 15:49:01 -0700
commit     1e75c69339da2fbf2c5c5fbeb891243badae7ff8 (patch)
tree       81268169bbff6836bfbbd4e9866a1374f597a624 /tensorflow/contrib/quantize
parent     6c62e650252ab32f83637a8de6720e73ffeca226 (diff)
Automated g4 rollback of changelist 189231636
PiperOrigin-RevId: 189258641
Diffstat (limited to 'tensorflow/contrib/quantize')
-rw-r--r--  tensorflow/contrib/quantize/python/fold_batch_norms.py             4
-rw-r--r--  tensorflow/contrib/quantize/python/quant_ops.py                    4
-rw-r--r--  tensorflow/contrib/quantize/python/quantize.py                     2
-rw-r--r--  tensorflow/contrib/quantize/python/quantize_graph.py               2
-rw-r--r--  tensorflow/contrib/quantize/python/quantize_parameterized_test.py  8
-rw-r--r--  tensorflow/contrib/quantize/python/quantize_test.py                2
6 files changed, 11 insertions, 11 deletions
diff --git a/tensorflow/contrib/quantize/python/fold_batch_norms.py b/tensorflow/contrib/quantize/python/fold_batch_norms.py
index 1afcbb8504..b278265639 100644
--- a/tensorflow/contrib/quantize/python/fold_batch_norms.py
+++ b/tensorflow/contrib/quantize/python/fold_batch_norms.py
@@ -237,7 +237,7 @@ def _FindFusedBatchNorms(graph):
# The batch variance used during forward and backward prop is biased,
# i.e it is calculated as: V=sum(x(k)-mu)^2/N. For the moving average
# calculation, the variance is corrected by the term N/N-1 (Bessel's
- # correction). The variance tensor read from FuseBatchNorm has Bessel's
+ # correction). The variance tensor read from FuseBatchNorm has bessel's
# correction applied, so we undo it here.
scope, sep, _ = bn_op.name.rpartition('/')
g = ops.get_default_graph()
@@ -306,7 +306,7 @@ def _ComputeBatchNormCorrections(context, match, freeze_batch_norm_delay,
Args:
context: The scope under which we look for batch norm params
- match: Object containing required batch norm tensors for correction
+ match: Object containg required batch norm tensors for correction
computation.
freeze_batch_norm_delay: Delay in steps at which computation switches
from regular batch norm to frozen mean and variance.
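The comment touched by this hunk packs in the key numerics: the forward and backward passes use the biased batch variance V = sum((x_k - mu)^2)/N, while FusedBatchNorm reports the Bessel-corrected value V * N/(N - 1), which the fold must undo. A minimal NumPy sketch of that relationship (illustrative only, not the library code):

    import numpy as np

    x = np.array([1.0, 2.0, 3.0, 4.0])  # toy batch, one channel
    n = x.size

    biased_var = x.var()           # V = sum((x_k - mu)^2) / N, used in fwd/bwd prop
    corrected_var = x.var(ddof=1)  # Bessel's correction: V * N / (N - 1)

    # FusedBatchNorm reports the corrected variance, so the fold undoes it:
    assert np.isclose(corrected_var * (n - 1) / n, biased_var)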
diff --git a/tensorflow/contrib/quantize/python/quant_ops.py b/tensorflow/contrib/quantize/python/quant_ops.py
index a4f7b1b221..0a8e35080c 100644
--- a/tensorflow/contrib/quantize/python/quant_ops.py
+++ b/tensorflow/contrib/quantize/python/quant_ops.py
@@ -282,8 +282,8 @@ def _FakeQuantWithMinMaxVars(inputs, min_var, max_var, per_channel, num_bits,
Args:
inputs: a tensor containing values to be quantized.
min_var: a variable containing quantization range lower end(s).
- max_var: a variable containing quantization range upper end(s).
- per_channel: a boolean specifying whether to use per-channel quantization.
+ max_var: a variable containing quantization range lupper end(s).
+ per_channel: a boolean specifying whether to use per-channel quantizatioh.
num_bits: Number of bits to use for quantization, must be between 2 and 8.
narrow_range: Whether to use the narrow quantization range
[1; 2^num_bits - 1] or wide range [0; 2^num_bits - 1].
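The narrow/wide distinction in this docstring maps to the integer grids [1, 2^num_bits - 1] and [0, 2^num_bits - 1]. A small illustrative helper (quant_levels is hypothetical, not part of quant_ops):

    def quant_levels(num_bits, narrow_range):
        """Return the (qmin, qmax) integer grid implied by narrow_range."""
        qmin = 1 if narrow_range else 0
        qmax = 2 ** num_bits - 1
        return qmin, qmax

    print(quant_levels(8, narrow_range=False))  # (0, 255): wide range
    print(quant_levels(8, narrow_range=True))   # (1, 255): narrow range

Dropping the lowest level leaves an odd number of steps, so a symmetric range can represent zero exactly.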
diff --git a/tensorflow/contrib/quantize/python/quantize.py b/tensorflow/contrib/quantize/python/quantize.py
index ec721afbc8..0608ab9302 100644
--- a/tensorflow/contrib/quantize/python/quantize.py
+++ b/tensorflow/contrib/quantize/python/quantize.py
@@ -267,7 +267,7 @@ def _InsertQuantOp(context,
"""Inserts a quant op between a producer op and (multiple) consumer ops.
Args:
- context: Context where producer and consumer operations are nested.
+ context: Context w,here producer and consumer operations are nested.
name: Name for the new quantization op within the context.
producer: Producer operation of the pairs where quantization will be
inserted.
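Per this docstring, _InsertQuantOp splices a quantization op between a producer and its consumers. A hand-built TF 1.x sketch of the resulting topology (the rewrite itself happens automatically on an existing graph; this only shows the wiring it produces):

    import tensorflow as tf  # TF 1.x, contemporaneous with contrib/quantize

    inputs = tf.placeholder(tf.float32, [None, 8])
    weights = tf.Variable(tf.ones([8, 4]))  # producer

    # After rewriting, the consumer reads the fake-quantized tensor
    # instead of the raw producer output.
    weights_q = tf.fake_quant_with_min_max_args(
        weights, min=-1.0, max=1.0, num_bits=8)
    outputs = tf.matmul(inputs, weights_q)  # consumer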
diff --git a/tensorflow/contrib/quantize/python/quantize_graph.py b/tensorflow/contrib/quantize/python/quantize_graph.py
index 5abdcd2475..5a3a74cec4 100644
--- a/tensorflow/contrib/quantize/python/quantize_graph.py
+++ b/tensorflow/contrib/quantize/python/quantize_graph.py
@@ -158,7 +158,7 @@ def experimental_create_training_graph(input_graph=None,
often fail.
Args:
- input_graph: The tf.Graph to be transformed, if None then defaults to the
+ input_graph: The tf.Graph to be transformed,if None then defaults to the
default graph.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
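This docstring fragment belongs to experimental_create_training_graph, which rewrites a training graph in place. A minimal usage sketch under the TF 1.x contrib API (model building elided):

    import tensorflow as tf

    graph = tf.Graph()
    with graph.as_default():
        pass  # ... build the float training model here ...

    # Insert fake-quant ops for 8-bit weights and activations; passing
    # input_graph=None would rewrite the default graph instead.
    tf.contrib.quantize.experimental_create_training_graph(
        input_graph=graph, weight_bits=8, activation_bits=8)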
diff --git a/tensorflow/contrib/quantize/python/quantize_parameterized_test.py b/tensorflow/contrib/quantize/python/quantize_parameterized_test.py
index db745aa562..0624cc878b 100644
--- a/tensorflow/contrib/quantize/python/quantize_parameterized_test.py
+++ b/tensorflow/contrib/quantize/python/quantize_parameterized_test.py
@@ -419,7 +419,7 @@ class QuantizeTest(test_util.TensorFlowTestCase):
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=scope)
- # Manually add a bypass (optional) and an activation.
+ # Manually add a bypass (optionaly) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
@@ -470,7 +470,7 @@ class QuantizeTest(test_util.TensorFlowTestCase):
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=scope)
- # Manually add a bypass (optional) and an activation.
+ # Manually add a bypass (optionaly) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
@@ -526,7 +526,7 @@ class QuantizeTest(test_util.TensorFlowTestCase):
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=scope)
- # Manually add a bypass (optional) and an activation.
+ # Manually add a bypass (optionaly) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
@@ -565,7 +565,7 @@ class QuantizeTest(test_util.TensorFlowTestCase):
stddev: Standard deviation of normal variable.
Returns:
- An initialized that initializes with a truncated normal variable.
+ An initialized that initialzes with a truncated normal variable.
"""
return init_ops.truncated_normal_initializer(stddev=stddev)
diff --git a/tensorflow/contrib/quantize/python/quantize_test.py b/tensorflow/contrib/quantize/python/quantize_test.py
index b2e5707a6d..ef59475167 100644
--- a/tensorflow/contrib/quantize/python/quantize_test.py
+++ b/tensorflow/contrib/quantize/python/quantize_test.py
@@ -144,7 +144,7 @@ class QuantizeTest(test_util.TensorFlowTestCase):
stddev: Standard deviation of normal variable.
Returns:
- An initialized that initializes with a truncated normal variable.
+ An initialized that initialzes with a truncated normal variable.
"""
return init_ops.truncated_normal_initializer(stddev=stddev)
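Both test files wrap the same one-liner, which returns an initializer object rather than a tensor. A short usage sketch under the TF 1.x variable API:

    import tensorflow as tf
    from tensorflow.python.ops import init_ops

    init = init_ops.truncated_normal_initializer(stddev=0.1)
    # Draws from a normal distribution, re-drawing any sample that falls
    # more than two standard deviations from the mean.
    w = tf.get_variable('w', shape=[3, 3], initializer=init)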