path: root/tensorflow/contrib/quantize
author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-08-16 10:51:58 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-08-16 11:02:55 -0700
commit     394db95965e1d745f08b4eeb550878ddc175af15 (patch)
tree       41fd569867479ef5a352b85f0b3dad0a5412c778 /tensorflow/contrib/quantize
parent     b2a496a2a13d02d6208a369df36b036a8e1a236b (diff)
Fixes an issue where gradients were being quantized. Because the gradients were quantized, quantization also failed for any node that produces gradients, since the _FollowedByFakeQuant method prevents any node already followed by a fake quant from being quantized properly.
PiperOrigin-RevId: 209010415
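Note on the fix: removing layer_pattern from the final OneofPattern means a bare conv/fc op no longer matches the quantization pattern on its own; the ops created by the gradient computation are exactly such bare ops, which is presumably how fake quants ended up on gradients in the first place. For context, the sketch below is a simplified, hypothetical approximation of the kind of consumer check that _FollowedByFakeQuant performs (the real method in quantize.py also skips over pass-through ops and is not reproduced here). Once a stray fake quant was attached to a gradient op, a check like this caused the rewriter to skip the node producing it:

def _followed_by_fake_quant_sketch(tensor):
  """Rough approximation: does `tensor` already feed a FakeQuant* op?"""
  fake_quant_types = {
      'FakeQuantWithMinMaxVars',
      'FakeQuantWithMinMaxVarsPerChannel',
      'FakeQuantWithMinMaxArgs',
  }
  # A tensor already consumed by a fake quant op is treated as handled, so
  # the rewriter skips the node that produces it. Stray fake quants inserted
  # on gradient ops therefore blocked quantization of their producers.
  return any(consumer.type in fake_quant_types
             for consumer in tensor.consumers())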
Diffstat (limited to 'tensorflow/contrib/quantize')
-rw-r--r--  tensorflow/contrib/quantize/python/quantize.py        3
-rw-r--r--  tensorflow/contrib/quantize/python/quantize_test.py  27
2 files changed, 1 insertion(+), 29 deletions(-)
diff --git a/tensorflow/contrib/quantize/python/quantize.py b/tensorflow/contrib/quantize/python/quantize.py
index 2ddbd73ea6..8ad5b42d18 100644
--- a/tensorflow/contrib/quantize/python/quantize.py
+++ b/tensorflow/contrib/quantize/python/quantize.py
@@ -198,7 +198,7 @@ def _FindLayersToQuantize(graph):
                |
      [post_conv_correction]
                |
-      [biasadd|folded_bias]
+       biasadd|folded_bias
                |
            [bypass]
                |
@@ -320,7 +320,6 @@ def _FindLayersToQuantize(graph):
               folded_bias_add_pattern,
               batch_norm_identity,
               bypass_pattern,
-              layer_pattern,
           ])
       ])
diff --git a/tensorflow/contrib/quantize/python/quantize_test.py b/tensorflow/contrib/quantize/python/quantize_test.py
index 212d902a3c..2369896562 100644
--- a/tensorflow/contrib/quantize/python/quantize_test.py
+++ b/tensorflow/contrib/quantize/python/quantize_test.py
@@ -194,33 +194,6 @@ class QuantizeTest(test_util.TensorFlowTestCase):
     self.assertNotIn('test/relu6', [c.name for c in consumers])

-  def testLayerActivationQuantized(self):
-    self._RunTestOverParameters(self._TestLayerActivationQuantized)
-
-  def _TestLayerActivationQuantized(self, is_training):
-    graph = ops.Graph()
-    with graph.as_default():
-      batch_size, height, width, depth = 5, 128, 128, 3
-      input1 = array_ops.zeros((batch_size, height, width, depth))
-      _ = conv2d(
-          input1,
-          32, [5, 5],
-          stride=2,
-          padding='SAME',
-          weights_initializer=self._WeightInit(0.09),
-          activation_fn=nn_ops.relu6,
-          biases_initializer=None,
-          scope='test')
-      # Ensure that both weights and output of activations are quantized
-      # when we have a conv->relu6 with no bias add
-      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
-      activation_op = graph.get_operation_by_name('test/Relu6')
-      conv_op = graph.get_operation_by_name('test/Conv2D')
-      self.assertTrue('test/weights_quant/FakeQuantWithMinMaxVars:0' in
-                      [tensor_in.name for tensor_in in conv_op.inputs])
-      self.assertTrue('FakeQuantWithMinMaxVars' in
-                      [op.type for op in activation_op.outputs[0].consumers()])
-
   def testFinalLayerQuantized(self):
     self._RunTestOverParameters(self._TestFinalLayerQuantized)