author    Raghuraman Krishnamoorthi <raghuramank@google.com>    2018-08-16 22:29:20 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-08-16 22:34:29 -0700
commit    7e9286a92e244f9cd2e351421c1a639fc57f19f1 (patch)
tree      b96a8c0c46ec2c83d5742589cd3671c995bc8bdf /tensorflow/contrib/quantize
parent    439d8c4809139b163853fe87e8c5cdaba5d832eb (diff)
Check for training ops in graph. The rewriter only works for graphs with no training ops, as described in https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/quantize. This check detects graphs that contain training ops and raises a ValueError.
PiperOrigin-RevId: 209103265
Diffstat (limited to 'tensorflow/contrib/quantize')
-rw-r--r--  tensorflow/contrib/quantize/BUILD                           2
-rw-r--r--  tensorflow/contrib/quantize/python/quantize_graph.py       53
-rw-r--r--  tensorflow/contrib/quantize/python/quantize_graph_test.py  15
3 files changed, 69 insertions(+), 1 deletion(-)
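For context, the failure mode this change guards against: once an optimizer has inserted its gradient/apply ops, the rewriter can no longer model quantization in the backward pass, so it now fails fast. A minimal sketch, assuming the TF 1.x contrib API (the model below is a made-up example, not taken from the patch):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
  x = tf.placeholder(tf.float32, [None, 4])
  w = tf.get_variable('w', [4, 1])
  loss = tf.reduce_sum(tf.square(tf.matmul(x, w)))
  # minimize() adds gradient and apply ops (e.g. ApplyGradientDescent),
  # which is exactly what the new check looks for.
  tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# With this patch the rewrite raises instead of silently mis-rewriting.
try:
  tf.contrib.quantize.create_training_graph(input_graph=g)
except ValueError as e:
  print(e)  # message names the offending op types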
diff --git a/tensorflow/contrib/quantize/BUILD b/tensorflow/contrib/quantize/BUILD
index 23363617ed..499fec4ffa 100644
--- a/tensorflow/contrib/quantize/BUILD
+++ b/tensorflow/contrib/quantize/BUILD
@@ -244,7 +244,9 @@ py_test(
"//tensorflow/python:framework_ops",
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:init_ops",
+ "//tensorflow/python:math_ops",
"//tensorflow/python:nn_ops",
"//tensorflow/python:platform_test",
+ "//tensorflow/python:training",
],
)
diff --git a/tensorflow/contrib/quantize/python/quantize_graph.py b/tensorflow/contrib/quantize/python/quantize_graph.py
index 2944f964c7..484493f1b2 100644
--- a/tensorflow/contrib/quantize/python/quantize_graph.py
+++ b/tensorflow/contrib/quantize/python/quantize_graph.py
@@ -59,6 +59,10 @@ def _create_graph(input_graph=None,
if input_graph is None:
input_graph = ops.get_default_graph()
+
+  # Raise a ValueError if the graph already contains training ops; the
+  # rewrite is only valid before gradient/training ops are added.
+ _check_for_training_ops(input_graph)
with input_graph.as_default():
fold_batch_norms.FoldBatchNorms(
input_graph,
@@ -78,6 +82,9 @@ def create_training_graph(input_graph=None, quant_delay=0):
Variables added by the rewrite get added to the global variables collection.
+  This function must be invoked prior to the insertion of gradient ops into
+  the graph, as quantization should be modeled in both the forward and
+  backward passes.
+
The graph has fake quantization ops inserted to simulate the error
introduced by quantization. Since the graph is transformed in place,
the expected behavior of previously held references to nodes and tensors may
@@ -104,7 +111,6 @@ def create_training_graph(input_graph=None, quant_delay=0):
# Currently the values below are hardcoded for mobilenetV1 on imagenet
# Please use the experimental API if you need to tune these values.
freeze_bn_delay = None
-
_create_graph(
input_graph=input_graph,
is_training=True,
@@ -141,6 +147,9 @@ def experimental_create_training_graph(input_graph=None,
scope=None):
"""Rewrites a training input_graph in place for simulated quantization.
+  This function must be invoked prior to the insertion of gradient ops into
+  the graph, as quantization should be modeled in both the forward and
+  backward passes.
+
Variables added by the rewrite get added to the global variables collection.
This function has additional experimental options not (yet) available to
@@ -226,3 +235,45 @@ def experimental_create_eval_graph(input_graph=None,
activation_bits=activation_bits,
quant_delay=quant_delay,
scope=scope)
+
+
+def _check_for_training_ops(g):
+ """Check if training ops are present in the graph.
+
+ Args:
+ g: The tf.Graph on which the check for training ops needs to be
+ performed.
+
+ Raises:
+    ValueError: If a training op is seen in the graph.
+ """
+
+ # The list here is obtained
+ # from https://www.tensorflow.org/api_docs/cc/group/training-ops
+ training_ops = frozenset([
+ 'ApplyAdagrad', 'ApplyAdagradDA', 'ApplyAdam', 'ApplyAddSign',
+ 'ApplyCenteredRMSProp', 'ApplyFtrl', 'ApplyFtrlV2',
+ 'ApplyGradientDescent', 'ApplyMomentum', 'ApplyPowerSign',
+ 'ApplyProximalAdagrad', 'ApplyProximalGradientDescent', 'ApplyRMSProp',
+ 'ResourceApplyAdadelta', 'ResourceApplyAdagrad', 'ResourceApplyAdagradDA',
+ 'ResourceApplyAdam', 'ResourceApplyAddSign',
+ 'ResourceApplyCenteredRMSProp', 'ResourceApplyFtrl',
+ 'ResourceApplyFtrlV2', 'ResourceApplyGradientDescent',
+ 'ResourceApplyMomentum', 'ResourceApplyPowerSign',
+ 'ResourceApplyProximalAdagrad', 'ResourceApplyProximalGradientDescent',
+ 'ResourceApplyRMSProp', 'ResourceSparseApplyAdadelta',
+ 'ResourceSparseApplyAdagrad', 'ResourceSparseApplyAdagradDA',
+ 'ResourceSparseApplyCenteredRMSProp', 'ResourceSparseApplyFtrl',
+ 'ResourceSparseApplyFtrlV2', 'ResourceSparseApplyMomentum',
+ 'ResourceSparseApplyProximalAdagrad',
+ 'ResourceSparseApplyProximalGradientDescent',
+ 'ResourceSparseApplyRMSProp', 'SparseApplyAdadelta', 'SparseApplyAdagrad',
+ 'SparseApplyAdagradDA', 'SparseApplyCenteredRMSProp', 'SparseApplyFtrl',
+ 'SparseApplyFtrlV2', 'SparseApplyMomentum', 'SparseApplyProximalAdagrad',
+ 'SparseApplyProximalGradientDescent', 'SparseApplyRMSProp'
+ ])
+
+ op_types = set([op.type for op in g.get_operations()])
+ train_op_list = op_types.intersection(training_ops)
+ if train_op_list:
+    raise ValueError('Training op found in graph: %s' % train_op_list)
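The docstring additions above spell out the supported ordering: rewrite first, then add gradient ops, so that the fake-quant nodes are already in place when gradients are taken. A minimal sketch of that ordering, again assuming the TF 1.x contrib API and a made-up model:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
  x = tf.placeholder(tf.float32, [None, 4])
  w = tf.get_variable('w', [4, 1])
  loss = tf.reduce_sum(tf.square(tf.matmul(x, w)))
  # Rewrite before any training ops exist; the backward pass will then
  # flow through whatever fake-quant nodes the rewriter inserted.
  tf.contrib.quantize.create_training_graph(input_graph=g)
  # Only now insert the gradient/apply ops.
  train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)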
diff --git a/tensorflow/contrib/quantize/python/quantize_graph_test.py b/tensorflow/contrib/quantize/python/quantize_graph_test.py
index 54faf582f1..e80d2183a6 100644
--- a/tensorflow/contrib/quantize/python/quantize_graph_test.py
+++ b/tensorflow/contrib/quantize/python/quantize_graph_test.py
@@ -20,10 +20,12 @@ from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import quantize_graph
+from tensorflow.python import training
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
@@ -145,6 +147,19 @@ class QuantizeGraphTest(test_util.TensorFlowTestCase):
self.assertTrue(('int64_val: %i' % quant_delay) in const_value)
self.assertTrue(quant_delay_found)
+ def testTrainingOpsCheck(self):
+ self._RunTestOverTrainingRewrites(self._TestTrainingOpsCheck)
+
+ def _TestTrainingOpsCheck(self, rewrite_fn):
+ with ops.Graph().as_default():
+ output = self._ConvLayer()
+ output_scalar = math_ops.reduce_sum(output)
+ loss = math_ops.square(output_scalar - 1)
+ opt = training.gradient_descent.GradientDescentOptimizer(0.0001)
+ opt.minimize(loss)
+ with self.assertRaisesRegexp(ValueError, 'Training op found in graph'):
+ rewrite_fn()
+
def testWeightBits(self):
self._RunTestOverExperimentalRewrites(self._TestWeightBits)