diff options
author | A. Unique TensorFlower <gardener@tensorflow.org> | 2016-09-09 09:21:29 -0800 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2016-09-09 10:31:16 -0700 |
commit | 29faf36ed2341fe50a02a1ad1329b96157d96d55 (patch) | |
tree | 10a8162d5bea048421a98cf4a33c963a0be11aa7 /tensorflow/contrib/quantization | |
parent | b7541f67c21b5a12120af0b7ac33404bd5160643 (diff) |
Switch ops in functional_ops, math_ops, state_ops, string_ops,
contrib/quantization, and contrib/layers to use C++ shape inference functions.
Implement Betainc C++ shape inference function; it's a little different from
the python one, mainly because propagating an unknown input (a_shape in this
case) when all are unknown is not correct for C++ (in case we later backwards
bind the value), since it could end up being a broadcasted scalar.
Change ReduceJoin's C++ shape inference function to match python.
Fix SymbolicGradient's shape function in C++ - there are actually more inputs
than outputs.
Change QuantizedBiasAdd to be more precise (the C++ shape fn invokes the common bias_add to compute the shape of the first output).
Change: 132688147
Diffstat (limited to 'tensorflow/contrib/quantization')
-rw-r--r-- | tensorflow/contrib/quantization/python/math_ops.py | 12 | ||||
-rw-r--r-- | tensorflow/contrib/quantization/python/nn_ops.py | 56 |
2 files changed, 5 insertions, 63 deletions
diff --git a/tensorflow/contrib/quantization/python/math_ops.py b/tensorflow/contrib/quantization/python/math_ops.py index 43c1409358..d4fabbd36b 100644 --- a/tensorflow/contrib/quantization/python/math_ops.py +++ b/tensorflow/contrib/quantization/python/math_ops.py @@ -23,16 +23,6 @@ from tensorflow.contrib.quantization.ops import gen_math_ops from tensorflow.contrib.quantization.ops.gen_math_ops import * from tensorflow.python.framework import common_shapes from tensorflow.python.framework import ops -from tensorflow.python.framework import tensor_shape -# QuantizedMatMul* ops. -@ops.RegisterShape("QuantizedMatMul") -def _QuantizedMatMulShape(op): - unused_a_min = op.inputs[2].get_shape().merge_with(tensor_shape.scalar()) - unused_a_max = op.inputs[3].get_shape().merge_with(tensor_shape.scalar()) - unused_b_min = op.inputs[4].get_shape().merge_with(tensor_shape.scalar()) - unused_b_max = op.inputs[5].get_shape().merge_with(tensor_shape.scalar()) - result = common_shapes.matmul_shape(op) - result.extend([tensor_shape.scalar(), tensor_shape.scalar()]) - return result +ops.RegisterShape("QuantizedMatMul")(common_shapes.call_cpp_shape_fn) diff --git a/tensorflow/contrib/quantization/python/nn_ops.py b/tensorflow/contrib/quantization/python/nn_ops.py index 122d93fd23..d31f1d4e68 100644 --- a/tensorflow/contrib/quantization/python/nn_ops.py +++ b/tensorflow/contrib/quantization/python/nn_ops.py @@ -23,60 +23,12 @@ from tensorflow.contrib.quantization.ops import gen_nn_ops from tensorflow.contrib.quantization.ops.gen_nn_ops import * from tensorflow.python.framework import common_shapes from tensorflow.python.framework import ops -from tensorflow.python.framework import tensor_shape - - -# QuantizedAvgPool* ops. -@ops.RegisterShape("QuantizedAvgPool") -def _QuantizedAvgPoolShape(op): - return [common_shapes.avg_pool_shape(op)[0], tensor_shape.scalar(), - tensor_shape.scalar()] - - -# QuantizedBiasAdd op. 
-@ops.RegisterShape("QuantizedBiasAdd") -def _QuantizedBiasAddShape(op): - """Returns the same shape as the input, plus min and max scalar values. - - Args: - op: Input operation. - Returns: - Shape of ops first input, plus min and max tensors. - """ - unused_input_min = op.inputs[2].get_shape().merge_with(tensor_shape.scalar()) - unused_input_max = op.inputs[3].get_shape().merge_with(tensor_shape.scalar()) - unused_bias_min = op.inputs[4].get_shape().merge_with(tensor_shape.scalar()) - unused_bias_max = op.inputs[5].get_shape().merge_with(tensor_shape.scalar()) - return [op.inputs[0].get_shape(), tensor_shape.scalar(), - tensor_shape.scalar()] - - -# QuantizedConv2D* ops. -@ops.RegisterShape("QuantizedConv2D") -def _QuantizedConv2DShape(op): - """Returns the same shape as Conv2D, plus min and max scalar values. - - Args: - op: Input operation. - Returns: - Shape of float Conv2D, plus min and max tensors. - """ - unused_input_min = op.inputs[2].get_shape().merge_with(tensor_shape.scalar()) - unused_input_max = op.inputs[3].get_shape().merge_with(tensor_shape.scalar()) - unused_filter_min = op.inputs[4].get_shape().merge_with(tensor_shape.scalar()) - unused_filter_max = op.inputs[5].get_shape().merge_with(tensor_shape.scalar()) - result = common_shapes.conv2d_shape(op) - result.extend([tensor_shape.scalar(), tensor_shape.scalar()]) - return result - - -# QuantizedMaxPool* ops. 
-@ops.RegisterShape("QuantizedMaxPool") -def _QuantizedMaxPoolShape(op): - return [common_shapes.max_pool_shape(op)[0], tensor_shape.scalar(), - tensor_shape.scalar()] +ops.RegisterShape("QuantizedAvgPool")(common_shapes.call_cpp_shape_fn) +ops.RegisterShape("QuantizedBiasAdd")(common_shapes.call_cpp_shape_fn) +ops.RegisterShape("QuantizedConv2D")(common_shapes.call_cpp_shape_fn) +ops.RegisterShape("QuantizedMaxPool")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("QuantizedRelu")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("QuantizedRelu6")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("QuantizedReluX")(common_shapes.call_cpp_shape_fn) |