path: root/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
author     A. Unique TensorFlower <gardener@tensorflow.org>    2018-06-26 06:49:27 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>     2018-06-26 06:54:19 -0700
commit     c5feedabcebe67ea6c72402832ef9fd25c560446 (patch)
tree       21c33fdd3bb8e68a29b8c6b887bec17b7502d24c /tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
parent     544436bbdf279dd4be68ad71536ea0488258aa07 (diff)
Have EnsureBiasVector create bias vectors that already have the constant value 0 and already have their shape set from the output activations shape, instead of having it create dummy placeholders and relying on PropagateFixedSizes to create the constant array.

Rationale: it was not PropagateFixedSizes's job to create constant arrays, and that broke down in a case where the bias vectors not being constant prevented FuseBinaryIntoPrecedingAffine from running.

PiperOrigin-RevId: 202120850
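For context, a minimal sketch of what the EnsureBiasVector transformation is assumed to do after this change: create the bias array up front as a constant, zero-filled float buffer whose shape is taken from the output activations, so downstream transformations see a real constant array rather than a dummy placeholder. The helper name and the way the output depth is read below are illustrative, not taken from this commit.

    // Illustrative sketch only; not part of this diff. Assumes the op's bias
    // is input 2 and the output depth is the last output dimension.
    bool CreateZeroBiasVector(Model* model, Operator* op) {
      const auto& output_array = model->GetArray(op->outputs[0]);
      // Yield until the output activations shape has been resolved.
      if (!output_array.has_shape()) {
        return false;
      }
      const Shape& output_shape = output_array.shape();
      const int output_depth =
          output_shape.dims(output_shape.dimensions_count() - 1);
      // Create a constant, zero-filled float bias array of that depth.
      auto& bias_array = model->GetArray(op->inputs[2]);
      bias_array.data_type = ArrayDataType::kFloat;
      bias_array.copy_shape(Shape({output_depth}));
      auto& float_buffer =
          bias_array.GetMutableBuffer<ArrayDataType::kFloat>();
      float_buffer.data.resize(output_depth, 0.f);
      return true;
    }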
Diffstat (limited to 'tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc')
-rw-r--r--  tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc  37
1 file changed, 0 insertions, 37 deletions
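The rationale above refers to FuseBinaryIntoPrecedingAffine requiring a constant bias. A hedged sketch of the kind of guard that was being tripped (the exact check in fuse_binary_into_preceding_affine.cc may differ; preceding_op and the input indices are assumptions):

    // Assumed guard, for illustration: fusing a binary op into a preceding
    // Conv/DepthwiseConv/FullyConnected only works when both the weights and
    // the bias are constant arrays that can be rewritten in place.
    if (!IsConstantParameterArray(*model, preceding_op->inputs[1]) ||
        !IsConstantParameterArray(*model, preceding_op->inputs[2])) {
      // With the old dummy-placeholder bias, the transformation bailed out here.
      return false;
    }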
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
index c61da203c6..01a51802d4 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
@@ -133,36 +133,7 @@ int GetOutputDepthFromWeights(const Model& model, const Operator& op) {
}
}
-bool EnsureBiasVectorShape(Model* model, Operator* op) {
- const string& weights_name = op->inputs[1];
- const auto& weights_array = model->GetArray(weights_name);
- // Yield until weights shape has been resolved.
- if (!weights_array.has_shape()) {
- return false;
- }
-
- if (op->inputs.size() < 3) {
- return false;
- }
- auto& bias_array = model->GetArray(op->inputs[2]);
- if (bias_array.has_shape()) {
- return true;
- }
-
- const int output_depth = GetOutputDepthFromWeights(*model, *op);
- bias_array.copy_shape(Shape({output_depth}));
-
- auto& float_buffer = bias_array.GetMutableBuffer<ArrayDataType::kFloat>();
- float_buffer.data.resize(output_depth, 0);
-
- return true;
-}
-
void ProcessConvOperator(Model* model, ConvOperator* op) {
- if (!EnsureBiasVectorShape(model, op)) {
- return;
- }
-
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
if (!input_array.has_shape()) {
@@ -292,10 +263,6 @@ void ProcessTransposeConvOperator(Model* model, TransposeConvOperator* op) {
}
void ProcessDepthwiseConvOperator(Model* model, DepthwiseConvOperator* op) {
- if (!EnsureBiasVectorShape(model, op)) {
- return;
- }
-
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
if (!input_array.has_shape()) {
@@ -410,10 +377,6 @@ void ProcessOpWithShapeInput(Model* model, Operator* op) {
}
void ProcessFullyConnectedOperator(Model* model, FullyConnectedOperator* op) {
- if (!EnsureBiasVectorShape(model, op)) {
- return;
- }
-
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
if (!input_array.has_shape()) {