author    A. Unique TensorFlower <gardener@tensorflow.org>    2018-06-27 14:00:52 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>     2018-06-27 14:03:31 -0700
commit 3795ec30b2833e895efa7dfe176b345d4462d235 (patch)
tree   8fff9eda18daaf2bbebb73c2488de6950daed5f1 /tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
parent 0916acf68121a3f1130593995b8e8ddab7a2af1d (diff)
Have EnsureBiasVector create bias vectors that already have the constant value 0 and already have their shape set from the output activations' shape, instead of having it create dummy placeholders and relying on PropagateFixedSizes to create the constant array.

Rationale: it was not PropagateFixedSizes's job to create constant arrays, and that broke down in a case where the bias vectors not being constant prevented FuseBinaryIntoPrecedingAffine from running.

PiperOrigin-RevId: 202365030
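For illustration, the following is a minimal standalone C++ sketch of the new behavior the commit message describes: the bias array is created as a constant zero vector sized from the output depth, rather than as an empty placeholder to be filled in by PropagateFixedSizes. The types (Shape, Array) and the function name EnsureConstantZeroBias are stand-ins invented for this example; the real definitions live in tensorflow/contrib/lite/toco/model.h, and the real transformation is the one the commit message calls EnsureBiasVector.

    #include <cstdio>
    #include <vector>

    // Stand-in types modeled loosely on toco's Shape/Array; not the
    // actual toco API.
    struct Shape {
      std::vector<int> dims;
      bool has_shape() const { return !dims.empty(); }
    };

    struct Array {
      Shape shape;
      std::vector<float> float_data;  // constant buffer, if non-empty
      bool is_constant() const { return !float_data.empty(); }
    };

    // Hypothetical sketch of the post-change behavior: create the bias
    // as a constant array of zeros whose length is the output
    // activations' depth (last dimension).
    bool EnsureConstantZeroBias(const Array& output_activations, Array* bias) {
      if (!output_activations.shape.has_shape()) {
        return false;  // yield until the output shape is known
      }
      const int output_depth = output_activations.shape.dims.back();
      bias->shape.dims = {output_depth};
      bias->float_data.assign(output_depth, 0.0f);
      return true;
    }

    int main() {
      Array activations;
      activations.shape.dims = {1, 8, 8, 16};  // NHWC; depth = 16
      Array bias;
      if (EnsureConstantZeroBias(activations, &bias)) {
        std::printf("bias is constant: %d, size: %zu\n",
                    bias.is_constant() ? 1 : 0, bias.float_data.size());
      }
    }

The point of the change is that the bias array is constant (it has a filled buffer) from the moment it is created, so downstream transformations such as FuseBinaryIntoPrecedingAffine can treat it as constant without waiting on PropagateFixedSizes.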
Diffstat (limited to 'tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc')
-rw-r--r-- tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc | 50 -
1 file changed, 0 insertions(+), 50 deletions(-)
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
index c9c9f13d2e..cee14b257f 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
@@ -120,49 +120,7 @@ void ComputeBinaryOperatorOutputSize(const Shape& input_shape_x,
CHECK(output_array->has_shape());
}
-int GetOutputDepthFromWeights(const Model& model, const Operator& op) {
- const string& weights_name = op.inputs[1];
- const auto& weights_shape = model.GetArray(weights_name).shape();
- if (op.type == OperatorType::kConv ||
- op.type == OperatorType::kFullyConnected) {
- return weights_shape.dims(0);
- } else if (op.type == OperatorType::kDepthwiseConv) {
- return weights_shape.dims(3);
- } else {
- LOG(FATAL) << "Unhandled operator type";
- }
-}
-
-bool EnsureBiasVectorShape(Model* model, Operator* op) {
- const string& weights_name = op->inputs[1];
- const auto& weights_array = model->GetArray(weights_name);
- // Yield until weights shape has been resolved.
- if (!weights_array.has_shape()) {
- return false;
- }
-
- if (op->inputs.size() < 3) {
- return false;
- }
- auto& bias_array = model->GetArray(op->inputs[2]);
- if (bias_array.has_shape()) {
- return true;
- }
-
- const int output_depth = GetOutputDepthFromWeights(*model, *op);
- bias_array.copy_shape(Shape({output_depth}));
-
- auto& float_buffer = bias_array.GetMutableBuffer<ArrayDataType::kFloat>();
- float_buffer.data.resize(output_depth, 0);
-
- return true;
-}
-
void ProcessConvOperator(Model* model, ConvOperator* op) {
- if (!EnsureBiasVectorShape(model, op)) {
- return;
- }
-
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
if (!input_array.has_shape()) {
@@ -292,10 +250,6 @@ void ProcessTransposeConvOperator(Model* model, TransposeConvOperator* op) {
}
void ProcessDepthwiseConvOperator(Model* model, DepthwiseConvOperator* op) {
- if (!EnsureBiasVectorShape(model, op)) {
- return;
- }
-
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
if (!input_array.has_shape()) {
@@ -410,10 +364,6 @@ void ProcessOpWithShapeInput(Model* model, Operator* op) {
}
void ProcessFullyConnectedOperator(Model* model, FullyConnectedOperator* op) {
- if (!EnsureBiasVectorShape(model, op)) {
- return;
- }
-
const auto& input_array = model->GetArray(op->inputs[0]);
// Yield until input dims have been resolved.
if (!input_array.has_shape()) {