author    Patrick Nguyen <drpng@google.com>  2018-08-09 18:39:19 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-08-09 18:48:39 -0700
commit    367903193f44b16e79981f2f29f4f2ceea8b6117 (patch)
tree      14f93747f624e7ca7edbad8d96afb67db6b81596 /tensorflow/tools/graph_transforms
parent    c36b51458563e9f72c0d37ddf84206f639e13f72 (diff)
Automated rollback of commit ee38f86972b13f3eb90032e93b305e822152bf62
PiperOrigin-RevId: 208146417
Diffstat (limited to 'tensorflow/tools/graph_transforms')
-rw-r--r--  tensorflow/tools/graph_transforms/fold_batch_norms.cc      20
-rw-r--r--  tensorflow/tools/graph_transforms/fold_old_batch_norms.cc  67
2 files changed, 43 insertions, 44 deletions
diff --git a/tensorflow/tools/graph_transforms/fold_batch_norms.cc b/tensorflow/tools/graph_transforms/fold_batch_norms.cc
index 39f682e8b0..975b17380f 100644
--- a/tensorflow/tools/graph_transforms/fold_batch_norms.cc
+++ b/tensorflow/tools/graph_transforms/fold_batch_norms.cc
@@ -38,7 +38,7 @@ Status FoldBatchNorms(const GraphDef& input_graph_def,
input_graph_def, // clang-format off
{"Mul", // mul_node
{
- {"Conv2D|MatMul|DepthwiseConv2dNative", // conv_node
+ {"Conv2D|MatMul", // conv_node
{
{"*"}, // input_node
{"Const"}, // weights_node
@@ -73,10 +73,7 @@ Status FoldBatchNorms(const GraphDef& input_graph_def,

// Make sure all the inputs really are vectors, with as many entries as
// there are columns in the weights.
- const int weights_cols_index =
- conv_node.op() == "Conv2D"
- ? 3
- : (conv_node.op() == "DepthwiseConv2dNative" ? 2 : 1);
+ const int weights_cols_index = conv_node.op() == "Conv2D" ? 3 : 1;
const int64 weights_cols = weights.shape().dim_size(weights_cols_index);
if ((mul_values.shape().dims() != 1) ||
(mul_values.shape().dim_size(0) != weights_cols)) {
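Note: the weights_cols_index values in the hunk above follow TensorFlow's documented weights layouts. A minimal standalone sketch of the mapping (plain C++, no TensorFlow dependency; the helper name is illustrative, not part of this file):

    #include <cassert>
    #include <string>

    // Conv2D filters are laid out [height, width, in_channels, out_channels],
    // so the scaled output channels live in dimension 3. MatMul weights are
    // [in_size, out_size], so dimension 1. DepthwiseConv2dNative filters are
    // [height, width, in_channels, channel_multiplier]; the branch removed by
    // this rollback used dimension 2 for them.
    int WeightsColsIndex(const std::string& op) {
      if (op == "Conv2D") return 3;
      if (op == "DepthwiseConv2dNative") return 2;
      return 1;  // MatMul
    }

    int main() {
      assert(WeightsColsIndex("Conv2D") == 3);
      assert(WeightsColsIndex("MatMul") == 1);
      return 0;
    }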
@@ -86,13 +83,14 @@ Status FoldBatchNorms(const GraphDef& input_graph_def,
}

// Multiply the original weights by the scale vector.
- auto weights_vector = weights.flat<float>();
+ auto weights_matrix = weights.flat_inner_dims<float>();
Tensor scaled_weights(DT_FLOAT, weights.shape());
- auto scaled_weights_vector = scaled_weights.flat<float>();
- for (int64 row = 0; row < weights_vector.dimension(0); ++row) {
- scaled_weights_vector(row) =
- weights_vector(row) *
- mul_values.flat<float>()(row % weights_cols);
+ auto scaled_weights_matrix = scaled_weights.flat_inner_dims<float>();
+ for (int64 row = 0; row < weights_matrix.dimension(0); ++row) {
+ for (int64 col = 0; col < weights_cols; ++col) {
+ scaled_weights_matrix(row, col) =
+ weights_matrix(row, col) * mul_values.flat<float>()(col);
+ }
}

// Construct the new nodes.
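Note: the restored loop above views the weights through flat_inner_dims<float>(), which collapses every dimension except the innermost into rows, while the removed loop recovered the column with a modulo on the flat index. When the scaled dimension is innermost, as it is for Conv2D and MatMul weights, the two forms agree. A minimal sketch of that equivalence (plain C++, no TensorFlow dependency; names are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      const std::int64_t rows = 6, cols = 4;  // rows = product of outer dims
      std::vector<float> weights(rows * cols);
      std::vector<float> scale(cols);
      for (std::int64_t i = 0; i < rows * cols; ++i) weights[i] = 0.5f * i;
      for (std::int64_t c = 0; c < cols; ++c) scale[c] = 1.0f + c;

      // Removed form: one flat loop, channel recovered with a modulo.
      std::vector<float> flat_scaled(rows * cols);
      for (std::int64_t i = 0; i < rows * cols; ++i)
        flat_scaled[i] = weights[i] * scale[i % cols];

      // Restored form: explicit (row, col) loops over the collapsed matrix view.
      std::vector<float> matrix_scaled(rows * cols);
      for (std::int64_t r = 0; r < rows; ++r)
        for (std::int64_t c = 0; c < cols; ++c)
          matrix_scaled[r * cols + c] = weights[r * cols + c] * scale[c];

      // The two agree whenever the scaled dimension is the innermost one.
      for (std::int64_t i = 0; i < rows * cols; ++i)
        assert(flat_scaled[i] == matrix_scaled[i]);
      return 0;
    }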
diff --git a/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc b/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
index a35d64b789..156636ab82 100644
--- a/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
+++ b/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
@@ -110,23 +110,24 @@ Status FuseScaleOffsetToConvWeights(const std::vector<float>& scale_values,
const string& conv_output_name,
std::vector<NodeDef>* new_nodes) {
const NodeDef& conv_node = conv_node_match.node;
- // CHECK_EQ("Conv2D", conv_node.op());
+ CHECK_EQ("Conv2D", conv_node.op());
const NodeDef& input_node = conv_node_match.inputs[0].node;
const NodeDef& weights_node = conv_node_match.inputs[1].node;
CHECK_EQ("Const", weights_node.op());
Tensor weights = GetNodeTensorAttr(weights_node, "value");
- const int weights_cols_idx = conv_node.op() == "Conv2D" ? 3 : 2;
- const int64 weights_cols = weights.shape().dim_size(weights_cols_idx);
+ const int64 weights_cols = weights.shape().dim_size(3);
CHECK_EQ(weights_cols, scale_values.size());

// Multiply the original weights by the scale vector.
- auto weights_vector = weights.flat<float>();
+ auto weights_matrix = weights.flat_inner_dims<float>();
Tensor scaled_weights(DT_FLOAT, weights.shape());
- auto scaled_weights_vector = scaled_weights.flat<float>();
- for (int64 row = 0; row < weights_vector.dimension(0); ++row) {
- scaled_weights_vector(row) =
- weights_vector(row) * scale_values[row % weights_cols];
+ auto scaled_weights_matrix = scaled_weights.flat_inner_dims<float>();
+ for (int64 row = 0; row < weights_matrix.dimension(0); ++row) {
+ for (int64 col = 0; col < weights_cols; ++col) {
+ scaled_weights_matrix(row, col) =
+ weights_matrix(row, col) * scale_values[col];
+ }
}

// Figure out the remaining bias to add on.
Tensor bias_offset(DT_FLOAT, {weights_cols});
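Note: for context on what this function fuses, the scale_values and offset_values it receives come from rewriting batch normalization, y = gamma * (x - mean) / sqrt(variance + epsilon) + beta, into the linear form y = scale * x + offset. A sketch of that per-channel algebra under those usual definitions (the helper name is illustrative, not from this file):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Per channel c:
    //   scale[c]  = gamma[c] / sqrt(variance[c] + epsilon)
    //   offset[c] = beta[c] - mean[c] * scale[c]
    void ComputeScaleAndOffset(const std::vector<float>& gamma,
                               const std::vector<float>& variance,
                               const std::vector<float>& mean,
                               const std::vector<float>& beta, float epsilon,
                               std::vector<float>* scale,
                               std::vector<float>* offset) {
      scale->resize(gamma.size());
      offset->resize(gamma.size());
      for (std::size_t c = 0; c < gamma.size(); ++c) {
        (*scale)[c] = gamma[c] / std::sqrt(variance[c] + epsilon);
        (*offset)[c] = beta[c] - mean[c] * (*scale)[c];
      }
    }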
@@ -292,7 +293,7 @@ Status FoldOldBatchNorms(const GraphDef& input_graph_def,
current_graph_def, // clang-format off
{"BatchNormWithGlobalNormalization|FusedBatchNorm", // batch_norm_node
{
- {"Conv2D|DepthwiseConv2dNative", // conv_node
+ {"Conv2D", // conv_node
{
{"*"}, // input_node
{"Const"}, // weights_node
@@ -321,24 +322,24 @@ Status FoldOldBatchNorms(const GraphDef& input_graph_def,
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
current_graph_def, // clang-format off
- {"BatchNormWithGlobalNormalization|FusedBatchNorm", // batch_norm_node
+ {"BatchNormWithGlobalNormalization|FusedBatchNorm", // batch_norm_node
{
- {"BatchToSpaceND", // batch_to_space_node
+ {"BatchToSpaceND", // batch_to_space_node
{
- {"Conv2D|DepthwiseConv2dNative", // conv_node
+ {"Conv2D", // conv_node
{
- {"*"}, // input_node
- {"Const"}, // weights_node
+ {"*"}, // input_node
+ {"Const"}, // weights_node
}
},
- {"Const"}, // block_shape
- {"Const"}, // crops
+ {"Const"}, // block_shape
+ {"Const"}, // crops
}
},
- {"Const"}, // mean_node
- {"Const"}, // variance_node
- {"Const"}, // beta_node
- {"Const"}, // gamma_node
+ {"Const"}, // mean_node
+ {"Const"}, // variance_node
+ {"Const"}, // beta_node
+ {"Const"}, // gamma_node
}
}, // clang-format on
[&did_graph_change](const NodeMatch& match,
@@ -359,29 +360,29 @@ Status FoldOldBatchNorms(const GraphDef& input_graph_def,
// Replace BatchNorm with concat as input.
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
current_graph_def, // clang-format off
- {"BatchNormWithGlobalNormalization|FusedBatchNorm", // batch_norm_node
+ {"BatchNormWithGlobalNormalization|FusedBatchNorm", // batch_norm_node
{
- {"ConcatV2|Concat", // concat two conv2d.
+ {"ConcatV2|Concat", // concat two conv2d.
{
- {"Conv2D|DepthwiseConv2dNative", // conv_node
+ {"Conv2D", // conv_node
{
- {"*"}, // input_node
- {"Const"}, // weights_node
+ {"*"}, // input_node
+ {"Const"}, // weights_node
}
},
- {"Conv2D|DepthwiseConv2dNative", // conv_node
+ {"Conv2D", // conv_node
{
- {"*"}, // input_node
- {"Const"}, // weights_node
+ {"*"}, // input_node
+ {"Const"}, // weights_node
}
},
- {"Const"}, // axis
+ {"Const"}, // axis
},
},
- {"Const"}, // mean_node
- {"Const"}, // variance_node
- {"Const"}, // beta_node
- {"Const"}, // gamma_node
+ {"Const"}, // mean_node
+ {"Const"}, // variance_node
+ {"Const"}, // beta_node
+ {"Const"}, // gamma_node
}
}, // clang-format on
[&did_graph_change](const NodeMatch& match,
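Note: the brace literals matched throughout this diff describe recursive op-type patterns. Each entry names an op alternation ("|"-separated, with "*" matching any op) and lists the patterns that the node's inputs must match, in order. A simplified standalone model of that structure, mirroring the first restored pattern (the real type is graph_transforms' OpTypePattern; this sketch and its names are illustrative):

    #include <string>
    #include <vector>

    struct OpPattern {
      std::string op;                 // "|"-separated op types, or "*" for any op
      std::vector<OpPattern> inputs;  // patterns the node's inputs must match
    };

    // Post-rollback pattern from FoldOldBatchNorms: a batch-norm node whose
    // first input is a Conv2D fed by any node and Const weights, followed by
    // the four Const parameters, in the same order as the diff's comments.
    const OpPattern kBatchNormPattern{
        "BatchNormWithGlobalNormalization|FusedBatchNorm",
        {
            {"Conv2D", {{"*", {}}, {"Const", {}}}},  // conv(input_node, weights_node)
            {"Const", {}},                           // mean_node
            {"Const", {}},                           // variance_node
            {"Const", {}},                           // beta_node
            {"Const", {}},                           // gamma_node
        }};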