path: root/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-07-17 12:04:04 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>   2018-07-17 12:07:13 -0700
commit 9b12cb84292d23522c1c3f75700d97d9f9af8abd (patch)
tree   2806fabac9dc13e3d29f496daf15e01f1a6ab22e /tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
parent 9d126b26ff219d9258a78832ead0bb272b898bf0 (diff)
toco thinks Stack is the same as Pack.
Stack is not Pack. tf.stack() yields Pack ops. Pack ops stack tensors. Stack ops
manipulate the TF runtime stack. This cl unaliases "Stack" and "Pack" ops in toco,
and renames most things that refer to "Stack" ops to "Pack" to be consistent
across the codebase.

In summary: Stack is whack. 'Stack' should be 'Pack'. Hack 'Stack's into 'Pack's
like a maniac. This keeps 'Stack's from wracking runtime graphs. (We apologize
for the fault in the change description. Those responsible have been...er...sacked).

PiperOrigin-RevId: 204951155
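For context, a minimal sketch of the distinction the message describes, assuming
TF 1.x graph mode (not part of this change): tf.stack() emits a graph node whose
op type is Pack, while the runtime-stack ops are a separate family.

    import tensorflow as tf  # assumes TF 1.x graph mode

    a = tf.constant([1, 2])
    b = tf.constant([3, 4])
    c = tf.stack([a, b])  # stacks the tensors along a new axis 0
    print(c.op.type)      # prints 'Pack', not 'Stack'
    print(c.shape)        # prints (2, 2)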
Diffstat (limited to 'tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc')
-rw-r--r-- tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc | 26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
index f422e3a9c7..5e2ba0eca7 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
@@ -1192,7 +1192,7 @@ void ProcessShapeOperator(Model* model, TensorFlowShapeOperator* op) {
output_shape->ReplaceDims({input_array.shape().dimensions_count()});
}
-void ProcessStackOperator(Model* model, StackOperator* op) {
+void ProcessPackOperator(Model* model, PackOperator* op) {
CHECK_GE(op->inputs.size(), 1);
CHECK_EQ(op->outputs.size(), 1);
auto& output_array = model->GetArray(op->outputs[0]);
@@ -1201,7 +1201,7 @@ void ProcessStackOperator(Model* model, StackOperator* op) {
return;
}
- std::unique_ptr<Shape> stacked_shape;
+ std::unique_ptr<Shape> packed_shape;
for (const auto& input : op->inputs) {
const auto& input_array = model->GetArray(input);
if (!input_array.has_shape()) {
@@ -1210,23 +1210,23 @@ void ProcessStackOperator(Model* model, StackOperator* op) {
}
Shape shape = input_array.shape();
- if (!stacked_shape) {
- stacked_shape.reset(new Shape(shape));
+ if (!packed_shape) {
+ packed_shape.reset(new Shape(shape));
} else {
- CHECK(*stacked_shape == shape) << "All input arrays to Stack operators "
- "must have the same shape. Input \""
- << input << "\" is different.";
+ CHECK(*packed_shape == shape) << "All input arrays to Pack operators "
+ "must have the same shape. Input \""
+ << input << "\" is different.";
}
}
int axis = op->axis;
if (axis < 0) {
// Handle negative axis
- axis += stacked_shape->dims().size() + 1;
+ axis += packed_shape->dims().size() + 1;
}
- stacked_shape->mutable_dims()->insert(
- stacked_shape->mutable_dims()->begin() + axis, op->inputs.size());
- output_array.copy_shape(*stacked_shape);
+ packed_shape->mutable_dims()->insert(
+ packed_shape->mutable_dims()->begin() + axis, op->inputs.size());
+ output_array.copy_shape(*packed_shape);
}
void ProcessStridedSliceOperator(Model* model, StridedSliceOperator* op) {
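The hunk above encodes a simple shape rule: every input to Pack must have the
same shape, and the output inserts a new dimension of size op->inputs.size() at
axis, where a negative axis resolves against the output rank (input rank + 1).
A minimal Python sketch of that rule, using a hypothetical helper name:

    def pack_output_shape(input_shape, num_inputs, axis):
        # All inputs share input_shape; the output gains one new
        # dimension of size num_inputs at position `axis`.
        if axis < 0:
            axis += len(input_shape) + 1  # resolve against the output rank
        dims = list(input_shape)
        dims.insert(axis, num_inputs)
        return dims

    # Packing three [2, 3] tensors along axis 0 yields [3, 2, 3];
    # axis=-1 resolves to the end and appends instead: [2, 3, 3].
    assert pack_output_shape([2, 3], 3, 0) == [3, 2, 3]
    assert pack_output_shape([2, 3], 3, -1) == [2, 3, 3]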
@@ -1659,8 +1659,8 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
case OperatorType::kShape:
ProcessShapeOperator(model, static_cast<TensorFlowShapeOperator*>(op));
break;
- case OperatorType::kStack:
- ProcessStackOperator(model, static_cast<StackOperator*>(op));
+ case OperatorType::kPack:
+ ProcessPackOperator(model, static_cast<PackOperator*>(op));
break;
case OperatorType::kReorderAxes:
ProcessReorderAxesOperator(model, static_cast<ReorderAxesOperator*>(op));