author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-06-20 15:00:44 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-06-20 15:06:02 -0700
commit    cbbffe5f646c940723247d595d33e2e87a3c3b27 (patch)
tree      2672e5db1ceea946530748bc9414554b3016e4ed /tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
parent    eacbaabf6d0983d61c99e1bb17658cd80a24f1ee (diff)
Fix operator names.
PiperOrigin-RevId: 201422566
Diffstat (limited to 'tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc')
-rw-r--r--  tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc | 60
1 file changed, 30 insertions(+), 30 deletions(-)
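
For context: this change is a mechanical rename of toco's OperatorType enumerators, dropping the redundant kTensorFlow prefix from operators that originate in TensorFlow. Where the shorter names could collide in meaning (reduction Min/Max versus element-wise Minimum/Maximum), the diff below disambiguates with comments instead. A minimal sketch of the naming pattern, using a hypothetical, heavily abbreviated enum (not the actual toco declaration, which lists many more operators):

    #include <cstdint>

    // Hypothetical, abbreviated sketch of the rename pattern only.
    enum class OperatorType : uint8_t {
      // Before: kTensorFlowMin, kTensorFlowMax, kTensorFlowReshape, ...
      // After: the prefix is dropped, and ambiguous names are told apart
      // by comments rather than by the prefix.
      kMin,      // Reduction Min
      kMax,      // Reduction Max
      kMinimum,  // Element-wise Minimum
      kMaximum,  // Element-wise Maximum
      kReshape,
      kUnsupported,
    };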
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
index beda187f13..c61da203c6 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
@@ -572,11 +572,11 @@ void ProcessAddNOperator(Model* model, Operator* op) {
 bool KeepDims(const Operator& op) {
   switch (op.type) {
-    case OperatorType::kTensorFlowMin:
+    case OperatorType::kMin:  // Reduction Min
       return static_cast<const TensorFlowMinOperator&>(op).keep_dims;
-    case OperatorType::kTensorFlowMax:
+    case OperatorType::kMax:  // Reduction Max
       return static_cast<const TensorFlowMaxOperator&>(op).keep_dims;
-    case OperatorType::kTensorFlowSum:
+    case OperatorType::kSum:
       return static_cast<const TensorFlowSumOperator&>(op).keep_dims;
     case OperatorType::kMean:
       return static_cast<const MeanOperator&>(op).keep_dims;
@@ -1577,14 +1577,14 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
     case OperatorType::kLogistic:
     case OperatorType::kTanh:
     case OperatorType::kLocalResponseNormalization:
-    case OperatorType::kTensorFlowIdentity:
+    case OperatorType::kIdentity:
     case OperatorType::kFakeQuant:
     case OperatorType::kNeg:
-    case OperatorType::kTensorFlowRsqrt:
-    case OperatorType::kTensorFlowSqrt:
-    case OperatorType::kTensorFlowSquare:
-    case OperatorType::kTensorFlowAll:
-    case OperatorType::kTensorFlowAssert:
+    case OperatorType::kRsqrt:
+    case OperatorType::kSqrt:
+    case OperatorType::kSquare:
+    case OperatorType::kAll:
+    case OperatorType::kAssert:
     case OperatorType::kCast:
     case OperatorType::kFloor:
     case OperatorType::kExp:
@@ -1603,14 +1603,14 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
     case OperatorType::kDiv:
     case OperatorType::kFloorDiv:
     case OperatorType::kFloorMod:
-    case OperatorType::kTensorFlowLess:
-    case OperatorType::kTensorFlowLessEqual:
-    case OperatorType::kTensorFlowGreater:
-    case OperatorType::kTensorFlowMaximum:
-    case OperatorType::kTensorFlowMinimum:
-    case OperatorType::kTensorFlowGreaterEqual:
-    case OperatorType::kTensorFlowEqual:
-    case OperatorType::kTensorFlowNotEqual:
+    case OperatorType::kLess:
+    case OperatorType::kLessEqual:
+    case OperatorType::kGreater:
+    case OperatorType::kMaximum:  // Element-wise Maximum
+    case OperatorType::kMinimum:  // Element-wise Minimum
+    case OperatorType::kGreaterEqual:
+    case OperatorType::kEqual:
+    case OperatorType::kNotEqual:
       ProcessSimpleBinaryOperator(model, op);
       break;
     case OperatorType::kAddN:
@@ -1643,7 +1643,7 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
       ProcessFullyConnectedOperator(model,
                                     static_cast<FullyConnectedOperator*>(op));
       break;
-    case OperatorType::kTensorFlowReshape:
+    case OperatorType::kReshape:
       ProcessTensorFlowReshapeOperator(
           model, static_cast<TensorFlowReshapeOperator*>(op));
       break;
@@ -1656,9 +1656,9 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
     case OperatorType::kL2Pool:
       ProcessL2PoolOperator(model, static_cast<L2PoolOperator*>(op));
       break;
-    case OperatorType::kTensorFlowMin:
-    case OperatorType::kTensorFlowMax:
-    case OperatorType::kTensorFlowSum:
+    case OperatorType::kMin:  // Reduction Min
+    case OperatorType::kMax:  // Reduction Max
+    case OperatorType::kSum:
     case OperatorType::kMean:
       ProcessTensorFlowReductionOperator(model, op);
       break;
@@ -1669,26 +1669,26 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
       ProcessSliceOperator(model, static_cast<SliceOperator*>(op));
       break;
-    case OperatorType::kTensorFlowSwitch:
+    case OperatorType::kSwitch:
       // We can't know the sizes of the outputs until we have resolved the
       // predicate, and once we have resolved the predicate, the whole
       // Switch node will get resolved away.
       // See ResolveTensorFlowSwitch.
       break;
-    case OperatorType::kTensorFlowMerge:
+    case OperatorType::kMerge:
       // No need to bother resolving TensorFlow Merge ops: other graph
       // transformations will remove them anyway.
       // See ResolveTensorFlowMerge.
       break;
-    case OperatorType::kTensorFlowSplit:
+    case OperatorType::kSplit:
       ProcessTensorFlowSplitOperator(model,
                                      static_cast<TensorFlowSplitOperator*>(op));
       break;
     case OperatorType::kSqueeze:
       ProcessSqueezeOperator(model, static_cast<SqueezeOperator*>(op));
       break;
-    case OperatorType::kTensorFlowConcat:
-    case OperatorType::kTensorFlowConcatV2:
+    case OperatorType::kConcat:
+    case OperatorType::kConcatV2:
       // Unimplemented, hopefully another graph transformation will
       // drop it or rewrite it. Concretely, either ResolveTensorFlowConcat
       // will resolve this node to a DepthConcatenation, or else we have
@@ -1704,7 +1704,7 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
     case OperatorType::kRank:
       ProcessRankOperator(model, static_cast<RankOperator*>(op));
       break;
-    case OperatorType::kTensorFlowShape:
+    case OperatorType::kShape:
       ProcessShapeOperator(model, static_cast<TensorFlowShapeOperator*>(op));
       break;
     case OperatorType::kStack:
@@ -1725,7 +1725,7 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
       ProcessLstmCellOperator(model, static_cast<LstmCellOperator*>(op));
       break;
     case OperatorType::kBatchMatMul:
-    case OperatorType::kTensorFlowMatMul:
+    case OperatorType::kMatMul:
       // MatMul operators are converted to FullyConnected, after which their
       // shapes are propagated.
       break;
@@ -1750,7 +1750,7 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
     case OperatorType::kArgMax:
       ProcessArgMaxOperator(model, static_cast<ArgMaxOperator*>(op));
       break;
-    case OperatorType::kTensorFlowUnsupported:
+    case OperatorType::kUnsupported:
       break;
     case OperatorType::kSvdf:
       ProcessSvdfOperator(model, static_cast<SvdfOperator*>(op));
@@ -1772,7 +1772,7 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
       ProcessSparseToDenseOperator(model,
                                    static_cast<SparseToDenseOperator*>(op));
       break;
-    case OperatorType::kTensorFlowTile:
+    case OperatorType::kTile:
       ProcessTileOperator(model, static_cast<TensorFlowTileOperator*>(op));
       break;
     default:
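
Note on the reduction cases above: kMin, kMax, kSum, and kMean all route to ProcessTensorFlowReductionOperator, which consults the KeepDims helper changed in the first hunk. As a rough illustration of what keep_dims means for shape propagation (a simplified sketch, not toco's actual implementation, which also handles reductions with no explicit axes): each reduced axis either collapses to size 1 or is dropped from the output shape entirely.

    #include <cstdint>
    #include <set>
    #include <vector>

    // Simplified sketch of reduction shape propagation.
    std::vector<int64_t> ReducedShape(const std::vector<int64_t>& input_shape,
                                      const std::set<int>& reduce_axes,
                                      bool keep_dims) {
      std::vector<int64_t> output_shape;
      for (int axis = 0; axis < static_cast<int>(input_shape.size()); ++axis) {
        if (reduce_axes.count(axis) == 0) {
          output_shape.push_back(input_shape[axis]);  // Axis not reduced.
        } else if (keep_dims) {
          output_shape.push_back(1);  // Reduced axis kept as size 1.
        }
        // Otherwise the reduced axis is omitted from the output.
      }
      return output_shape;
    }

    // E.g. input {2, 3, 4} reduced over axis 1 yields {2, 1, 4} with
    // keep_dims, and {2, 4} without.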