-rw-r--r--  tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h                73
-rw-r--r--  tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h                61
-rw-r--r--  tensorflow/contrib/lite/toco/export_tensorflow.cc                                  36
-rw-r--r--  tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc    5
-rw-r--r--  tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc       116
-rw-r--r--  tensorflow/contrib/lite/toco/import_tensorflow.cc                                  86
-rw-r--r--  tensorflow/contrib/lite/toco/model.h                                               16
7 files changed, 363 insertions, 30 deletions
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
index 3866f86d38..f1937228f6 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
@@ -768,6 +768,7 @@ inline void DilatedConv(const float* input_data, const Dims<4>& input_dims,
float output_activation_max, float* output_data,
const Dims<4>& output_dims, float* im2col_data,
const Dims<4>& im2col_dims) {
+ gemmlowp::ScopedProfilingLabel label("DilatedConv");
// This is a copy of the reference Conv implementation. We do not currently
// have an optimized path for dilation.
(void)im2col_data; // only used in optimized code.
@@ -4725,6 +4726,78 @@ void Transpose(const T* input, const Dims<4>& input_dims, T* output,
}
}
+inline void TransposeConv(const float* input_data, const Dims<4>& input_dims,
+ const float* filter_data, const Dims<4>& filter_dims,
+ int stride_width, int stride_height, int pad_width,
+ int pad_height, float* output_data,
+ const Dims<4>& output_dims) {
+ gemmlowp::ScopedProfilingLabel label("TransposeConv");
+ // THIS FUNCTION IS A COPY FROM reference_ops.h.
+ // To optimize, start by using the conv code with transposed weights for the
+ // case of stride_height = stride_width = 1.
+ const int batches = MatchingArraySize(input_dims, 3, output_dims, 3);
+ const int input_depth = MatchingArraySize(input_dims, 0, filter_dims, 3);
+ const int output_depth = MatchingArraySize(filter_dims, 0, output_dims, 0);
+ const int input_height = ArraySize(input_dims, 2);
+ const int input_width = ArraySize(input_dims, 1);
+ const int filter_height = ArraySize(filter_dims, 2);
+ const int filter_width = ArraySize(filter_dims, 1);
+ const int output_height = ArraySize(output_dims, 2);
+ const int output_width = ArraySize(output_dims, 1);
+
+ // Although transpose convolution simplifies to convolution with transposed
+ // weights for strides of 1, non-unitary striding complicates matters. To
+ // keep this reference implementation as clear as possible, we use a "scatter"
+ // access pattern, where we loop through all the input elements, computing
+ // their influence on the output, rather than looping through the output
+ // elements in the typical "gather" access pattern of a conv. We therefore
+ // must initialize the output array to zero.
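+ // For example, the input element at (in_y, in_x) contributes to output rows
+ // [in_y * stride_height - pad_height, in_y * stride_height - pad_height +
+ // filter_height) and the analogous columns, so whenever the filter extent
+ // exceeds the stride, neighboring input elements accumulate into overlapping
+ // output regions.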
+ for (int batch = 0; batch < batches; ++batch) {
+ for (int out_y = 0; out_y < output_height; ++out_y) {
+ for (int out_x = 0; out_x < output_width; ++out_x) {
+ for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
+ output_data[Offset(output_dims, out_channel, out_x, out_y, batch)] =
+ 0.0f;
+ }
+ }
+ }
+ }
+
+ // Loop through input elements one at a time.
+ for (int batch = 0; batch < batches; ++batch) {
+ for (int in_y = 0; in_y < input_height; ++in_y) {
+ for (int in_x = 0; in_x < input_width; ++in_x) {
+ for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
+ // Loop through the output elements it will influence
+ const int out_x_origin = (in_x * stride_width) - pad_width;
+ const int out_y_origin = (in_y * stride_height) - pad_height;
+ for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
+ for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
+ for (int out_channel = 0; out_channel < output_depth;
+ ++out_channel) {
+ // Compute output element location
+ const int out_x = out_x_origin + filter_x;
+ const int out_y = out_y_origin + filter_y;
+ // We cannot accumulate out of bounds
+ if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) &&
+ (out_y < output_height)) {
+ float input_value = input_data[Offset(input_dims, in_channel,
+ in_x, in_y, batch)];
+ float filter_value =
+ filter_data[Offset(filter_dims, out_channel, filter_x,
+ filter_y, in_channel)];
+ output_data[Offset(output_dims, out_channel, out_x, out_y,
+ batch)] += input_value * filter_value;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
} // namespace optimized_ops
} // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 53de21697b..84f6cf6e4f 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -3084,6 +3084,67 @@ void Transpose(const T* input, const Dims<4>& input_dims, T* output,
}
}
+inline void TransposeConv(const float* input_data, const Dims<4>& input_dims,
+ const float* filter_data, const Dims<4>& filter_dims,
+ int stride_width, int stride_height, int pad_width,
+ int pad_height, float* output_data,
+ const Dims<4>& output_dims) {
+ const int batches = MatchingArraySize(input_dims, 3, output_dims, 3);
+ const int input_depth = MatchingArraySize(input_dims, 0, filter_dims, 3);
+ const int output_depth = MatchingArraySize(filter_dims, 0, output_dims, 0);
+ const int input_height = ArraySize(input_dims, 2);
+ const int input_width = ArraySize(input_dims, 1);
+ const int filter_height = ArraySize(filter_dims, 2);
+ const int filter_width = ArraySize(filter_dims, 1);
+ const int output_height = ArraySize(output_dims, 2);
+ const int output_width = ArraySize(output_dims, 1);
+
+ // Although transpose convolution simplifies to convolution with transposed
+ // weights for strides of 1, non-unitary striding complicates matters. To
+ // keep this reference implementation as clear as possible, we use a "scatter"
+ // access pattern, where we loop through all the input elements, computing
+ // their influence on the output, rather than looping through the output
+ // elements in the typical "gather" access pattern of a conv. We therefore
+ // must initialize the output array to zero.
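+ // For example, the input element at (in_y, in_x) contributes to output rows
+ // [in_y * stride_height - pad_height, in_y * stride_height - pad_height +
+ // filter_height) and the analogous columns, so whenever the filter extent
+ // exceeds the stride, neighboring input elements accumulate into overlapping
+ // output regions.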
+ for (int i = 0; i < RequiredBufferSizeForDims(output_dims); i++) {
+ output_data[i] = 0.0f;
+ }
+
+ // Loop through input elements one at a time.
+ for (int batch = 0; batch < batches; ++batch) {
+ for (int in_y = 0; in_y < input_height; ++in_y) {
+ for (int in_x = 0; in_x < input_width; ++in_x) {
+ for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
+ // Loop through the output elements it will influence
+ const int out_x_origin = (in_x * stride_width) - pad_width;
+ const int out_y_origin = (in_y * stride_height) - pad_height;
+ for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
+ for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
+ for (int out_channel = 0; out_channel < output_depth;
+ ++out_channel) {
+ // Compute output element location
+ const int out_x = out_x_origin + filter_x;
+ const int out_y = out_y_origin + filter_y;
+ // We cannot accumulate out of bounds
+ if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) &&
+ (out_y < output_height)) {
+ float input_value = input_data[Offset(input_dims, in_channel,
+ in_x, in_y, batch)];
+ float filter_value =
+ filter_data[Offset(filter_dims, out_channel, filter_x,
+ filter_y, in_channel)];
+ output_data[Offset(output_dims, out_channel, out_x, out_y,
+ batch)] += input_value * filter_value;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
} // namespace reference_ops
} // namespace tflite
diff --git a/tensorflow/contrib/lite/toco/export_tensorflow.cc b/tensorflow/contrib/lite/toco/export_tensorflow.cc
index 6900468ec6..695def7ba3 100644
--- a/tensorflow/contrib/lite/toco/export_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/export_tensorflow.cc
@@ -548,6 +548,38 @@ void ConvertDepthwiseConvOperator(const Model& model,
}
}
+void ConvertTransposeConvOperator(const Model& model,
+ const TransposeConvOperator& src_op,
+ GraphDef* tensorflow_graph) {
+ auto* conv2d_op = tensorflow_graph->add_node();
+ conv2d_op->set_op("Conv2DBackpropInput");
+ conv2d_op->set_name(src_op.outputs[0]);
+ *conv2d_op->add_input() = src_op.inputs[0];
+ *conv2d_op->add_input() = src_op.inputs[1];
+ *conv2d_op->add_input() = src_op.inputs[2];
+ (*conv2d_op->mutable_attr())["T"].set_type(DT_FLOAT);
+ const string& weights_array_name = WalkUpToConstantArray(
+ model, src_op.inputs[TransposeConvOperator::WEIGHTS]);
+ const auto& weights_array = model.GetArray(weights_array_name);
+ CHECK(weights_array.buffer->type == ArrayDataType::kFloat);
+ ConvertFloatTensorConst(model, weights_array_name, AxesOrder::kOHWI,
+ AxesOrder::kHWIO, tensorflow_graph);
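+ // Conv2DBackpropInput expects NHWC strides of the form
+ // [1, stride_height, stride_width, 1].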
+ auto& strides = (*conv2d_op->mutable_attr())["strides"];
+ strides.mutable_list()->add_i(1);
+ strides.mutable_list()->add_i(src_op.stride_height);
+ strides.mutable_list()->add_i(src_op.stride_width);
+ strides.mutable_list()->add_i(1);
+ string padding;
+ if (src_op.padding.type == PaddingType::kSame) {
+ padding = "SAME";
+ } else if (src_op.padding.type == PaddingType::kValid) {
+ padding = "VALID";
+ } else {
+ LOG(FATAL) << "Bad padding (only SAME and VALID are supported)";
+ }
+ (*conv2d_op->mutable_attr())["padding"].set_s(padding);
+}
+
void ConvertDepthToSpaceOperator(const Model& model,
const DepthToSpaceOperator& src_op,
GraphDef* tensorflow_graph) {
@@ -1859,6 +1891,10 @@ void ConvertOperator(const Model& model, const Operator& src_op,
ConvertExpandDimsOperator(model,
static_cast<const ExpandDimsOperator&>(src_op),
tensorflow_graph);
+ } else if (src_op.type == OperatorType::kTransposeConv) {
+ ConvertTransposeConvOperator(
+ model, static_cast<const TransposeConvOperator&>(src_op),
+ tensorflow_graph);
} else {
LOG(FATAL) << "Unhandled operator type " << OperatorTypeName(src_op.type);
}
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
index bde947f78d..778da39bf1 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
@@ -71,6 +71,11 @@ bool PropagateArrayDataTypes::Run(Model* model, std::size_t op_index) {
CHECK_GE(op->inputs.size(), 2);
const ArrayDataType data_type = model->GetArray(op->inputs[1]).data_type;
SetDataTypeForAllOutputs(model, op, data_type);
+ } else if (op->type == OperatorType::kTransposeConv) {
+ // This operator produces an output with the same type as its 3rd input.
+ CHECK_GE(op->inputs.size(), 3);
+ const ArrayDataType data_type = model->GetArray(op->inputs[2]).data_type;
+ SetDataTypeForAllOutputs(model, op, data_type);
} else if (op->type == OperatorType::kCast) {
// Data type of the Cast op is specified.
CHECK_EQ(op->outputs.size(), 1);
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
index fc26f997a6..375848a7d4 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
@@ -190,6 +190,116 @@ void ProcessConvOperator(Model* model, ConvOperator* op) {
}
}
+void ProcessTransposeConvOperator(Model* model, TransposeConvOperator* op) {
+ // TransposeConv is unique in that it is specifically given the output shape
+ // as a 1D array on its first input. In theory, resolving the output shape is
+ // then as simple as waiting for this input to be resolved. However, we also
+ // have to calculate the padding, which requires the weights shape. So we
+ // might as well calculate the output shape ourselves and ensure that it
+ // matches the specified one.
+
+ // Check if we have already run.
+ auto& output_array = model->GetArray(op->outputs[0]);
+ if (output_array.has_shape()) {
+ return;
+ }
+
+ // SPECIFIED OUTPUT SHAPE
+ // The below is the specified, or prescribed output shape, _given_ to the
+ // operator as an input.
+ auto& specified_output_shape_array =
+ model->GetArray(op->inputs[TransposeConvOperator::OUTPUT_SHAPE]);
+ if (!specified_output_shape_array.has_shape() ||
+ !specified_output_shape_array.buffer) {
+ // Yield until the specified output shape is resolved as a constant
+ return;
+ }
+
+ CHECK(specified_output_shape_array.data_type == ArrayDataType::kInt32)
+ << "TransposeConv input_dims must be int32";
+
+ CHECK(specified_output_shape_array.shape().dimensions_count() == 1 &&
+ specified_output_shape_array.shape().dims(0) == 4)
+ << "TransposeConv requires a 1D, 4 element array on it's 0th input "
+ "specifying the output shape. \""
+ << op->inputs[TransposeConvOperator::OUTPUT_SHAPE] << "\" had shape "
+ << toco::ShapeToString(specified_output_shape_array.shape());
+
+ // COMPUTE PADDING
+ // We require the weights shape to calculate padding.
+ const auto& weights_array =
+ model->GetArray(op->inputs[TransposeConvOperator::WEIGHTS]);
+ if (!weights_array.has_shape()) {
+ // Yield until weights dims have been resolved.
+ return;
+ }
+ const auto& weights_shape = weights_array.shape();
+ CHECK_EQ(weights_shape.dimensions_count(), 4)
+ << "TransposeConv weights must have 4 input dimensions. Input weights \""
+ << op->inputs[TransposeConvOperator::WEIGHTS] << "\" had shape "
+ << toco::ShapeToString(weights_shape) << ".";
+
+ CHECK(weights_shape.dims(0) == 1 && weights_shape.dims(3) == 1)
+ << "TransposeConv weights dimensions must begin and end with 1. Input "
+ "weights \""
+ << op->inputs[TransposeConvOperator::WEIGHTS] << "\" had shape "
+ << toco::ShapeToString(weights_shape) << ".";
+
+ // Compute padding
+ const int kheight = weights_shape.dims(1);
+ const int kwidth = weights_shape.dims(2);
+ op->padding.GetOrCreateFixedPadding();
+ if (op->padding.type == PaddingType::kValid) {
+ op->padding.fixed->height = 0;
+ op->padding.fixed->width = 0;
+ } else if (op->padding.type == PaddingType::kSame) {
+ op->padding.fixed->height = (kheight - 1) / 2;
+ op->padding.fixed->width = (kwidth - 1) / 2;
+ } else {
+ LOG(FATAL) << "TransposeConv only supports SAME or VALID padding";
+ }
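+ // For example, a 3x3 kernel gives fixed padding of (3 - 1) / 2 = 1 under
+ // SAME padding, and 0 under VALID padding.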
+
+ // VALIDATE OUTPUT SHAPE
+ // Compute the output shape from the input and weights shapes to verify it
+ // agrees with the specified output shape.
+ const auto& input_array =
+ model->GetArray(op->inputs[TransposeConvOperator::DATA_INPUT]);
+ if (!input_array.has_shape()) {
+ // Yield until input dims have been resolved.
+ return;
+ }
+ const auto& input_shape = input_array.shape();
+ CHECK_EQ(input_shape.dimensions_count(), 4)
+ << "TransposeConv input shape must have 4 dimensions. Input \""
+ << op->inputs[TransposeConvOperator::WEIGHTS] << "\" had shape "
+ << toco::ShapeToString(weights_shape) << ".";
+
+ // Compute output shape
+ const int input_width = input_shape.dims(2);
+ const int input_height = input_shape.dims(1);
+ int output_height = op->stride_height * (input_height - 1);
+ int output_width = op->stride_width * (input_width - 1);
+ if (op->padding.type == PaddingType::kValid) {
+ output_height += kheight;
+ output_width += kwidth;
+ } else if (op->padding.type == PaddingType::kSame) {
+ output_height += 1;
+ output_width += 1;
+ }
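+ // For example, a 4x4 input with a 3x3 kernel and stride 2 yields a
+ // 2 * (4 - 1) + 3 = 9 output under VALID padding, and a 2 * (4 - 1) + 1 = 7
+ // output under SAME padding.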
+
+ const std::vector<int32>& specified_output_shape =
+ specified_output_shape_array.GetBuffer<ArrayDataType::kInt32>().data;
+ CHECK(specified_output_shape ==
+ std::vector<int32>({input_shape.dims(0), output_height, output_width,
+ weights_shape.dims(3)}))
+ << "Specified output shape: [" << specified_output_shape[0] << ", "
+ << specified_output_shape[1] << ", " << specified_output_shape[2] << ", "
+ << specified_output_shape[3]
+ << "], does not agree with the shape computed from the input and weights: ["
+ << input_shape.dims(0) << ", " << output_height << ", " << output_width
+ << ", " << weights_shape.dims(3) << "].";
+
+ // SUCCESS: Set the op's output shape according to the specified output shape.
+ *(output_array.mutable_shape()->mutable_dims()) = specified_output_shape;
+}
+
void ProcessDepthwiseConvOperator(Model* model, DepthwiseConvOperator* op) {
if (!EnsureBiasVectorShape(model, op)) {
return;
@@ -1300,7 +1410,7 @@ void ProcessTransposeOperator(Model* model, TransposeOperator* op) {
std::vector<int32> const& perm =
perm_array.GetBuffer<ArrayDataType::kInt32>().data;
CHECK_EQ(perm.size(), input_shape.dimensions_count())
- << "Transpose permutation input " << op->inputs[0]
+ << "Transpose permutation input " << op->inputs[1]
<< " must be same length as input dimensions";
std::vector<int>* output_dims = output_array.mutable_shape()->mutable_dims();
for (int i = 0; i < perm.size(); i++) {
@@ -1402,8 +1512,8 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
ProcessConvOperator(model, static_cast<ConvOperator*>(op));
break;
case OperatorType::kTransposeConv:
- // Unimplemented, hopefully another graph transformation will drop it or
- // rewrite it.
+ ProcessTransposeConvOperator(model,
+ static_cast<TransposeConvOperator*>(op));
break;
case OperatorType::kDepthwiseConv:
ProcessDepthwiseConvOperator(model,
diff --git a/tensorflow/contrib/lite/toco/import_tensorflow.cc b/tensorflow/contrib/lite/toco/import_tensorflow.cc
index 41abca864d..50aeafdf8d 100644
--- a/tensorflow/contrib/lite/toco/import_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/import_tensorflow.cc
@@ -351,6 +351,18 @@ void CheckInputsCount(const NodeDef& node,
<< " input(s) other than control dependencies: " << node.DebugString();
}
+template <ArrayDataType T>
+string CreateConstArray(Model* model, string const& name,
+ std::vector<typename toco::DataType<T> > const& data) {
+ // Utility function to create a const 1D array, useful for input parameters.
+ string array_name = toco::AvailableArrayName(*model, name);
+ auto& array = model->GetOrCreateArray(array_name);
+ array.data_type = T;
+ array.mutable_shape()->mutable_dims()->emplace_back(data.size());
+ array.GetMutableBuffer<T>().data = data;
+ return array_name;
+}
+
void ConvertConstOperator(const NodeDef& node,
const TensorFlowImportFlags& tf_import_flags,
Model* model) {
@@ -1436,12 +1448,8 @@ void ConvertFusedBatchNormOperator(const NodeDef& node,
const string& moving_variance_input = node.input(4);
// Create an array holding the epsilon value (typically, 0.001).
- const string epsilon_array_name = node.name() + "_epsilon_array";
- auto& epsilon_array = model->GetOrCreateArray(epsilon_array_name);
- epsilon_array.data_type = ArrayDataType::kFloat;
- *epsilon_array.mutable_shape()->mutable_dims() = {1};
- epsilon_array.GetMutableBuffer<ArrayDataType::kFloat>().data.push_back(
- GetFloatAttr(node, "epsilon"));
+ const string epsilon_array_name = CreateConstArray<ArrayDataType::kFloat>(
+ model, node.name() + "_epsilon_array", {GetFloatAttr(node, "epsilon")});
// Add epsilon to the moving variance.
const string epsilon_add_op_name = node.name() + "_epsilon";
@@ -1569,16 +1577,56 @@ void ConvertTransposeConvOperator(const NodeDef& node,
CHECK_EQ(node.op(), "Conv2DBackpropInput");
CheckInputsCount(node, tf_import_flags, 3);
auto* op = new TransposeConvOperator;
- op->inputs.push_back(node.input(2));
- op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(0));
+ op->inputs.push_back(node.input(1));
+ op->inputs.push_back(node.input(2));
op->outputs.push_back(node.name());
const auto& strides = GetListAttr(node, "strides");
- CHECK_EQ(strides.i_size(), 4);
- CHECK_EQ(strides.i(0), 1);
op->stride_height = strides.i(1);
op->stride_width = strides.i(2);
- CHECK_EQ(strides.i(3), 1);
+ CHECK_EQ(strides.i_size(), 4)
+ << "Can only import TransposeConv ops with 4D strides. TensorFlow op \""
+ << node.name() << "\" has " << strides.i_size() << "D strides.";
+ CHECK((strides.i(0) == 1) && (strides.i(3) == 1))
+ << "Can only import TransposeConv ops with striding along the height "
+ "(1st) or width (2nd) axis. TensorFlow op \""
+ << node.name() << "\" had strides:[ " << strides.i(0) << ", "
+ << strides.i(1) << ", " << strides.i(2) << ", " << strides.i(3) << "].";
+ if (HasAttr(node, "dilations")) {
+ const auto& dilations = GetListAttr(node, "dilations");
+ CHECK_EQ(dilations.i_size(), 4)
+ << "Dilation unsupported in TransposeConv. TensorFlow op \""
+ << node.name() << "\" had dilations";
+ CHECK((dilations.i(0) == 1) && (dilations.i(1) == 1) &&
+ (dilations.i(1) == 1) && (dilations.i(3) == 1))
+ << "Dilation unsupported in TransposeConv. TensorFlow op \""
+ << node.name() << "\" had dilations:[ " << dilations.i(0) << ", "
+ << dilations.i(1) << ", " << dilations.i(2) << ", " << dilations.i(3)
+ << "].";
+ }
+
+ const string& weights_name = node.input(TransposeConvOperator::WEIGHTS);
+ const string& transposed_weights_name = weights_name + "_transposed";
+ // Check if a TransposeOperator was already created for these weights
+ // (can happen when multiple layers share the same weights).
+ const Operator* existing_transpose =
+ GetOpWithOutput(*model, transposed_weights_name);
+ if (existing_transpose) {
+ CHECK(existing_transpose->type == OperatorType::kTranspose);
+ } else {
+ // Transpose weights from HWIO order to OHWI order, which is more efficient
+ // for computation
+ TransposeOperator* transpose = new TransposeOperator;
+ string perm_array = CreateConstArray<ArrayDataType::kInt32>(
+ model, node.name() + "_transpose_perm", {3, 0, 1, 2});
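+ // Perm {3, 0, 1, 2} maps HWIO (H=0, W=1, I=2, O=3) to OHWI.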
+ transpose->inputs = {weights_name, perm_array};
+ transpose->outputs = {transposed_weights_name};
+ model->operators.emplace_back(transpose);
+ }
+ op->inputs[1] = transposed_weights_name;
+
auto const& padding = GetStringAttr(node, "padding");
if (padding == "SAME") {
op->padding.type = PaddingType::kSame;
@@ -1874,19 +1922,9 @@ void ConvertTopKV2Operator(const NodeDef& node,
op->inputs.push_back(node.input(0));
// K can be encoded as attr (TopK) convert it to a const.
if (HasAttr(node, "k")) {
- // Convert attribute into const tensor.
- const string array_name = node.name() + "k";
- auto& array = model->GetOrCreateArray(array_name);
- array.data_type = ArrayDataType::kInt32;
- // Size of array is always 1.
- array.mutable_shape()->mutable_dims()->emplace_back(1);
-
- auto& output_int_data =
- array.GetMutableBuffer<ArrayDataType::kInt32>().data;
- output_int_data.resize(1);
- output_int_data[0] = GetIntAttr(node, "k");
- op->inputs.push_back(array_name);
-
+ string k_array = CreateConstArray<ArrayDataType::kInt32>(
+ model, node.name() + "k", {GetIntAttr(node, "k")});
+ op->inputs.push_back(k_array);
} else {
CheckInputsCount(node, tf_import_flags, 2);
op->inputs.push_back(node.input(1));
diff --git a/tensorflow/contrib/lite/toco/model.h b/tensorflow/contrib/lite/toco/model.h
index ed0dedc003..cd3eb06602 100644
--- a/tensorflow/contrib/lite/toco/model.h
+++ b/tensorflow/contrib/lite/toco/model.h
@@ -846,19 +846,29 @@ struct SqueezeOperator : Operator {
};
// Inputs:
-// inputs[0]: required: the input activations array
-// inputs[1]: required: the Conv weights
-// channel.
+// inputs[0]: required: the output shape
+// inputs[1]: required: the weights
+// inputs[2]: required: the input activations array
+// NOTE: The input activations array is NOT the first input.
//
// Outputs:
// outputs[0]: required: the output activations array
//
// TensorFlow equivalent: Conv2DBackpropInput
struct TransposeConvOperator : Operator {
+ enum Inputs {
+ OUTPUT_SHAPE = 0,
+ WEIGHTS = 1,
+ DATA_INPUT = 2,
+ };
+
TransposeConvOperator() : Operator(OperatorType::kTransposeConv) {}
Padding padding;
int stride_width = 0;
int stride_height = 0;
+ // Dilation is possible with transpose convolution, but TensorFlow does not
+ // currently support it, so we omit it here.
};
// Given a tensor input, this operation calculates element-wise exponential