aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/contrib/lite
diff options
context:
space:
mode:
Diffstat (limited to 'tensorflow/contrib/lite')
-rw-r--r--tensorflow/contrib/lite/builtin_op_data.h5
-rw-r--r--tensorflow/contrib/lite/kernels/pad.cc102
-rw-r--r--tensorflow/contrib/lite/kernels/pad_test.cc109
-rw-r--r--tensorflow/contrib/lite/kernels/test_util.cc36
-rw-r--r--tensorflow/contrib/lite/kernels/test_util.h7
-rw-r--r--tensorflow/contrib/lite/model.cc19
-rw-r--r--tensorflow/contrib/lite/schema/schema.fbs2
-rw-r--r--tensorflow/contrib/lite/schema/schema_generated.h62
-rw-r--r--tensorflow/contrib/lite/testing/generate_examples.py27
-rw-r--r--tensorflow/contrib/lite/testing/generated_examples_zip_test.cc59
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator.cc10
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator_test.cc10
12 files changed, 237 insertions, 211 deletions
diff --git a/tensorflow/contrib/lite/builtin_op_data.h b/tensorflow/contrib/lite/builtin_op_data.h
index ab07c58c92..8269de8e3f 100644
--- a/tensorflow/contrib/lite/builtin_op_data.h
+++ b/tensorflow/contrib/lite/builtin_op_data.h
@@ -172,11 +172,6 @@ typedef struct {
} TfLiteResizeBilinearParams;
typedef struct {
- // TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
- // For now we will fix the maximum possible number of dimensions.
- int before_padding[8];
- int after_padding[8];
- int num_dimensions;
} TfLitePadParams;
typedef struct {
diff --git a/tensorflow/contrib/lite/kernels/pad.cc b/tensorflow/contrib/lite/kernels/pad.cc
index 1a0d9d1505..569bf0fe8f 100644
--- a/tensorflow/contrib/lite/kernels/pad.cc
+++ b/tensorflow/contrib/lite/kernels/pad.cc
@@ -33,65 +33,93 @@ enum KernelType {
kGenericOptimized,
};
-// TODO(nupurgarg): Padding represented as a tensor is ignored. Only use the
-// `left_padding` and `right_padding` specified in `params`.
struct PadContext {
PadContext(TfLiteContext* context, TfLiteNode* node) {
- params = reinterpret_cast<TfLitePadParams*>(node->builtin_data);
input = GetInput(context, node, 0);
+ paddings = GetInput(context, node, 1);
output = GetOutput(context, node, 0);
+ dims = NumDimensions(input);
}
- TfLitePadParams* params;
TfLiteTensor* input;
+ TfLiteTensor* paddings;
TfLiteTensor* output;
+ int dims;
};
+// Resizes output array based on the input size and padding size. This function
+// is callable from both Prepare() and Eval() as long as the caller ensures the
+// paddings data is present.
+TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
+ PadContext* op_context) {
+ // TODO(nupurgarg): Our current implementations rely on the inputs being 4D.
+ TF_LITE_ENSURE_EQ(context, op_context->dims, 4);
+
+ // Ensures the paddings array is dims x 2.
+ TF_LITE_ENSURE_EQ(context, SizeOfDimension(op_context->paddings, 0),
+ op_context->dims);
+ TF_LITE_ENSURE_EQ(context, SizeOfDimension(op_context->paddings, 1), 2);
+
+ // Determines the size of the output tensor.
+ const TfLiteIntArray* input_size = op_context->input->dims;
+ TfLiteIntArray* output_size = TfLiteIntArrayCreate(op_context->dims);
+ const int32* paddings_data = GetTensorData<int32>(op_context->paddings);
+
+ for (int idx = 0; idx < op_context->dims; ++idx) {
+ int before_padding = *paddings_data++;
+ int after_padding = *paddings_data++;
+
+ TF_LITE_ENSURE_MSG(context, (before_padding >= 0 && after_padding >= 0),
+ "Pad value has to be greater than equal to 0.");
+
+ output_size->data[idx] =
+ (input_size->data[idx] + before_padding + after_padding);
+ }
+
+ return context->ResizeTensor(context, op_context->output, output_size);
+}
+
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
- TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
- // Determines size of output tensor.
PadContext op_context(context, node);
- int dims = NumDimensions(op_context.input);
- TF_LITE_ENSURE_EQ(context, dims, op_context.params->num_dimensions);
TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
- // TODO(nupurgarg): Our current implementations rely on the inputs being 4D.
- TF_LITE_ENSURE_EQ(context, dims, 4);
-
- const TfLiteIntArray* input_size = op_context.input->dims;
- TfLiteIntArray* output_size = TfLiteIntArrayCreate(dims);
- for (int idx = 0; idx < dims; ++idx) {
- TF_LITE_ENSURE_MSG(context,
- (op_context.params->before_padding[idx] >= 0 &&
- op_context.params->after_padding[idx] >= 0),
- "Pad value has to be greater than equal to 0.");
- output_size->data[idx] =
- (input_size->data[idx] + op_context.params->before_padding[idx] +
- op_context.params->after_padding[idx]);
+ // TODO(nupurgarg): Create wrapper functions for dynamic tensor logic.
+ // Exit early if paddings is a non-const tensor. Set output tensor to
+ // dynamic so output size can be determined in Eval.
+ if (op_context.paddings->allocation_type != kTfLiteMmapRo) {
+ op_context.output->allocation_type = kTfLiteDynamic;
+ return kTfLiteOk;
}
-
- return context->ResizeTensor(context, op_context.output, output_size);
+ return ResizeOutputTensor(context, &op_context);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
PadContext op_context(context, node);
- std::vector<int> before_padding(
- op_context.params->before_padding,
- op_context.params->before_padding + op_context.params->num_dimensions);
- std::vector<int> after_padding(
- op_context.params->after_padding,
- op_context.params->after_padding + op_context.params->num_dimensions);
-
- // TODO(nupurgarg): Change TOCO's implementation to use padding arrays
- // in forward order (depth, width, height, batch).
- // Converts from int[] = {depth, width, height, batch} to int[] = {batch,
- // height, width, depth} to match TOCO's implementation of pad in
- // referenced_ops.h and optimized_ops.h.
- std::reverse(before_padding.begin(), before_padding.end());
- std::reverse(after_padding.begin(), after_padding.end());
+ // Resize the output tensor if the output tensor is dynamic.
+ if (op_context.output->allocation_type == kTfLiteDynamic) {
+ TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+ TfLiteTensorRealloc(op_context.output->bytes, op_context.output);
+ }
+
+ // TODO(nupurgarg): Change kernel implementation to take in int* instead of
+ // vector<int> to remove malloc from Eval().
+ // Create before and after padding arrays that are accepted by the kernel.
+ std::vector<int> before_padding;
+ std::vector<int> after_padding;
+ const int32* paddings_data = GetTensorData<int32>(op_context.paddings);
+
+ // TODO(nupurgarg): Change kernel implementation to use padding arrays in
+ // forward order (depth, width, height, batch).
+ // Build paddings in order of int[] = {batch, height, width, depth} to match
+ // kernel implementation of Pad in referenced_ops.h and optimized_ops.h.
+ for (int idx = op_context.dims - 1; idx >= 0; --idx) {
+ before_padding.push_back(paddings_data[idx * 2]);
+ after_padding.push_back(paddings_data[idx * 2 + 1]);
+ }
#define TF_LITE_PAD(type, scalar) \
type::Pad(GetTensorData<scalar>(op_context.input), \
diff --git a/tensorflow/contrib/lite/kernels/pad_test.cc b/tensorflow/contrib/lite/kernels/pad_test.cc
index f3ea9417df..28834ad071 100644
--- a/tensorflow/contrib/lite/kernels/pad_test.cc
+++ b/tensorflow/contrib/lite/kernels/pad_test.cc
@@ -25,52 +25,87 @@ using ::testing::ElementsAreArray;
class PadOpModel : public SingleOpModel {
public:
- PadOpModel(std::initializer_list<int> input_shape,
- std::initializer_list<int> before_padding,
- std::initializer_list<int> after_padding) {
- input_ = AddInput(TensorType_FLOAT32);
- output_ = AddOutput(TensorType_FLOAT32);
- SetBuiltinOp(
- BuiltinOperator_PAD, BuiltinOptions_PadOptions,
- CreatePadOptions(builder_, builder_.CreateVector<int>(before_padding),
- builder_.CreateVector<int>(after_padding))
- .Union());
- BuildInterpreter({input_shape});
- }
-
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
+ void SetPaddings(std::initializer_list<int> paddings) {
+ PopulateTensor<int>(paddings_, paddings);
+ }
+
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
- private:
+ protected:
int input_;
int output_;
+ int paddings_;
+};
+
+// Test case where paddings is a const tensor.
+//
+// Example usage is as follows:
+// PadOpConstModel m(input_shape, paddings_shape, paddings_data);
+// m.SetInput(input_data);
+// m.Invoke();
+class PadOpConstModel : public PadOpModel {
+ public:
+ PadOpConstModel(std::initializer_list<int> input_shape,
+ std::initializer_list<int> paddings_shape,
+ std::initializer_list<int> paddings) {
+ input_ = AddInput(TensorType_FLOAT32);
+ paddings_ = AddConstInput(TensorType_INT32, paddings, paddings_shape);
+ output_ = AddOutput(TensorType_FLOAT32);
+
+ SetBuiltinOp(BuiltinOperator_PAD, BuiltinOptions_PadOptions,
+ CreatePadOptions(builder_).Union());
+ BuildInterpreter({input_shape});
+ }
+};
+
+// Test case where paddings is a non-const tensor.
+//
+// Example usage is as follows:
+// PadOpDynamicModel m(input_shape, paddings_shape);
+// m.SetInput(input_data);
+// m.SetPaddings(paddings_data);
+// m.Invoke();
+class PadOpDynamicModel : public PadOpModel {
+ public:
+ PadOpDynamicModel(std::initializer_list<int> input_shape,
+ std::initializer_list<int> paddings_shape) {
+ input_ = AddInput(TensorType_FLOAT32);
+ paddings_ = AddInput(TensorType_INT32);
+ output_ = AddOutput(TensorType_FLOAT32);
+
+ SetBuiltinOp(BuiltinOperator_PAD, BuiltinOptions_PadOptions,
+ CreatePadOptions(builder_).Union());
+ BuildInterpreter({input_shape, paddings_shape});
+ }
};
TEST(PadOpTest, TooManyDimensions) {
EXPECT_DEATH(
- PadOpModel({1, 2, 3, 4, 5, 6, 7, 8, 9}, {1, 2, 3, 4, 5, 6, 7, 8, 9},
- {1, 2, 3, 4, 5, 6, 7, 8, 9}),
+ PadOpConstModel({1, 2, 3, 4, 5, 6, 7, 8, 9}, {9, 2},
+ {1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9}),
"dims != 4");
}
-// TODO(nupurgarg): Test case where before padding and after padding arrays
-// don't contain the same number of dimensions.
TEST(PadOpTest, UnequalDimensions) {
- EXPECT_DEATH(PadOpModel({1, 1, 2, 1}, {1, 2, 3}, {1, 2, 3}),
- "dims != op_context.params->num_dimensions");
+ EXPECT_DEATH(PadOpConstModel({1, 1, 2, 1}, {3, 2}, {1, 1, 2, 2, 3, 3}),
+ "3 != 4");
}
TEST(PadOpTest, InvalidPadValue) {
- EXPECT_DEATH(PadOpModel({1, 1, 2, 1}, {0, 1, 2, 0}, {0, -1, -1, 0}),
- "Pad value has to be greater than equal to 0.");
+ EXPECT_DEATH(
+ PadOpConstModel({1, 1, 2, 1}, {4, 2}, {0, 0, 1, -1, 2, -1, 0, 0}),
+ "Pad value has to be greater than equal to 0.");
}
-TEST(PadOpTest, SimpleTest) {
- PadOpModel m({1, 2, 2, 1}, {0, 1, 1, 0}, {0, 1, 1, 0});
+TEST(PadOpTest, SimpleConstTest) {
+ // Padding is represented as four 2-element lists, one per dimension, each
+ // giving the before and after padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
+ PadOpConstModel m({1, 2, 2, 1}, {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0});
m.SetInput({1, 2, 3, 4});
m.Invoke();
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
@@ -78,10 +113,30 @@ TEST(PadOpTest, SimpleTest) {
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
-TEST(PadOpTest, AdvancedTest) {
- // The padding is input in the order of batch, height, width, depth.
- PadOpModel m({1, 2, 3, 1}, {0, 0, 1, 0}, {0, 2, 3, 0});
+TEST(PadOpTest, SimpleDynamicTest) {
+ PadOpDynamicModel m({1, 2, 2, 1}, {4, 2});
+ m.SetInput({1, 2, 3, 4});
+ m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
+ 0, 0, 0, 0, 0}));
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
+}
+
+TEST(PadOpTest, AdvancedConstTest) {
+ PadOpConstModel m({1, 2, 3, 1}, {4, 2}, {0, 0, 0, 2, 1, 3, 0, 0});
+ m.SetInput({1, 2, 3, 4, 5, 6});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(),
+ ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
+}
+
+TEST(PadOpTest, AdvancedDynamicTest) {
+ PadOpDynamicModel m({1, 2, 3, 1}, {4, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
+ m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
m.Invoke();
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
diff --git a/tensorflow/contrib/lite/kernels/test_util.cc b/tensorflow/contrib/lite/kernels/test_util.cc
index b69f2b3e4b..3a58e7ec32 100644
--- a/tensorflow/contrib/lite/kernels/test_util.cc
+++ b/tensorflow/contrib/lite/kernels/test_util.cc
@@ -49,7 +49,7 @@ std::vector<Matcher<float>> ArrayFloatNear(const std::vector<float>& values,
return matchers;
}
-int SingleOpModel::AddTensor(TensorData t) {
+int SingleOpModel::AddTensor(TensorData t, std::initializer_list<int> data) {
int id = tensors_.size();
// This is slightly different depending on whether we are adding a
@@ -78,8 +78,23 @@ int SingleOpModel::AddTensor(TensorData t) {
builder_.CreateVector<int64_t>({t.zero_point}));
}
- tensors_.push_back(CreateTensor(builder_, builder_.CreateVector<int>({}),
- t.type, /*buffer=*/0,
+ int buffer_id = 0;
+ if (data.size()) {
+ // Initialize buffers list with empty buffer to allow for non-const tensors.
+ if (buffers_.empty()) {
+ buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector({})));
+ }
+
+ // Add data as a Buffer to buffers list.
+ buffer_id = buffers_.size();
+ auto data_buffer =
+ builder_.CreateVector(reinterpret_cast<const uint8_t*>(data.begin()),
+ sizeof(int) * data.size());
+ buffers_.push_back(CreateBuffer(builder_, data_buffer));
+ }
+
+ tensors_.push_back(CreateTensor(builder_, builder_.CreateVector<int>(t.shape),
+ t.type, /*buffer=*/buffer_id,
/*name=*/0, q_params));
tensor_data_[id] = t;
@@ -88,7 +103,15 @@ int SingleOpModel::AddTensor(TensorData t) {
}
int SingleOpModel::AddInput(const TensorData& t) {
- int id = AddTensor(t);
+ int id = AddTensor(t, {});
+ inputs_.push_back(id);
+ return id;
+}
+
+int SingleOpModel::AddConstInput(TensorType type,
+ std::initializer_list<int> data,
+ std::initializer_list<int> shape) {
+ int id = AddTensor(TensorData{type, shape}, data);
inputs_.push_back(id);
return id;
}
@@ -100,7 +123,7 @@ int SingleOpModel::AddNullInput() {
}
int SingleOpModel::AddOutput(const TensorData& t) {
- int id = AddTensor(t);
+ int id = AddTensor(t, {});
outputs_.push_back(id);
return id;
}
@@ -142,8 +165,7 @@ void SingleOpModel::BuildInterpreter(
subgraphs.push_back(subgraph);
auto subgraphs_flatbuffer = builder_.CreateVector(subgraphs);
- std::vector<flatbuffers::Offset<Buffer>> buffers_vec;
- auto buffers = builder_.CreateVector(buffers_vec);
+ auto buffers = builder_.CreateVector(buffers_);
auto description = builder_.CreateString("programmatic model");
builder_.Finish(CreateModel(builder_, TFLITE_SCHEMA_VERSION, opcodes,
subgraphs_flatbuffer, description, buffers));
diff --git a/tensorflow/contrib/lite/kernels/test_util.h b/tensorflow/contrib/lite/kernels/test_util.h
index 531c1366a8..b9c0ba8f47 100644
--- a/tensorflow/contrib/lite/kernels/test_util.h
+++ b/tensorflow/contrib/lite/kernels/test_util.h
@@ -98,6 +98,10 @@ class SingleOpModel {
int AddInput(TensorType type) { return AddInput(TensorData{type}); }
int AddInput(const TensorData& t);
+ // Add a Tensor containing const data and return the tensor id.
+ int AddConstInput(TensorType type, std::initializer_list<int> data,
+ std::initializer_list<int> shape);
+
// Add a null input tensor (optional input) and return kOptionalTensor.
int AddNullInput();
@@ -181,7 +185,7 @@ class SingleOpModel {
std::unique_ptr<tflite::Interpreter> interpreter_;
private:
- int AddTensor(TensorData t);
+ int AddTensor(TensorData t, std::initializer_list<int> data);
std::map<int, TensorData> tensor_data_;
std::vector<int32_t> inputs_;
@@ -189,6 +193,7 @@ class SingleOpModel {
std::vector<flatbuffers::Offset<Tensor>> tensors_;
std::vector<flatbuffers::Offset<OperatorCode>> opcodes_;
std::vector<flatbuffers::Offset<Operator>> operators_;
+ std::vector<flatbuffers::Offset<Buffer>> buffers_;
std::map<string, std::function<TfLiteRegistration*()>> custom_registrations_;
};
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index a01b74f9da..95949be9e6 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -486,25 +486,6 @@ void* ParseOpData(const Operator* op, BuiltinOperator op_type,
break;
}
case BuiltinOperator_PAD: {
- auto* params = MallocPOD<TfLitePadParams>();
- if (auto* schema_params = op->builtin_options_as_PadOptions()) {
- auto* before_padding = schema_params->before_padding();
- FlatBufferIntVectorToArray(sizeof(params->before_padding),
- before_padding, params->before_padding,
- error_reporter);
-
- auto* after_padding = schema_params->after_padding();
- FlatBufferIntVectorToArray(sizeof(params->after_padding), after_padding,
- params->after_padding, error_reporter);
-
- if (before_padding->Length() != after_padding->Length()) {
- error_reporter->Report(
- "Before padding and after padding arrays need to contain the "
- "same number of dimensions.\n");
- }
- params->num_dimensions = after_padding->Length();
- }
- builtin_data = reinterpret_cast<void*>(params);
break;
}
case BuiltinOperator_RESHAPE: {
diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs
index 2172135f49..ec202cd407 100644
--- a/tensorflow/contrib/lite/schema/schema.fbs
+++ b/tensorflow/contrib/lite/schema/schema.fbs
@@ -275,8 +275,6 @@ table CallOptions {
}
table PadOptions {
- before_padding:[int];
- after_padding:[int];
}
table ReshapeOptions {
diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h
index b756891f66..c04a73a2bf 100644
--- a/tensorflow/contrib/lite/schema/schema_generated.h
+++ b/tensorflow/contrib/lite/schema/schema_generated.h
@@ -2657,26 +2657,13 @@ flatbuffers::Offset<CallOptions> CreateCallOptions(
struct PadOptionsT : public flatbuffers::NativeTable {
typedef PadOptions TableType;
- std::vector<int32_t> before_padding;
- std::vector<int32_t> after_padding;
PadOptionsT() {}
};
struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef PadOptionsT NativeTableType;
- enum { VT_BEFORE_PADDING = 4, VT_AFTER_PADDING = 6 };
- const flatbuffers::Vector<int32_t> *before_padding() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BEFORE_PADDING);
- }
- const flatbuffers::Vector<int32_t> *after_padding() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_AFTER_PADDING);
- }
bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_BEFORE_PADDING) &&
- verifier.Verify(before_padding()) &&
- VerifyOffset(verifier, VT_AFTER_PADDING) &&
- verifier.Verify(after_padding()) && verifier.EndTable();
+ return VerifyTableStart(verifier) && verifier.EndTable();
}
PadOptionsT *UnPack(
const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@@ -2691,14 +2678,6 @@ struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
struct PadOptionsBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
- void add_before_padding(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> before_padding) {
- fbb_.AddOffset(PadOptions::VT_BEFORE_PADDING, before_padding);
- }
- void add_after_padding(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> after_padding) {
- fbb_.AddOffset(PadOptions::VT_AFTER_PADDING, after_padding);
- }
explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -2712,24 +2691,11 @@ struct PadOptionsBuilder {
};
inline flatbuffers::Offset<PadOptions> CreatePadOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> before_padding = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> after_padding = 0) {
+ flatbuffers::FlatBufferBuilder &_fbb) {
PadOptionsBuilder builder_(_fbb);
- builder_.add_after_padding(after_padding);
- builder_.add_before_padding(before_padding);
return builder_.Finish();
}
-inline flatbuffers::Offset<PadOptions> CreatePadOptionsDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *before_padding = nullptr,
- const std::vector<int32_t> *after_padding = nullptr) {
- return tflite::CreatePadOptions(
- _fbb, before_padding ? _fbb.CreateVector<int32_t>(*before_padding) : 0,
- after_padding ? _fbb.CreateVector<int32_t>(*after_padding) : 0);
-}
-
flatbuffers::Offset<PadOptions> CreatePadOptions(
flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o,
const flatbuffers::rehasher_function_t *_rehasher = nullptr);
@@ -5572,24 +5538,6 @@ inline void PadOptions::UnPackTo(
PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
(void)_o;
(void)_resolver;
- {
- auto _e = before_padding();
- if (_e) {
- _o->before_padding.resize(_e->size());
- for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
- _o->before_padding[_i] = _e->Get(_i);
- }
- }
- };
- {
- auto _e = after_padding();
- if (_e) {
- _o->after_padding.resize(_e->size());
- for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
- _o->after_padding[_i] = _e->Get(_i);
- }
- }
- };
}
inline flatbuffers::Offset<PadOptions> PadOptions::Pack(
@@ -5609,11 +5557,7 @@ inline flatbuffers::Offset<PadOptions> CreatePadOptions(
const flatbuffers::rehasher_function_t *__rehasher;
} _va = {&_fbb, _o, _rehasher};
(void)_va;
- auto _before_padding =
- _o->before_padding.size() ? _fbb.CreateVector(_o->before_padding) : 0;
- auto _after_padding =
- _o->after_padding.size() ? _fbb.CreateVector(_o->after_padding) : 0;
- return tflite::CreatePadOptions(_fbb, _before_padding, _after_padding);
+ return tflite::CreatePadOptions(_fbb);
}
inline ReshapeOptionsT *ReshapeOptions::UnPack(
diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py
index c225cd4f00..a639351657 100644
--- a/tensorflow/contrib/lite/testing/generate_examples.py
+++ b/tensorflow/contrib/lite/testing/generate_examples.py
@@ -1141,28 +1141,43 @@ def make_pad_tests(zip_path):
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
[0, 0], [2, 3]]],
+ "constant_paddings": [True, False],
},
# Non-4D use case.
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2], [0, 1, 2]],
"paddings": [[[0, 1], [2, 3]]],
+ "constant_paddings": [True, False],
},
]
def build_graph(parameters):
+ """Build a pad graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
- out = tf.pad(input_tensor, paddings=parameters["paddings"])
- return [input_tensor], [out]
+
+ # Get paddings as either a placeholder or constants.
+ if parameters["constant_paddings"]:
+ paddings = parameters["paddings"]
+ input_tensors = [input_tensor]
+ else:
+ shape = [len(parameters["paddings"]), 2]
+ paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
+ input_tensors = [input_tensor, paddings]
+
+ out = tf.pad(input_tensor, paddings=paddings)
+ return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
- input_values = create_tensor_data(parameters["dtype"],
- parameters["input_shape"])
- return [input_values], sess.run(
- outputs, feed_dict=dict(zip(inputs, [input_values])))
+ values = [
+ create_tensor_data(parameters["dtype"], parameters["input_shape"])
+ ]
+ if not parameters["constant_paddings"]:
+ values.append(np.array(parameters["paddings"]))
+ return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
diff --git a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
index 36aa09090b..41652a07d2 100644
--- a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
+++ b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
@@ -48,50 +48,51 @@ tensorflow::Env* env = tensorflow::Env::Default();
// TODO(ahentz): make sure we clean this list up frequently.
std::map<string, string> kBrokenTests = {
// Add doesn't support broadcasting.
- {R"(adda.*input_shape_1=\[1,3,4,3\],input_shape_2=\[3\])", "68500195"},
- {R"(mula.*input_shape_1=\[1,3,4,3\],input_shape_2=\[3\])", "68500195"},
- {R"(diva.*input_shape_1=\[1,3,4,3\],input_shape_2=\[3\])", "68500195"},
- {R"(suba.*input_shape_1=\[1,3,4,3\],input_shape_2=\[3\])", "68500195"},
+ {R"(^\/adda.*input_shape_1=\[1,3,4,3\],input_shape_2=\[3\])", "68500195"},
+ {R"(^\/mula.*input_shape_1=\[1,3,4,3\],input_shape_2=\[3\])", "68500195"},
+ {R"(^\/diva.*input_shape_1=\[1,3,4,3\],input_shape_2=\[3\])", "68500195"},
+ {R"(^\/suba.*input_shape_1=\[1,3,4,3\],input_shape_2=\[3\])", "68500195"},
// Add only supports float32. (and "constant" tests use Add)
- {R"(adda.*int32)", "68808744"},
- {R"(constant.*int32)", "68808744"},
- {R"(mul.*int32)", "68808744"},
- {R"(div.*int32)", "68808744"},
- {R"(sub.*int32)", "68808744"},
+ {R"(^\/adda.*int32)", "68808744"},
+ {R"(^\/constant.*int32)", "68808744"},
+ {R"(^\/mul.*int32)", "68808744"},
+ {R"(^\/div.*int32)", "68808744"},
+ {R"(^\/sub.*int32)", "68808744"},
// Pad only supports 4D tensors.
- {R"(paddtype=.*,input_shape=\[.,.\],paddings=\[\[.,.\],\[.,.\]\])",
+ {R"(^\/pad.*,input_shape=\[.,.\],paddings=\[\[.,.\],\[.,.\]\])",
"70527055"},
// L2Norm only supports tensors with 4D or fewer.
- {R"(l2normdim=.*,epsilon=.*,input_shape=\[.,.,.,.,.*\])", "67963684"},
+ {R"(^\/l2normdim=.*,epsilon=.*,input_shape=\[.,.,.,.,.*\])", "67963684"},
// SpaceToBatch only supports 4D tensors.
- {R"(space_to_batch_nd.*input_shape=\[1,4,4,4,1,1\])", "70848787"},
+ {R"(^\/space_to_batch_nd.*input_shape=\[1,4,4,4,1,1\])", "70848787"},
// L2Norm only works for dim=-1.
- {R"(l2normdim=-2,epsilon=.*,input_shape=\[.,.\])", "67963812"},
- {R"(l2normdim=0,epsilon=.*,input_shape=\[.,.\])", "67963812"},
- {R"(l2normdim=-2,epsilon=.*,input_shape=\[3,15,14,3\])", "67963812"},
- {R"(l2normdim=-2,epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
- {R"(l2normdim=2,epsilon=.*,input_shape=\[3,15,14,3\])", "67963812"},
- {R"(l2normdim=2,epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
- {R"(l2normdim=0,epsilon=.*,input_shape=\[3,15,14,3\])", "67963812"},
- {R"(l2normdim=0,epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
- {R"(l2normdim=1,epsilon=.*,input_shape=\[3,15,14,3\])", "67963812"},
- {R"(l2normdim=1,epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
- {R"(l2normdim=\[2,3\],epsilon=.*,input_shape=\[3,15,14,3\])", "67963812"},
- {R"(l2normdim=\[2,3\],epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
+ {R"(^\/l2normdim=-2,epsilon=.*,input_shape=\[.,.\])", "67963812"},
+ {R"(^\/l2normdim=0,epsilon=.*,input_shape=\[.,.\])", "67963812"},
+ {R"(^\/l2normdim=-2,epsilon=.*,input_shape=\[3,15,14,3\])", "67963812"},
+ {R"(^\/l2normdim=-2,epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
+ {R"(^\/l2normdim=2,epsilon=.*,input_shape=\[3,15,14,3\])", "67963812"},
+ {R"(^\/l2normdim=2,epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
+ {R"(^\/l2normdim=0,epsilon=.*,input_shape=\[3,15,14,3\])", "67963812"},
+ {R"(^\/l2normdim=0,epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
+ {R"(^\/l2normdim=1,epsilon=.*,input_shape=\[3,15,14,3\])", "67963812"},
+ {R"(^\/l2normdim=1,epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
+ {R"(^\/l2normdim=\[2,3\],epsilon=.*,input_shape=\[3,15,14,3\])",
+ "67963812"},
+ {R"(^\/l2normdim=\[2,3\],epsilon=.*,input_shape=\[1,3,4,3\])", "67963812"},
// ResizeBilinear looks completely incompatible with Tensorflow
- {R"(resize_bilinear.*dtype=tf.int32)", "72401107"},
- {R"(resize_bilinearalign_corners=True,.*,size=\[2,2\])", "72401483"},
- {R"(resize_bilinearalign_corners=True,.*,size=\[4,3\])", "72401483"},
- {R"(resize_bilinearalign_corners=True,.*,size=\[5,6\])", "72401483"},
+ {R"(^\/resize_bilinear.*dtype=tf.int32)", "72401107"},
+ {R"(^\/resize_bilinearalign_corners=True,.*,size=\[2,2\])", "72401483"},
+ {R"(^\/resize_bilinearalign_corners=True,.*,size=\[4,3\])", "72401483"},
+ {R"(^\/resize_bilinearalign_corners=True,.*,size=\[5,6\])", "72401483"},
// Transpose only supports 1D-4D input tensors.
- {R"(transposedtype=.*,input_shape=\[.,.,.,.,.\],perm=.*)", "71545879"},
+ {R"(^\/transposedtype=.*,input_shape=\[.,.,.,.,.\],perm=.*)", "71545879"},
};
// Allows test data to be unzipped into a temporary directory and makes
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index 0c2b570aad..d75d1fcc5b 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -474,19 +474,11 @@ class Pad : public BuiltinOperator<PadOperator, ::tflite::PadOptions,
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
- auto before_padding = builder->CreateVector(op.left_padding);
- auto after_padding = builder->CreateVector(op.right_padding);
- return ::tflite::CreatePadOptions(*builder, before_padding, after_padding);
+ return ::tflite::CreatePadOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
- op->left_padding.insert(op->left_padding.end(),
- options.before_padding()->begin(),
- options.before_padding()->end());
- op->right_padding.insert(op->right_padding.end(),
- options.after_padding()->begin(),
- options.after_padding()->end());
}
};
diff --git a/tensorflow/contrib/lite/toco/tflite/operator_test.cc b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
index de79c70e1b..9036a16d1c 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
@@ -258,16 +258,6 @@ TEST_F(OperatorTest, BuiltinMaxPool) {
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
-TEST_F(OperatorTest, BuiltinPad) {
- PadOperator op;
- op.left_padding = {1, 2, 3};
- op.right_padding = {1, 2, 3};
- auto output_toco_op =
- SerializeAndDeserialize(GetOperator("PAD", OperatorType::kPad), op);
- EXPECT_EQ(op.left_padding, output_toco_op->left_padding);
- EXPECT_EQ(op.right_padding, output_toco_op->right_padding);
-}
-
TEST_F(OperatorTest, BuiltinReshape) {
TensorFlowReshapeOperator op;
op.shape = {1, 2, 4, 5, 8};