about summary refs log tree commit diff homepage
path: root/tensorflow/contrib
diff options
context:
space:
mode:
authorGravatar Nupur Garg <nupurgarg@google.com>2018-01-29 15:06:30 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-01-29 15:10:40 -0800
commitaaaeef5bbc4698ea48c2476ad5c84a94712c8d2f (patch)
tree6a40ea0692cae1e71b4fa8189fbde711a3569273 /tensorflow/contrib
parenta807755db184c2d2c3b9bcca65457e9915508650 (diff)
Make TFLite SpaceToBatchND op have parity with TF SpaceToBatchND op.
PiperOrigin-RevId: 183734695
Diffstat (limited to 'tensorflow/contrib')
-rw-r--r--tensorflow/contrib/lite/builtin_op_data.h8
-rw-r--r--tensorflow/contrib/lite/kernels/space_to_batch_nd.cc95
-rw-r--r--tensorflow/contrib/lite/kernels/space_to_batch_nd_test.cc141
-rw-r--r--tensorflow/contrib/lite/model.cc17
-rw-r--r--tensorflow/contrib/lite/schema/schema.fbs3
-rwxr-xr-xtensorflow/contrib/lite/schema/schema_generated.h89
-rw-r--r--tensorflow/contrib/lite/testing/generate_examples.py42
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator.cc15
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator_test.cc13
9 files changed, 204 insertions, 219 deletions
diff --git a/tensorflow/contrib/lite/builtin_op_data.h b/tensorflow/contrib/lite/builtin_op_data.h
index 7a7e20a41e..a1037a525c 100644
--- a/tensorflow/contrib/lite/builtin_op_data.h
+++ b/tensorflow/contrib/lite/builtin_op_data.h
@@ -116,14 +116,6 @@ typedef struct {
} TfLiteAddParams;
typedef struct {
- // Number of spatial dimensions.
- // For now only NHWC is supported, and the value should always be 2.
- int num_spatial_dimensions;
- // TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
- // For now we will fix the maximum possible number of dimensions.
- int block_shape[2];
- int before_paddings[2];
- int after_paddings[2];
} TfLiteSpaceToBatchNDParams;
typedef struct {
diff --git a/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc b/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc
index 2e22d0db56..e2e1873f77 100644
--- a/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc
+++ b/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc
@@ -33,17 +33,16 @@ enum KernelType {
kGenericOptimized,
};
-// Inputs specified in the 2nd tensor (block_shape) and 3rd tensor (paddings)
-// are ignored. Only use the `block_shape` and `paddings` specified in params.
-// TODO(nupurgarg): Support inputs as tensors in SpaceToBatchND.
struct SpaceToBatchNDContext {
SpaceToBatchNDContext(TfLiteContext* context, TfLiteNode* node) {
- params = reinterpret_cast<TfLiteSpaceToBatchNDParams*>(node->builtin_data);
input = GetInput(context, node, 0);
+ block_shape = GetInput(context, node, 1);
+ paddings = GetInput(context, node, 2);
output = GetOutput(context, node, 0);
}
- TfLiteSpaceToBatchNDParams* params;
TfLiteTensor* input;
+ TfLiteTensor* block_shape;
+ TfLiteTensor* paddings;
TfLiteTensor* output;
};
@@ -51,32 +50,29 @@ struct SpaceToBatchNDContext {
// The 4D array need to have exactly 2 spatial dimensions.
// TODO(nupurgarg): Support arbitrary dimension in SpaceToBatchND.
const int kInputDimensionNum = 4;
-const int kOutputDimensionNum = 4;
+const int kBlockSizeDimensionNum = 1;
const int kSpatialDimensionNum = 2;
-const int kPaddingDimensionNum = 4;
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
- TF_LITE_ENSURE(context, NumInputs(node) >= 1 && NumInputs(node) <= 3);
- TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
+ SpaceToBatchNDContext* op_context) {
+ TfLiteIntArray* input_size = op_context->input->dims;
+ const int32* block_shape = GetTensorData<int32>(op_context->block_shape);
+ const int32* paddings_data = GetTensorData<int32>(op_context->paddings);
- SpaceToBatchNDContext op_context(context, node);
- TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.input),
- kInputDimensionNum);
- TF_LITE_ENSURE_EQ(context, op_context.params->num_spatial_dimensions,
+ TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape),
+ kBlockSizeDimensionNum);
+ TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],
+ kSpatialDimensionNum);
+ TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->paddings),
kSpatialDimensionNum);
- TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
-
- const TfLiteIntArray* input_size = op_context.input->dims;
- const int* block_shape = op_context.params->block_shape;
- TfLiteIntArray* output_size = TfLiteIntArrayCreate(kOutputDimensionNum);
+ TfLiteIntArray* output_size = TfLiteIntArrayCopy(input_size);
// Ensures the input height and width (with padding) is a multiple of block
// shape height and width.
for (int dim = 0; dim < kSpatialDimensionNum; ++dim) {
- int final_dim_size =
- (input_size->data[dim + 1] + op_context.params->before_paddings[dim] +
- op_context.params->after_paddings[dim]);
+ int final_dim_size = (input_size->data[dim + 1] + paddings_data[dim * 2] +
+ paddings_data[dim * 2 + 1]);
TF_LITE_ENSURE_EQ(context, final_dim_size % block_shape[dim], 0);
output_size->data[dim + 1] = final_dim_size / block_shape[dim];
}
@@ -88,33 +84,44 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
output_size->data[0] = output_batch_size;
output_size->data[3] = output_channel_size;
- return context->ResizeTensor(context, op_context.output, output_size);
+ return context->ResizeTensor(context, op_context->output, output_size);
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ SpaceToBatchNDContext op_context(context, node);
+ TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.input),
+ kInputDimensionNum);
+ TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
+
+ if (!IsConstantTensor(op_context.block_shape) ||
+ !IsConstantTensor(op_context.paddings)) {
+ SetTensorToDynamic(op_context.output);
+ return kTfLiteOk;
+ }
+ return ResizeOutputTensor(context, &op_context);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
SpaceToBatchNDContext op_context(context, node);
- int block_shape_dims_array[1] = {kSpatialDimensionNum};
- Dims<4> block_shape_dims = GetTensorDims(block_shape_dims_array, 1);
-
- // Initialize padding array in the format accepted by the kernel code.
- // TODO(nupurgarg): Make kernel code accept padding array format that is
- // consistent with Pad operation (i.e. before_paddings and after_paddings).
- TfLiteIntArray* padding_data = TfLiteIntArrayCreate(kPaddingDimensionNum);
- padding_data->data[0] = op_context.params->before_paddings[0];
- padding_data->data[1] = op_context.params->after_paddings[0];
- padding_data->data[2] = op_context.params->before_paddings[1];
- padding_data->data[3] = op_context.params->after_paddings[1];
- int padding_dims_array[1] = {kPaddingDimensionNum};
- Dims<4> padding_dims = GetTensorDims(padding_dims_array, 1);
-
-#define TF_LITE_SPACE_TO_BATCH_ND(type, scalar) \
- type::SpaceToBatchND(GetTensorData<scalar>(op_context.input), \
- GetTensorDims(op_context.input), \
- op_context.params->block_shape, block_shape_dims, \
- padding_data->data, padding_dims, \
- GetTensorData<scalar>(op_context.output), \
+ // Resize the output tensor if the output tensor is dynamic.
+ if (IsDynamicTensor(op_context.output)) {
+ TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+ TfLiteTensorRealloc(op_context.output->bytes, op_context.output);
+ }
+
+#define TF_LITE_SPACE_TO_BATCH_ND(type, scalar) \
+ type::SpaceToBatchND(GetTensorData<scalar>(op_context.input), \
+ GetTensorDims(op_context.input), \
+ GetTensorData<int32_t>(op_context.block_shape), \
+ GetTensorDims(op_context.block_shape), \
+ GetTensorData<int32_t>(op_context.paddings), \
+ GetTensorDims(op_context.paddings), \
+ GetTensorData<scalar>(op_context.output), \
GetTensorDims(op_context.output))
switch (op_context.input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
@@ -151,8 +158,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteError;
}
#undef TF_LITE_SPACE_TO_BATCH_ND
-
- TfLiteIntArrayFree(padding_data);
return kTfLiteOk;
}
diff --git a/tensorflow/contrib/lite/kernels/space_to_batch_nd_test.cc b/tensorflow/contrib/lite/kernels/space_to_batch_nd_test.cc
index 45a6aef73d..92a4a037d5 100644
--- a/tensorflow/contrib/lite/kernels/space_to_batch_nd_test.cc
+++ b/tensorflow/contrib/lite/kernels/space_to_batch_nd_test.cc
@@ -26,41 +26,81 @@ using ::testing::ElementsAreArray;
class SpaceToBatchNDOpModel : public SingleOpModel {
public:
- SpaceToBatchNDOpModel(std::initializer_list<int> input_shape,
- std::initializer_list<int> block_shape,
- std::initializer_list<int> before_paddings,
- std::initializer_list<int> after_paddings) {
- input_ = AddInput(TensorType_FLOAT32);
- output_ = AddOutput(TensorType_FLOAT32);
- SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
- BuiltinOptions_SpaceToBatchNDOptions,
- CreateSpaceToBatchNDOptions(
- builder_, builder_.CreateVector<int>(block_shape),
- builder_.CreateVector<int>(before_paddings),
- builder_.CreateVector<int>(after_paddings))
- .Union());
- BuildInterpreter({input_shape});
- }
-
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
+ void SetBlockShape(std::initializer_list<int> data) {
+ PopulateTensor<int>(block_shape_, data);
+ }
+
+ void SetPaddings(std::initializer_list<int> data) {
+ PopulateTensor<int>(paddings_, data);
+ }
+
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
- private:
+ protected:
int input_;
+ int block_shape_;
+ int paddings_;
int output_;
};
+// Tests case where block_shape and paddings are const tensors.
+//
+// Example usage is as follows:
+// SpaceToBatchNDOpConstModel m(input_shape, block_shape, paddings);
+// m.SetInput(input_data);
+// m.Invoke();
+class SpaceToBatchNDOpConstModel : public SpaceToBatchNDOpModel {
+ public:
+ SpaceToBatchNDOpConstModel(std::initializer_list<int> input_shape,
+ std::initializer_list<int> block_shape,
+ std::initializer_list<int> paddings) {
+ input_ = AddInput(TensorType_FLOAT32);
+ block_shape_ = AddConstInput(TensorType_INT32, block_shape, {2});
+ paddings_ = AddConstInput(TensorType_INT32, paddings, {2, 2});
+ output_ = AddOutput(TensorType_FLOAT32);
+
+ SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
+ BuiltinOptions_SpaceToBatchNDOptions,
+ CreateSpaceToBatchNDOptions(builder_).Union());
+ BuildInterpreter({input_shape});
+ }
+};
+
+// Tests case where block_shape and paddings are non-const tensors.
+//
+// Example usage is as follows:
+// SpaceToBatchNDOpDynamicModel m(input_shape);
+// m.SetInput(input_data);
+// m.SetBlockShape(block_shape);
+// m.SetPaddings(paddings);
+// m.Invoke();
+class SpaceToBatchNDOpDynamicModel : public SpaceToBatchNDOpModel {
+ public:
+ SpaceToBatchNDOpDynamicModel(std::initializer_list<int> input_shape) {
+ input_ = AddInput(TensorType_FLOAT32);
+ block_shape_ = AddInput(TensorType_INT32);
+ paddings_ = AddInput(TensorType_INT32);
+ output_ = AddOutput(TensorType_FLOAT32);
+
+ SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
+ BuiltinOptions_SpaceToBatchNDOptions,
+ CreateSpaceToBatchNDOptions(builder_).Union());
+ BuildInterpreter({input_shape, {2}, {2, 2}});
+ }
+};
+
TEST(SpaceToBatchNDOpTest, InvalidShapeTest) {
- EXPECT_DEATH(SpaceToBatchNDOpModel({1, 3, 3, 1}, {2, 2}, {0, 0}, {0, 0}),
+ EXPECT_DEATH(SpaceToBatchNDOpConstModel({1, 3, 3, 1}, {2, 2}, {0, 0, 0, 0}),
"Cannot allocate tensors");
}
-TEST(SpaceToBatchNDOpTest, SimpleTest) {
- SpaceToBatchNDOpModel m({1, 4, 4, 1}, {2, 2}, {0, 0}, {0, 0});
+TEST(SpaceToBatchNDOpTest, SimpleConstTest) {
+ SpaceToBatchNDOpConstModel m({1, 4, 4, 1}, {2, 2}, {0, 0, 0, 0});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
@@ -68,17 +108,39 @@ TEST(SpaceToBatchNDOpTest, SimpleTest) {
13, 15, 6, 8, 14, 16}));
}
-TEST(SpaceToBatchNDOpTest, MultipleInputBatches) {
- SpaceToBatchNDOpModel m({2, 2, 4, 1}, {2, 2}, {0, 0}, {0, 0});
+TEST(SpaceToBatchNDOpTest, SimpleDynamicTest) {
+ SpaceToBatchNDOpDynamicModel m({1, 4, 4, 1});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
+ m.SetBlockShape({2, 2});
+ m.SetPaddings({0, 0, 0, 0});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
+ 13, 15, 6, 8, 14, 16}));
+}
+
+TEST(SpaceToBatchNDOpTest, MultipleInputBatchesConstTest) {
+ SpaceToBatchNDOpConstModel m({2, 2, 4, 1}, {2, 2}, {0, 0, 0, 0});
+ m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
+ 13, 15, 6, 8, 14, 16}));
+}
+
+TEST(SpaceToBatchNDOpTest, MultipleInputBatchesDynamicTest) {
+ SpaceToBatchNDOpDynamicModel m({2, 2, 4, 1});
+ m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
+ m.SetBlockShape({2, 2});
+ m.SetPaddings({0, 0, 0, 0});
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
-TEST(SpaceToBatchNDOpTest, SimplePadding) {
- SpaceToBatchNDOpModel m({1, 5, 2, 1}, {3, 2}, {1, 2}, {0, 0});
+TEST(SpaceToBatchNDOpTest, SimplePaddingConstTest) {
+ SpaceToBatchNDOpConstModel m({1, 5, 2, 1}, {3, 2}, {1, 0, 2, 0});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
@@ -88,9 +150,36 @@ TEST(SpaceToBatchNDOpTest, SimplePadding) {
}));
}
-TEST(SpaceToBatchNDOpTest, ComplexPadding) {
- SpaceToBatchNDOpModel m({1, 4, 2, 1}, {3, 2}, {1, 2}, {1, 4});
+TEST(SpaceToBatchNDOpTest, SimplePaddingDynamicTest) {
+ SpaceToBatchNDOpDynamicModel m({1, 5, 2, 1});
+ m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
+ m.SetBlockShape({3, 2});
+ m.SetPaddings({1, 0, 2, 0});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+ 0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
+ 0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
+ }));
+}
+
+TEST(SpaceToBatchNDOpTest, ComplexPaddingConstTest) {
+ SpaceToBatchNDOpConstModel m({1, 4, 2, 1}, {3, 2}, {1, 1, 2, 4});
+ m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({
+ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
+ 0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
+ 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
+ }));
+}
+
+TEST(SpaceToBatchNDOpTest, ComplexPaddingDynamicTest) {
+ SpaceToBatchNDOpDynamicModel m({1, 4, 2, 1});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
+ m.SetBlockShape({3, 2});
+ m.SetPaddings({1, 1, 2, 4});
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index c82ae27953..b36bfcef84 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -516,23 +516,6 @@ void* ParseOpData(const Operator* op, BuiltinOperator op_type,
break;
}
case BuiltinOperator_SPACE_TO_BATCH_ND: {
- auto* params = MallocPOD<TfLiteSpaceToBatchNDParams>();
- if (auto* schema_params =
- op->builtin_options_as_SpaceToBatchNDOptions()) {
- const auto& block_shape = schema_params->block_shape();
- FlatBufferIntVectorToArray(sizeof(params->block_shape), block_shape,
- params->block_shape, error_reporter);
- const auto& before_paddings = schema_params->before_paddings();
- FlatBufferIntVectorToArray(sizeof(params->before_paddings),
- before_paddings, params->before_paddings,
- error_reporter);
- const auto& after_paddings = schema_params->after_paddings();
- FlatBufferIntVectorToArray(sizeof(params->after_paddings),
- after_paddings, params->after_paddings,
- error_reporter);
- params->num_spatial_dimensions = block_shape->Length();
- }
- builtin_data = reinterpret_cast<void*>(params);
break;
}
case BuiltinOperator_BATCH_TO_SPACE_ND: {
diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs
index 91eac2ab48..c0b220e872 100644
--- a/tensorflow/contrib/lite/schema/schema.fbs
+++ b/tensorflow/contrib/lite/schema/schema.fbs
@@ -289,9 +289,6 @@ table ReshapeOptions {
}
table SpaceToBatchNDOptions {
- block_shape:[int];
- before_paddings:[int];
- after_paddings:[int];
}
table BatchToSpaceNDOptions {
diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h
index a8370b34c6..29f3a17be7 100755
--- a/tensorflow/contrib/lite/schema/schema_generated.h
+++ b/tensorflow/contrib/lite/schema/schema_generated.h
@@ -2834,33 +2834,14 @@ flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(
struct SpaceToBatchNDOptionsT : public flatbuffers::NativeTable {
typedef SpaceToBatchNDOptions TableType;
- std::vector<int32_t> block_shape;
- std::vector<int32_t> before_paddings;
- std::vector<int32_t> after_paddings;
SpaceToBatchNDOptionsT() {}
};
struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS
: private flatbuffers::Table {
typedef SpaceToBatchNDOptionsT NativeTableType;
- enum { VT_BLOCK_SHAPE = 4, VT_BEFORE_PADDINGS = 6, VT_AFTER_PADDINGS = 8 };
- const flatbuffers::Vector<int32_t> *block_shape() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BLOCK_SHAPE);
- }
- const flatbuffers::Vector<int32_t> *before_paddings() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BEFORE_PADDINGS);
- }
- const flatbuffers::Vector<int32_t> *after_paddings() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_AFTER_PADDINGS);
- }
bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_BLOCK_SHAPE) &&
- verifier.Verify(block_shape()) &&
- VerifyOffset(verifier, VT_BEFORE_PADDINGS) &&
- verifier.Verify(before_paddings()) &&
- VerifyOffset(verifier, VT_AFTER_PADDINGS) &&
- verifier.Verify(after_paddings()) && verifier.EndTable();
+ return VerifyTableStart(verifier) && verifier.EndTable();
}
SpaceToBatchNDOptionsT *UnPack(
const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@@ -2875,18 +2856,6 @@ struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS
struct SpaceToBatchNDOptionsBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
- void add_block_shape(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_shape) {
- fbb_.AddOffset(SpaceToBatchNDOptions::VT_BLOCK_SHAPE, block_shape);
- }
- void add_before_paddings(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> before_paddings) {
- fbb_.AddOffset(SpaceToBatchNDOptions::VT_BEFORE_PADDINGS, before_paddings);
- }
- void add_after_paddings(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> after_paddings) {
- fbb_.AddOffset(SpaceToBatchNDOptions::VT_AFTER_PADDINGS, after_paddings);
- }
explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -2900,29 +2869,11 @@ struct SpaceToBatchNDOptionsBuilder {
};
inline flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_shape = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> before_paddings = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> after_paddings = 0) {
+ flatbuffers::FlatBufferBuilder &_fbb) {
SpaceToBatchNDOptionsBuilder builder_(_fbb);
- builder_.add_after_paddings(after_paddings);
- builder_.add_before_paddings(before_paddings);
- builder_.add_block_shape(block_shape);
return builder_.Finish();
}
-inline flatbuffers::Offset<SpaceToBatchNDOptions>
-CreateSpaceToBatchNDOptionsDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *block_shape = nullptr,
- const std::vector<int32_t> *before_paddings = nullptr,
- const std::vector<int32_t> *after_paddings = nullptr) {
- return tflite::CreateSpaceToBatchNDOptions(
- _fbb, block_shape ? _fbb.CreateVector<int32_t>(*block_shape) : 0,
- before_paddings ? _fbb.CreateVector<int32_t>(*before_paddings) : 0,
- after_paddings ? _fbb.CreateVector<int32_t>(*after_paddings) : 0);
-}
-
flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(
flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o,
const flatbuffers::rehasher_function_t *_rehasher = nullptr);
@@ -5639,33 +5590,6 @@ inline void SpaceToBatchNDOptions::UnPackTo(
const flatbuffers::resolver_function_t *_resolver) const {
(void)_o;
(void)_resolver;
- {
- auto _e = block_shape();
- if (_e) {
- _o->block_shape.resize(_e->size());
- for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
- _o->block_shape[_i] = _e->Get(_i);
- }
- }
- };
- {
- auto _e = before_paddings();
- if (_e) {
- _o->before_paddings.resize(_e->size());
- for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
- _o->before_paddings[_i] = _e->Get(_i);
- }
- }
- };
- {
- auto _e = after_paddings();
- if (_e) {
- _o->after_paddings.resize(_e->size());
- for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
- _o->after_paddings[_i] = _e->Get(_i);
- }
- }
- };
}
inline flatbuffers::Offset<SpaceToBatchNDOptions> SpaceToBatchNDOptions::Pack(
@@ -5685,14 +5609,7 @@ inline flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(
const flatbuffers::rehasher_function_t *__rehasher;
} _va = {&_fbb, _o, _rehasher};
(void)_va;
- auto _block_shape =
- _o->block_shape.size() ? _fbb.CreateVector(_o->block_shape) : 0;
- auto _before_paddings =
- _o->before_paddings.size() ? _fbb.CreateVector(_o->before_paddings) : 0;
- auto _after_paddings =
- _o->after_paddings.size() ? _fbb.CreateVector(_o->after_paddings) : 0;
- return tflite::CreateSpaceToBatchNDOptions(_fbb, _block_shape,
- _before_paddings, _after_paddings);
+ return tflite::CreateSpaceToBatchNDOptions(_fbb);
}
inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(
diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py
index f75d7c4bb9..e7606eecc4 100644
--- a/tensorflow/contrib/lite/testing/generate_examples.py
+++ b/tensorflow/contrib/lite/testing/generate_examples.py
@@ -1335,12 +1335,16 @@ def make_space_to_batch_nd_tests(zip_path):
"input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],
+ "constant_block_shape": [True, False],
+ "constant_paddings": [True, False],
},
{
"dtype": [tf.float32],
"input_shape": [[2, 3, 7, 3]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],
+ "constant_block_shape": [True, False],
+ "constant_paddings": [True, False],
},
# Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
@@ -1348,23 +1352,47 @@ def make_space_to_batch_nd_tests(zip_path):
"input_shape": [[1, 4, 4, 4, 1, 1]],
"block_shape": [[2, 2, 2]],
"paddings": [[[0, 0], [0, 0], [0, 0]]],
+ "constant_block_shape": [True, False],
+ "constant_paddings": [True, False],
},
]
def build_graph(parameters):
+ """Build a space_to_batch graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
- out = tf.space_to_batch_nd(input_tensor, parameters["block_shape"],
- parameters["paddings"])
- return [input_tensor], [out]
+ input_tensors = [input_tensor]
+
+ # Get block_shape either as a const or as a placeholder (tensor).
+ if parameters["constant_block_shape"]:
+ block_shape = parameters["block_shape"]
+ else:
+ shape = [len(parameters["block_shape"])]
+ block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
+ input_tensors.append(block_shape)
+
+ # Get paddings either as a const or as a placeholder (tensor).
+ if parameters["constant_paddings"]:
+ paddings = parameters["paddings"]
+ else:
+ shape = [len(parameters["paddings"]), 2]
+ paddings = tf.placeholder(dtype=tf.int32, name="paddings", shape=shape)
+ input_tensors.append(paddings)
+
+ out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)
+ return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
- input_values = create_tensor_data(parameters["dtype"],
- parameters["input_shape"])
- return [input_values], sess.run(
- outputs, feed_dict=dict(zip(inputs, [input_values])))
+ values = [
+ create_tensor_data(parameters["dtype"], parameters["input_shape"])
+ ]
+ if not parameters["constant_block_shape"]:
+ values.append(np.array(parameters["block_shape"]))
+ if not parameters["constant_paddings"]:
+ values.append(np.array(parameters["paddings"]))
+ return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index e33a5788d8..e2162e1493 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -140,24 +140,11 @@ class SpaceToBatchND
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
- auto block_shape = builder->CreateVector(op.block_shape);
- auto before_paddings = builder->CreateVector(op.before_paddings);
- auto after_paddings = builder->CreateVector(op.after_paddings);
- return ::tflite::CreateSpaceToBatchNDOptions(
- *builder, block_shape, before_paddings, after_paddings);
+ return ::tflite::CreateSpaceToBatchNDOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
- op->block_shape.insert(op->block_shape.end(),
- options.block_shape()->begin(),
- options.block_shape()->end());
- op->before_paddings.insert(op->before_paddings.end(),
- options.before_paddings()->begin(),
- options.before_paddings()->end());
- op->after_paddings.insert(op->after_paddings.end(),
- options.after_paddings()->begin(),
- options.after_paddings()->end());
}
};
diff --git a/tensorflow/contrib/lite/toco/tflite/operator_test.cc b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
index b4ec7bbd50..6daa296282 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
@@ -119,19 +119,6 @@ TEST_F(OperatorTest, BuiltinAdd) {
output_toco_op->fused_activation_function);
}
-TEST_F(OperatorTest, BuiltinSpaceToBatchND) {
- SpaceToBatchNDOperator op;
- op.block_shape = {2, 2};
- op.before_paddings = {1, 2};
- op.after_paddings = {3, 4};
-
- auto output_toco_op = SerializeAndDeserialize(
- GetOperator("SPACE_TO_BATCH_ND", OperatorType::kSpaceToBatchND), op);
- EXPECT_EQ(op.block_shape, output_toco_op->block_shape);
- EXPECT_EQ(op.before_paddings, output_toco_op->before_paddings);
- EXPECT_EQ(op.after_paddings, output_toco_op->after_paddings);
-}
-
TEST_F(OperatorTest, BuiltinMean) {
MeanOperator op;
op.keep_dims = false;