aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/contrib
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <gardener@tensorflow.org>2018-01-11 14:01:15 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-01-11 14:05:06 -0800
commita663e5fda16c0c739eb11279422549d499bad597 (patch)
tree432980ff6599fa78f30ef4bdbae567972fffd737 /tensorflow/contrib
parent57fe4920932ccdfea3ce2235823cfb4327d119e7 (diff)
Adds Mean op to TensorFlow Lite.
PiperOrigin-RevId: 181658399
Diffstat (limited to 'tensorflow/contrib')
-rw-r--r--tensorflow/contrib/lite/builtin_op_data.h8
-rw-r--r--tensorflow/contrib/lite/kernels/BUILD13
-rw-r--r--tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h58
-rw-r--r--tensorflow/contrib/lite/kernels/internal/types.h52
-rw-r--r--tensorflow/contrib/lite/kernels/mean.cc200
-rw-r--r--tensorflow/contrib/lite/kernels/mean_test.cc90
-rw-r--r--tensorflow/contrib/lite/kernels/register.cc2
-rw-r--r--tensorflow/contrib/lite/model.cc12
-rw-r--r--tensorflow/contrib/lite/nnapi_delegate.cc1
-rw-r--r--tensorflow/contrib/lite/schema/schema.fbs7
-rwxr-xr-xtensorflow/contrib/lite/schema/schema_generated.h187
-rw-r--r--tensorflow/contrib/lite/testing/BUILD1
-rw-r--r--tensorflow/contrib/lite/testing/generate_examples.py46
-rw-r--r--tensorflow/contrib/lite/testing/generated_examples_zip_test.cc1
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc12
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator.cc21
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator_test.cc11
17 files changed, 705 insertions, 17 deletions
diff --git a/tensorflow/contrib/lite/builtin_op_data.h b/tensorflow/contrib/lite/builtin_op_data.h
index 347e46b83c..062fea2aa3 100644
--- a/tensorflow/contrib/lite/builtin_op_data.h
+++ b/tensorflow/contrib/lite/builtin_op_data.h
@@ -198,6 +198,14 @@ typedef struct {
int num_dimensions;
} TfLiteTransposeParams;
+typedef struct {
+ // TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
+ // For now we will fix the maximum possible number of dimensions.
+ int axis[8];
+ int num_axis_dimensions;
+ bool keep_dims;
+} TfLiteMeanParams;
+
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
diff --git a/tensorflow/contrib/lite/kernels/BUILD b/tensorflow/contrib/lite/kernels/BUILD
index d7f4b36f94..61147e8659 100644
--- a/tensorflow/contrib/lite/kernels/BUILD
+++ b/tensorflow/contrib/lite/kernels/BUILD
@@ -91,6 +91,7 @@ cc_library(
"local_response_norm.cc",
"lsh_projection.cc",
"lstm.cc",
+ "mean.cc",
"mul.cc",
"pad.cc",
"pooling.cc",
@@ -270,6 +271,18 @@ tf_cc_test(
)
tf_cc_test(
+ name = "mean_test",
+ size = "small",
+ srcs = ["mean_test.cc"],
+ deps = [
+ ":builtin_ops",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite/kernels:test_util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+tf_cc_test(
name = "mul_test",
size = "small",
srcs = ["mul_test.cc"],
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 9615848996..a645006a87 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -2337,6 +2337,64 @@ inline void Slice(const T* input_data, const Dims<4>& input_dims,
}
template <typename T>
+inline void Mean(T* input_data, const int* input_dims, const int input_num_dims,
+ T* output_data, const int* output_dims,
+ const int output_num_dims, const int* axis,
+ const int num_axis_dimensions, bool keep_dims, int* temp_index,
+ int* resolved_axis) {
+ // resets output data.
+ size_t num_outputs = 1;
+ for (int idx = 0; idx < output_num_dims; ++idx) {
+ num_outputs *= static_cast<size_t>(output_dims[idx]);
+ }
+ for (size_t idx = 0; idx < num_outputs; ++idx) {
+ output_data[idx] = 0;
+ }
+ // resets temp index.
+ for (int idx = 0; idx < input_num_dims; ++idx) {
+ temp_index[idx] = 0;
+ }
+ // resolves axis.
+ int num_resolved_axis = 0;
+ for (int idx = 0; idx < num_axis_dimensions; ++idx) {
+ int current = axis[idx];
+ TFLITE_DCHECK(current < input_num_dims && current + input_num_dims >= 0);
+ if (current < 0) {
+ current += input_num_dims;
+ }
+ bool is_dup = false;
+ for (int j = 0; j < num_resolved_axis; ++j) {
+ if (resolved_axis[j] == current) {
+ is_dup = true;
+ break;
+ }
+ }
+ if (!is_dup) {
+ resolved_axis[num_resolved_axis++] = current;
+ }
+ }
+ // iterates through input_data.
+ for (bool has_next = true; has_next;
+ has_next = NextIndex(input_num_dims, input_dims, temp_index)) {
+ size_t input_offset =
+ ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr);
+ size_t output_offset =
+ ReducedOutputOffset(input_num_dims, input_dims, temp_index,
+ num_resolved_axis, resolved_axis);
+ output_data[output_offset] += input_data[input_offset];
+ }
+ // takes average by num of elements added to get mean.
+ size_t num_elements_in_axis = 1;
+ for (int idx = 0; idx < num_resolved_axis; ++idx) {
+ num_elements_in_axis *= static_cast<size_t>(input_dims[resolved_axis[idx]]);
+ }
+ for (size_t idx = 0; idx < num_outputs; ++idx) {
+ output_data[idx] = static_cast<T>(static_cast<float>(output_data[idx]) /
+ num_elements_in_axis);
+ }
+}
+
+template <typename T>
inline void Mean(const T* input_data, const Dims<4>& input_dims,
const std::vector<int>& reduction_indices, T* output_data,
const Dims<4>& output_dims) {
diff --git a/tensorflow/contrib/lite/kernels/internal/types.h b/tensorflow/contrib/lite/kernels/internal/types.h
index cce0779bf4..5989ac8fcd 100644
--- a/tensorflow/contrib/lite/kernels/internal/types.h
+++ b/tensorflow/contrib/lite/kernels/internal/types.h
@@ -27,6 +27,58 @@ struct Dims {
int strides[N];
};
+// Gets next index to iterate through a multidimensional array.
+inline bool NextIndex(const int num_dims, const int* dims, int* current) {
+ TFLITE_DCHECK_GT(num_dims, 0);
+ TFLITE_DCHECK(dims != nullptr);
+ TFLITE_DCHECK(current != nullptr);
+ int carry = 1;
+ for (int idx = num_dims - 1; idx >= 0; --idx) {
+ int current_val = current[idx] + carry;
+ TFLITE_DCHECK_GE(dims[idx], current_val);
+ if (dims[idx] == current_val) {
+ current[idx] = 0;
+ } else {
+ current[idx] = current_val;
+ carry = 0;
+ break;
+ }
+ }
+ return (carry == 0);
+}
+
+// Gets offset of index if reducing on axis. When reducing, the flattened offset
+// will not change, if the input index changes on the given axis. For example,
+// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
+// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
+// offset.
+// TODO(kanlig): use Dims to represent dimensions.
+inline size_t ReducedOutputOffset(const int num_dims, const int* dims,
+ const int* index, const int num_axis,
+ const int* axis) {
+ TFLITE_DCHECK_GT(num_dims, 0);
+ TFLITE_DCHECK(dims != nullptr);
+ TFLITE_DCHECK(index != nullptr);
+ size_t offset = 0;
+ for (int idx = 0; idx < num_dims; ++idx) {
+ // if we need to skip this axis
+ bool is_axis = false;
+ if (axis != nullptr) {
+ for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) {
+ if (idx == axis[axis_idx]) {
+ is_axis = true;
+ break;
+ }
+ }
+ }
+ if (!is_axis) {
+ offset = offset * static_cast<size_t>(dims[idx]) +
+ static_cast<size_t>(index[idx]);
+ }
+ }
+ return offset;
+}
+
inline int Offset(const Dims<4>& dims, int i0, int i1, int i2, int i3) {
TFLITE_DCHECK(i0 >= 0 && i0 < dims.sizes[0]);
TFLITE_DCHECK(i1 >= 0 && i1 < dims.sizes[1]);
diff --git a/tensorflow/contrib/lite/kernels/mean.cc b/tensorflow/contrib/lite/kernels/mean.cc
new file mode 100644
index 0000000000..540e5a364d
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/mean.cc
@@ -0,0 +1,200 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <string.h>
+#include <vector>
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/tensor.h"
+#include "tensorflow/contrib/lite/kernels/kernel_util.h"
+#include "tensorflow/contrib/lite/kernels/op_macros.h"
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+namespace mean {
+
+// This file has the reference implementation of the Mean op.
+enum KernelType {
+ kReference,
+};
+
+struct MeanContext {
+ MeanContext(TfLiteContext* context, TfLiteNode* node) {
+ params = reinterpret_cast<TfLiteMeanParams*>(node->builtin_data);
+ input = GetInput(context, node, 0);
+ output = GetOutput(context, node, 0);
+ }
+ TfLiteMeanParams* params;
+ TfLiteTensor* input;
+ TfLiteTensor* output;
+};
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+ // Creates two temp tensors to store index and axis for internal
+ // implementation only.
+ auto* scratch_tensor_index = new int;
+ context->AddTensors(context, 2, scratch_tensor_index);
+ return scratch_tensor_index;
+}
+
+void Free(TfLiteContext* context, void* buffer) {
+ delete reinterpret_cast<int*>(buffer);
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ MeanContext op_context(context, node);
+ int input_num_dims = NumDimensions(op_context.input);
+ int axis_num_dims = op_context.params->num_axis_dimensions;
+
+ // Creates a temp index to iterate through input data.
+ int* scratch_tensor_index = reinterpret_cast<int*>(node->user_data);
+ TfLiteIntArrayFree(node->temporaries);
+ node->temporaries = TfLiteIntArrayCreate(2);
+ node->temporaries->data[0] = *scratch_tensor_index;
+ TfLiteTensor* scratch_tensor = &context->tensors[node->temporaries->data[0]];
+ scratch_tensor->type = kTfLiteInt32;
+ scratch_tensor->allocation_type = kTfLiteArenaRw;
+ TfLiteIntArray* index_size = TfLiteIntArrayCreate(1);
+ index_size->data[0] = input_num_dims;
+ TF_LITE_ENSURE_OK(context,
+ context->ResizeTensor(context, scratch_tensor, index_size));
+
+ // Creates a temp tensor to store resolved axis given input data.
+ node->temporaries->data[1] = *scratch_tensor_index + 1;
+ TfLiteTensor* axis_tensor = &context->tensors[node->temporaries->data[1]];
+ axis_tensor->type = kTfLiteInt32;
+ axis_tensor->allocation_type = kTfLiteArenaRw;
+ TfLiteIntArray* axis_size = TfLiteIntArrayCreate(1);
+ axis_size->data[0] = op_context.params->num_axis_dimensions;
+ TF_LITE_ENSURE_OK(context,
+ context->ResizeTensor(context, axis_tensor, axis_size));
+
+ // Determines size of output tensor.
+ const TfLiteIntArray* input_dims = op_context.input->dims;
+ const int* axis = op_context.params->axis;
+ if (op_context.params->keep_dims) {
+ TfLiteIntArray* output_dims = TfLiteIntArrayCreate(input_num_dims);
+ for (int idx = 0; idx < input_num_dims; ++idx) {
+ bool is_axis = false;
+ for (int axis_idx = 0; axis_idx < axis_num_dims; ++axis_idx) {
+ if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx) {
+ is_axis = true;
+ break;
+ }
+ }
+ if (is_axis) {
+ output_dims->data[idx] = 1;
+ } else {
+ output_dims->data[idx] = input_dims->data[idx];
+ }
+ }
+ return context->ResizeTensor(context, op_context.output, output_dims);
+ } else {
+ // Calculates size of reducing axis.
+ int num_reduce_axis = axis_num_dims;
+ for (int i = 0; i < axis_num_dims; ++i) {
+ int current = axis[i];
+ if (current < 0) {
+ current += input_num_dims;
+ }
+ TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
+ for (int j = 0; j < i; ++j) {
+ int previous = axis[j];
+ if (previous < 0) {
+ previous += input_num_dims;
+ }
+ if (current == previous) {
+ --num_reduce_axis;
+ break;
+ }
+ }
+ }
+ // Determines output dimensions.
+ TfLiteIntArray* output_dims =
+ TfLiteIntArrayCreate(input_num_dims - num_reduce_axis);
+ int num_skip_axis = 0;
+ for (int idx = 0; idx < input_num_dims; ++idx) {
+ bool is_axis = false;
+ for (int axis_idx = 0; axis_idx < axis_num_dims; ++axis_idx) {
+ if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx) {
+ ++num_skip_axis;
+ is_axis = true;
+ break;
+ }
+ }
+ if (!is_axis) {
+ output_dims->data[idx - num_skip_axis] = input_dims->data[idx];
+ }
+ }
+ return context->ResizeTensor(context, op_context.output, output_dims);
+ }
+}
+
+template <KernelType kernel_type>
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ MeanContext op_context(context, node);
+ TfLiteTensor* temp_index = &context->tensors[node->temporaries->data[0]];
+ TfLiteTensor* resolved_axis = &context->tensors[node->temporaries->data[1]];
+
+#define TF_LITE_MEAN(kernel_type, data_type) \
+ kernel_type::Mean<>( \
+ GetTensorData<data_type>(op_context.input), \
+ op_context.input->dims->data, op_context.input->dims->size, \
+ GetTensorData<data_type>(op_context.output), \
+ op_context.output->dims->data, op_context.output->dims->size, \
+ op_context.params->axis, op_context.params->num_axis_dimensions, \
+ op_context.params->keep_dims, GetTensorData<int>(temp_index), \
+ GetTensorData<int>(resolved_axis))
+
+ if (kernel_type == kReference) {
+ switch (op_context.input->type) {
+ case kTfLiteFloat32:
+ TF_LITE_MEAN(reference_ops, float);
+ break;
+ case kTfLiteInt32:
+ TF_LITE_MEAN(reference_ops, int);
+ break;
+ case kTfLiteUInt8:
+ TF_LITE_MEAN(reference_ops, uint8_t);
+ break;
+ case kTfLiteInt64:
+ TF_LITE_MEAN(reference_ops, int64_t);
+ break;
+ default:
+ return kTfLiteError;
+ }
+ }
+#undef TF_LITE_MEAN
+ return kTfLiteOk;
+}
+
+} // namespace mean
+
+TfLiteRegistration* Register_MEAN_REF() {
+ static TfLiteRegistration r = {mean::Init, mean::Free, mean::Prepare,
+ mean::Eval<mean::kReference>};
+ return &r;
+}
+
+// TODO(kanlig): add optimized implementation of Mean.
+TfLiteRegistration* Register_MEAN() { return Register_MEAN_REF(); }
+
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/mean_test.cc b/tensorflow/contrib/lite/kernels/mean_test.cc
new file mode 100644
index 0000000000..4305c0632f
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/mean_test.cc
@@ -0,0 +1,90 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/kernels/test_util.h"
+#include "tensorflow/contrib/lite/model.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+class BaseMeanOpModel : public SingleOpModel {
+ public:
+ BaseMeanOpModel(const TensorData& input, const TensorData& output,
+ std::initializer_list<int> axis, bool keep_dims) {
+ input_ = AddInput(input);
+ output_ = AddOutput(output);
+ SetBuiltinOp(
+ BuiltinOperator_MEAN, BuiltinOptions_MeanOptions,
+ CreateMeanOptions(builder_, builder_.CreateVector<int>(axis), keep_dims)
+ .Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+
+ int input() { return input_; }
+
+ protected:
+ int input_;
+ int output_;
+};
+
+class FloatMeanOpModel : public BaseMeanOpModel {
+ public:
+ using BaseMeanOpModel::BaseMeanOpModel;
+
+ void SetInput(std::initializer_list<float> data) {
+ PopulateTensor(input_, data);
+ }
+
+ std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+};
+
+TEST(FloatMeanOpTest, NotKeepDims) {
+ std::initializer_list<float> data = {
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ FloatMeanOpModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
+ {1, 0, -3, -3}, false);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({12, 13})));
+}
+
+TEST(FloatMeanOpTest, KeepDims) {
+ std::initializer_list<float> data = {
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ FloatMeanOpModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
+ {0, 2}, true);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+ EXPECT_THAT(m.GetOutput(),
+ ElementsAreArray(ArrayFloatNear({10.5, 12.5, 14.5})));
+}
+
+} // namespace
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/kernels/register.cc b/tensorflow/contrib/lite/kernels/register.cc
index 9e4bacbf78..ecaf4d7042 100644
--- a/tensorflow/contrib/lite/kernels/register.cc
+++ b/tensorflow/contrib/lite/kernels/register.cc
@@ -53,6 +53,7 @@ TfLiteRegistration* Register_SKIP_GRAM();
TfLiteRegistration* Register_SPACE_TO_DEPTH();
TfLiteRegistration* Register_GATHER();
TfLiteRegistration* Register_TRANSPOSE();
+TfLiteRegistration* Register_MEAN();
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_RELU, Register_RELU());
@@ -92,6 +93,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH());
AddBuiltin(BuiltinOperator_GATHER, Register_GATHER());
AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE());
+ AddBuiltin(BuiltinOperator_MEAN, Register_MEAN());
}
TfLiteRegistration* BuiltinOpResolver::FindOp(
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index 8a7b6b5c72..0cd6c3e8dd 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -566,6 +566,18 @@ void* ParseOpData(const Operator* op, BuiltinOperator op_type,
builtin_data = reinterpret_cast<void*>(params);
break;
}
+ case BuiltinOperator_MEAN: {
+ auto* params = MallocPOD<TfLiteMeanParams>();
+ if (auto* schema_params = op->builtin_options_as_MeanOptions()) {
+ const auto& axis = schema_params->axis();
+ FlatBufferIntVectorToArray(sizeof(params->axis), axis, params->axis,
+ error_reporter);
+ params->keep_dims = schema_params->keep_dims();
+ params->num_axis_dimensions = axis->Length();
+ }
+ builtin_data = reinterpret_cast<void*>(params);
+ break;
+ }
}
return builtin_data;
}
diff --git a/tensorflow/contrib/lite/nnapi_delegate.cc b/tensorflow/contrib/lite/nnapi_delegate.cc
index faed5b193c..0c25c5d7eb 100644
--- a/tensorflow/contrib/lite/nnapi_delegate.cc
+++ b/tensorflow/contrib/lite/nnapi_delegate.cc
@@ -310,6 +310,7 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
case tflite::BuiltinOperator_TRANSPOSE:
+ case tflite::BuiltinOperator_MEAN:
FATAL("Op code %d is currently not delegated to NNAPI", builtin);
nn_op_type = -1; // set to invalid
break;
diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs
index 34dc16d661..54ef48f4ed 100644
--- a/tensorflow/contrib/lite/schema/schema.fbs
+++ b/tensorflow/contrib/lite/schema/schema.fbs
@@ -110,6 +110,7 @@ enum BuiltinOperator : byte {
BATCH_TO_SPACE_ND = 37,
SPACE_TO_BATCH_ND = 38,
TRANSPOSE = 39,
+ MEAN = 40,
}
// Options for the builtin operators.
@@ -140,6 +141,7 @@ union BuiltinOptions {
BatchToSpaceNDOptions,
SpaceToBatchNDOptions,
TransposeOptions,
+ MeanOptions,
}
enum Padding : byte { SAME, VALID }
@@ -304,6 +306,11 @@ table TransposeOptions {
perm:[int];
}
+table MeanOptions {
+ axis:[int];
+ keep_dims: bool;
+}
+
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h
index 00cd2b9e1b..0774a216f4 100755
--- a/tensorflow/contrib/lite/schema/schema_generated.h
+++ b/tensorflow/contrib/lite/schema/schema_generated.h
@@ -105,6 +105,9 @@ struct GatherOptionsT;
struct TransposeOptions;
struct TransposeOptionsT;
+struct MeanOptions;
+struct MeanOptionsT;
+
struct OperatorCode;
struct OperatorCodeT;
@@ -187,11 +190,12 @@ enum BuiltinOperator {
BuiltinOperator_BATCH_TO_SPACE_ND = 37,
BuiltinOperator_SPACE_TO_BATCH_ND = 38,
BuiltinOperator_TRANSPOSE = 39,
+ BuiltinOperator_MEAN = 40,
BuiltinOperator_MIN = BuiltinOperator_ADD,
- BuiltinOperator_MAX = BuiltinOperator_TRANSPOSE
+ BuiltinOperator_MAX = BuiltinOperator_MEAN
};
-inline BuiltinOperator (&EnumValuesBuiltinOperator())[37] {
+inline BuiltinOperator (&EnumValuesBuiltinOperator())[38] {
static BuiltinOperator values[] = {
BuiltinOperator_ADD,
BuiltinOperator_AVERAGE_POOL_2D,
@@ -229,7 +233,8 @@ inline BuiltinOperator (&EnumValuesBuiltinOperator())[37] {
BuiltinOperator_GATHER,
BuiltinOperator_BATCH_TO_SPACE_ND,
BuiltinOperator_SPACE_TO_BATCH_ND,
- BuiltinOperator_TRANSPOSE};
+ BuiltinOperator_TRANSPOSE,
+ BuiltinOperator_MEAN};
return values;
}
@@ -274,6 +279,7 @@ inline const char **EnumNamesBuiltinOperator() {
"BATCH_TO_SPACE_ND",
"SPACE_TO_BATCH_ND",
"TRANSPOSE",
+ "MEAN",
nullptr};
return names;
}
@@ -311,11 +317,12 @@ enum BuiltinOptions {
BuiltinOptions_BatchToSpaceNDOptions = 24,
BuiltinOptions_SpaceToBatchNDOptions = 25,
BuiltinOptions_TransposeOptions = 26,
+ BuiltinOptions_MeanOptions = 27,
BuiltinOptions_MIN = BuiltinOptions_NONE,
- BuiltinOptions_MAX = BuiltinOptions_TransposeOptions
+ BuiltinOptions_MAX = BuiltinOptions_MeanOptions
};
-inline BuiltinOptions (&EnumValuesBuiltinOptions())[27] {
+inline BuiltinOptions (&EnumValuesBuiltinOptions())[28] {
static BuiltinOptions values[] = {
BuiltinOptions_NONE,
BuiltinOptions_Conv2DOptions,
@@ -343,7 +350,8 @@ inline BuiltinOptions (&EnumValuesBuiltinOptions())[27] {
BuiltinOptions_GatherOptions,
BuiltinOptions_BatchToSpaceNDOptions,
BuiltinOptions_SpaceToBatchNDOptions,
- BuiltinOptions_TransposeOptions};
+ BuiltinOptions_TransposeOptions,
+ BuiltinOptions_MeanOptions};
return values;
}
@@ -375,6 +383,7 @@ inline const char **EnumNamesBuiltinOptions() {
"BatchToSpaceNDOptions",
"SpaceToBatchNDOptions",
"TransposeOptions",
+ "MeanOptions",
nullptr};
return names;
}
@@ -523,6 +532,11 @@ struct BuiltinOptionsTraits<TransposeOptions> {
static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions;
};
+template <>
+struct BuiltinOptionsTraits<MeanOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_MeanOptions;
+};
+
struct BuiltinOptionsUnion {
BuiltinOptions type;
void *value;
@@ -830,6 +844,16 @@ struct BuiltinOptionsUnion {
? reinterpret_cast<const TransposeOptionsT *>(value)
: nullptr;
}
+ MeanOptionsT *AsMeanOptions() {
+ return type == BuiltinOptions_MeanOptions
+ ? reinterpret_cast<MeanOptionsT *>(value)
+ : nullptr;
+ }
+ const MeanOptionsT *AsMeanOptions() const {
+ return type == BuiltinOptions_MeanOptions
+ ? reinterpret_cast<const MeanOptionsT *>(value)
+ : nullptr;
+ }
};
bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj,
@@ -3082,6 +3106,78 @@ flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(
flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o,
const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct MeanOptionsT : public flatbuffers::NativeTable {
+ typedef MeanOptions TableType;
+ std::vector<int32_t> axis;
+ bool keep_dims;
+ MeanOptionsT() : keep_dims(false) {}
+};
+
+struct MeanOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef MeanOptionsT NativeTableType;
+ enum { VT_AXIS = 4, VT_KEEP_DIMS = 6 };
+ const flatbuffers::Vector<int32_t> *axis() const {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_AXIS);
+ }
+ bool keep_dims() const { return GetField<uint8_t>(VT_KEEP_DIMS, 0) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_AXIS) &&
+ verifier.Verify(axis()) &&
+ VerifyField<uint8_t>(verifier, VT_KEEP_DIMS) && verifier.EndTable();
+ }
+ MeanOptionsT *UnPack(
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(
+ MeanOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<MeanOptions> Pack(
+ flatbuffers::FlatBufferBuilder &_fbb, const MeanOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MeanOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_axis(flatbuffers::Offset<flatbuffers::Vector<int32_t>> axis) {
+ fbb_.AddOffset(MeanOptions::VT_AXIS, axis);
+ }
+ void add_keep_dims(bool keep_dims) {
+ fbb_.AddElement<uint8_t>(MeanOptions::VT_KEEP_DIMS,
+ static_cast<uint8_t>(keep_dims), 0);
+ }
+ explicit MeanOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ MeanOptionsBuilder &operator=(const MeanOptionsBuilder &);
+ flatbuffers::Offset<MeanOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<MeanOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<MeanOptions> CreateMeanOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> axis = 0,
+ bool keep_dims = false) {
+ MeanOptionsBuilder builder_(_fbb);
+ builder_.add_axis(axis);
+ builder_.add_keep_dims(keep_dims);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<MeanOptions> CreateMeanOptionsDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<int32_t> *axis = nullptr, bool keep_dims = false) {
+ return tflite::CreateMeanOptions(
+ _fbb, axis ? _fbb.CreateVector<int32_t>(*axis) : 0, keep_dims);
+}
+
+flatbuffers::Offset<MeanOptions> CreateMeanOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, const MeanOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
struct OperatorCodeT : public flatbuffers::NativeTable {
typedef OperatorCode TableType;
BuiltinOperator builtin_code;
@@ -3341,6 +3437,11 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
? static_cast<const TransposeOptions *>(builtin_options())
: nullptr;
}
+ const MeanOptions *builtin_options_as_MeanOptions() const {
+ return builtin_options_type() == BuiltinOptions_MeanOptions
+ ? static_cast<const MeanOptions *>(builtin_options())
+ : nullptr;
+ }
const flatbuffers::Vector<uint8_t> *custom_options() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
}
@@ -3521,6 +3622,11 @@ inline const TransposeOptions *Operator::builtin_options_as<TransposeOptions>()
return builtin_options_as_TransposeOptions();
}
+template <>
+inline const MeanOptions *Operator::builtin_options_as<MeanOptions>() const {
+ return builtin_options_as_MeanOptions();
+}
+
struct OperatorBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
@@ -5324,6 +5430,54 @@ inline flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(
return tflite::CreateTransposeOptions(_fbb, _perm);
}
+inline MeanOptionsT *MeanOptions::UnPack(
+ const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new MeanOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void MeanOptions::UnPackTo(
+ MeanOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = axis();
+ if (_e) {
+ _o->axis.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) {
+ _o->axis[_i] = _e->Get(_i);
+ }
+ }
+ };
+ {
+ auto _e = keep_dims();
+ _o->keep_dims = _e;
+ };
+}
+
+inline flatbuffers::Offset<MeanOptions> MeanOptions::Pack(
+ flatbuffers::FlatBufferBuilder &_fbb, const MeanOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateMeanOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<MeanOptions> CreateMeanOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, const MeanOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const MeanOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _axis = _o->axis.size() ? _fbb.CreateVector(_o->axis) : 0;
+ auto _keep_dims = _o->keep_dims;
+ return tflite::CreateMeanOptions(_fbb, _axis, _keep_dims);
+}
+
inline OperatorCodeT *OperatorCode::UnPack(
const flatbuffers::resolver_function_t *_resolver) const {
auto _o = new OperatorCodeT();
@@ -5816,6 +5970,10 @@ inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier,
auto ptr = reinterpret_cast<const TransposeOptions *>(obj);
return verifier.VerifyTable(ptr);
}
+ case BuiltinOptions_MeanOptions: {
+ auto ptr = reinterpret_cast<const MeanOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default:
return false;
}
@@ -5944,6 +6102,10 @@ inline void *BuiltinOptionsUnion::UnPack(
auto ptr = reinterpret_cast<const TransposeOptions *>(obj);
return ptr->UnPack(resolver);
}
+ case BuiltinOptions_MeanOptions: {
+ auto ptr = reinterpret_cast<const MeanOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
default:
return nullptr;
}
@@ -6059,6 +6221,10 @@ inline flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(
auto ptr = reinterpret_cast<const TransposeOptionsT *>(value);
return CreateTransposeOptions(_fbb, ptr, _rehasher).Union();
}
+ case BuiltinOptions_MeanOptions: {
+ auto ptr = reinterpret_cast<const MeanOptionsT *>(value);
+ return CreateMeanOptions(_fbb, ptr, _rehasher).Union();
+ }
default:
return 0;
}
@@ -6187,6 +6353,10 @@ inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u)
*reinterpret_cast<TransposeOptionsT *>(u.value));
break;
}
+ case BuiltinOptions_MeanOptions: {
+ value = new MeanOptionsT(*reinterpret_cast<MeanOptionsT *>(u.value));
+ break;
+ }
default:
break;
}
@@ -6324,6 +6494,11 @@ inline void BuiltinOptionsUnion::Reset() {
delete ptr;
break;
}
+ case BuiltinOptions_MeanOptions: {
+ auto ptr = reinterpret_cast<MeanOptionsT *>(value);
+ delete ptr;
+ break;
+ }
default:
break;
}
diff --git a/tensorflow/contrib/lite/testing/BUILD b/tensorflow/contrib/lite/testing/BUILD
index f43b09ef48..81412ae51b 100644
--- a/tensorflow/contrib/lite/testing/BUILD
+++ b/tensorflow/contrib/lite/testing/BUILD
@@ -32,6 +32,7 @@ gen_zipped_test_files(
"l2norm.zip",
"local_response_norm.zip",
"max_pool.zip",
+ "mean.zip",
"mul.zip",
"pad.zip",
"relu.zip",
diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py
index d1c1777cdd..b72f661e56 100644
--- a/tensorflow/contrib/lite/testing/generate_examples.py
+++ b/tensorflow/contrib/lite/testing/generate_examples.py
@@ -670,6 +670,51 @@ def make_add_tests(zip_path):
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+def make_mean_tests(zip_path):
+ """Make a set of tests to do mean."""
+
+ test_parameters = [{
+ "input_dtype": [tf.float32, tf.int32],
+ "input_shape": [[3, 2, 4]],
+ "axis": [
+ None, 0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
+ [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1], [-1, 0],
+ [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
+ ],
+ "keep_dims": [True, False],
+ }, {
+ "input_dtype": [tf.float32, tf.int32],
+ "input_shape": [[1, 224, 224, 3]],
+ "axis": [
+ None, 0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3],
+ [3, 2, 1, 0], [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2,
+ -3, -4, [0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],
+ [2, 2, 3], [-3, -3, -4], [-3, 2, 1]
+ ],
+ "keep_dims": [True, False],
+ }]
+
+ def build_graph(parameters):
+ """Build the mean op testing graph."""
+ input_tensor = tf.placeholder(
+ dtype=parameters["input_dtype"],
+ name="input",
+ shape=parameters["input_shape"])
+ out = tf.reduce_mean(
+ input_tensor,
+ axis=parameters["axis"],
+ keep_dims=parameters["keep_dims"])
+ return [input_tensor], [out]
+
+ def build_inputs(parameters, sess, inputs, outputs):
+ input_values = create_tensor_data(parameters["input_dtype"],
+ parameters["input_shape"])
+ return [input_values], sess.run(
+ outputs, feed_dict=dict(zip(inputs, [input_values])))
+
+ make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+
+
def make_mul_tests(zip_path):
"""Make a set of tests to do mul with and without broadcast."""
@@ -1375,6 +1420,7 @@ def main(unused_args):
"softmax.zip": make_softmax_tests,
"space_to_depth.zip": make_space_to_depth_tests,
"transpose.zip": make_transpose_tests,
+ "mean.zip": make_mean_tests,
}
out = FLAGS.zip_to_output
bin_path = FLAGS.toco
diff --git a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
index f7f75f48a6..9027cee8bd 100644
--- a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
+++ b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
@@ -270,6 +270,7 @@ INSTANTIATE_TESTS(sigmoid)
INSTANTIATE_TESTS(softmax)
INSTANTIATE_TESTS(space_to_depth)
INSTANTIATE_TESTS(transpose)
+INSTANTIATE_TESTS(mean)
} // namespace testing
} // namespace tflite
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc
index 444f59d14b..b77be3f5c0 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc
@@ -38,17 +38,7 @@ bool ResolveMeanAttributes::Run(Model* model, std::size_t op_index) {
const auto& indices_array = *model->arrays[op->inputs[1]];
if (!indices_array.has_shape()) return false;
-
- // We only support simultaneous reduction over width and height.
- std::vector<int> axis = indices_array.GetBuffer<ArrayDataType::kInt32>().data;
- if (axis.size() != 2) {
- return false;
- }
- if (!((axis[0] == 1 && axis[1] == 2) || (axis[0] == 2 && axis[1] == 1))) {
- return false;
- }
-
- op->axis = axis;
+ op->axis = indices_array.GetBuffer<ArrayDataType::kInt32>().data;
return true;
}
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index d153461444..d6335b8253 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -525,6 +525,25 @@ class Transpose
}
};
+class Mean : public BuiltinOperator<MeanOperator, ::tflite::MeanOptions,
+ ::tflite::BuiltinOptions_MeanOptions> {
+ public:
+ using BuiltinOperator::BuiltinOperator;
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ auto axis = builder->CreateVector(op.axis);
+ return ::tflite::CreateMeanOptions(*builder, axis, op.keep_dims);
+ }
+
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
+ op->axis.insert(op->axis.end(), options.axis()->begin(),
+ options.axis()->end());
+ op->keep_dims = options.keep_dims();
+ }
+};
+
class Split : public CustomOperator<TensorFlowSplitOperator> {
public:
using CustomOperator::CustomOperator;
@@ -691,6 +710,8 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
new Svdf(::tflite::BuiltinOperator_SVDF, OperatorType::kSvdf));
ops.emplace_back(new Transpose(::tflite::BuiltinOperator_TRANSPOSE,
OperatorType::kTranspose));
+ ops.emplace_back(
+ new Mean(::tflite::BuiltinOperator_MEAN, OperatorType::kMean));
// Custom Operators.
ops.emplace_back(new Cast("CAST", OperatorType::kCast));
diff --git a/tensorflow/contrib/lite/toco/tflite/operator_test.cc b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
index bcf8ac04ef..093144f6ac 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
@@ -145,6 +145,17 @@ TEST_F(OperatorTest, BuiltinBatchToSpaceND) {
EXPECT_EQ(op.after_crops, output_toco_op->after_crops);
}
+TEST_F(OperatorTest, BuiltinMean) {
+ MeanOperator op;
+ op.axis = {1, 2};
+ op.keep_dims = false;
+
+ auto output_toco_op =
+ SerializeAndDeserialize(GetOperator("MEAN", OperatorType::kMean), op);
+ EXPECT_EQ(op.axis, output_toco_op->axis);
+ EXPECT_EQ(op.keep_dims, output_toco_op->keep_dims);
+}
+
TEST_F(OperatorTest, CustomCast) {
CastOperator op;
op.src_data_type = ArrayDataType::kFloat;