author	andrehentz <ahentz@google.com>	2018-03-26 07:11:03 -0700
committer	GitHub <noreply@github.com>	2018-03-26 07:11:03 -0700
commit	90e015a863246cd022ce7257204f0d716ac2d400 (patch)
tree	dd8f2b8b7f8431f7841ae307b5f20758992d851c
parent	6c1737e6c8c9e5db405853178fb5e42abc080ba3 (diff)
parent	2f9265ff01de864ff00aed57f399c00d05ce498d (diff)
Merge pull request #17074 from StanislawAntol/feature/add_tflite_maximum
Add support for Maximum to TensorFlow Lite.
-rw-r--r--  tensorflow/contrib/lite/kernels/BUILD  |  13
-rw-r--r--  tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h  |  25
-rw-r--r--  tensorflow/contrib/lite/kernels/maximum.cc  |  106
-rw-r--r--  tensorflow/contrib/lite/kernels/maximum_test.cc  |  81
-rw-r--r--  tensorflow/contrib/lite/kernels/register.cc  |  2
-rw-r--r--  tensorflow/contrib/lite/model.cc  |  3
-rw-r--r--  tensorflow/contrib/lite/nnapi_delegate.cc  |  1
-rw-r--r--  tensorflow/contrib/lite/schema/schema.fbs  |  5
-rwxr-xr-x  tensorflow/contrib/lite/schema/schema_generated.h  |  124
-rw-r--r--  tensorflow/contrib/lite/testing/BUILD  |  1
-rw-r--r--  tensorflow/contrib/lite/testing/generate_examples.py  |  36
-rw-r--r--  tensorflow/contrib/lite/testing/generated_examples_zip_test.cc  |  1
-rw-r--r--  tensorflow/contrib/lite/toco/tflite/operator.cc  |  2
-rw-r--r--  tensorflow/contrib/lite/toco/tflite/operator_test.cc  |  2
14 files changed, 396 insertions, 6 deletions
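
This merge threads a new MAXIMUM builtin through every layer of the stack: the FlatBuffers schema and its checked-in generated header, a reference kernel and its registration, an explicit "not delegated" entry in the NNAPI delegate, TOCO's exporter, and the generated-example test harness. Once BuiltinOpResolver knows about the op, a converted model containing tf.maximum runs through the ordinary interpreter path. A minimal sketch of that path, assuming a hypothetical converted model file named maximum.tflite:

    #include <memory>

    #include "tensorflow/contrib/lite/interpreter.h"
    #include "tensorflow/contrib/lite/kernels/register.h"
    #include "tensorflow/contrib/lite/model.h"

    int main() {
      // "maximum.tflite" is a placeholder for a model containing a MAXIMUM op.
      auto model = tflite::FlatBufferModel::BuildFromFile("maximum.tflite");
      // After this change, BuiltinOpResolver registers MAXIMUM too.
      tflite::ops::builtin::BuiltinOpResolver resolver;
      std::unique_ptr<tflite::Interpreter> interpreter;
      tflite::InterpreterBuilder(*model, resolver)(&interpreter);
      interpreter->AllocateTensors();
      // ... populate the two input tensors, then:
      interpreter->Invoke();
      return 0;
    }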
diff --git a/tensorflow/contrib/lite/kernels/BUILD b/tensorflow/contrib/lite/kernels/BUILD
index 200cb3075b..48021aea47 100644
--- a/tensorflow/contrib/lite/kernels/BUILD
+++ b/tensorflow/contrib/lite/kernels/BUILD
@@ -156,6 +156,7 @@ cc_library(
"local_response_norm.cc",
"lsh_projection.cc",
"lstm.cc",
+ "maximum.cc",
"mean.cc",
"mfcc.cc",
"mul.cc",
@@ -537,6 +538,18 @@ tf_cc_test(
)

tf_cc_test(
+ name = "maximum_test",
+ size = "small",
+ srcs = ["maximum_test.cc"],
+ deps = [
+ ":builtin_ops",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite/kernels:test_util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+tf_cc_test(
name = "mean_test",
size = "small",
srcs = ["mean_test.cc"],
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 472ddc60df..f31420b983 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -404,6 +404,7 @@ inline void DepthToSpace(const T* input_data, const Dims<4>& input_dims,
const int in_d =
out_d + ((out_h % block_size) * block_size + out_w % block_size) *
output_depth;
+
const int in_w = out_w / block_size;
const int in_h = out_h / block_size;
const int in_b = out_b;
@@ -3344,6 +3345,30 @@ void TensorFlowMaximum(const T* input1_data, const Dims<4>& input1_dims,
}
}

+template <typename T>
+void TensorFlowMaximum(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T* output_data, const Dims<4>& output_dims) {
+ NdArrayDesc<4> desc1;
+ NdArrayDesc<4> desc2;
+ NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
+
+ for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
+ for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
+ for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
+ for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
+ auto out_idx = Offset(output_dims, c, x, y, b);
+ auto in1_idx = SubscriptToIndex(desc1, c, x, y, b);
+ auto in2_idx = SubscriptToIndex(desc2, c, x, y, b);
+ auto in1_val = input1_data[in1_idx];
+ auto in2_val = input2_data[in2_idx];
+ output_data[out_idx] = in1_val > in2_val ? in1_val : in2_val;
+ }
+ }
+ }
+ }
+}
+
template <typename T1, typename T2, typename T3>
void ArgMax(const T3* axis, const T1* input_data, const Dims<4>& input_dims,
T2* output_data, const Dims<4>& output_dims) {
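
The new TensorFlowMaximum overload accepts both inputs' dims and builds NdArrayDesc descriptors with NdArrayDescsForElementwiseBroadcast, so a dimension of size 1 in either input gets reused across the corresponding output axis. Note that Dims<4> indexes axes innermost first, which is why the loops run over (b, y, x, c) while Offset and SubscriptToIndex take (c, x, y, b). A standalone sketch of the same stride-zero broadcasting idea, written in plain row-major order rather than the library's Dims<4> convention:

    #include <algorithm>
    #include <array>

    // Broadcast element-wise maximum over 4-D arrays: an axis of size 1 gets
    // stride 0, so its single element is reused along that axis. This mirrors
    // what NdArrayDescsForElementwiseBroadcast sets up for the real kernel.
    template <typename T>
    void BroadcastMaximum4D(const T* in1, const std::array<int, 4>& shape1,
                            const T* in2, const std::array<int, 4>& shape2,
                            T* out, const std::array<int, 4>& out_shape) {
      std::array<int, 4> s1, s2;  // per-axis element strides
      for (int i = 3, st1 = 1, st2 = 1; i >= 0; --i) {
        s1[i] = shape1[i] == 1 ? 0 : st1;
        s2[i] = shape2[i] == 1 ? 0 : st2;
        st1 *= shape1[i];
        st2 *= shape2[i];
      }
      int o = 0;  // output is written in row-major order
      for (int a = 0; a < out_shape[0]; ++a)
        for (int b = 0; b < out_shape[1]; ++b)
          for (int c = 0; c < out_shape[2]; ++c)
            for (int d = 0; d < out_shape[3]; ++d)
              out[o++] = std::max(
                  in1[a * s1[0] + b * s1[1] + c * s1[2] + d * s1[3]],
                  in2[a * s2[0] + b * s2[1] + c * s2[2] + d * s2[3]]);
    }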
diff --git a/tensorflow/contrib/lite/kernels/maximum.cc b/tensorflow/contrib/lite/kernels/maximum.cc
new file mode 100644
index 0000000000..9fdf2b47ea
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/maximum.cc
@@ -0,0 +1,106 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <string.h>
+#include <vector>
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/tensor.h"
+#include "tensorflow/contrib/lite/kernels/kernel_util.h"
+#include "tensorflow/contrib/lite/kernels/op_macros.h"
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+namespace maximum {
+
+// This file has a reference implementation of TFMaximum.
+enum KernelType {
+ kReference,
+};
+
+constexpr int kInputTensor1 = 0;
+constexpr int kInputTensor2 = 1;
+constexpr int kOutputTensor = 0;
+
+struct MaximumContext {
+ MaximumContext(TfLiteContext* context, TfLiteNode* node) {
+ input1 = GetInput(context, node, kInputTensor1);
+ input2 = GetInput(context, node, kInputTensor2);
+ output = GetOutput(context, node, kOutputTensor);
+ }
+ TfLiteTensor* input1;
+ TfLiteTensor* input2;
+ TfLiteTensor* output;
+};
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ MaximumContext op_context(context, node);
+ TF_LITE_ENSURE_EQ(context, op_context.input1->type, op_context.input2->type);
+ TfLiteIntArray* output_dims = TfLiteIntArrayCopy(op_context.input2->dims);
+ op_context.output->type = op_context.input2->type;
+ return context->ResizeTensor(context, op_context.output, output_dims);
+}
+
+template <KernelType kernel_type>
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ MaximumContext op_context(context, node);
+
+#define TF_LITE_MAXIMUM(kernel_type, data_type) \
+ kernel_type::TensorFlowMaximum<data_type>( \
+ GetTensorData<data_type>(op_context.input1), \
+ GetTensorDims(op_context.input1), \
+ GetTensorData<data_type>(op_context.input2), \
+ GetTensorDims(op_context.input2), \
+ GetTensorData<data_type>(op_context.output), \
+ GetTensorDims(op_context.output))
+
+ if (kernel_type == kReference) {
+ switch (op_context.output->type) {
+ case kTfLiteFloat32:
+ TF_LITE_MAXIMUM(reference_ops, float);
+ break;
+ default:
+ context->ReportError(context,
+ "Type %d is currently not supported by Maximum.",
+ op_context.output->type);
+ return kTfLiteError;
+ }
+ } else {
+    context->ReportError(context,
+                         "Kernel type %d is currently not supported by Maximum.",
+                         kernel_type);
+ return kTfLiteError;
+ }
+#undef TF_LITE_MAXIMUM
+ return kTfLiteOk;
+}
+
+} // namespace maximum
+
+TfLiteRegistration* Register_MAXIMUM_REF() {
+ static TfLiteRegistration r = {nullptr, nullptr, maximum::Prepare,
+ maximum::Eval<maximum::kReference>};
+ return &r;
+}
+
+TfLiteRegistration* Register_MAXIMUM() { return Register_MAXIMUM_REF(); }
+
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
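
Two details of this kernel are worth spelling out. The dispatch in Eval is purely textual; for the single supported case, TF_LITE_MAXIMUM(reference_ops, float) expands to a direct call into the header above:

    // Expansion of TF_LITE_MAXIMUM(reference_ops, float) inside Eval:
    reference_ops::TensorFlowMaximum<float>(
        GetTensorData<float>(op_context.input1),
        GetTensorDims(op_context.input1),
        GetTensorData<float>(op_context.input2),
        GetTensorDims(op_context.input2),
        GetTensorData<float>(op_context.output),
        GetTensorDims(op_context.output));

Also note that Prepare only verifies that the two input types match and sizes the output by copying input2's dims, so even though the reference op can broadcast, the output shape appears to be correct only when input1's shape broadcasts into input2's; the test below sticks to identical {3, 1, 2} shapes.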
diff --git a/tensorflow/contrib/lite/kernels/maximum_test.cc b/tensorflow/contrib/lite/kernels/maximum_test.cc
new file mode 100644
index 0000000000..b3fd7d4e6f
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/maximum_test.cc
@@ -0,0 +1,81 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/kernels/test_util.h"
+#include "tensorflow/contrib/lite/model.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+class MaximumOpModel : public SingleOpModel {
+ public:
+ MaximumOpModel(const TensorData& input1, const TensorData& input2,
+ const TensorType& output) {
+ input1_ = AddInput(input1);
+ input2_ = AddInput(input2);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_MAXIMUM, BuiltinOptions_MaximumOptions,
+ CreateMaximumOptions(builder_).Union());
+ BuildInterpreter({GetShape(input1_), GetShape(input2_)});
+ }
+
+ template <class T>
+ void SetInput1(std::initializer_list<T> data) {
+ PopulateTensor(input1_, data);
+ }
+
+ template <class T>
+ void SetInput2(std::initializer_list<T> data) {
+ PopulateTensor(input2_, data);
+ }
+
+ template <class T>
+ std::vector<T> GetOutput() {
+ return ExtractVector<T>(output_);
+ }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+
+ protected:
+ int input1_;
+ int input2_;
+ int output_;
+};
+
+TEST(MaximumOpTest, FloatTest) {
+ std::initializer_list<float> data1 = {1.0, 0.0, -1.0, 11.0, -2.0, -1.44};
+ std::initializer_list<float> data2 = {-1.0, 0.0, 1.0, 12.0, -3.0, -1.43};
+ MaximumOpModel m({TensorType_FLOAT32, {3, 1, 2}},
+ {TensorType_FLOAT32, {3, 1, 2}}, TensorType_FLOAT32);
+ m.SetInput1<float>(data1);
+ m.SetInput2<float>(data2);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
+ EXPECT_THAT(
+ m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({1.0, 0.0, 1.0, 12.0, -2.0, -1.43})));
+}
+
+} // namespace
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/kernels/register.cc b/tensorflow/contrib/lite/kernels/register.cc
index 62045f0a4d..0f98154b90 100644
--- a/tensorflow/contrib/lite/kernels/register.cc
+++ b/tensorflow/contrib/lite/kernels/register.cc
@@ -76,6 +76,7 @@ TfLiteRegistration* Register_LOG_SOFTMAX();
TfLiteRegistration* Register_CAST();
TfLiteRegistration* Register_DEQUANTIZE();
TfLiteRegistration* Register_PRELU();
+TfLiteRegistration* Register_MAXIMUM();

BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_RELU, Register_RELU());
@@ -133,6 +134,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_CAST, Register_CAST());
AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE());
AddBuiltin(BuiltinOperator_PRELU, Register_PRELU());
+ AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM());

// TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
// custom ops aren't always included by default.
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index b7ccdf070b..791d1378f3 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -597,6 +597,9 @@ void* ParseOpData(const Operator* op, BuiltinOperator op_type,
builtin_data = reinterpret_cast<void*>(params);
break;
}
+ case BuiltinOperator_MAXIMUM: {
+ break;
+ }
case BuiltinOperator_DELEGATE: {
// TODO(ycling): Revisit when supporting saving delegated models.
error_reporter->Report("DELEGATE op shouldn't exist in model.");
diff --git a/tensorflow/contrib/lite/nnapi_delegate.cc b/tensorflow/contrib/lite/nnapi_delegate.cc
index e31b7c03a5..decaf9f160 100644
--- a/tensorflow/contrib/lite/nnapi_delegate.cc
+++ b/tensorflow/contrib/lite/nnapi_delegate.cc
@@ -350,6 +350,7 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
case tflite::BuiltinOperator_DELEGATE:
case tflite::BuiltinOperator_CAST:
case tflite::BuiltinOperator_PRELU:
+ case tflite::BuiltinOperator_MAXIMUM:
FATAL("Op code %d is currently not delegated to NNAPI", builtin);
nn_op_type = -1; // set to invalid
break;
diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs
index e1075971e9..7d2e00fe32 100644
--- a/tensorflow/contrib/lite/schema/schema.fbs
+++ b/tensorflow/contrib/lite/schema/schema.fbs
@@ -131,6 +131,7 @@ enum BuiltinOperator : byte {
BIDIRECTIONAL_SEQUENCE_LSTM = 52,
CAST = 53,
PRELU = 54,
+ MAXIMUM = 55,
}

// Options for the builtin operators.
@@ -173,6 +174,7 @@ union BuiltinOptions {
LogSoftmaxOptions,
CastOptions,
DequantizeOptions,
+ MaximumOptions,
}

enum Padding : byte { SAME, VALID }
@@ -384,6 +386,9 @@ table CastOptions {
table DequantizeOptions {
}

+table MaximumOptions {
+}
+
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
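
MaximumOptions is deliberately an empty table: the op takes no parameters, but giving it an options table keeps it uniform with the other builtins in the BuiltinOptions union and leaves room to add fields later without a breaking schema change. On the C++ side the generated builder reduces to a start/end pair; a short sketch of how client code (maximum_test.cc does this via SetBuiltinOp) produces the options:

    #include "tensorflow/contrib/lite/schema/schema_generated.h"

    flatbuffers::Offset<tflite::MaximumOptions> MakeMaximumOptions(
        flatbuffers::FlatBufferBuilder* builder) {
      // No fields to set; what matters is the table offset plus the union tag
      // (BuiltinOptions_MaximumOptions) stored on the Operator.
      return tflite::CreateMaximumOptions(*builder);
    }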
diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h
index 86daeaf5cc..66a97a1460 100755
--- a/tensorflow/contrib/lite/schema/schema_generated.h
+++ b/tensorflow/contrib/lite/schema/schema_generated.h
@@ -145,6 +145,9 @@ struct CastOptionsT;
struct DequantizeOptions;
struct DequantizeOptionsT;

+struct MaximumOptions;
+struct MaximumOptionsT;
+
struct OperatorCode;
struct OperatorCodeT;

@@ -255,11 +258,12 @@ enum BuiltinOperator {
BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52,
BuiltinOperator_CAST = 53,
BuiltinOperator_PRELU = 54,
+ BuiltinOperator_MAXIMUM = 55,
BuiltinOperator_MIN = BuiltinOperator_ADD,
- BuiltinOperator_MAX = BuiltinOperator_PRELU
+ BuiltinOperator_MAX = BuiltinOperator_MAXIMUM
};

-inline BuiltinOperator (&EnumValuesBuiltinOperator())[53] {
+inline BuiltinOperator (&EnumValuesBuiltinOperator())[54] {
static BuiltinOperator values[] = {
BuiltinOperator_ADD,
BuiltinOperator_AVERAGE_POOL_2D,
@@ -313,7 +317,8 @@ inline BuiltinOperator (&EnumValuesBuiltinOperator())[53] {
BuiltinOperator_DELEGATE,
BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
BuiltinOperator_CAST,
- BuiltinOperator_PRELU
+ BuiltinOperator_PRELU,
+ BuiltinOperator_MAXIMUM
};
return values;
}
@@ -375,6 +380,7 @@ inline const char **EnumNamesBuiltinOperator() {
"BIDIRECTIONAL_SEQUENCE_LSTM",
"CAST",
"PRELU",
+ "MAXIMUM",
nullptr
};
return names;
@@ -425,11 +431,12 @@ enum BuiltinOptions {
BuiltinOptions_LogSoftmaxOptions = 36,
BuiltinOptions_CastOptions = 37,
BuiltinOptions_DequantizeOptions = 38,
+ BuiltinOptions_MaximumOptions = 39,
BuiltinOptions_MIN = BuiltinOptions_NONE,
- BuiltinOptions_MAX = BuiltinOptions_DequantizeOptions
+ BuiltinOptions_MAX = BuiltinOptions_MaximumOptions
};

-inline BuiltinOptions (&EnumValuesBuiltinOptions())[39] {
+inline BuiltinOptions (&EnumValuesBuiltinOptions())[40] {
static BuiltinOptions values[] = {
BuiltinOptions_NONE,
BuiltinOptions_Conv2DOptions,
@@ -469,7 +476,8 @@ inline BuiltinOptions (&EnumValuesBuiltinOptions())[39] {
BuiltinOptions_SplitOptions,
BuiltinOptions_LogSoftmaxOptions,
BuiltinOptions_CastOptions,
- BuiltinOptions_DequantizeOptions
+ BuiltinOptions_DequantizeOptions,
+ BuiltinOptions_MaximumOptions
};
return values;
}
@@ -515,6 +523,7 @@ inline const char **EnumNamesBuiltinOptions() {
"LogSoftmaxOptions",
"CastOptions",
"DequantizeOptions",
+ "MaximumOptions",
nullptr
};
return names;
@@ -681,6 +690,10 @@ template<> struct BuiltinOptionsTraits<DequantizeOptions> {
static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions;
};

+template<> struct BuiltinOptionsTraits<MaximumOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_MaximumOptions;
+};
+
struct BuiltinOptionsUnion {
BuiltinOptions type;
void *value;
@@ -1016,6 +1029,14 @@ struct BuiltinOptionsUnion {
return type == BuiltinOptions_DequantizeOptions ?
reinterpret_cast<const DequantizeOptionsT *>(value) : nullptr;
}
+ MaximumOptionsT *AsMaximumOptions() {
+ return type == BuiltinOptions_MaximumOptions ?
+ reinterpret_cast<MaximumOptionsT *>(value) : nullptr;
+ }
+ const MaximumOptionsT *AsMaximumOptions() const {
+ return type == BuiltinOptions_MaximumOptions ?
+ reinterpret_cast<const MaximumOptionsT *>(value) : nullptr;
+ }
};

bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
@@ -3759,6 +3780,46 @@ inline flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(

flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);

+struct MaximumOptionsT : public flatbuffers::NativeTable {
+ typedef MaximumOptions TableType;
+ MaximumOptionsT() {
+ }
+};
+
+struct MaximumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef MaximumOptionsT NativeTableType;
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ verifier.EndTable();
+ }
+ MaximumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(MaximumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<MaximumOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MaximumOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit MaximumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ MaximumOptionsBuilder &operator=(const MaximumOptionsBuilder &);
+ flatbuffers::Offset<MaximumOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<MaximumOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<MaximumOptions> CreateMaximumOptions(
+ flatbuffers::FlatBufferBuilder &_fbb) {
+ MaximumOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<MaximumOptions> CreateMaximumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+
struct OperatorCodeT : public flatbuffers::NativeTable {
typedef OperatorCode TableType;
BuiltinOperator builtin_code;
@@ -3990,6 +4051,9 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const DequantizeOptions *builtin_options_as_DequantizeOptions() const {
return builtin_options_type() == BuiltinOptions_DequantizeOptions ? static_cast<const DequantizeOptions *>(builtin_options()) : nullptr;
}
+ const MaximumOptions *builtin_options_as_MaximumOptions() const {
+ return builtin_options_type() == BuiltinOptions_MaximumOptions ? static_cast<const MaximumOptions *>(builtin_options()) : nullptr;
+ }
const flatbuffers::Vector<uint8_t> *custom_options() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
}
@@ -4168,6 +4232,10 @@ template<> inline const DequantizeOptions *Operator::builtin_options_as<Dequanti
return builtin_options_as_DequantizeOptions();
}

+template<> inline const MaximumOptions *Operator::builtin_options_as<MaximumOptions>() const {
+ return builtin_options_as_MaximumOptions();
+}
+
struct OperatorBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
@@ -5696,6 +5764,29 @@ inline flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(flatbuffer
_fbb);
}

+inline MaximumOptionsT *MaximumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new MaximumOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void MaximumOptions::UnPackTo(MaximumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<MaximumOptions> MaximumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateMaximumOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<MaximumOptions> CreateMaximumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MaximumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ return tflite::CreateMaximumOptions(
+ _fbb);
+}
+
inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = new OperatorCodeT();
UnPackTo(_o, _resolver);
@@ -6028,6 +6119,10 @@ inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *ob
auto ptr = reinterpret_cast<const DequantizeOptions *>(obj);
return verifier.VerifyTable(ptr);
}
+ case BuiltinOptions_MaximumOptions: {
+ auto ptr = reinterpret_cast<const MaximumOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default: return false;
}
}
@@ -6198,6 +6293,10 @@ inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, c
auto ptr = reinterpret_cast<const DequantizeOptions *>(obj);
return ptr->UnPack(resolver);
}
+ case BuiltinOptions_MaximumOptions: {
+ auto ptr = reinterpret_cast<const MaximumOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
default: return nullptr;
}
}
@@ -6356,6 +6455,10 @@ inline flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(flatbuffers::FlatBuff
auto ptr = reinterpret_cast<const DequantizeOptionsT *>(value);
return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union();
}
+ case BuiltinOptions_MaximumOptions: {
+ auto ptr = reinterpret_cast<const MaximumOptionsT *>(value);
+ return CreateMaximumOptions(_fbb, ptr, _rehasher).Union();
+ }
default: return 0;
}
}
@@ -6514,6 +6617,10 @@ inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FL
value = new DequantizeOptionsT(*reinterpret_cast<DequantizeOptionsT *>(u.value));
break;
}
+ case BuiltinOptions_MaximumOptions: {
+ value = new MaximumOptionsT(*reinterpret_cast<MaximumOptionsT *>(u.value));
+ break;
+ }
default:
break;
}
@@ -6711,6 +6818,11 @@ inline void BuiltinOptionsUnion::Reset() {
delete ptr;
break;
}
+ case BuiltinOptions_MaximumOptions: {
+ auto ptr = reinterpret_cast<MaximumOptionsT *>(value);
+ delete ptr;
+ break;
+ }
default: break;
}
value = nullptr;
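
Everything in schema_generated.h is mechanical flatc output for the schema change, checked into the tree rather than generated at build time, which is why the diff carries the union tag, traits specialization, verifier, pack/unpack, copy, and reset arms for MaximumOptions. Consumers normally go through the typed accessors rather than the union plumbing; a small sketch, assuming an Operator obtained from a parsed model:

    #include "tensorflow/contrib/lite/schema/schema_generated.h"

    // Returns true if the operator carries (empty) MaximumOptions.
    bool HasMaximumOptions(const tflite::Operator& op) {
      // The union tag identifies which table builtin_options() points at;
      // the typed accessor returns nullptr on a tag mismatch.
      return op.builtin_options_type() == tflite::BuiltinOptions_MaximumOptions &&
             op.builtin_options_as_MaximumOptions() != nullptr;
    }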
diff --git a/tensorflow/contrib/lite/testing/BUILD b/tensorflow/contrib/lite/testing/BUILD
index 555ea90034..12b7b3c350 100644
--- a/tensorflow/contrib/lite/testing/BUILD
+++ b/tensorflow/contrib/lite/testing/BUILD
@@ -36,6 +36,7 @@ gen_zipped_test_files(
"local_response_norm.zip",
"log_softmax.zip",
"max_pool.zip",
+ "maximum.zip",
"mean.zip",
"mul.zip",
"pad.zip",
diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py
index 38de9dcf2c..deab5a91d2 100644
--- a/tensorflow/contrib/lite/testing/generate_examples.py
+++ b/tensorflow/contrib/lite/testing/generate_examples.py
@@ -862,6 +862,41 @@ def make_log_softmax_tests(zip_path):
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)


+def make_maximum_tests(zip_path):
+ """Make a set of tests to do maximum."""
+
+ test_parameters = [{
+ "input_dtype": [tf.float32],
+ "input_shape_1": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
+ "input_shape_2": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
+ }]
+
+ def build_graph(parameters):
+ """Build the maximum op testing graph."""
+ input_tensor_1 = tf.placeholder(
+ dtype=parameters["input_dtype"],
+ name="input_1",
+ shape=parameters["input_shape_1"])
+ input_tensor_2 = tf.placeholder(
+ dtype=parameters["input_dtype"],
+ name="input_2",
+ shape=parameters["input_shape_2"])
+
+ out = tf.maximum(input_tensor_1, input_tensor_2)
+ return [input_tensor_1, input_tensor_2], [out]
+
+ def build_inputs(parameters, sess, inputs, outputs):
+ values = [
+ create_tensor_data(parameters["input_dtype"],
+ parameters["input_shape_1"]),
+ create_tensor_data(parameters["input_dtype"],
+ parameters["input_shape_2"])
+ ]
+ return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
+
+ make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+
+
def make_binary_op_tests_func(binary_operator):
"""Return a function that does a test on a binary operator."""
return lambda zip_path: make_binary_op_tests(zip_path, binary_operator)
@@ -1978,6 +2013,7 @@ def main(unused_args):
"exp.zip": make_exp_tests,
"log_softmax.zip": make_log_softmax_tests,
"lstm.zip": make_lstm_tests,
+ "maximum.zip": make_maximum_tests,
}
out = FLAGS.zip_to_output
bin_path = FLAGS.toco
diff --git a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
index ba2d259462..e9d505a76d 100644
--- a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
+++ b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
@@ -250,6 +250,7 @@ INSTANTIATE_TESTS(l2_pool)
INSTANTIATE_TESTS(l2norm)
INSTANTIATE_TESTS(local_response_norm)
INSTANTIATE_TESTS(log_softmax)
+INSTANTIATE_TESTS(maximum)
INSTANTIATE_TESTS(max_pool)
INSTANTIATE_TESTS(mean)
INSTANTIATE_TESTS(mul)
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index f23249cfa1..0989bfe5a3 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -863,6 +863,8 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
ops.emplace_back(new SimpleOperator<ExpOperator>("EXP", OperatorType::kExp));
ops.emplace_back(new SimpleOperator<LogSoftmaxOperator>(
"LOG_SOFTMAX", OperatorType::kLogSoftmax));
+ ops.emplace_back(new SimpleOperator<TensorFlowMaximumOperator>(
+ "MAXIMUM", OperatorType::kTensorFlowMaximum));

return ops;
}
diff --git a/tensorflow/contrib/lite/toco/tflite/operator_test.cc b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
index 9c19f8d464..f7a213ecfc 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
@@ -109,6 +109,8 @@ TEST_F(OperatorTest, SimpleOperators) {
CheckSimpleOperator<ExpOperator>("EXP", OperatorType::kExp);
CheckSimpleOperator<LogSoftmaxOperator>("LOG_SOFTMAX",
OperatorType::kLogSoftmax);
+ CheckSimpleOperator<TensorFlowMaximumOperator>(
+ "MAXIMUM", OperatorType::kTensorFlowMaximum);
}

TEST_F(OperatorTest, BuiltinAdd) {