author     Pete Warden <pete@petewarden.com>        2017-11-13 17:11:23 -0800
committer  Martin Wicke <martin.wicke@gmail.com>    2017-11-13 17:11:23 -0800
commit     4b4b51cdd9e8c3c748b76dd8649bcd5556e84d76 (patch)
tree       1c3bb15a84b6c30d5d946845dca118bcc838df8c
parent     b066496f625930bc00397ad9d000741d724598eb (diff)
Ios (#14521)
* Initial implementation of Makefile build for TensorFlow Lite
* Added support to TF Lite makefile build
* Added iOS support to TF Lite makefile build
* Added simple iOS example application for TF Lite
* Added copyright header to file
* Added optimization flag and cleaned up code
-rw-r--r--   .gitignore                                                                       5
-rw-r--r--   tensorflow/contrib/lite/Makefile                                               147
-rwxr-xr-x   tensorflow/contrib/lite/build_ios_universal_lib.sh                              16
-rwxr-xr-x   tensorflow/contrib/lite/download_dependencies.sh                                88
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/AppDelegate.h                       21
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/AppDelegate.mm                      44
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/Podfile                              5
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/RunModel-Info.plist                 47
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.h            24
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.mm          219
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.xib          46
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/data/grace_hopper.jpg              bin 0 -> 73746 bytes
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/ios_image_load.h                    25
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/ios_image_load.mm                   85
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/main.mm                             22
-rw-r--r--   tensorflow/contrib/lite/examples/ios/simple/simple.xcodeproj/project.pbxproj   359
-rw-r--r--   tensorflow/contrib/lite/ios_makefile.inc                                        47
-rwxr-xr-x   tensorflow/contrib/lite/schema/schema_generated.h                             4521
-rw-r--r--   tensorflow/contrib/lite/tools/benchmark_model.cc                                91
-rw-r--r--   tensorflow/contrib/lite/tools/mutable_op_resolver.h                             11
20 files changed, 5823 insertions, 0 deletions
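
Taken together, the files listed above imply a two-step build flow for iOS. The sketch below is inferred from the scripts and Makefile added in this commit; commands are assumed to run from the root of the TensorFlow checkout.

    # Fetch third-party sources (Eigen, gemmlowp, FlatBuffers, farmhash, ...) plus the
    # example model and label file used by the simple app.
    tensorflow/contrib/lite/download_dependencies.sh

    # Build libtensorflow-lite.a for each supported architecture and merge the slices
    # into tensorflow/contrib/lite/gen/lib/libtensorflow-lite.a, which the Xcode project
    # under examples/ios/simple links against.
    tensorflow/contrib/lite/build_ios_universal_lib.sh
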
diff --git a/.gitignore b/.gitignore
index 9ae0d9c96f..d11a504bdc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,3 +22,8 @@ Pods
Podfile.lock
*.pbxproj
*.xcworkspacedata
+/tensorflow/contrib/lite/downloads/**
+/tensorflow/contrib/lite/gen/**
+/tensorflow/contrib/lite/examples/ios/simple/data/*.txt
+/tensorflow/contrib/lite/examples/ios/simple/data/*.tflite
+xcuserdata/**
\ No newline at end of file
diff --git a/tensorflow/contrib/lite/Makefile b/tensorflow/contrib/lite/Makefile
new file mode 100644
index 0000000000..8c65a0cc34
--- /dev/null
+++ b/tensorflow/contrib/lite/Makefile
@@ -0,0 +1,147 @@
+
+# Find where we're running from, so we can store generated files here.
+ifeq ($(origin MAKEFILE_DIR), undefined)
+ MAKEFILE_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+endif
+
+# Try to figure out the host system
+HOST_OS :=
+ifeq ($(OS),Windows_NT)
+ HOST_OS = WINDOWS
+else
+ UNAME_S := $(shell uname -s)
+ ifeq ($(UNAME_S),Linux)
+ HOST_OS := LINUX
+ endif
+ ifeq ($(UNAME_S),Darwin)
+ HOST_OS := OSX
+ endif
+endif
+
+ARCH := $(shell if [[ $(shell uname -m) =~ i[345678]86 ]]; then echo x86_32; else echo $(shell uname -m); fi)
+
+# Where compiled objects are stored.
+OBJDIR := $(MAKEFILE_DIR)/gen/obj/
+BINDIR := $(MAKEFILE_DIR)/gen/bin/
+LIBDIR := $(MAKEFILE_DIR)/gen/lib/
+GENDIR := $(MAKEFILE_DIR)/gen/obj/
+
+# Settings for the host compiler.
+CXX := $(CC_PREFIX) gcc
+CXXFLAGS := --std=c++11
+CC := $(CC_PREFIX) gcc
+CFLAGS :=
+LDOPTS :=
+LDOPTS += -L/usr/local/lib
+ARFLAGS := -r
+
+INCLUDES := \
+-I. \
+-I$(MAKEFILE_DIR)/../../../ \
+-I$(MAKEFILE_DIR)/downloads/ \
+-I$(MAKEFILE_DIR)/downloads/eigen \
+-I$(MAKEFILE_DIR)/downloads/gemmlowp \
+-I$(MAKEFILE_DIR)/downloads/neon_2_sse \
+-I$(MAKEFILE_DIR)/downloads/farmhash/src \
+-I$(MAKEFILE_DIR)/downloads/flatbuffers/include \
+-I$(GENDIR)
+# This is at the end so any globally-installed frameworks like protobuf don't
+# override local versions in the source tree.
+INCLUDES += -I/usr/local/include
+
+LIBS := \
+-lstdc++ \
+-lpthread \
+-lm \
+-lz
+
+# If we're on Linux, also link in the dl library.
+ifeq ($(HOST_OS),LINUX)
+ LIBS += -ldl -lpthread
+endif
+
+include $(MAKEFILE_DIR)/ios_makefile.inc
+
+# This library is the main target for this makefile. It will contain a minimal
+# runtime that can be linked in to other programs.
+LIB_NAME := libtensorflow-lite.a
+LIB_PATH := $(LIBDIR)$(LIB_NAME)
+
+# A small example program that shows how to link against the library.
+BENCHMARK_PATH := $(BINDIR)benchmark_model
+
+BENCHMARK_SRCS := \
+tensorflow/contrib/lite/tools/benchmark_model.cc
+BENCHMARK_OBJS := $(addprefix $(OBJDIR), \
+$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(BENCHMARK_SRCS))))
+
+# What sources we want to compile, must be kept in sync with the main Bazel
+# build files.
+
+CORE_CC_ALL_SRCS := \
+$(wildcard tensorflow/contrib/lite/*.cc) \
+$(wildcard tensorflow/contrib/lite/kernels/*.cc) \
+$(wildcard tensorflow/contrib/lite/kernels/internal/*.cc) \
+$(wildcard tensorflow/contrib/lite/kernels/internal/optimized/*.cc) \
+$(wildcard tensorflow/contrib/lite/kernels/internal/reference/*.cc) \
+$(wildcard tensorflow/contrib/lite/*.c) \
+$(wildcard tensorflow/contrib/lite/kernels/*.c) \
+$(wildcard tensorflow/contrib/lite/kernels/internal/*.c) \
+$(wildcard tensorflow/contrib/lite/kernels/internal/optimized/*.c) \
+$(wildcard tensorflow/contrib/lite/kernels/internal/reference/*.c) \
+$(wildcard tensorflow/contrib/lite/downloads/farmhash/src/farmhash.cc)
+# Remove any duplicates.
+CORE_CC_ALL_SRCS := $(sort $(CORE_CC_ALL_SRCS))
+CORE_CC_EXCLUDE_SRCS := \
+$(wildcard tensorflow/contrib/lite/*test.cc) \
+$(wildcard tensorflow/contrib/lite/*/*test.cc) \
+$(wildcard tensorflow/contrib/lite/*/*/*test.cc) \
+$(wildcard tensorflow/contrib/lite/*/*/*/*test.cc) \
+$(wildcard tensorflow/contrib/lite/kernels/test_util.cc) \
+$(BENCHMARK_SRCS)
+# Filter out all the excluded files.
+TF_LITE_CC_SRCS := $(filter-out $(CORE_CC_EXCLUDE_SRCS), $(CORE_CC_ALL_SRCS))
+# File names of the intermediate files target compilation generates.
+TF_LITE_CC_OBJS := $(addprefix $(OBJDIR), \
+$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(TF_LITE_CC_SRCS))))
+LIB_OBJS := $(TF_LITE_CC_OBJS)
+
+# For normal manually-created TensorFlow C++ source files.
+$(OBJDIR)%.o: %.cc
+ @mkdir -p $(dir $@)
+ $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
+
+# For normal manually-created TensorFlow C++ source files.
+$(OBJDIR)%.o: %.c
+ @mkdir -p $(dir $@)
+ $(CC) $(CCFLAGS) $(INCLUDES) -c $< -o $@
+
+# The target that's compiled if there's no command-line arguments.
+all: $(LIB_PATH) $(BENCHMARK_PATH)
+
+# Gathers together all the objects we've compiled into a single '.a' archive.
+$(LIB_PATH): $(LIB_OBJS)
+ @mkdir -p $(dir $@)
+ $(AR) $(ARFLAGS) $(LIB_PATH) $(LIB_OBJS)
+
+$(BENCHMARK_PATH): $(BENCHMARK_OBJS) $(LIB_PATH)
+ @mkdir -p $(dir $@)
+ $(CXX) $(CXXFLAGS) $(INCLUDES) \
+ -o $(BENCHMARK_PATH) $(BENCHMARK_OBJS) \
+ $(LIBFLAGS) $(LIB_PATH) $(LDFLAGS) $(LIBS)
+
+# Gets rid of all generated files.
+clean:
+ rm -rf $(MAKEFILE_DIR)/gen
+
+# Gets rid of target files only, leaving the host alone. Also leaves the lib
+# directory untouched deliberately, so we can persist multiple architectures
+# across builds for iOS and Android.
+cleantarget:
+ rm -rf $(OBJDIR)
+ rm -rf $(BINDIR)
+
+$(DEPDIR)/%.d: ;
+.PRECIOUS: $(DEPDIR)/%.d
+
+-include $(patsubst %,$(DEPDIR)/%.d,$(basename $(TF_LITE_CC_SRCS)))
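
The same Makefile doubles as a plain host build. Assuming download_dependencies.sh (below) has already populated the downloads/ directory, and the command is run from the repository root so the tensorflow/contrib/lite/... source paths resolve, a host build is roughly:

    # Build the static library and the benchmark example; -j 8 is an arbitrary choice.
    make -f tensorflow/contrib/lite/Makefile -j 8
    # Per the OBJDIR/BINDIR/LIBDIR settings above, outputs land at:
    #   tensorflow/contrib/lite/gen/lib/libtensorflow-lite.a
    #   tensorflow/contrib/lite/gen/bin/benchmark_model
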
diff --git a/tensorflow/contrib/lite/build_ios_universal_lib.sh b/tensorflow/contrib/lite/build_ios_universal_lib.sh
new file mode 100755
index 0000000000..e0f2ef768b
--- /dev/null
+++ b/tensorflow/contrib/lite/build_ios_universal_lib.sh
@@ -0,0 +1,16 @@
+#!/bin/bash -x
+set -e
+make -f tensorflow/contrib/lite/Makefile TARGET=IOS IOS_ARCH=x86_64 -j 8
+make -f tensorflow/contrib/lite/Makefile TARGET=IOS IOS_ARCH=i386 -j 8
+make -f tensorflow/contrib/lite/Makefile TARGET=IOS IOS_ARCH=armv7 -j 8
+make -f tensorflow/contrib/lite/Makefile TARGET=IOS IOS_ARCH=armv7s -j 8
+make -f tensorflow/contrib/lite/Makefile TARGET=IOS IOS_ARCH=arm64 -j 8
+
+lipo \
+tensorflow/contrib/lite/gen/lib/ios_x86_64/libtensorflow-lite.a \
+tensorflow/contrib/lite/gen/lib/ios_i386/libtensorflow-lite.a \
+tensorflow/contrib/lite/gen/lib/ios_armv7/libtensorflow-lite.a \
+tensorflow/contrib/lite/gen/lib/ios_armv7s/libtensorflow-lite.a \
+tensorflow/contrib/lite/gen/lib/ios_arm64/libtensorflow-lite.a \
+-create \
+-output tensorflow/contrib/lite/gen/lib/libtensorflow-lite.a
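
A quick way to confirm that all five slices made it into the merged archive is lipo's info mode (a verification step that is not part of the script itself):

    xcrun lipo -info tensorflow/contrib/lite/gen/lib/libtensorflow-lite.a
    # Should report x86_64, i386, armv7, armv7s and arm64 in the fat file.
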
diff --git a/tensorflow/contrib/lite/download_dependencies.sh b/tensorflow/contrib/lite/download_dependencies.sh
new file mode 100755
index 0000000000..0d9842fefa
--- /dev/null
+++ b/tensorflow/contrib/lite/download_dependencies.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+set -e
+
+DOWNLOADS_DIR=tensorflow/contrib/lite/downloads
+BZL_FILE_PATH=tensorflow/workspace.bzl
+
+EIGEN_URL="$(grep -o 'http.*bitbucket.org/eigen/eigen/get/.*tar\.gz' "${BZL_FILE_PATH}" | grep -v bazel-mirror | head -n1)"
+GEMMLOWP_URL="$(grep -o 'https://mirror.bazel.build/github.com/google/gemmlowp/.*zip' "${BZL_FILE_PATH}" | head -n1)"
+GOOGLETEST_URL="https://github.com/google/googletest/archive/release-1.8.0.tar.gz"
+ABSL_URL="$(grep -o 'https://github.com/abseil/abseil-cpp/.*tar.gz' "${BZL_FILE_PATH}" | head -n1)"
+NEON_2_SSE_URL="https://github.com/intel/ARM_NEON_2_x86_SSE/archive/master.zip"
+FARMHASH_URL="https://mirror.bazel.build/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz"
+FLATBUFFERS_URL="https://github.com/google/flatbuffers/archive/master.zip"
+MODELS_URL="https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_1.0_224_ios_lite_float_2017_11_08.zip"
+
+# TODO(petewarden): Some new code in Eigen triggers a clang bug with iOS arm64,
+# so work around it by patching the source.
+replace_by_sed() {
+ local regex="${1}"
+ shift
+ # Detect the version of sed by the return value of "--version" flag. GNU-sed
+ # supports "--version" while BSD-sed doesn't.
+ if ! sed --version >/dev/null 2>&1; then
+ # BSD-sed.
+ sed -i '' -e "${regex}" "$@"
+ else
+ # GNU-sed.
+ sed -i -e "${regex}" "$@"
+ fi
+}
+
+download_and_extract() {
+ local usage="Usage: download_and_extract URL DIR"
+ local url="${1:?${usage}}"
+ local dir="${2:?${usage}}"
+ echo "downloading ${url}" >&2
+ mkdir -p "${dir}"
+ if [[ "${url}" == *gz ]]; then
+ curl -Ls "${url}" | tar -C "${dir}" --strip-components=1 -xz
+ elif [[ "${url}" == *zip ]]; then
+ tempdir=$(mktemp -d)
+ tempdir2=$(mktemp -d)
+ wget -P ${tempdir} ${url}
+ unzip ${tempdir}/* -d ${tempdir2}
+ # unzip has no strip components, so unzip to a temp dir, and move the files
+ # we want from the tempdir to destination.
+ cp -R ${tempdir2}/*/* ${dir}/
+ rm -rf ${tempdir2} ${tempdir}
+ fi
+
+ # Delete any potential BUILD files, which would interfere with Bazel builds.
+ find "${dir}" -type f -name '*BUILD' -delete
+}
+
+download_and_extract "${EIGEN_URL}" "${DOWNLOADS_DIR}/eigen"
+download_and_extract "${GEMMLOWP_URL}" "${DOWNLOADS_DIR}/gemmlowp"
+download_and_extract "${GOOGLETEST_URL}" "${DOWNLOADS_DIR}/googletest"
+download_and_extract "${ABSL_URL}" "${DOWNLOADS_DIR}/absl"
+download_and_extract "${NEON_2_SSE_URL}" "${DOWNLOADS_DIR}/neon_2_sse"
+download_and_extract "${FARMHASH_URL}" "${DOWNLOADS_DIR}/farmhash"
+download_and_extract "${FLATBUFFERS_URL}" "${DOWNLOADS_DIR}/flatbuffers"
+download_and_extract "${MODELS_URL}" "${DOWNLOADS_DIR}/models"
+
+replace_by_sed 's#static uint32x4_t p4ui_CONJ_XOR = vld1q_u32( conj_XOR_DATA );#static uint32x4_t p4ui_CONJ_XOR; // = vld1q_u32( conj_XOR_DATA ); - Removed by script#' \
+ "${DOWNLOADS_DIR}/eigen/Eigen/src/Core/arch/NEON/Complex.h"
+replace_by_sed 's#static uint32x2_t p2ui_CONJ_XOR = vld1_u32( conj_XOR_DATA );#static uint32x2_t p2ui_CONJ_XOR;// = vld1_u32( conj_XOR_DATA ); - Removed by scripts#' \
+ "${DOWNLOADS_DIR}/eigen/Eigen/src/Core/arch/NEON/Complex.h"
+replace_by_sed 's#static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );#static uint64x2_t p2ul_CONJ_XOR;// = vld1q_u64( p2ul_conj_XOR_DATA ); - Removed by script#' \
+ "${DOWNLOADS_DIR}/eigen/Eigen/src/Core/arch/NEON/Complex.h"
+
+cp ${DOWNLOADS_DIR}/models/models/* tensorflow/contrib/lite/examples/ios/simple/data/
+
+echo "download_dependencies.sh completed successfully." >&2
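
Because DOWNLOADS_DIR and BZL_FILE_PATH are relative paths, the script assumes it is started from the root of the TensorFlow checkout. A typical invocation (the checkout location here is hypothetical):

    cd /path/to/tensorflow   # hypothetical checkout root, containing tensorflow/workspace.bzl
    tensorflow/contrib/lite/download_dependencies.sh
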
diff --git a/tensorflow/contrib/lite/examples/ios/simple/AppDelegate.h b/tensorflow/contrib/lite/examples/ios/simple/AppDelegate.h
new file mode 100644
index 0000000000..75b1f1da38
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/AppDelegate.h
@@ -0,0 +1,21 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#import <UIKit/UIKit.h>
+
+@interface AppDelegate : UIResponder <UIApplicationDelegate>
+
+@property (strong, nonatomic) UIWindow *window;
+
+@end
diff --git a/tensorflow/contrib/lite/examples/ios/simple/AppDelegate.mm b/tensorflow/contrib/lite/examples/ios/simple/AppDelegate.mm
new file mode 100644
index 0000000000..1e808eb976
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/AppDelegate.mm
@@ -0,0 +1,44 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#import "AppDelegate.h"
+
+#import "RunModelViewController.h"
+
+@implementation AppDelegate
+
+- (BOOL)application:(UIApplication *)application
+ didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
+
+ UITabBarController *bar = [[UITabBarController alloc] init];
+ [bar setViewControllers:
+ @[[[RunModelViewController alloc] init]]];
+ bar.selectedIndex = 0;
+ self.window = [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]];
+ self.window.rootViewController = bar;
+ [self.window makeKeyAndVisible];
+ return YES;
+}
+
+- (void)applicationWillResignActive:(UIApplication *)application {}
+
+- (void)applicationDidEnterBackground:(UIApplication *)application {}
+
+- (void)applicationWillEnterForeground:(UIApplication *)application {}
+
+- (void)applicationDidBecomeActive:(UIApplication *)application {}
+
+- (void)applicationWillTerminate:(UIApplication *)application {}
+
+@end
diff --git a/tensorflow/contrib/lite/examples/ios/simple/Podfile b/tensorflow/contrib/lite/examples/ios/simple/Podfile
new file mode 100644
index 0000000000..1740ad6457
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/Podfile
@@ -0,0 +1,5 @@
+platform :ios, '8.0'
+inhibit_all_warnings!
+
+target 'tf_simple_example'
+ pod 'TensorFlow-experimental'
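
The Podfile is consumed by the usual CocoaPods flow, which this commit does not spell out; a plausible sequence is:

    cd tensorflow/contrib/lite/examples/ios/simple
    pod install
    # CocoaPods generates an .xcworkspace next to simple.xcodeproj; open that workspace
    # in Xcode (rather than the .xcodeproj) so the pod dependency is linked in.
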
diff --git a/tensorflow/contrib/lite/examples/ios/simple/RunModel-Info.plist b/tensorflow/contrib/lite/examples/ios/simple/RunModel-Info.plist
new file mode 100644
index 0000000000..1a3eaa8a2c
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/RunModel-Info.plist
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleDisplayName</key>
+ <string>tflite-simple-example</string>
+ <key>CFBundleExecutable</key>
+ <string>tf_simple_example</string>
+ <key>CFBundleIdentifier</key>
+ <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>ios-app</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1.0</string>
+ <key>LSRequiresIPhoneOS</key>
+ <true/>
+ <key>UILaunchStoryboardName</key>
+ <string>RunModelViewController</string>
+ <key>UIRequiredDeviceCapabilities</key>
+ <array>
+ <string>armv7</string>
+ </array>
+ <key>UISupportedInterfaceOrientations</key>
+ <array>
+ <string>UIInterfaceOrientationPortrait</string>
+ <string>UIInterfaceOrientationLandscapeLeft</string>
+ <string>UIInterfaceOrientationLandscapeRight</string>
+ </array>
+ <key>UISupportedInterfaceOrientations~ipad</key>
+ <array>
+ <string>UIInterfaceOrientationPortrait</string>
+ <string>UIInterfaceOrientationPortraitUpsideDown</string>
+ <string>UIInterfaceOrientationLandscapeLeft</string>
+ <string>UIInterfaceOrientationLandscapeRight</string>
+ </array>
+</dict>
+</plist>
diff --git a/tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.h b/tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.h
new file mode 100644
index 0000000000..4e1a83ccf5
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.h
@@ -0,0 +1,24 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#import <UIKit/UIKit.h>
+
+@interface RunModelViewController : UIViewController
+
+- (IBAction)getUrl:(id)sender;
+
+@property (weak, nonatomic) IBOutlet UITextView *urlContentTextView;
+@property (weak, nonatomic) IBOutlet UITextField *urlTextField;
+
+@end
diff --git a/tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.mm b/tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.mm
new file mode 100644
index 0000000000..965d830105
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.mm
@@ -0,0 +1,219 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#import "RunModelViewController.h"
+
+#include <fstream>
+#include <iostream>
+#include <pthread.h>
+#include <unistd.h>
+#include <queue>
+#include <sstream>
+#include <string>
+
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/string_util.h"
+#include "tensorflow/contrib/lite/tools/mutable_op_resolver.h"
+
+#include "ios_image_load.h"
+
+#define LOG(x) std::cerr
+#define CHECK(x) if (!(x)) { LOG(ERROR) << #x << " failed"; exit(1); }
+
+NSString* RunInferenceOnImage();
+
+@interface RunModelViewController ()
+@end
+
+@implementation RunModelViewController {
+}
+
+- (IBAction)getUrl:(id)sender {
+ NSString* inference_result = RunInferenceOnImage();
+ self.urlContentTextView.text = inference_result;
+}
+
+@end
+
+// Returns the top N confidence values over threshold in the provided vector,
+// sorted by confidence in descending order.
+static void GetTopN(
+ const float* prediction,
+ const int prediction_size,
+ const int num_results, const float threshold,
+ std::vector<std::pair<float, int> >* top_results) {
+ // Will contain top N results in ascending order.
+ std::priority_queue<std::pair<float, int>,
+ std::vector<std::pair<float, int> >,
+ std::greater<std::pair<float, int> > > top_result_pq;
+
+ const long count = prediction_size;
+ for (int i = 0; i < count; ++i) {
+ const float value = prediction[i];
+
+ // Only add it if it beats the threshold and has a chance at being in
+ // the top N.
+ if (value < threshold) {
+ continue;
+ }
+
+ top_result_pq.push(std::pair<float, int>(value, i));
+
+ // If at capacity, kick the smallest value out.
+ if (top_result_pq.size() > num_results) {
+ top_result_pq.pop();
+ }
+ }
+
+ // Copy to output vector and reverse into descending order.
+ while (!top_result_pq.empty()) {
+ top_results->push_back(top_result_pq.top());
+ top_result_pq.pop();
+ }
+ std::reverse(top_results->begin(), top_results->end());
+}
+
+NSString* FilePathForResourceName(NSString* name, NSString* extension) {
+ NSString* file_path = [[NSBundle mainBundle] pathForResource:name ofType:extension];
+ if (file_path == NULL) {
+ LOG(FATAL) << "Couldn't find '" << [name UTF8String] << "."
+ << [extension UTF8String] << "' in bundle.";
+ }
+ return file_path;
+}
+
+NSString* RunInferenceOnImage() {
+ std::string graph;
+ const int num_threads = 1;
+ std::string input_layer_type = "float";
+ std::vector<int> sizes = {1, 224, 224, 3};
+
+ NSString* graph_path = FilePathForResourceName(@"mobilenet_v1_1.0_224", @"tflite");
+
+ std::unique_ptr<tflite::FlatBufferModel> model(tflite::FlatBufferModel::BuildFromFile([graph_path UTF8String]));
+ if (!model) {
+ LOG(FATAL) << "Failed to mmap model " << graph;
+ }
+ LOG(INFO) << "Loaded model " << graph;
+ model->error_reporter();
+ LOG(INFO) << "resolved reporter";
+
+#ifdef TFLITE_CUSTOM_OPS_HEADER
+ tflite::MutableOpResolver resolver;
+ RegisterSelectedOps(&resolver);
+#else
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+#endif
+
+ std::unique_ptr<tflite::Interpreter> interpreter;
+ tflite::InterpreterBuilder(*model, resolver)(&interpreter);
+ if (!interpreter) {
+ LOG(FATAL) << "Failed to construct interpreter";
+ }
+
+ if (num_threads != -1) {
+ interpreter->SetNumThreads(num_threads);
+ }
+
+ int input = interpreter->inputs()[0];
+
+ if (input_layer_type != "string") {
+ interpreter->ResizeInputTensor(input, sizes);
+ }
+
+ if (interpreter->AllocateTensors() != kTfLiteOk) {
+ LOG(FATAL) << "Failed to allocate tensors!";
+ }
+
+ // Read the label list
+ NSString* labels_path = FilePathForResourceName(@"labels", @"txt");
+ std::vector<std::string> label_strings;
+ std::ifstream t;
+ t.open([labels_path UTF8String]);
+ std::string line;
+ while (std::getline(t, line)) {
+   // One label per line; stop at EOF instead of pushing a trailing empty entry.
+   label_strings.push_back(line);
+ }
+ t.close();
+
+ // Read the Grace Hopper image.
+ NSString* image_path = FilePathForResourceName(@"grace_hopper", @"jpg");
+ int image_width;
+ int image_height;
+ int image_channels;
+ std::vector<uint8_t> image_data = LoadImageFromFile([image_path UTF8String], &image_width, &image_height, &image_channels);
+ const int wanted_width = 224;
+ const int wanted_height = 224;
+ const int wanted_channels = 3;
+ const float input_mean = 127.5f;
+ const float input_std = 127.5f;
+ assert(image_channels >= wanted_channels);
+ uint8_t* in = image_data.data();
+ float* out = interpreter->typed_tensor<float>(input);
+ for (int y = 0; y < wanted_height; ++y) {
+ const int in_y = (y * image_height) / wanted_height;
+ uint8_t* in_row = in + (in_y * image_width * image_channels);
+ float* out_row = out + (y * wanted_width * wanted_channels);
+ for (int x = 0; x < wanted_width; ++x) {
+ const int in_x = (x * image_width) / wanted_width;
+ uint8_t* in_pixel = in_row + (in_x * image_channels);
+ float* out_pixel = out_row + (x * wanted_channels);
+ for (int c = 0; c < wanted_channels; ++c) {
+ out_pixel[c] = (in_pixel[c] - input_mean) / input_std;
+ }
+ }
+ }
+
+ if (interpreter->Invoke() != kTfLiteOk) {
+ LOG(FATAL) << "Failed to invoke!";
+ }
+
+ float* output = interpreter->typed_output_tensor<float>(0);
+ const int output_size = 1000;
+ const int kNumResults = 5;
+ const float kThreshold = 0.1f;
+ std::vector<std::pair<float, int> > top_results;
+ GetTopN(output, output_size, kNumResults, kThreshold, &top_results);
+
+ std::stringstream ss;
+ ss.precision(3);
+ for (const auto& result : top_results) {
+ const float confidence = result.first;
+ const int index = result.second;
+
+ ss << index << " " << confidence << " ";
+
+ // Write out the result as a string
+ if (index < label_strings.size()) {
+ // just for safety: theoretically, the output is under 1000 unless there
+ // is some numerical issues leading to a wrong prediction.
+ ss << label_strings[index];
+ } else {
+ ss << "Prediction: " << index;
+ }
+
+ ss << "\n";
+ }
+
+ LOG(INFO) << "Predictions: " << ss.str();
+
+ std::string predictions = ss.str();
+ NSString* result = @"";
+ result = [NSString stringWithFormat: @"%@ - %s", result,
+ predictions.c_str()];
+
+ return result;
+}
diff --git a/tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.xib b/tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.xib
new file mode 100644
index 0000000000..93f334b985
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/RunModelViewController.xib
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="3.0" toolsVersion="9531" systemVersion="15D21" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES">
+ <dependencies>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="9529"/>
+ </dependencies>
+ <objects>
+ <placeholder placeholderIdentifier="IBFilesOwner" id="-1" userLabel="File's Owner" customClass="RunModelViewController">
+ <connections>
+ <outlet property="urlContentTextView" destination="quY-AK-ZCn" id="YjW-BO-1Ta"/>
+ <outlet property="urlTextField" destination="hPw-q5-vh5" id="wmc-b6-2CV"/>
+ <outlet property="view" destination="1" id="iHm-Rr-4wj"/>
+ </connections>
+ </placeholder>
+ <placeholder placeholderIdentifier="IBFirstResponder" id="-2" customClass="UIResponder"/>
+ <view contentMode="scaleToFill" id="1">
+ <rect key="frame" x="0.0" y="0.0" width="320" height="568"/>
+ <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
+ <subviews>
+ <textView clipsSubviews="YES" contentMode="scaleToFill" fixedFrame="YES" editable="NO" text="The results of running the model will appear here." selectable="NO" translatesAutoresizingMaskIntoConstraints="NO" id="quY-AK-ZCn">
+ <rect key="frame" x="40" y="99" width="240" height="168"/>
+ <color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
+ <fontDescription key="fontDescription" type="system" pointSize="14"/>
+ <textInputTraits key="textInputTraits" autocapitalizationType="sentences"/>
+ </textView>
+ <button opaque="NO" contentMode="scaleToFill" fixedFrame="YES" contentHorizontalAlignment="center" contentVerticalAlignment="center" buttonType="roundedRect" lineBreakMode="middleTruncation" translatesAutoresizingMaskIntoConstraints="NO" id="AAC-Bk-PCC">
+ <rect key="frame" x="76" y="37" width="168" height="30"/>
+ <color key="backgroundColor" white="0.33333333333333331" alpha="1" colorSpace="calibratedWhite"/>
+ <state key="normal" title="Run Model">
+ <color key="titleShadowColor" white="0.5" alpha="1" colorSpace="calibratedWhite"/>
+ </state>
+ <connections>
+ <action selector="getUrl:" destination="-1" eventType="touchUpInside" id="mdP-nK-k9T"/>
+ </connections>
+ </button>
+ </subviews>
+ <color key="backgroundColor" red="0.78314738357315861" green="0.79869981749999996" blue="0.56305065858222869" alpha="1" colorSpace="calibratedRGB"/>
+ </view>
+ <textField opaque="NO" clipsSubviews="YES" contentMode="scaleToFill" contentHorizontalAlignment="left" contentVerticalAlignment="center" text="http://localhost:8080" borderStyle="roundedRect" placeholder="Enter URL" minimumFontSize="17" id="hPw-q5-vh5">
+ <rect key="frame" x="0.0" y="0.0" width="280" height="30"/>
+ <autoresizingMask key="autoresizingMask" flexibleMaxX="YES" flexibleMaxY="YES"/>
+ <fontDescription key="fontDescription" type="system" pointSize="14"/>
+ <textInputTraits key="textInputTraits"/>
+ <point key="canvasLocation" x="795" y="44"/>
+ </textField>
+ </objects>
+</document>
diff --git a/tensorflow/contrib/lite/examples/ios/simple/data/grace_hopper.jpg b/tensorflow/contrib/lite/examples/ios/simple/data/grace_hopper.jpg
new file mode 100644
index 0000000000..d2a427810f
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/data/grace_hopper.jpg
Binary files differ
diff --git a/tensorflow/contrib/lite/examples/ios/simple/ios_image_load.h b/tensorflow/contrib/lite/examples/ios/simple/ios_image_load.h
new file mode 100644
index 0000000000..7287d0d63d
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/ios_image_load.h
@@ -0,0 +1,25 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TENSORFLOW_EXAMPLES_IOS_IOS_IMAGE_LOAD_H_
+#define TENSORFLOW_EXAMPLES_IOS_IOS_IMAGE_LOAD_H_
+
+#include <vector>
+
+std::vector<uint8_t> LoadImageFromFile(const char* file_name,
+ int* out_width,
+ int* out_height,
+ int* out_channels);
+
+#endif // TENSORFLOW_EXAMPLES_IOS_IOS_IMAGE_LOAD_H_
diff --git a/tensorflow/contrib/lite/examples/ios/simple/ios_image_load.mm b/tensorflow/contrib/lite/examples/ios/simple/ios_image_load.mm
new file mode 100644
index 0000000000..789522d2a9
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/ios_image_load.mm
@@ -0,0 +1,85 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "ios_image_load.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <stdio.h>
+
+#import <CoreImage/CoreImage.h>
+#import <ImageIO/ImageIO.h>
+
+std::vector<uint8_t> LoadImageFromFile(const char* file_name,
+ int* out_width, int* out_height,
+ int* out_channels) {
+ FILE* file_handle = fopen(file_name, "rb");
+ fseek(file_handle, 0, SEEK_END);
+ const size_t bytes_in_file = ftell(file_handle);
+ fseek(file_handle, 0, SEEK_SET);
+ std::vector<uint8_t> file_data(bytes_in_file);
+ fread(file_data.data(), 1, bytes_in_file, file_handle);
+ fclose(file_handle);
+ CFDataRef file_data_ref = CFDataCreateWithBytesNoCopy(NULL, file_data.data(),
+ bytes_in_file,
+ kCFAllocatorNull);
+ CGDataProviderRef image_provider =
+ CGDataProviderCreateWithCFData(file_data_ref);
+
+ const char* suffix = strrchr(file_name, '.');
+ if (!suffix || suffix == file_name) {
+ suffix = "";
+ }
+ CGImageRef image;
+ if (strcasecmp(suffix, ".png") == 0) {
+ image = CGImageCreateWithPNGDataProvider(image_provider, NULL, true,
+ kCGRenderingIntentDefault);
+ } else if ((strcasecmp(suffix, ".jpg") == 0) ||
+ (strcasecmp(suffix, ".jpeg") == 0)) {
+ image = CGImageCreateWithJPEGDataProvider(image_provider, NULL, true,
+ kCGRenderingIntentDefault);
+ } else {
+ CFRelease(image_provider);
+ CFRelease(file_data_ref);
+ fprintf(stderr, "Unknown suffix for file '%s'\n", file_name);
+ *out_width = 0;
+ *out_height = 0;
+ *out_channels = 0;
+ return std::vector<uint8_t>();
+ }
+
+ const int width = (int)CGImageGetWidth(image);
+ const int height = (int)CGImageGetHeight(image);
+ const int channels = 4;
+ CGColorSpaceRef color_space = CGColorSpaceCreateDeviceRGB();
+ const int bytes_per_row = (width * channels);
+ const int bytes_in_image = (bytes_per_row * height);
+ std::vector<uint8_t> result(bytes_in_image);
+ const int bits_per_component = 8;
+ CGContextRef context = CGBitmapContextCreate(result.data(), width, height,
+ bits_per_component, bytes_per_row, color_space,
+ kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
+ CGColorSpaceRelease(color_space);
+ CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
+ CGContextRelease(context);
+ CFRelease(image);
+ CFRelease(image_provider);
+ CFRelease(file_data_ref);
+
+ *out_width = width;
+ *out_height = height;
+ *out_channels = channels;
+ return result;
+}
diff --git a/tensorflow/contrib/lite/examples/ios/simple/main.mm b/tensorflow/contrib/lite/examples/ios/simple/main.mm
new file mode 100644
index 0000000000..d70550a730
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/main.mm
@@ -0,0 +1,22 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#import <UIKit/UIKit.h>
+
+int main(int argc, char * argv[]) {
+ @autoreleasepool {
+ NSString *delegateClassName = @"AppDelegate";
+ return UIApplicationMain(argc, argv, nil, delegateClassName);
+ }
+}
diff --git a/tensorflow/contrib/lite/examples/ios/simple/simple.xcodeproj/project.pbxproj b/tensorflow/contrib/lite/examples/ios/simple/simple.xcodeproj/project.pbxproj
new file mode 100644
index 0000000000..9277c230b8
--- /dev/null
+++ b/tensorflow/contrib/lite/examples/ios/simple/simple.xcodeproj/project.pbxproj
@@ -0,0 +1,359 @@
+// !$*UTF8*$!
+{
+ archiveVersion = 1;
+ classes = {
+ };
+ objectVersion = 46;
+ objects = {
+
+/* Begin PBXBuildFile section */
+ 1C0D734B1ECCC460008C1DAB /* CoreGraphics.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1C0D734A1ECCC460008C1DAB /* CoreGraphics.framework */; };
+ 1CA45FFF1ECCC356002FA6A4 /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1CA45FFE1ECCC356002FA6A4 /* UIKit.framework */; };
+ 594C14AE1FB8F9B500EE8BFE /* libtensorflow-lite.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 594C14AD1FB8F9B500EE8BFE /* libtensorflow-lite.a */; };
+ 594C14B11FB9037100EE8BFE /* labels.txt in Resources */ = {isa = PBXBuildFile; fileRef = 594C14AF1FB9037100EE8BFE /* labels.txt */; };
+ 594C14B21FB9037100EE8BFE /* mobilenet_v1_1.0_224.tflite in Resources */ = {isa = PBXBuildFile; fileRef = 594C14B01FB9037100EE8BFE /* mobilenet_v1_1.0_224.tflite */; };
+ 59A3D0011CF4E68100C4259F /* AppDelegate.mm in Sources */ = {isa = PBXBuildFile; fileRef = 59A3CFF21CF4E68100C4259F /* AppDelegate.mm */; };
+ 59A3D0031CF4E68100C4259F /* grace_hopper.jpg in Resources */ = {isa = PBXBuildFile; fileRef = 59A3CFF51CF4E68100C4259F /* grace_hopper.jpg */; };
+ 59A3D0081CF4E68100C4259F /* ios_image_load.mm in Sources */ = {isa = PBXBuildFile; fileRef = 59A3CFFB1CF4E68100C4259F /* ios_image_load.mm */; };
+ 59A3D0091CF4E68100C4259F /* main.mm in Sources */ = {isa = PBXBuildFile; fileRef = 59A3CFFC1CF4E68100C4259F /* main.mm */; };
+ 59A3D00B1CF4E68100C4259F /* RunModelViewController.mm in Sources */ = {isa = PBXBuildFile; fileRef = 59A3CFFF1CF4E68100C4259F /* RunModelViewController.mm */; };
+ 59A3D00C1CF4E68100C4259F /* RunModelViewController.xib in Resources */ = {isa = PBXBuildFile; fileRef = 59A3D0001CF4E68100C4259F /* RunModelViewController.xib */; };
+/* End PBXBuildFile section */
+
+/* Begin PBXFileReference section */
+ 1C0D73481ECCC41B008C1DAB /* CoreImage.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreImage.framework; path = System/Library/Frameworks/CoreImage.framework; sourceTree = SDKROOT; };
+ 1C0D734A1ECCC460008C1DAB /* CoreGraphics.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreGraphics.framework; path = System/Library/Frameworks/CoreGraphics.framework; sourceTree = SDKROOT; };
+ 1CA45FFE1ECCC356002FA6A4 /* UIKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = UIKit.framework; path = System/Library/Frameworks/UIKit.framework; sourceTree = SDKROOT; };
+ 5911579B1CF4011C00C31E3A /* tf_simple_example.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = tf_simple_example.app; sourceTree = BUILT_PRODUCTS_DIR; };
+ 594C14AD1FB8F9B500EE8BFE /* libtensorflow-lite.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = "libtensorflow-lite.a"; path = "../../../gen/lib/libtensorflow-lite.a"; sourceTree = "<group>"; };
+ 594C14AF1FB9037100EE8BFE /* labels.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = labels.txt; sourceTree = "<group>"; };
+ 594C14B01FB9037100EE8BFE /* mobilenet_v1_1.0_224.tflite */ = {isa = PBXFileReference; lastKnownFileType = file; path = mobilenet_v1_1.0_224.tflite; sourceTree = "<group>"; };
+ 59A3CFF11CF4E68100C4259F /* AppDelegate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = "<group>"; };
+ 59A3CFF21CF4E68100C4259F /* AppDelegate.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = AppDelegate.mm; sourceTree = "<group>"; };
+ 59A3CFF51CF4E68100C4259F /* grace_hopper.jpg */ = {isa = PBXFileReference; lastKnownFileType = image.jpeg; path = grace_hopper.jpg; sourceTree = "<group>"; };
+ 59A3CFFA1CF4E68100C4259F /* ios_image_load.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ios_image_load.h; sourceTree = "<group>"; };
+ 59A3CFFB1CF4E68100C4259F /* ios_image_load.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = ios_image_load.mm; sourceTree = "<group>"; };
+ 59A3CFFC1CF4E68100C4259F /* main.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = main.mm; sourceTree = "<group>"; };
+ 59A3CFFD1CF4E68100C4259F /* RunModel-Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "RunModel-Info.plist"; sourceTree = "<group>"; };
+ 59A3CFFE1CF4E68100C4259F /* RunModelViewController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RunModelViewController.h; sourceTree = "<group>"; };
+ 59A3CFFF1CF4E68100C4259F /* RunModelViewController.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = RunModelViewController.mm; sourceTree = "<group>"; };
+ 59A3D0001CF4E68100C4259F /* RunModelViewController.xib */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = file.xib; path = RunModelViewController.xib; sourceTree = "<group>"; };
+ 73DBC33C5DD9A526EE6D1EF2 /* libPods-tf_simple_example.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-tf_simple_example.a"; sourceTree = BUILT_PRODUCTS_DIR; };
+/* End PBXFileReference section */
+
+/* Begin PBXFrameworksBuildPhase section */
+ 591157981CF4011C00C31E3A /* Frameworks */ = {
+ isa = PBXFrameworksBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 594C14AE1FB8F9B500EE8BFE /* libtensorflow-lite.a in Frameworks */,
+ 1C0D734B1ECCC460008C1DAB /* CoreGraphics.framework in Frameworks */,
+ 1CA45FFF1ECCC356002FA6A4 /* UIKit.framework in Frameworks */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+/* End PBXFrameworksBuildPhase section */
+
+/* Begin PBXGroup section */
+ 24D7686C331131624F4454A0 /* Frameworks */ = {
+ isa = PBXGroup;
+ children = (
+ 594C14AD1FB8F9B500EE8BFE /* libtensorflow-lite.a */,
+ 1C0D734A1ECCC460008C1DAB /* CoreGraphics.framework */,
+ 1C0D73481ECCC41B008C1DAB /* CoreImage.framework */,
+ 1CA45FFE1ECCC356002FA6A4 /* UIKit.framework */,
+ 73DBC33C5DD9A526EE6D1EF2 /* libPods-tf_simple_example.a */,
+ );
+ name = Frameworks;
+ sourceTree = "<group>";
+ };
+ 591157921CF4011C00C31E3A = {
+ isa = PBXGroup;
+ children = (
+ 59A3CFF11CF4E68100C4259F /* AppDelegate.h */,
+ 59A3CFF21CF4E68100C4259F /* AppDelegate.mm */,
+ 59A3CFF31CF4E68100C4259F /* data */,
+ 59A3CFFA1CF4E68100C4259F /* ios_image_load.h */,
+ 59A3CFFB1CF4E68100C4259F /* ios_image_load.mm */,
+ 59A3CFFC1CF4E68100C4259F /* main.mm */,
+ 59A3CFFD1CF4E68100C4259F /* RunModel-Info.plist */,
+ 59A3CFFE1CF4E68100C4259F /* RunModelViewController.h */,
+ 59A3CFFF1CF4E68100C4259F /* RunModelViewController.mm */,
+ 59A3D0001CF4E68100C4259F /* RunModelViewController.xib */,
+ 5911579C1CF4011C00C31E3A /* Products */,
+ 24D7686C331131624F4454A0 /* Frameworks */,
+ );
+ sourceTree = "<group>";
+ };
+ 5911579C1CF4011C00C31E3A /* Products */ = {
+ isa = PBXGroup;
+ children = (
+ 5911579B1CF4011C00C31E3A /* tf_simple_example.app */,
+ );
+ name = Products;
+ sourceTree = "<group>";
+ };
+ 59A3CFF31CF4E68100C4259F /* data */ = {
+ isa = PBXGroup;
+ children = (
+ 59A3CFF51CF4E68100C4259F /* grace_hopper.jpg */,
+ 594C14AF1FB9037100EE8BFE /* labels.txt */,
+ 594C14B01FB9037100EE8BFE /* mobilenet_v1_1.0_224.tflite */,
+ );
+ path = data;
+ sourceTree = "<group>";
+ };
+/* End PBXGroup section */
+
+/* Begin PBXNativeTarget section */
+ 5911579A1CF4011C00C31E3A /* tf_simple_example */ = {
+ isa = PBXNativeTarget;
+ buildConfigurationList = 591157B21CF4011D00C31E3A /* Build configuration list for PBXNativeTarget "tf_simple_example" */;
+ buildPhases = (
+ 591157971CF4011C00C31E3A /* Sources */,
+ 591157981CF4011C00C31E3A /* Frameworks */,
+ 591157991CF4011C00C31E3A /* Resources */,
+ );
+ buildRules = (
+ );
+ dependencies = (
+ );
+ name = tf_simple_example;
+ productName = tf_ios_makefile_example;
+ productReference = 5911579B1CF4011C00C31E3A /* tf_simple_example.app */;
+ productType = "com.apple.product-type.application";
+ };
+/* End PBXNativeTarget section */
+
+/* Begin PBXProject section */
+ 591157931CF4011C00C31E3A /* Project object */ = {
+ isa = PBXProject;
+ attributes = {
+ LastUpgradeCheck = 0830;
+ ORGANIZATIONNAME = Google;
+ TargetAttributes = {
+ 5911579A1CF4011C00C31E3A = {
+ CreatedOnToolsVersion = 7.2;
+ DevelopmentTeam = EQHXZ8M8AV;
+ ProvisioningStyle = Manual;
+ };
+ };
+ };
+ buildConfigurationList = 591157961CF4011C00C31E3A /* Build configuration list for PBXProject "simple" */;
+ compatibilityVersion = "Xcode 3.2";
+ developmentRegion = English;
+ hasScannedForEncodings = 0;
+ knownRegions = (
+ en,
+ Base,
+ );
+ mainGroup = 591157921CF4011C00C31E3A;
+ productRefGroup = 5911579C1CF4011C00C31E3A /* Products */;
+ projectDirPath = "";
+ projectRoot = "";
+ targets = (
+ 5911579A1CF4011C00C31E3A /* tf_simple_example */,
+ );
+ };
+/* End PBXProject section */
+
+/* Begin PBXResourcesBuildPhase section */
+ 591157991CF4011C00C31E3A /* Resources */ = {
+ isa = PBXResourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 59A3D00C1CF4E68100C4259F /* RunModelViewController.xib in Resources */,
+ 594C14B11FB9037100EE8BFE /* labels.txt in Resources */,
+ 59A3D0031CF4E68100C4259F /* grace_hopper.jpg in Resources */,
+ 594C14B21FB9037100EE8BFE /* mobilenet_v1_1.0_224.tflite in Resources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+/* End PBXResourcesBuildPhase section */
+
+/* Begin PBXSourcesBuildPhase section */
+ 591157971CF4011C00C31E3A /* Sources */ = {
+ isa = PBXSourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 59A3D0091CF4E68100C4259F /* main.mm in Sources */,
+ 59A3D0011CF4E68100C4259F /* AppDelegate.mm in Sources */,
+ 59A3D00B1CF4E68100C4259F /* RunModelViewController.mm in Sources */,
+ 59A3D0081CF4E68100C4259F /* ios_image_load.mm in Sources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+/* End PBXSourcesBuildPhase section */
+
+/* Begin XCBuildConfiguration section */
+ 591157B01CF4011D00C31E3A /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
+ CLANG_CXX_LIBRARY = "libc++";
+ CLANG_ENABLE_MODULES = YES;
+ CLANG_ENABLE_OBJC_ARC = YES;
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INFINITE_RECURSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+ CLANG_WARN_SUSPICIOUS_MOVE = YES;
+ CLANG_WARN_UNREACHABLE_CODE = YES;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
+ COPY_PHASE_STRIP = NO;
+ DEBUG_INFORMATION_FORMAT = dwarf;
+ ENABLE_STRICT_OBJC_MSGSEND = YES;
+ ENABLE_TESTABILITY = YES;
+ GCC_C_LANGUAGE_STANDARD = gnu99;
+ GCC_DYNAMIC_NO_PIC = NO;
+ GCC_NO_COMMON_BLOCKS = YES;
+ GCC_OPTIMIZATION_LEVEL = 0;
+ GCC_PREPROCESSOR_DEFINITIONS = (
+ "DEBUG=1",
+ "$(inherited)",
+ );
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ IPHONEOS_DEPLOYMENT_TARGET = 8.0;
+ MTL_ENABLE_DEBUG_INFO = YES;
+ ONLY_ACTIVE_ARCH = YES;
+ SDKROOT = iphoneos;
+ TARGETED_DEVICE_FAMILY = "1,2";
+ };
+ name = Debug;
+ };
+ 591157B11CF4011D00C31E3A /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
+ CLANG_CXX_LIBRARY = "libc++";
+ CLANG_ENABLE_MODULES = YES;
+ CLANG_ENABLE_OBJC_ARC = YES;
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INFINITE_RECURSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+ CLANG_WARN_SUSPICIOUS_MOVE = YES;
+ CLANG_WARN_UNREACHABLE_CODE = YES;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
+ COPY_PHASE_STRIP = NO;
+ DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+ ENABLE_NS_ASSERTIONS = NO;
+ ENABLE_STRICT_OBJC_MSGSEND = YES;
+ GCC_C_LANGUAGE_STANDARD = gnu99;
+ GCC_NO_COMMON_BLOCKS = YES;
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ IPHONEOS_DEPLOYMENT_TARGET = 8.0;
+ MTL_ENABLE_DEBUG_INFO = NO;
+ SDKROOT = iphoneos;
+ TARGETED_DEVICE_FAMILY = "1,2";
+ VALIDATE_PRODUCT = YES;
+ };
+ name = Release;
+ };
+ 591157B31CF4011D00C31E3A /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ CLANG_DEBUG_INFORMATION_LEVEL = default;
+ CODE_SIGN_IDENTITY = "iPhone Developer";
+ DEVELOPMENT_TEAM = EQHXZ8M8AV;
+ ENABLE_BITCODE = NO;
+ GCC_ENABLE_CPP_EXCEPTIONS = YES;
+ GCC_ENABLE_CPP_RTTI = YES;
+ HEADER_SEARCH_PATHS = (
+ "$(inherited)",
+ ../../../../../../,
+ ../../../downloads/flatbuffers/include/,
+ ../../../downloads/eigen/,
+ ../../../downloads/,
+ );
+ INFOPLIST_FILE = "$(SRCROOT)/RunModel-Info.plist";
+ IPHONEOS_DEPLOYMENT_TARGET = 9.2;
+ LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
+ LIBRARY_SEARCH_PATHS = ../../../gen/lib/;
+ OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+ OTHER_LDFLAGS = "$(inherited)";
+ PRODUCT_BUNDLE_IDENTIFIER = "com.google.tflite-simple-example";
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ PROVISIONING_PROFILE = "1072bd47-ff19-4e5f-8107-d912748f83f1";
+ PROVISIONING_PROFILE_SPECIFIER = "Google Development";
+ SEPARATE_STRIP = NO;
+ };
+ name = Debug;
+ };
+ 591157B41CF4011D00C31E3A /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ CLANG_DEBUG_INFORMATION_LEVEL = default;
+ CODE_SIGN_IDENTITY = "iPhone Developer";
+ DEVELOPMENT_TEAM = "";
+ ENABLE_BITCODE = NO;
+ GCC_ENABLE_CPP_EXCEPTIONS = YES;
+ GCC_ENABLE_CPP_RTTI = YES;
+ HEADER_SEARCH_PATHS = (
+ "$(inherited)",
+ ../../../../../../,
+ ../../../downloads/flatbuffers/include/,
+ ../../../downloads/eigen/,
+ ../../../downloads/,
+ );
+ INFOPLIST_FILE = "$(SRCROOT)/RunModel-Info.plist";
+ IPHONEOS_DEPLOYMENT_TARGET = 9.2;
+ LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
+ LIBRARY_SEARCH_PATHS = ../../../gen/lib/;
+ ONLY_ACTIVE_ARCH = YES;
+ OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)";
+ OTHER_LDFLAGS = "$(inherited)";
+ PRODUCT_BUNDLE_IDENTIFIER = "com.google.tflite-simple-example";
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ PROVISIONING_PROFILE_SPECIFIER = "";
+ SEPARATE_STRIP = NO;
+ };
+ name = Release;
+ };
+/* End XCBuildConfiguration section */
+
+/* Begin XCConfigurationList section */
+ 591157961CF4011C00C31E3A /* Build configuration list for PBXProject "simple" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 591157B01CF4011D00C31E3A /* Debug */,
+ 591157B11CF4011D00C31E3A /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ 591157B21CF4011D00C31E3A /* Build configuration list for PBXNativeTarget "tf_simple_example" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 591157B31CF4011D00C31E3A /* Debug */,
+ 591157B41CF4011D00C31E3A /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+/* End XCConfigurationList section */
+ };
+ rootObject = 591157931CF4011C00C31E3A /* Project object */;
+}
diff --git a/tensorflow/contrib/lite/ios_makefile.inc b/tensorflow/contrib/lite/ios_makefile.inc
new file mode 100644
index 0000000000..bcff7ed988
--- /dev/null
+++ b/tensorflow/contrib/lite/ios_makefile.inc
@@ -0,0 +1,47 @@
+# Settings for iOS.
+ifeq ($(TARGET), IOS)
+ BUILD_FOR_IOS_SIMULATOR := false
+ ifeq ($(IOS_ARCH), x86_64)
+ BUILD_FOR_IOS_SIMULATOR := true
+ endif
+ ifeq ($(IOS_ARCH), i386)
+ BUILD_FOR_IOS_SIMULATOR := true
+ endif
+ ifeq ($(BUILD_FOR_IOS_SIMULATOR), true)
+ IPHONEOS_PLATFORM := $(shell xcrun --sdk iphonesimulator \
+ --show-sdk-platform-path)
+ IPHONEOS_SYSROOT := $(shell xcrun --sdk iphonesimulator \
+ --show-sdk-path)
+ else
+ IPHONEOS_PLATFORM := $(shell xcrun --sdk iphoneos --show-sdk-platform-path)
+ IPHONEOS_SYSROOT := $(shell xcrun --sdk iphoneos --show-sdk-path)
+ endif
+ IOS_SDK_VERSION := $(shell xcrun --sdk iphoneos --show-sdk-version)
+ MIN_SDK_VERSION := 9.0
+ # Override IOS_ARCH with armv7, armv7s, arm64, i386, or x86_64.
+ IOS_ARCH := x86_64
+ CXXFLAGS += -miphoneos-version-min=$(MIN_SDK_VERSION) \
+ -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK \
+ -fembed-bitcode \
+ -Wno-c++11-narrowing \
+ -mno-thumb \
+ -fno-exceptions \
+ -isysroot \
+ ${IPHONEOS_SYSROOT} \
+ -arch $(IOS_ARCH) \
+ -O3
+ CCFLAGS += -miphoneos-version-min=$(MIN_SDK_VERSION) \
+ -fembed-bitcode \
+ -mno-thumb \
+ -isysroot \
+ ${IPHONEOS_SYSROOT} \
+ -arch $(IOS_ARCH) \
+ -O3
+ LDFLAGS := -fembed-bitcode \
+ -miphoneos-version-min=${MIN_SDK_VERSION} \
+ -arch $(IOS_ARCH)
+ OBJDIR := $(OBJDIR)ios_$(IOS_ARCH)/
+ LIBDIR := $(LIBDIR)ios_$(IOS_ARCH)/
+ BINDIR := $(BINDIR)ios_$(IOS_ARCH)/
+ DEPDIR := $(DEPDIR)ios_$(IOS_ARCH)/
+endif
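
Since OBJDIR, LIBDIR, BINDIR and DEPDIR each gain an ios_$(IOS_ARCH)/ suffix here, every architecture builds into its own directory and the slices can be merged afterwards. A single-architecture build (the same form of command that build_ios_universal_lib.sh issues) looks like:

    # Device build for arm64 only; results go under tensorflow/contrib/lite/gen/lib/ios_arm64/.
    make -f tensorflow/contrib/lite/Makefile TARGET=IOS IOS_ARCH=arm64 -j 8
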
diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h
new file mode 100755
index 0000000000..df460ab9a3
--- /dev/null
+++ b/tensorflow/contrib/lite/schema/schema_generated.h
@@ -0,0 +1,4521 @@
+// automatically generated by the FlatBuffers compiler, do not modify
+
+
+#ifndef FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_
+#define FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_
+
+#include "flatbuffers/flatbuffers.h"
+
+namespace tflite {
+
+struct QuantizationParameters;
+struct QuantizationParametersT;
+
+struct Tensor;
+struct TensorT;
+
+struct Conv2DOptions;
+struct Conv2DOptionsT;
+
+struct Pool2DOptions;
+struct Pool2DOptionsT;
+
+struct DepthwiseConv2DOptions;
+struct DepthwiseConv2DOptionsT;
+
+struct ConcatEmbeddingsOptions;
+struct ConcatEmbeddingsOptionsT;
+
+struct LSHProjectionOptions;
+struct LSHProjectionOptionsT;
+
+struct SVDFOptions;
+struct SVDFOptionsT;
+
+struct RNNOptions;
+struct RNNOptionsT;
+
+struct FullyConnectedOptions;
+struct FullyConnectedOptionsT;
+
+struct SoftmaxOptions;
+struct SoftmaxOptionsT;
+
+struct ConcatenationOptions;
+struct ConcatenationOptionsT;
+
+struct AddOptions;
+struct AddOptionsT;
+
+struct MulOptions;
+struct MulOptionsT;
+
+struct L2NormOptions;
+struct L2NormOptionsT;
+
+struct LocalResponseNormalizationOptions;
+struct LocalResponseNormalizationOptionsT;
+
+struct LSTMOptions;
+struct LSTMOptionsT;
+
+struct ResizeBilinearOptions;
+struct ResizeBilinearOptionsT;
+
+struct CallOptions;
+struct CallOptionsT;
+
+struct ReshapeOptions;
+struct ReshapeOptionsT;
+
+struct SkipGramOptions;
+struct SkipGramOptionsT;
+
+struct SpaceToDepthOptions;
+struct SpaceToDepthOptionsT;
+
+struct EmbeddingLookupSparseOptions;
+struct EmbeddingLookupSparseOptionsT;
+
+struct OperatorCode;
+struct OperatorCodeT;
+
+struct Operator;
+struct OperatorT;
+
+struct SubGraph;
+struct SubGraphT;
+
+struct Buffer;
+struct BufferT;
+
+struct Model;
+struct ModelT;
+
+enum TensorType {
+ TensorType_FLOAT32 = 0,
+ TensorType_FLOAT16 = 1,
+ TensorType_INT32 = 2,
+ TensorType_UINT8 = 3,
+ TensorType_INT64 = 4,
+ TensorType_STRING = 5,
+ TensorType_MIN = TensorType_FLOAT32,
+ TensorType_MAX = TensorType_STRING
+};
+
+inline TensorType (&EnumValuesTensorType())[6] {
+ static TensorType values[] = {
+ TensorType_FLOAT32,
+ TensorType_FLOAT16,
+ TensorType_INT32,
+ TensorType_UINT8,
+ TensorType_INT64,
+ TensorType_STRING
+ };
+ return values;
+}
+
+inline const char **EnumNamesTensorType() {
+ static const char *names[] = {
+ "FLOAT32",
+ "FLOAT16",
+ "INT32",
+ "UINT8",
+ "INT64",
+ "STRING",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameTensorType(TensorType e) {
+ const size_t index = static_cast<int>(e);
+ return EnumNamesTensorType()[index];
+}
+
+enum BuiltinOperator {
+ BuiltinOperator_ADD = 0,
+ BuiltinOperator_AVERAGE_POOL_2D = 1,
+ BuiltinOperator_CONCATENATION = 2,
+ BuiltinOperator_CONV_2D = 3,
+ BuiltinOperator_DEPTHWISE_CONV_2D = 4,
+ BuiltinOperator_EMBEDDING_LOOKUP = 7,
+ BuiltinOperator_FULLY_CONNECTED = 9,
+ BuiltinOperator_HASHTABLE_LOOKUP = 10,
+ BuiltinOperator_L2_NORMALIZATION = 11,
+ BuiltinOperator_L2_POOL_2D = 12,
+ BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13,
+ BuiltinOperator_LOGISTIC = 14,
+ BuiltinOperator_LSH_PROJECTION = 15,
+ BuiltinOperator_LSTM = 16,
+ BuiltinOperator_MAX_POOL_2D = 17,
+ BuiltinOperator_MUL = 18,
+ BuiltinOperator_RELU = 19,
+ BuiltinOperator_RELU1 = 20,
+ BuiltinOperator_RELU6 = 21,
+ BuiltinOperator_RESHAPE = 22,
+ BuiltinOperator_RESIZE_BILINEAR = 23,
+ BuiltinOperator_RNN = 24,
+ BuiltinOperator_SOFTMAX = 25,
+ BuiltinOperator_SPACE_TO_DEPTH = 26,
+ BuiltinOperator_SVDF = 27,
+ BuiltinOperator_TANH = 28,
+ BuiltinOperator_CONCAT_EMBEDDINGS = 29,
+ BuiltinOperator_SKIP_GRAM = 30,
+ BuiltinOperator_CALL = 31,
+ BuiltinOperator_CUSTOM = 32,
+ BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33,
+ BuiltinOperator_MIN = BuiltinOperator_ADD,
+ BuiltinOperator_MAX = BuiltinOperator_EMBEDDING_LOOKUP_SPARSE
+};
+
+inline BuiltinOperator (&EnumValuesBuiltinOperator())[31] {
+ static BuiltinOperator values[] = {
+ BuiltinOperator_ADD,
+ BuiltinOperator_AVERAGE_POOL_2D,
+ BuiltinOperator_CONCATENATION,
+ BuiltinOperator_CONV_2D,
+ BuiltinOperator_DEPTHWISE_CONV_2D,
+ BuiltinOperator_EMBEDDING_LOOKUP,
+ BuiltinOperator_FULLY_CONNECTED,
+ BuiltinOperator_HASHTABLE_LOOKUP,
+ BuiltinOperator_L2_NORMALIZATION,
+ BuiltinOperator_L2_POOL_2D,
+ BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
+ BuiltinOperator_LOGISTIC,
+ BuiltinOperator_LSH_PROJECTION,
+ BuiltinOperator_LSTM,
+ BuiltinOperator_MAX_POOL_2D,
+ BuiltinOperator_MUL,
+ BuiltinOperator_RELU,
+ BuiltinOperator_RELU1,
+ BuiltinOperator_RELU6,
+ BuiltinOperator_RESHAPE,
+ BuiltinOperator_RESIZE_BILINEAR,
+ BuiltinOperator_RNN,
+ BuiltinOperator_SOFTMAX,
+ BuiltinOperator_SPACE_TO_DEPTH,
+ BuiltinOperator_SVDF,
+ BuiltinOperator_TANH,
+ BuiltinOperator_CONCAT_EMBEDDINGS,
+ BuiltinOperator_SKIP_GRAM,
+ BuiltinOperator_CALL,
+ BuiltinOperator_CUSTOM,
+ BuiltinOperator_EMBEDDING_LOOKUP_SPARSE
+ };
+ return values;
+}
+
+inline const char **EnumNamesBuiltinOperator() {
+ static const char *names[] = {
+ "ADD",
+ "AVERAGE_POOL_2D",
+ "CONCATENATION",
+ "CONV_2D",
+ "DEPTHWISE_CONV_2D",
+ "",
+ "",
+ "EMBEDDING_LOOKUP",
+ "",
+ "FULLY_CONNECTED",
+ "HASHTABLE_LOOKUP",
+ "L2_NORMALIZATION",
+ "L2_POOL_2D",
+ "LOCAL_RESPONSE_NORMALIZATION",
+ "LOGISTIC",
+ "LSH_PROJECTION",
+ "LSTM",
+ "MAX_POOL_2D",
+ "MUL",
+ "RELU",
+ "RELU1",
+ "RELU6",
+ "RESHAPE",
+ "RESIZE_BILINEAR",
+ "RNN",
+ "SOFTMAX",
+ "SPACE_TO_DEPTH",
+ "SVDF",
+ "TANH",
+ "CONCAT_EMBEDDINGS",
+ "SKIP_GRAM",
+ "CALL",
+ "CUSTOM",
+ "EMBEDDING_LOOKUP_SPARSE",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameBuiltinOperator(BuiltinOperator e) {
+ const size_t index = static_cast<int>(e);
+ return EnumNamesBuiltinOperator()[index];
+}
+
+enum BuiltinOptions {
+ BuiltinOptions_NONE = 0,
+ BuiltinOptions_Conv2DOptions = 1,
+ BuiltinOptions_DepthwiseConv2DOptions = 2,
+ BuiltinOptions_ConcatEmbeddingsOptions = 3,
+ BuiltinOptions_LSHProjectionOptions = 4,
+ BuiltinOptions_Pool2DOptions = 5,
+ BuiltinOptions_SVDFOptions = 6,
+ BuiltinOptions_RNNOptions = 7,
+ BuiltinOptions_FullyConnectedOptions = 8,
+ BuiltinOptions_SoftmaxOptions = 9,
+ BuiltinOptions_ConcatenationOptions = 10,
+ BuiltinOptions_AddOptions = 11,
+ BuiltinOptions_L2NormOptions = 12,
+ BuiltinOptions_LocalResponseNormalizationOptions = 13,
+ BuiltinOptions_LSTMOptions = 14,
+ BuiltinOptions_ResizeBilinearOptions = 15,
+ BuiltinOptions_CallOptions = 16,
+ BuiltinOptions_ReshapeOptions = 17,
+ BuiltinOptions_SkipGramOptions = 18,
+ BuiltinOptions_SpaceToDepthOptions = 19,
+ BuiltinOptions_EmbeddingLookupSparseOptions = 20,
+ BuiltinOptions_MulOptions = 21,
+ BuiltinOptions_MIN = BuiltinOptions_NONE,
+ BuiltinOptions_MAX = BuiltinOptions_MulOptions
+};
+
+inline BuiltinOptions (&EnumValuesBuiltinOptions())[22] {
+ static BuiltinOptions values[] = {
+ BuiltinOptions_NONE,
+ BuiltinOptions_Conv2DOptions,
+ BuiltinOptions_DepthwiseConv2DOptions,
+ BuiltinOptions_ConcatEmbeddingsOptions,
+ BuiltinOptions_LSHProjectionOptions,
+ BuiltinOptions_Pool2DOptions,
+ BuiltinOptions_SVDFOptions,
+ BuiltinOptions_RNNOptions,
+ BuiltinOptions_FullyConnectedOptions,
+ BuiltinOptions_SoftmaxOptions,
+ BuiltinOptions_ConcatenationOptions,
+ BuiltinOptions_AddOptions,
+ BuiltinOptions_L2NormOptions,
+ BuiltinOptions_LocalResponseNormalizationOptions,
+ BuiltinOptions_LSTMOptions,
+ BuiltinOptions_ResizeBilinearOptions,
+ BuiltinOptions_CallOptions,
+ BuiltinOptions_ReshapeOptions,
+ BuiltinOptions_SkipGramOptions,
+ BuiltinOptions_SpaceToDepthOptions,
+ BuiltinOptions_EmbeddingLookupSparseOptions,
+ BuiltinOptions_MulOptions
+ };
+ return values;
+}
+
+inline const char **EnumNamesBuiltinOptions() {
+ static const char *names[] = {
+ "NONE",
+ "Conv2DOptions",
+ "DepthwiseConv2DOptions",
+ "ConcatEmbeddingsOptions",
+ "LSHProjectionOptions",
+ "Pool2DOptions",
+ "SVDFOptions",
+ "RNNOptions",
+ "FullyConnectedOptions",
+ "SoftmaxOptions",
+ "ConcatenationOptions",
+ "AddOptions",
+ "L2NormOptions",
+ "LocalResponseNormalizationOptions",
+ "LSTMOptions",
+ "ResizeBilinearOptions",
+ "CallOptions",
+ "ReshapeOptions",
+ "SkipGramOptions",
+ "SpaceToDepthOptions",
+ "EmbeddingLookupSparseOptions",
+ "MulOptions",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameBuiltinOptions(BuiltinOptions e) {
+ const size_t index = static_cast<int>(e);
+ return EnumNamesBuiltinOptions()[index];
+}
+
+template<typename T> struct BuiltinOptionsTraits {
+ static const BuiltinOptions enum_value = BuiltinOptions_NONE;
+};
+
+template<> struct BuiltinOptionsTraits<Conv2DOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions;
+};
+
+template<> struct BuiltinOptionsTraits<DepthwiseConv2DOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions;
+};
+
+template<> struct BuiltinOptionsTraits<ConcatEmbeddingsOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions;
+};
+
+template<> struct BuiltinOptionsTraits<LSHProjectionOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions;
+};
+
+template<> struct BuiltinOptionsTraits<Pool2DOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions;
+};
+
+template<> struct BuiltinOptionsTraits<SVDFOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions;
+};
+
+template<> struct BuiltinOptionsTraits<RNNOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions;
+};
+
+template<> struct BuiltinOptionsTraits<FullyConnectedOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions;
+};
+
+template<> struct BuiltinOptionsTraits<SoftmaxOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions;
+};
+
+template<> struct BuiltinOptionsTraits<ConcatenationOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions;
+};
+
+template<> struct BuiltinOptionsTraits<AddOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_AddOptions;
+};
+
+template<> struct BuiltinOptionsTraits<L2NormOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions;
+};
+
+template<> struct BuiltinOptionsTraits<LocalResponseNormalizationOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions;
+};
+
+template<> struct BuiltinOptionsTraits<LSTMOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions;
+};
+
+template<> struct BuiltinOptionsTraits<ResizeBilinearOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions;
+};
+
+template<> struct BuiltinOptionsTraits<CallOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_CallOptions;
+};
+
+template<> struct BuiltinOptionsTraits<ReshapeOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions;
+};
+
+template<> struct BuiltinOptionsTraits<SkipGramOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions;
+};
+
+template<> struct BuiltinOptionsTraits<SpaceToDepthOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions;
+};
+
+template<> struct BuiltinOptionsTraits<EmbeddingLookupSparseOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions;
+};
+
+template<> struct BuiltinOptionsTraits<MulOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_MulOptions;
+};
+
+struct BuiltinOptionsUnion {
+ BuiltinOptions type;
+ void *value;
+
+ BuiltinOptionsUnion() : type(BuiltinOptions_NONE), value(nullptr) {}
+ BuiltinOptionsUnion(BuiltinOptionsUnion&& u) FLATBUFFERS_NOEXCEPT :
+ type(BuiltinOptions_NONE), value(nullptr)
+ { std::swap(type, u.type); std::swap(value, u.value); }
+ BuiltinOptionsUnion(const BuiltinOptionsUnion &) FLATBUFFERS_NOEXCEPT;
+ BuiltinOptionsUnion &operator=(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT
+ { BuiltinOptionsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; }
+ BuiltinOptionsUnion &operator=(BuiltinOptionsUnion &&u) FLATBUFFERS_NOEXCEPT
+ { std::swap(type, u.type); std::swap(value, u.value); return *this; }
+ ~BuiltinOptionsUnion() { Reset(); }
+
+ void Reset();
+
+#ifndef FLATBUFFERS_CPP98_STL
+ template <typename T>
+ void Set(T&& val) {
+ Reset();
+ type = BuiltinOptionsTraits<typename T::TableType>::enum_value;
+ if (type != BuiltinOptions_NONE) {
+ value = new T(std::forward<T>(val));
+ }
+ }
+#endif // FLATBUFFERS_CPP98_STL
+
+ static void *UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver);
+ flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
+
+ Conv2DOptionsT *AsConv2DOptions() {
+ return type == BuiltinOptions_Conv2DOptions ?
+ reinterpret_cast<Conv2DOptionsT *>(value) : nullptr;
+ }
+ const Conv2DOptionsT *AsConv2DOptions() const {
+ return type == BuiltinOptions_Conv2DOptions ?
+ reinterpret_cast<const Conv2DOptionsT *>(value) : nullptr;
+ }
+ DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() {
+ return type == BuiltinOptions_DepthwiseConv2DOptions ?
+ reinterpret_cast<DepthwiseConv2DOptionsT *>(value) : nullptr;
+ }
+ const DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() const {
+ return type == BuiltinOptions_DepthwiseConv2DOptions ?
+ reinterpret_cast<const DepthwiseConv2DOptionsT *>(value) : nullptr;
+ }
+ ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() {
+ return type == BuiltinOptions_ConcatEmbeddingsOptions ?
+ reinterpret_cast<ConcatEmbeddingsOptionsT *>(value) : nullptr;
+ }
+ const ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() const {
+ return type == BuiltinOptions_ConcatEmbeddingsOptions ?
+ reinterpret_cast<const ConcatEmbeddingsOptionsT *>(value) : nullptr;
+ }
+ LSHProjectionOptionsT *AsLSHProjectionOptions() {
+ return type == BuiltinOptions_LSHProjectionOptions ?
+ reinterpret_cast<LSHProjectionOptionsT *>(value) : nullptr;
+ }
+ const LSHProjectionOptionsT *AsLSHProjectionOptions() const {
+ return type == BuiltinOptions_LSHProjectionOptions ?
+ reinterpret_cast<const LSHProjectionOptionsT *>(value) : nullptr;
+ }
+ Pool2DOptionsT *AsPool2DOptions() {
+ return type == BuiltinOptions_Pool2DOptions ?
+ reinterpret_cast<Pool2DOptionsT *>(value) : nullptr;
+ }
+ const Pool2DOptionsT *AsPool2DOptions() const {
+ return type == BuiltinOptions_Pool2DOptions ?
+ reinterpret_cast<const Pool2DOptionsT *>(value) : nullptr;
+ }
+ SVDFOptionsT *AsSVDFOptions() {
+ return type == BuiltinOptions_SVDFOptions ?
+ reinterpret_cast<SVDFOptionsT *>(value) : nullptr;
+ }
+ const SVDFOptionsT *AsSVDFOptions() const {
+ return type == BuiltinOptions_SVDFOptions ?
+ reinterpret_cast<const SVDFOptionsT *>(value) : nullptr;
+ }
+ RNNOptionsT *AsRNNOptions() {
+ return type == BuiltinOptions_RNNOptions ?
+ reinterpret_cast<RNNOptionsT *>(value) : nullptr;
+ }
+ const RNNOptionsT *AsRNNOptions() const {
+ return type == BuiltinOptions_RNNOptions ?
+ reinterpret_cast<const RNNOptionsT *>(value) : nullptr;
+ }
+ FullyConnectedOptionsT *AsFullyConnectedOptions() {
+ return type == BuiltinOptions_FullyConnectedOptions ?
+ reinterpret_cast<FullyConnectedOptionsT *>(value) : nullptr;
+ }
+ const FullyConnectedOptionsT *AsFullyConnectedOptions() const {
+ return type == BuiltinOptions_FullyConnectedOptions ?
+ reinterpret_cast<const FullyConnectedOptionsT *>(value) : nullptr;
+ }
+ SoftmaxOptionsT *AsSoftmaxOptions() {
+ return type == BuiltinOptions_SoftmaxOptions ?
+ reinterpret_cast<SoftmaxOptionsT *>(value) : nullptr;
+ }
+ const SoftmaxOptionsT *AsSoftmaxOptions() const {
+ return type == BuiltinOptions_SoftmaxOptions ?
+ reinterpret_cast<const SoftmaxOptionsT *>(value) : nullptr;
+ }
+ ConcatenationOptionsT *AsConcatenationOptions() {
+ return type == BuiltinOptions_ConcatenationOptions ?
+ reinterpret_cast<ConcatenationOptionsT *>(value) : nullptr;
+ }
+ const ConcatenationOptionsT *AsConcatenationOptions() const {
+ return type == BuiltinOptions_ConcatenationOptions ?
+ reinterpret_cast<const ConcatenationOptionsT *>(value) : nullptr;
+ }
+ AddOptionsT *AsAddOptions() {
+ return type == BuiltinOptions_AddOptions ?
+ reinterpret_cast<AddOptionsT *>(value) : nullptr;
+ }
+ const AddOptionsT *AsAddOptions() const {
+ return type == BuiltinOptions_AddOptions ?
+ reinterpret_cast<const AddOptionsT *>(value) : nullptr;
+ }
+ L2NormOptionsT *AsL2NormOptions() {
+ return type == BuiltinOptions_L2NormOptions ?
+ reinterpret_cast<L2NormOptionsT *>(value) : nullptr;
+ }
+ const L2NormOptionsT *AsL2NormOptions() const {
+ return type == BuiltinOptions_L2NormOptions ?
+ reinterpret_cast<const L2NormOptionsT *>(value) : nullptr;
+ }
+ LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() {
+ return type == BuiltinOptions_LocalResponseNormalizationOptions ?
+ reinterpret_cast<LocalResponseNormalizationOptionsT *>(value) : nullptr;
+ }
+ const LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() const {
+ return type == BuiltinOptions_LocalResponseNormalizationOptions ?
+ reinterpret_cast<const LocalResponseNormalizationOptionsT *>(value) : nullptr;
+ }
+ LSTMOptionsT *AsLSTMOptions() {
+ return type == BuiltinOptions_LSTMOptions ?
+ reinterpret_cast<LSTMOptionsT *>(value) : nullptr;
+ }
+ const LSTMOptionsT *AsLSTMOptions() const {
+ return type == BuiltinOptions_LSTMOptions ?
+ reinterpret_cast<const LSTMOptionsT *>(value) : nullptr;
+ }
+ ResizeBilinearOptionsT *AsResizeBilinearOptions() {
+ return type == BuiltinOptions_ResizeBilinearOptions ?
+ reinterpret_cast<ResizeBilinearOptionsT *>(value) : nullptr;
+ }
+ const ResizeBilinearOptionsT *AsResizeBilinearOptions() const {
+ return type == BuiltinOptions_ResizeBilinearOptions ?
+ reinterpret_cast<const ResizeBilinearOptionsT *>(value) : nullptr;
+ }
+ CallOptionsT *AsCallOptions() {
+ return type == BuiltinOptions_CallOptions ?
+ reinterpret_cast<CallOptionsT *>(value) : nullptr;
+ }
+ const CallOptionsT *AsCallOptions() const {
+ return type == BuiltinOptions_CallOptions ?
+ reinterpret_cast<const CallOptionsT *>(value) : nullptr;
+ }
+ ReshapeOptionsT *AsReshapeOptions() {
+ return type == BuiltinOptions_ReshapeOptions ?
+ reinterpret_cast<ReshapeOptionsT *>(value) : nullptr;
+ }
+ const ReshapeOptionsT *AsReshapeOptions() const {
+ return type == BuiltinOptions_ReshapeOptions ?
+ reinterpret_cast<const ReshapeOptionsT *>(value) : nullptr;
+ }
+ SkipGramOptionsT *AsSkipGramOptions() {
+ return type == BuiltinOptions_SkipGramOptions ?
+ reinterpret_cast<SkipGramOptionsT *>(value) : nullptr;
+ }
+ const SkipGramOptionsT *AsSkipGramOptions() const {
+ return type == BuiltinOptions_SkipGramOptions ?
+ reinterpret_cast<const SkipGramOptionsT *>(value) : nullptr;
+ }
+ SpaceToDepthOptionsT *AsSpaceToDepthOptions() {
+ return type == BuiltinOptions_SpaceToDepthOptions ?
+ reinterpret_cast<SpaceToDepthOptionsT *>(value) : nullptr;
+ }
+ const SpaceToDepthOptionsT *AsSpaceToDepthOptions() const {
+ return type == BuiltinOptions_SpaceToDepthOptions ?
+ reinterpret_cast<const SpaceToDepthOptionsT *>(value) : nullptr;
+ }
+ EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() {
+ return type == BuiltinOptions_EmbeddingLookupSparseOptions ?
+ reinterpret_cast<EmbeddingLookupSparseOptionsT *>(value) : nullptr;
+ }
+ const EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() const {
+ return type == BuiltinOptions_EmbeddingLookupSparseOptions ?
+ reinterpret_cast<const EmbeddingLookupSparseOptionsT *>(value) : nullptr;
+ }
+ MulOptionsT *AsMulOptions() {
+ return type == BuiltinOptions_MulOptions ?
+ reinterpret_cast<MulOptionsT *>(value) : nullptr;
+ }
+ const MulOptionsT *AsMulOptions() const {
+ return type == BuiltinOptions_MulOptions ?
+ reinterpret_cast<const MulOptionsT *>(value) : nullptr;
+ }
+};
+
+bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
+bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
+
+enum Padding {
+ Padding_SAME = 0,
+ Padding_VALID = 1,
+ Padding_MIN = Padding_SAME,
+ Padding_MAX = Padding_VALID
+};
+
+inline Padding (&EnumValuesPadding())[2] {
+ static Padding values[] = {
+ Padding_SAME,
+ Padding_VALID
+ };
+ return values;
+}
+
+inline const char **EnumNamesPadding() {
+ static const char *names[] = {
+ "SAME",
+ "VALID",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNamePadding(Padding e) {
+ const size_t index = static_cast<int>(e);
+ return EnumNamesPadding()[index];
+}
+
+enum ActivationFunctionType {
+ ActivationFunctionType_NONE = 0,
+ ActivationFunctionType_RELU = 1,
+ ActivationFunctionType_RELU1 = 2,
+ ActivationFunctionType_RELU6 = 3,
+ ActivationFunctionType_TANH = 4,
+ ActivationFunctionType_SIGN_BIT = 5,
+ ActivationFunctionType_MIN = ActivationFunctionType_NONE,
+ ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT
+};
+
+inline ActivationFunctionType (&EnumValuesActivationFunctionType())[6] {
+ static ActivationFunctionType values[] = {
+ ActivationFunctionType_NONE,
+ ActivationFunctionType_RELU,
+ ActivationFunctionType_RELU1,
+ ActivationFunctionType_RELU6,
+ ActivationFunctionType_TANH,
+ ActivationFunctionType_SIGN_BIT
+ };
+ return values;
+}
+
+inline const char **EnumNamesActivationFunctionType() {
+ static const char *names[] = {
+ "NONE",
+ "RELU",
+ "RELU1",
+ "RELU6",
+ "TANH",
+ "SIGN_BIT",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameActivationFunctionType(ActivationFunctionType e) {
+ const size_t index = static_cast<int>(e);
+ return EnumNamesActivationFunctionType()[index];
+}
+
+enum LSHProjectionType {
+ LSHProjectionType_UNKNOWN = 0,
+ LSHProjectionType_SPARSE = 1,
+ LSHProjectionType_DENSE = 2,
+ LSHProjectionType_MIN = LSHProjectionType_UNKNOWN,
+ LSHProjectionType_MAX = LSHProjectionType_DENSE
+};
+
+inline LSHProjectionType (&EnumValuesLSHProjectionType())[3] {
+ static LSHProjectionType values[] = {
+ LSHProjectionType_UNKNOWN,
+ LSHProjectionType_SPARSE,
+ LSHProjectionType_DENSE
+ };
+ return values;
+}
+
+inline const char **EnumNamesLSHProjectionType() {
+ static const char *names[] = {
+ "UNKNOWN",
+ "SPARSE",
+ "DENSE",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameLSHProjectionType(LSHProjectionType e) {
+ const size_t index = static_cast<int>(e);
+ return EnumNamesLSHProjectionType()[index];
+}
+
+enum CombinerType {
+ CombinerType_SUM = 0,
+ CombinerType_MEAN = 1,
+ CombinerType_SQRTN = 2,
+ CombinerType_MIN = CombinerType_SUM,
+ CombinerType_MAX = CombinerType_SQRTN
+};
+
+inline CombinerType (&EnumValuesCombinerType())[3] {
+ static CombinerType values[] = {
+ CombinerType_SUM,
+ CombinerType_MEAN,
+ CombinerType_SQRTN
+ };
+ return values;
+}
+
+inline const char **EnumNamesCombinerType() {
+ static const char *names[] = {
+ "SUM",
+ "MEAN",
+ "SQRTN",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameCombinerType(CombinerType e) {
+ const size_t index = static_cast<int>(e);
+ return EnumNamesCombinerType()[index];
+}
+
+enum CustomOptionsFormat {
+ CustomOptionsFormat_FLEXBUFFERS = 0,
+ CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS,
+ CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS
+};
+
+inline CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] {
+ static CustomOptionsFormat values[] = {
+ CustomOptionsFormat_FLEXBUFFERS
+ };
+ return values;
+}
+
+inline const char **EnumNamesCustomOptionsFormat() {
+ static const char *names[] = {
+ "FLEXBUFFERS",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) {
+ const size_t index = static_cast<int>(e);
+ return EnumNamesCustomOptionsFormat()[index];
+}
+
+struct QuantizationParametersT : public flatbuffers::NativeTable {
+ typedef QuantizationParameters TableType;
+ std::vector<float> min;
+ std::vector<float> max;
+ std::vector<float> scale;
+ std::vector<int64_t> zero_point;
+ QuantizationParametersT() {
+ }
+};
+
+struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef QuantizationParametersT NativeTableType;
+ enum {
+ VT_MIN = 4,
+ VT_MAX = 6,
+ VT_SCALE = 8,
+ VT_ZERO_POINT = 10
+ };
+ const flatbuffers::Vector<float> *min() const {
+ return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN);
+ }
+ const flatbuffers::Vector<float> *max() const {
+ return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX);
+ }
+ const flatbuffers::Vector<float> *scale() const {
+ return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE);
+ }
+ const flatbuffers::Vector<int64_t> *zero_point() const {
+ return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_MIN) &&
+ verifier.Verify(min()) &&
+ VerifyOffset(verifier, VT_MAX) &&
+ verifier.Verify(max()) &&
+ VerifyOffset(verifier, VT_SCALE) &&
+ verifier.Verify(scale()) &&
+ VerifyOffset(verifier, VT_ZERO_POINT) &&
+ verifier.Verify(zero_point()) &&
+ verifier.EndTable();
+ }
+ QuantizationParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<QuantizationParameters> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct QuantizationParametersBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min) {
+ fbb_.AddOffset(QuantizationParameters::VT_MIN, min);
+ }
+ void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max) {
+ fbb_.AddOffset(QuantizationParameters::VT_MAX, max);
+ }
+ void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale) {
+ fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale);
+ }
+ void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point) {
+ fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point);
+ }
+ explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ QuantizationParametersBuilder &operator=(const QuantizationParametersBuilder &);
+ flatbuffers::Offset<QuantizationParameters> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<QuantizationParameters>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<float>> min = 0,
+ flatbuffers::Offset<flatbuffers::Vector<float>> max = 0,
+ flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0) {
+ QuantizationParametersBuilder builder_(_fbb);
+ builder_.add_zero_point(zero_point);
+ builder_.add_scale(scale);
+ builder_.add_max(max);
+ builder_.add_min(min);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<float> *min = nullptr,
+ const std::vector<float> *max = nullptr,
+ const std::vector<float> *scale = nullptr,
+ const std::vector<int64_t> *zero_point = nullptr) {
+ return tflite::CreateQuantizationParameters(
+ _fbb,
+ min ? _fbb.CreateVector<float>(*min) : 0,
+ max ? _fbb.CreateVector<float>(*max) : 0,
+ scale ? _fbb.CreateVector<float>(*scale) : 0,
+ zero_point ? _fbb.CreateVector<int64_t>(*zero_point) : 0);
+}
+
+flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct TensorT : public flatbuffers::NativeTable {
+ typedef Tensor TableType;
+ std::vector<int32_t> shape;
+ TensorType type;
+ uint32_t buffer;
+ std::string name;
+ std::unique_ptr<QuantizationParametersT> quantization;
+ TensorT()
+ : type(TensorType_FLOAT32),
+ buffer(0) {
+ }
+};
+
+struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef TensorT NativeTableType;
+ enum {
+ VT_SHAPE = 4,
+ VT_TYPE = 6,
+ VT_BUFFER = 8,
+ VT_NAME = 10,
+ VT_QUANTIZATION = 12
+ };
+ const flatbuffers::Vector<int32_t> *shape() const {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
+ }
+ TensorType type() const {
+ return static_cast<TensorType>(GetField<int8_t>(VT_TYPE, 0));
+ }
+ uint32_t buffer() const {
+ return GetField<uint32_t>(VT_BUFFER, 0);
+ }
+ const flatbuffers::String *name() const {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ const QuantizationParameters *quantization() const {
+ return GetPointer<const QuantizationParameters *>(VT_QUANTIZATION);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_SHAPE) &&
+ verifier.Verify(shape()) &&
+ VerifyField<int8_t>(verifier, VT_TYPE) &&
+ VerifyField<uint32_t>(verifier, VT_BUFFER) &&
+ VerifyOffset(verifier, VT_NAME) &&
+ verifier.Verify(name()) &&
+ VerifyOffset(verifier, VT_QUANTIZATION) &&
+ verifier.VerifyTable(quantization()) &&
+ verifier.EndTable();
+ }
+ TensorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Tensor> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct TensorBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) {
+ fbb_.AddOffset(Tensor::VT_SHAPE, shape);
+ }
+ void add_type(TensorType type) {
+ fbb_.AddElement<int8_t>(Tensor::VT_TYPE, static_cast<int8_t>(type), 0);
+ }
+ void add_buffer(uint32_t buffer) {
+ fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0);
+ }
+ void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+ fbb_.AddOffset(Tensor::VT_NAME, name);
+ }
+ void add_quantization(flatbuffers::Offset<QuantizationParameters> quantization) {
+ fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization);
+ }
+ explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ TensorBuilder &operator=(const TensorBuilder &);
+ flatbuffers::Offset<Tensor> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Tensor>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Tensor> CreateTensor(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
+ TensorType type = TensorType_FLOAT32,
+ uint32_t buffer = 0,
+ flatbuffers::Offset<flatbuffers::String> name = 0,
+ flatbuffers::Offset<QuantizationParameters> quantization = 0) {
+ TensorBuilder builder_(_fbb);
+ builder_.add_quantization(quantization);
+ builder_.add_name(name);
+ builder_.add_buffer(buffer);
+ builder_.add_shape(shape);
+ builder_.add_type(type);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Tensor> CreateTensorDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<int32_t> *shape = nullptr,
+ TensorType type = TensorType_FLOAT32,
+ uint32_t buffer = 0,
+ const char *name = nullptr,
+ flatbuffers::Offset<QuantizationParameters> quantization = 0) {
+ return tflite::CreateTensor(
+ _fbb,
+ shape ? _fbb.CreateVector<int32_t>(*shape) : 0,
+ type,
+ buffer,
+ name ? _fbb.CreateString(name) : 0,
+ quantization);
+}
+
+flatbuffers::Offset<Tensor> CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Conv2DOptionsT : public flatbuffers::NativeTable {
+ typedef Conv2DOptions TableType;
+ Padding padding;
+ int32_t stride_w;
+ int32_t stride_h;
+ ActivationFunctionType fused_activation_function;
+ Conv2DOptionsT()
+ : padding(Padding_SAME),
+ stride_w(0),
+ stride_h(0),
+ fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef Conv2DOptionsT NativeTableType;
+ enum {
+ VT_PADDING = 4,
+ VT_STRIDE_W = 6,
+ VT_STRIDE_H = 8,
+ VT_FUSED_ACTIVATION_FUNCTION = 10
+ };
+ Padding padding() const {
+ return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0));
+ }
+ int32_t stride_w() const {
+ return GetField<int32_t>(VT_STRIDE_W, 0);
+ }
+ int32_t stride_h() const {
+ return GetField<int32_t>(VT_STRIDE_H, 0);
+ }
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_PADDING) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ Conv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Conv2DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Conv2DOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_padding(Padding padding) {
+ fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
+ }
+ void add_stride_w(int32_t stride_w) {
+ fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_W, stride_w, 0);
+ }
+ void add_stride_h(int32_t stride_h) {
+ fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_H, stride_h, 0);
+ }
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ Conv2DOptionsBuilder &operator=(const Conv2DOptionsBuilder &);
+ flatbuffers::Offset<Conv2DOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Conv2DOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ Padding padding = Padding_SAME,
+ int32_t stride_w = 0,
+ int32_t stride_h = 0,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ Conv2DOptionsBuilder builder_(_fbb);
+ builder_.add_stride_h(stride_h);
+ builder_.add_stride_w(stride_w);
+ builder_.add_fused_activation_function(fused_activation_function);
+ builder_.add_padding(padding);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Pool2DOptionsT : public flatbuffers::NativeTable {
+ typedef Pool2DOptions TableType;
+ Padding padding;
+ int32_t stride_w;
+ int32_t stride_h;
+ int32_t filter_width;
+ int32_t filter_height;
+ ActivationFunctionType fused_activation_function;
+ Pool2DOptionsT()
+ : padding(Padding_SAME),
+ stride_w(0),
+ stride_h(0),
+ filter_width(0),
+ filter_height(0),
+ fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef Pool2DOptionsT NativeTableType;
+ enum {
+ VT_PADDING = 4,
+ VT_STRIDE_W = 6,
+ VT_STRIDE_H = 8,
+ VT_FILTER_WIDTH = 10,
+ VT_FILTER_HEIGHT = 12,
+ VT_FUSED_ACTIVATION_FUNCTION = 14
+ };
+ Padding padding() const {
+ return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0));
+ }
+ int32_t stride_w() const {
+ return GetField<int32_t>(VT_STRIDE_W, 0);
+ }
+ int32_t stride_h() const {
+ return GetField<int32_t>(VT_STRIDE_H, 0);
+ }
+ int32_t filter_width() const {
+ return GetField<int32_t>(VT_FILTER_WIDTH, 0);
+ }
+ int32_t filter_height() const {
+ return GetField<int32_t>(VT_FILTER_HEIGHT, 0);
+ }
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_PADDING) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
+ VerifyField<int32_t>(verifier, VT_FILTER_WIDTH) &&
+ VerifyField<int32_t>(verifier, VT_FILTER_HEIGHT) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ Pool2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Pool2DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Pool2DOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_padding(Padding padding) {
+ fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
+ }
+ void add_stride_w(int32_t stride_w) {
+ fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_W, stride_w, 0);
+ }
+ void add_stride_h(int32_t stride_h) {
+ fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_H, stride_h, 0);
+ }
+ void add_filter_width(int32_t filter_width) {
+ fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0);
+ }
+ void add_filter_height(int32_t filter_height) {
+ fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0);
+ }
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ Pool2DOptionsBuilder &operator=(const Pool2DOptionsBuilder &);
+ flatbuffers::Offset<Pool2DOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Pool2DOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ Padding padding = Padding_SAME,
+ int32_t stride_w = 0,
+ int32_t stride_h = 0,
+ int32_t filter_width = 0,
+ int32_t filter_height = 0,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ Pool2DOptionsBuilder builder_(_fbb);
+ builder_.add_filter_height(filter_height);
+ builder_.add_filter_width(filter_width);
+ builder_.add_stride_h(stride_h);
+ builder_.add_stride_w(stride_w);
+ builder_.add_fused_activation_function(fused_activation_function);
+ builder_.add_padding(padding);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct DepthwiseConv2DOptionsT : public flatbuffers::NativeTable {
+ typedef DepthwiseConv2DOptions TableType;
+ Padding padding;
+ int32_t stride_w;
+ int32_t stride_h;
+ int32_t depth_multiplier;
+ ActivationFunctionType fused_activation_function;
+ DepthwiseConv2DOptionsT()
+ : padding(Padding_SAME),
+ stride_w(0),
+ stride_h(0),
+ depth_multiplier(0),
+ fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef DepthwiseConv2DOptionsT NativeTableType;
+ enum {
+ VT_PADDING = 4,
+ VT_STRIDE_W = 6,
+ VT_STRIDE_H = 8,
+ VT_DEPTH_MULTIPLIER = 10,
+ VT_FUSED_ACTIVATION_FUNCTION = 12
+ };
+ Padding padding() const {
+ return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0));
+ }
+ int32_t stride_w() const {
+ return GetField<int32_t>(VT_STRIDE_W, 0);
+ }
+ int32_t stride_h() const {
+ return GetField<int32_t>(VT_STRIDE_H, 0);
+ }
+ int32_t depth_multiplier() const {
+ return GetField<int32_t>(VT_DEPTH_MULTIPLIER, 0);
+ }
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_PADDING) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
+ VerifyField<int32_t>(verifier, VT_DEPTH_MULTIPLIER) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ DepthwiseConv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<DepthwiseConv2DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct DepthwiseConv2DOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_padding(Padding padding) {
+ fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
+ }
+ void add_stride_w(int32_t stride_w) {
+ fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0);
+ }
+ void add_stride_h(int32_t stride_h) {
+ fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0);
+ }
+ void add_depth_multiplier(int32_t depth_multiplier) {
+ fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0);
+ }
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ DepthwiseConv2DOptionsBuilder &operator=(const DepthwiseConv2DOptionsBuilder &);
+ flatbuffers::Offset<DepthwiseConv2DOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ Padding padding = Padding_SAME,
+ int32_t stride_w = 0,
+ int32_t stride_h = 0,
+ int32_t depth_multiplier = 0,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ DepthwiseConv2DOptionsBuilder builder_(_fbb);
+ builder_.add_depth_multiplier(depth_multiplier);
+ builder_.add_stride_h(stride_h);
+ builder_.add_stride_w(stride_w);
+ builder_.add_fused_activation_function(fused_activation_function);
+ builder_.add_padding(padding);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ConcatEmbeddingsOptionsT : public flatbuffers::NativeTable {
+ typedef ConcatEmbeddingsOptions TableType;
+ int32_t num_channels;
+ std::vector<int32_t> num_columns_per_channel;
+ std::vector<int32_t> embedding_dim_per_channel;
+ ConcatEmbeddingsOptionsT()
+ : num_channels(0) {
+ }
+};
+
+struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef ConcatEmbeddingsOptionsT NativeTableType;
+ enum {
+ VT_NUM_CHANNELS = 4,
+ VT_NUM_COLUMNS_PER_CHANNEL = 6,
+ VT_EMBEDDING_DIM_PER_CHANNEL = 8
+ };
+ int32_t num_channels() const {
+ return GetField<int32_t>(VT_NUM_CHANNELS, 0);
+ }
+ const flatbuffers::Vector<int32_t> *num_columns_per_channel() const {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
+ }
+ const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int32_t>(verifier, VT_NUM_CHANNELS) &&
+ VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) &&
+ verifier.Verify(num_columns_per_channel()) &&
+ VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) &&
+ verifier.Verify(embedding_dim_per_channel()) &&
+ verifier.EndTable();
+ }
+ ConcatEmbeddingsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ConcatEmbeddingsOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ConcatEmbeddingsOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_num_channels(int32_t num_channels) {
+ fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0);
+ }
+ void add_num_columns_per_channel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel) {
+ fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel);
+ }
+ void add_embedding_dim_per_channel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel) {
+ fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL, embedding_dim_per_channel);
+ }
+ explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ConcatEmbeddingsOptionsBuilder &operator=(const ConcatEmbeddingsOptionsBuilder &);
+ flatbuffers::Offset<ConcatEmbeddingsOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t num_channels = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0) {
+ ConcatEmbeddingsOptionsBuilder builder_(_fbb);
+ builder_.add_embedding_dim_per_channel(embedding_dim_per_channel);
+ builder_.add_num_columns_per_channel(num_columns_per_channel);
+ builder_.add_num_channels(num_channels);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptionsDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t num_channels = 0,
+ const std::vector<int32_t> *num_columns_per_channel = nullptr,
+ const std::vector<int32_t> *embedding_dim_per_channel = nullptr) {
+ return tflite::CreateConcatEmbeddingsOptions(
+ _fbb,
+ num_channels,
+ num_columns_per_channel ? _fbb.CreateVector<int32_t>(*num_columns_per_channel) : 0,
+ embedding_dim_per_channel ? _fbb.CreateVector<int32_t>(*embedding_dim_per_channel) : 0);
+}
+
+flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LSHProjectionOptionsT : public flatbuffers::NativeTable {
+ typedef LSHProjectionOptions TableType;
+ LSHProjectionType type;
+ LSHProjectionOptionsT()
+ : type(LSHProjectionType_UNKNOWN) {
+ }
+};
+
+struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef LSHProjectionOptionsT NativeTableType;
+ enum {
+ VT_TYPE = 4
+ };
+ LSHProjectionType type() const {
+ return static_cast<LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_TYPE) &&
+ verifier.EndTable();
+ }
+ LSHProjectionOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LSHProjectionOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LSHProjectionOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_type(LSHProjectionType type) {
+ fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0);
+ }
+ explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ LSHProjectionOptionsBuilder &operator=(const LSHProjectionOptionsBuilder &);
+ flatbuffers::Offset<LSHProjectionOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LSHProjectionOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ LSHProjectionType type = LSHProjectionType_UNKNOWN) {
+ LSHProjectionOptionsBuilder builder_(_fbb);
+ builder_.add_type(type);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SVDFOptionsT : public flatbuffers::NativeTable {
+ typedef SVDFOptions TableType;
+ int32_t rank;
+ ActivationFunctionType fused_activation_function;
+ SVDFOptionsT()
+ : rank(0),
+ fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef SVDFOptionsT NativeTableType;
+ enum {
+ VT_RANK = 4,
+ VT_FUSED_ACTIVATION_FUNCTION = 6
+ };
+ int32_t rank() const {
+ return GetField<int32_t>(VT_RANK, 0);
+ }
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int32_t>(verifier, VT_RANK) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ SVDFOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SVDFOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SVDFOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_rank(int32_t rank) {
+ fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0);
+ }
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ SVDFOptionsBuilder &operator=(const SVDFOptionsBuilder &);
+ flatbuffers::Offset<SVDFOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SVDFOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t rank = 0,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ SVDFOptionsBuilder builder_(_fbb);
+ builder_.add_rank(rank);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct RNNOptionsT : public flatbuffers::NativeTable {
+ typedef RNNOptions TableType;
+ ActivationFunctionType fused_activation_function;
+ RNNOptionsT()
+ : fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef RNNOptionsT NativeTableType;
+ enum {
+ VT_FUSED_ACTIVATION_FUNCTION = 4
+ };
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ RNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<RNNOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct RNNOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ RNNOptionsBuilder &operator=(const RNNOptionsBuilder &);
+ flatbuffers::Offset<RNNOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<RNNOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<RNNOptions> CreateRNNOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ RNNOptionsBuilder builder_(_fbb);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<RNNOptions> CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct FullyConnectedOptionsT : public flatbuffers::NativeTable {
+ typedef FullyConnectedOptions TableType;
+ ActivationFunctionType fused_activation_function;
+ FullyConnectedOptionsT()
+ : fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef FullyConnectedOptionsT NativeTableType;
+ enum {
+ VT_FUSED_ACTIVATION_FUNCTION = 4
+ };
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<FullyConnectedOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct FullyConnectedOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &);
+ flatbuffers::Offset<FullyConnectedOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<FullyConnectedOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ FullyConnectedOptionsBuilder builder_(_fbb);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SoftmaxOptionsT : public flatbuffers::NativeTable {
+ typedef SoftmaxOptions TableType;
+ float beta;
+ SoftmaxOptionsT()
+ : beta(0.0f) {
+ }
+};
+
+struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef SoftmaxOptionsT NativeTableType;
+ enum {
+ VT_BETA = 4
+ };
+ float beta() const {
+ return GetField<float>(VT_BETA, 0.0f);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<float>(verifier, VT_BETA) &&
+ verifier.EndTable();
+ }
+ SoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SoftmaxOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SoftmaxOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_beta(float beta) {
+ fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f);
+ }
+ explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &);
+ flatbuffers::Offset<SoftmaxOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SoftmaxOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ float beta = 0.0f) {
+ SoftmaxOptionsBuilder builder_(_fbb);
+ builder_.add_beta(beta);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ConcatenationOptionsT : public flatbuffers::NativeTable {
+ typedef ConcatenationOptions TableType;
+ int32_t axis;
+ ActivationFunctionType fused_activation_function;
+ ConcatenationOptionsT()
+ : axis(0),
+ fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef ConcatenationOptionsT NativeTableType;
+ enum {
+ VT_AXIS = 4,
+ VT_FUSED_ACTIVATION_FUNCTION = 6
+ };
+ int32_t axis() const {
+ return GetField<int32_t>(VT_AXIS, 0);
+ }
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int32_t>(verifier, VT_AXIS) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ ConcatenationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ConcatenationOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ConcatenationOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_axis(int32_t axis) {
+ fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0);
+ }
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &);
+ flatbuffers::Offset<ConcatenationOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ConcatenationOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t axis = 0,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ ConcatenationOptionsBuilder builder_(_fbb);
+ builder_.add_axis(axis);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct AddOptionsT : public flatbuffers::NativeTable {
+ typedef AddOptions TableType;
+ ActivationFunctionType fused_activation_function;
+ AddOptionsT()
+ : fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef AddOptionsT NativeTableType;
+ enum {
+ VT_FUSED_ACTIVATION_FUNCTION = 4
+ };
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ AddOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<AddOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct AddOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ AddOptionsBuilder &operator=(const AddOptionsBuilder &);
+ flatbuffers::Offset<AddOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<AddOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<AddOptions> CreateAddOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ AddOptionsBuilder builder_(_fbb);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<AddOptions> CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct MulOptionsT : public flatbuffers::NativeTable {
+ typedef MulOptions TableType;
+ ActivationFunctionType fused_activation_function;
+ MulOptionsT()
+ : fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef MulOptionsT NativeTableType;
+ enum {
+ VT_FUSED_ACTIVATION_FUNCTION = 4
+ };
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ MulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<MulOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MulOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ MulOptionsBuilder &operator=(const MulOptionsBuilder &);
+ flatbuffers::Offset<MulOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<MulOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<MulOptions> CreateMulOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ MulOptionsBuilder builder_(_fbb);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<MulOptions> CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct L2NormOptionsT : public flatbuffers::NativeTable {
+ typedef L2NormOptions TableType;
+ ActivationFunctionType fused_activation_function;
+ L2NormOptionsT()
+ : fused_activation_function(ActivationFunctionType_NONE) {
+ }
+};
+
+struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef L2NormOptionsT NativeTableType;
+ enum {
+ VT_FUSED_ACTIVATION_FUNCTION = 4
+ };
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ verifier.EndTable();
+ }
+ L2NormOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<L2NormOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct L2NormOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &);
+ flatbuffers::Offset<L2NormOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<L2NormOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) {
+ L2NormOptionsBuilder builder_(_fbb);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LocalResponseNormalizationOptionsT : public flatbuffers::NativeTable {
+ typedef LocalResponseNormalizationOptions TableType;
+ int32_t radius;
+ float bias;
+ float alpha;
+ float beta;
+ LocalResponseNormalizationOptionsT()
+ : radius(0),
+ bias(0.0f),
+ alpha(0.0f),
+ beta(0.0f) {
+ }
+};
+
+struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef LocalResponseNormalizationOptionsT NativeTableType;
+ enum {
+ VT_RADIUS = 4,
+ VT_BIAS = 6,
+ VT_ALPHA = 8,
+ VT_BETA = 10
+ };
+ int32_t radius() const {
+ return GetField<int32_t>(VT_RADIUS, 0);
+ }
+ float bias() const {
+ return GetField<float>(VT_BIAS, 0.0f);
+ }
+ float alpha() const {
+ return GetField<float>(VT_ALPHA, 0.0f);
+ }
+ float beta() const {
+ return GetField<float>(VT_BETA, 0.0f);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int32_t>(verifier, VT_RADIUS) &&
+ VerifyField<float>(verifier, VT_BIAS) &&
+ VerifyField<float>(verifier, VT_ALPHA) &&
+ VerifyField<float>(verifier, VT_BETA) &&
+ verifier.EndTable();
+ }
+ LocalResponseNormalizationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LocalResponseNormalizationOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LocalResponseNormalizationOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_radius(int32_t radius) {
+ fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0);
+ }
+ void add_bias(float bias) {
+ fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f);
+ }
+ void add_alpha(float alpha) {
+ fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f);
+ }
+ void add_beta(float beta) {
+ fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f);
+ }
+ explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ LocalResponseNormalizationOptionsBuilder &operator=(const LocalResponseNormalizationOptionsBuilder &);
+ flatbuffers::Offset<LocalResponseNormalizationOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t radius = 0,
+ float bias = 0.0f,
+ float alpha = 0.0f,
+ float beta = 0.0f) {
+ LocalResponseNormalizationOptionsBuilder builder_(_fbb);
+ builder_.add_beta(beta);
+ builder_.add_alpha(alpha);
+ builder_.add_bias(bias);
+ builder_.add_radius(radius);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LSTMOptionsT : public flatbuffers::NativeTable {
+ typedef LSTMOptions TableType;
+ ActivationFunctionType fused_activation_function;
+ float cell_clip;
+ float proj_clip;
+ LSTMOptionsT()
+ : fused_activation_function(ActivationFunctionType_NONE),
+ cell_clip(0.0f),
+ proj_clip(0.0f) {
+ }
+};
+
+struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef LSTMOptionsT NativeTableType;
+ enum {
+ VT_FUSED_ACTIVATION_FUNCTION = 4,
+ VT_CELL_CLIP = 6,
+ VT_PROJ_CLIP = 8
+ };
+ ActivationFunctionType fused_activation_function() const {
+ return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ float cell_clip() const {
+ return GetField<float>(VT_CELL_CLIP, 0.0f);
+ }
+ float proj_clip() const {
+ return GetField<float>(VT_PROJ_CLIP, 0.0f);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<float>(verifier, VT_CELL_CLIP) &&
+ VerifyField<float>(verifier, VT_PROJ_CLIP) &&
+ verifier.EndTable();
+ }
+ LSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LSTMOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LSTMOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(ActivationFunctionType fused_activation_function) {
+ fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_cell_clip(float cell_clip) {
+ fbb_.AddElement<float>(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
+ }
+ void add_proj_clip(float proj_clip) {
+ fbb_.AddElement<float>(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
+ }
+ explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &);
+ flatbuffers::Offset<LSTMOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LSTMOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
+ float cell_clip = 0.0f,
+ float proj_clip = 0.0f) {
+ LSTMOptionsBuilder builder_(_fbb);
+ builder_.add_proj_clip(proj_clip);
+ builder_.add_cell_clip(cell_clip);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ResizeBilinearOptionsT : public flatbuffers::NativeTable {
+ typedef ResizeBilinearOptions TableType;
+ int32_t new_height;
+ int32_t new_width;
+ ResizeBilinearOptionsT()
+ : new_height(0),
+ new_width(0) {
+ }
+};
+
+struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef ResizeBilinearOptionsT NativeTableType;
+ enum {
+ VT_NEW_HEIGHT = 4,
+ VT_NEW_WIDTH = 6
+ };
+ int32_t new_height() const {
+ return GetField<int32_t>(VT_NEW_HEIGHT, 0);
+ }
+ int32_t new_width() const {
+ return GetField<int32_t>(VT_NEW_WIDTH, 0);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int32_t>(verifier, VT_NEW_HEIGHT) &&
+ VerifyField<int32_t>(verifier, VT_NEW_WIDTH) &&
+ verifier.EndTable();
+ }
+ ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ResizeBilinearOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ResizeBilinearOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_new_height(int32_t new_height) {
+ fbb_.AddElement<int32_t>(ResizeBilinearOptions::VT_NEW_HEIGHT, new_height, 0);
+ }
+ void add_new_width(int32_t new_width) {
+ fbb_.AddElement<int32_t>(ResizeBilinearOptions::VT_NEW_WIDTH, new_width, 0);
+ }
+ explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &);
+ flatbuffers::Offset<ResizeBilinearOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ResizeBilinearOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t new_height = 0,
+ int32_t new_width = 0) {
+ ResizeBilinearOptionsBuilder builder_(_fbb);
+ builder_.add_new_width(new_width);
+ builder_.add_new_height(new_height);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct CallOptionsT : public flatbuffers::NativeTable {
+ typedef CallOptions TableType;
+ uint32_t subgraph;
+ CallOptionsT()
+ : subgraph(0) {
+ }
+};
+
+struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef CallOptionsT NativeTableType;
+ enum {
+ VT_SUBGRAPH = 4
+ };
+ uint32_t subgraph() const {
+ return GetField<uint32_t>(VT_SUBGRAPH, 0);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint32_t>(verifier, VT_SUBGRAPH) &&
+ verifier.EndTable();
+ }
+ CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<CallOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct CallOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_subgraph(uint32_t subgraph) {
+ fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
+ }
+ explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ CallOptionsBuilder &operator=(const CallOptionsBuilder &);
+ flatbuffers::Offset<CallOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<CallOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<CallOptions> CreateCallOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ uint32_t subgraph = 0) {
+ CallOptionsBuilder builder_(_fbb);
+ builder_.add_subgraph(subgraph);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ReshapeOptionsT : public flatbuffers::NativeTable {
+ typedef ReshapeOptions TableType;
+ std::vector<int32_t> new_shape;
+ ReshapeOptionsT() {
+ }
+};
+
+struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef ReshapeOptionsT NativeTableType;
+ enum {
+ VT_NEW_SHAPE = 4
+ };
+ const flatbuffers::Vector<int32_t> *new_shape() const {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_NEW_SHAPE) &&
+ verifier.Verify(new_shape()) &&
+ verifier.EndTable();
+ }
+ ReshapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ReshapeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ReshapeOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape) {
+ fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape);
+ }
+ explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &);
+ flatbuffers::Offset<ReshapeOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ReshapeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0) {
+ ReshapeOptionsBuilder builder_(_fbb);
+ builder_.add_new_shape(new_shape);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<ReshapeOptions> CreateReshapeOptionsDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<int32_t> *new_shape = nullptr) {
+ return tflite::CreateReshapeOptions(
+ _fbb,
+ new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0);
+}
+
+flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SkipGramOptionsT : public flatbuffers::NativeTable {
+ typedef SkipGramOptions TableType;
+ int32_t ngram_size;
+ int32_t max_skip_size;
+ bool include_all_ngrams;
+ SkipGramOptionsT()
+ : ngram_size(0),
+ max_skip_size(0),
+ include_all_ngrams(false) {
+ }
+};
+
+struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef SkipGramOptionsT NativeTableType;
+ enum {
+ VT_NGRAM_SIZE = 4,
+ VT_MAX_SKIP_SIZE = 6,
+ VT_INCLUDE_ALL_NGRAMS = 8
+ };
+ int32_t ngram_size() const {
+ return GetField<int32_t>(VT_NGRAM_SIZE, 0);
+ }
+ int32_t max_skip_size() const {
+ return GetField<int32_t>(VT_MAX_SKIP_SIZE, 0);
+ }
+ bool include_all_ngrams() const {
+ return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int32_t>(verifier, VT_NGRAM_SIZE) &&
+ VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE) &&
+ VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS) &&
+ verifier.EndTable();
+ }
+ SkipGramOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SkipGramOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SkipGramOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_ngram_size(int32_t ngram_size) {
+ fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0);
+ }
+ void add_max_skip_size(int32_t max_skip_size) {
+ fbb_.AddElement<int32_t>(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0);
+ }
+ void add_include_all_ngrams(bool include_all_ngrams) {
+ fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, static_cast<uint8_t>(include_all_ngrams), 0);
+ }
+ explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &);
+ flatbuffers::Offset<SkipGramOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SkipGramOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t ngram_size = 0,
+ int32_t max_skip_size = 0,
+ bool include_all_ngrams = false) {
+ SkipGramOptionsBuilder builder_(_fbb);
+ builder_.add_max_skip_size(max_skip_size);
+ builder_.add_ngram_size(ngram_size);
+ builder_.add_include_all_ngrams(include_all_ngrams);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SpaceToDepthOptionsT : public flatbuffers::NativeTable {
+ typedef SpaceToDepthOptions TableType;
+ int32_t block_size;
+ SpaceToDepthOptionsT()
+ : block_size(0) {
+ }
+};
+
+struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef SpaceToDepthOptionsT NativeTableType;
+ enum {
+ VT_BLOCK_SIZE = 4
+ };
+ int32_t block_size() const {
+ return GetField<int32_t>(VT_BLOCK_SIZE, 0);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) &&
+ verifier.EndTable();
+ }
+ SpaceToDepthOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SpaceToDepthOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SpaceToDepthOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_block_size(int32_t block_size) {
+ fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0);
+ }
+ explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &);
+ flatbuffers::Offset<SpaceToDepthOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SpaceToDepthOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t block_size = 0) {
+ SpaceToDepthOptionsBuilder builder_(_fbb);
+ builder_.add_block_size(block_size);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct EmbeddingLookupSparseOptionsT : public flatbuffers::NativeTable {
+ typedef EmbeddingLookupSparseOptions TableType;
+ CombinerType combiner;
+ EmbeddingLookupSparseOptionsT()
+ : combiner(CombinerType_SUM) {
+ }
+};
+
+struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef EmbeddingLookupSparseOptionsT NativeTableType;
+ enum {
+ VT_COMBINER = 4
+ };
+ CombinerType combiner() const {
+ return static_cast<CombinerType>(GetField<int8_t>(VT_COMBINER, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_COMBINER) &&
+ verifier.EndTable();
+ }
+ EmbeddingLookupSparseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<EmbeddingLookupSparseOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct EmbeddingLookupSparseOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_combiner(CombinerType combiner) {
+ fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER, static_cast<int8_t>(combiner), 0);
+ }
+ explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &);
+ flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ CombinerType combiner = CombinerType_SUM) {
+ EmbeddingLookupSparseOptionsBuilder builder_(_fbb);
+ builder_.add_combiner(combiner);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct OperatorCodeT : public flatbuffers::NativeTable {
+ typedef OperatorCode TableType;
+ BuiltinOperator builtin_code;
+ std::string custom_code;
+ OperatorCodeT()
+ : builtin_code(BuiltinOperator_ADD) {
+ }
+};
+
+struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef OperatorCodeT NativeTableType;
+ enum {
+ VT_BUILTIN_CODE = 4,
+ VT_CUSTOM_CODE = 6
+ };
+ BuiltinOperator builtin_code() const {
+ return static_cast<BuiltinOperator>(GetField<int8_t>(VT_BUILTIN_CODE, 0));
+ }
+ const flatbuffers::String *custom_code() const {
+ return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_BUILTIN_CODE) &&
+ VerifyOffset(verifier, VT_CUSTOM_CODE) &&
+ verifier.Verify(custom_code()) &&
+ verifier.EndTable();
+ }
+ OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<OperatorCode> Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct OperatorCodeBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_builtin_code(BuiltinOperator builtin_code) {
+ fbb_.AddElement<int8_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<int8_t>(builtin_code), 0);
+ }
+ void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code) {
+ fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
+ }
+ explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ OperatorCodeBuilder &operator=(const OperatorCodeBuilder &);
+ flatbuffers::Offset<OperatorCode> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<OperatorCode>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<OperatorCode> CreateOperatorCode(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ BuiltinOperator builtin_code = BuiltinOperator_ADD,
+ flatbuffers::Offset<flatbuffers::String> custom_code = 0) {
+ OperatorCodeBuilder builder_(_fbb);
+ builder_.add_custom_code(custom_code);
+ builder_.add_builtin_code(builtin_code);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<OperatorCode> CreateOperatorCodeDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ BuiltinOperator builtin_code = BuiltinOperator_ADD,
+ const char *custom_code = nullptr) {
+ return tflite::CreateOperatorCode(
+ _fbb,
+ builtin_code,
+ custom_code ? _fbb.CreateString(custom_code) : 0);
+}
+
+flatbuffers::Offset<OperatorCode> CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct OperatorT : public flatbuffers::NativeTable {
+ typedef Operator TableType;
+ uint32_t opcode_index;
+ std::vector<int32_t> inputs;
+ std::vector<int32_t> outputs;
+ BuiltinOptionsUnion builtin_options;
+ std::vector<uint8_t> custom_options;
+ CustomOptionsFormat custom_options_format;
+ OperatorT()
+ : opcode_index(0),
+ custom_options_format(CustomOptionsFormat_FLEXBUFFERS) {
+ }
+};
+
+struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef OperatorT NativeTableType;
+ enum {
+ VT_OPCODE_INDEX = 4,
+ VT_INPUTS = 6,
+ VT_OUTPUTS = 8,
+ VT_BUILTIN_OPTIONS_TYPE = 10,
+ VT_BUILTIN_OPTIONS = 12,
+ VT_CUSTOM_OPTIONS = 14,
+ VT_CUSTOM_OPTIONS_FORMAT = 16
+ };
+ uint32_t opcode_index() const {
+ return GetField<uint32_t>(VT_OPCODE_INDEX, 0);
+ }
+ const flatbuffers::Vector<int32_t> *inputs() const {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
+ }
+ const flatbuffers::Vector<int32_t> *outputs() const {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
+ }
+ BuiltinOptions builtin_options_type() const {
+ return static_cast<BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
+ }
+ const void *builtin_options() const {
+ return GetPointer<const void *>(VT_BUILTIN_OPTIONS);
+ }
+ template<typename T> const T *builtin_options_as() const;
+ const Conv2DOptions *builtin_options_as_Conv2DOptions() const {
+ return builtin_options_type() == BuiltinOptions_Conv2DOptions ? static_cast<const Conv2DOptions *>(builtin_options()) : nullptr;
+ }
+ const DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const {
+ return builtin_options_type() == BuiltinOptions_DepthwiseConv2DOptions ? static_cast<const DepthwiseConv2DOptions *>(builtin_options()) : nullptr;
+ }
+ const ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const {
+ return builtin_options_type() == BuiltinOptions_ConcatEmbeddingsOptions ? static_cast<const ConcatEmbeddingsOptions *>(builtin_options()) : nullptr;
+ }
+ const LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const {
+ return builtin_options_type() == BuiltinOptions_LSHProjectionOptions ? static_cast<const LSHProjectionOptions *>(builtin_options()) : nullptr;
+ }
+ const Pool2DOptions *builtin_options_as_Pool2DOptions() const {
+ return builtin_options_type() == BuiltinOptions_Pool2DOptions ? static_cast<const Pool2DOptions *>(builtin_options()) : nullptr;
+ }
+ const SVDFOptions *builtin_options_as_SVDFOptions() const {
+ return builtin_options_type() == BuiltinOptions_SVDFOptions ? static_cast<const SVDFOptions *>(builtin_options()) : nullptr;
+ }
+ const RNNOptions *builtin_options_as_RNNOptions() const {
+ return builtin_options_type() == BuiltinOptions_RNNOptions ? static_cast<const RNNOptions *>(builtin_options()) : nullptr;
+ }
+ const FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const {
+ return builtin_options_type() == BuiltinOptions_FullyConnectedOptions ? static_cast<const FullyConnectedOptions *>(builtin_options()) : nullptr;
+ }
+ const SoftmaxOptions *builtin_options_as_SoftmaxOptions() const {
+ return builtin_options_type() == BuiltinOptions_SoftmaxOptions ? static_cast<const SoftmaxOptions *>(builtin_options()) : nullptr;
+ }
+ const ConcatenationOptions *builtin_options_as_ConcatenationOptions() const {
+ return builtin_options_type() == BuiltinOptions_ConcatenationOptions ? static_cast<const ConcatenationOptions *>(builtin_options()) : nullptr;
+ }
+ const AddOptions *builtin_options_as_AddOptions() const {
+ return builtin_options_type() == BuiltinOptions_AddOptions ? static_cast<const AddOptions *>(builtin_options()) : nullptr;
+ }
+ const L2NormOptions *builtin_options_as_L2NormOptions() const {
+ return builtin_options_type() == BuiltinOptions_L2NormOptions ? static_cast<const L2NormOptions *>(builtin_options()) : nullptr;
+ }
+ const LocalResponseNormalizationOptions *builtin_options_as_LocalResponseNormalizationOptions() const {
+ return builtin_options_type() == BuiltinOptions_LocalResponseNormalizationOptions ? static_cast<const LocalResponseNormalizationOptions *>(builtin_options()) : nullptr;
+ }
+ const LSTMOptions *builtin_options_as_LSTMOptions() const {
+ return builtin_options_type() == BuiltinOptions_LSTMOptions ? static_cast<const LSTMOptions *>(builtin_options()) : nullptr;
+ }
+ const ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const {
+ return builtin_options_type() == BuiltinOptions_ResizeBilinearOptions ? static_cast<const ResizeBilinearOptions *>(builtin_options()) : nullptr;
+ }
+ const CallOptions *builtin_options_as_CallOptions() const {
+ return builtin_options_type() == BuiltinOptions_CallOptions ? static_cast<const CallOptions *>(builtin_options()) : nullptr;
+ }
+ const ReshapeOptions *builtin_options_as_ReshapeOptions() const {
+ return builtin_options_type() == BuiltinOptions_ReshapeOptions ? static_cast<const ReshapeOptions *>(builtin_options()) : nullptr;
+ }
+ const SkipGramOptions *builtin_options_as_SkipGramOptions() const {
+ return builtin_options_type() == BuiltinOptions_SkipGramOptions ? static_cast<const SkipGramOptions *>(builtin_options()) : nullptr;
+ }
+ const SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const {
+ return builtin_options_type() == BuiltinOptions_SpaceToDepthOptions ? static_cast<const SpaceToDepthOptions *>(builtin_options()) : nullptr;
+ }
+ const EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const {
+ return builtin_options_type() == BuiltinOptions_EmbeddingLookupSparseOptions ? static_cast<const EmbeddingLookupSparseOptions *>(builtin_options()) : nullptr;
+ }
+ const MulOptions *builtin_options_as_MulOptions() const {
+ return builtin_options_type() == BuiltinOptions_MulOptions ? static_cast<const MulOptions *>(builtin_options()) : nullptr;
+ }
+ const flatbuffers::Vector<uint8_t> *custom_options() const {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
+ }
+ CustomOptionsFormat custom_options_format() const {
+ return static_cast<CustomOptionsFormat>(GetField<int8_t>(VT_CUSTOM_OPTIONS_FORMAT, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint32_t>(verifier, VT_OPCODE_INDEX) &&
+ VerifyOffset(verifier, VT_INPUTS) &&
+ verifier.Verify(inputs()) &&
+ VerifyOffset(verifier, VT_OUTPUTS) &&
+ verifier.Verify(outputs()) &&
+ VerifyField<uint8_t>(verifier, VT_BUILTIN_OPTIONS_TYPE) &&
+ VerifyOffset(verifier, VT_BUILTIN_OPTIONS) &&
+ VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) &&
+ VerifyOffset(verifier, VT_CUSTOM_OPTIONS) &&
+ verifier.Verify(custom_options()) &&
+ VerifyField<int8_t>(verifier, VT_CUSTOM_OPTIONS_FORMAT) &&
+ verifier.EndTable();
+ }
+ OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Operator> Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+template<> inline const Conv2DOptions *Operator::builtin_options_as<Conv2DOptions>() const {
+ return builtin_options_as_Conv2DOptions();
+}
+
+template<> inline const DepthwiseConv2DOptions *Operator::builtin_options_as<DepthwiseConv2DOptions>() const {
+ return builtin_options_as_DepthwiseConv2DOptions();
+}
+
+template<> inline const ConcatEmbeddingsOptions *Operator::builtin_options_as<ConcatEmbeddingsOptions>() const {
+ return builtin_options_as_ConcatEmbeddingsOptions();
+}
+
+template<> inline const LSHProjectionOptions *Operator::builtin_options_as<LSHProjectionOptions>() const {
+ return builtin_options_as_LSHProjectionOptions();
+}
+
+template<> inline const Pool2DOptions *Operator::builtin_options_as<Pool2DOptions>() const {
+ return builtin_options_as_Pool2DOptions();
+}
+
+template<> inline const SVDFOptions *Operator::builtin_options_as<SVDFOptions>() const {
+ return builtin_options_as_SVDFOptions();
+}
+
+template<> inline const RNNOptions *Operator::builtin_options_as<RNNOptions>() const {
+ return builtin_options_as_RNNOptions();
+}
+
+template<> inline const FullyConnectedOptions *Operator::builtin_options_as<FullyConnectedOptions>() const {
+ return builtin_options_as_FullyConnectedOptions();
+}
+
+template<> inline const SoftmaxOptions *Operator::builtin_options_as<SoftmaxOptions>() const {
+ return builtin_options_as_SoftmaxOptions();
+}
+
+template<> inline const ConcatenationOptions *Operator::builtin_options_as<ConcatenationOptions>() const {
+ return builtin_options_as_ConcatenationOptions();
+}
+
+template<> inline const AddOptions *Operator::builtin_options_as<AddOptions>() const {
+ return builtin_options_as_AddOptions();
+}
+
+template<> inline const L2NormOptions *Operator::builtin_options_as<L2NormOptions>() const {
+ return builtin_options_as_L2NormOptions();
+}
+
+template<> inline const LocalResponseNormalizationOptions *Operator::builtin_options_as<LocalResponseNormalizationOptions>() const {
+ return builtin_options_as_LocalResponseNormalizationOptions();
+}
+
+template<> inline const LSTMOptions *Operator::builtin_options_as<LSTMOptions>() const {
+ return builtin_options_as_LSTMOptions();
+}
+
+template<> inline const ResizeBilinearOptions *Operator::builtin_options_as<ResizeBilinearOptions>() const {
+ return builtin_options_as_ResizeBilinearOptions();
+}
+
+template<> inline const CallOptions *Operator::builtin_options_as<CallOptions>() const {
+ return builtin_options_as_CallOptions();
+}
+
+template<> inline const ReshapeOptions *Operator::builtin_options_as<ReshapeOptions>() const {
+ return builtin_options_as_ReshapeOptions();
+}
+
+template<> inline const SkipGramOptions *Operator::builtin_options_as<SkipGramOptions>() const {
+ return builtin_options_as_SkipGramOptions();
+}
+
+template<> inline const SpaceToDepthOptions *Operator::builtin_options_as<SpaceToDepthOptions>() const {
+ return builtin_options_as_SpaceToDepthOptions();
+}
+
+template<> inline const EmbeddingLookupSparseOptions *Operator::builtin_options_as<EmbeddingLookupSparseOptions>() const {
+ return builtin_options_as_EmbeddingLookupSparseOptions();
+}
+
+template<> inline const MulOptions *Operator::builtin_options_as<MulOptions>() const {
+ return builtin_options_as_MulOptions();
+}
+
+struct OperatorBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_opcode_index(uint32_t opcode_index) {
+ fbb_.AddElement<uint32_t>(Operator::VT_OPCODE_INDEX, opcode_index, 0);
+ }
+ void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs) {
+ fbb_.AddOffset(Operator::VT_INPUTS, inputs);
+ }
+ void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs) {
+ fbb_.AddOffset(Operator::VT_OUTPUTS, outputs);
+ }
+ void add_builtin_options_type(BuiltinOptions builtin_options_type) {
+ fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_TYPE, static_cast<uint8_t>(builtin_options_type), 0);
+ }
+ void add_builtin_options(flatbuffers::Offset<void> builtin_options) {
+ fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options);
+ }
+ void add_custom_options(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options) {
+ fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options);
+ }
+ void add_custom_options_format(CustomOptionsFormat custom_options_format) {
+ fbb_.AddElement<int8_t>(Operator::VT_CUSTOM_OPTIONS_FORMAT, static_cast<int8_t>(custom_options_format), 0);
+ }
+ explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ OperatorBuilder &operator=(const OperatorBuilder &);
+ flatbuffers::Offset<Operator> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Operator>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Operator> CreateOperator(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ uint32_t opcode_index = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
+ BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
+ flatbuffers::Offset<void> builtin_options = 0,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0,
+ CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS) {
+ OperatorBuilder builder_(_fbb);
+ builder_.add_custom_options(custom_options);
+ builder_.add_builtin_options(builtin_options);
+ builder_.add_outputs(outputs);
+ builder_.add_inputs(inputs);
+ builder_.add_opcode_index(opcode_index);
+ builder_.add_custom_options_format(custom_options_format);
+ builder_.add_builtin_options_type(builtin_options_type);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Operator> CreateOperatorDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ uint32_t opcode_index = 0,
+ const std::vector<int32_t> *inputs = nullptr,
+ const std::vector<int32_t> *outputs = nullptr,
+ BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
+ flatbuffers::Offset<void> builtin_options = 0,
+ const std::vector<uint8_t> *custom_options = nullptr,
+ CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS) {
+ return tflite::CreateOperator(
+ _fbb,
+ opcode_index,
+ inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
+ outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0,
+ builtin_options_type,
+ builtin_options,
+ custom_options ? _fbb.CreateVector<uint8_t>(*custom_options) : 0,
+ custom_options_format);
+}
+
+flatbuffers::Offset<Operator> CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SubGraphT : public flatbuffers::NativeTable {
+ typedef SubGraph TableType;
+ std::vector<std::unique_ptr<TensorT>> tensors;
+ std::vector<int32_t> inputs;
+ std::vector<int32_t> outputs;
+ std::vector<std::unique_ptr<OperatorT>> operators;
+ std::string name;
+ SubGraphT() {
+ }
+};
+
+struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef SubGraphT NativeTableType;
+ enum {
+ VT_TENSORS = 4,
+ VT_INPUTS = 6,
+ VT_OUTPUTS = 8,
+ VT_OPERATORS = 10,
+ VT_NAME = 12
+ };
+ const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensors() const {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *>(VT_TENSORS);
+ }
+ const flatbuffers::Vector<int32_t> *inputs() const {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
+ }
+ const flatbuffers::Vector<int32_t> *outputs() const {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<Operator>> *operators() const {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Operator>> *>(VT_OPERATORS);
+ }
+ const flatbuffers::String *name() const {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_TENSORS) &&
+ verifier.Verify(tensors()) &&
+ verifier.VerifyVectorOfTables(tensors()) &&
+ VerifyOffset(verifier, VT_INPUTS) &&
+ verifier.Verify(inputs()) &&
+ VerifyOffset(verifier, VT_OUTPUTS) &&
+ verifier.Verify(outputs()) &&
+ VerifyOffset(verifier, VT_OPERATORS) &&
+ verifier.Verify(operators()) &&
+ verifier.VerifyVectorOfTables(operators()) &&
+ VerifyOffset(verifier, VT_NAME) &&
+ verifier.Verify(name()) &&
+ verifier.EndTable();
+ }
+ SubGraphT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SubGraph> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SubGraphBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors) {
+ fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
+ }
+ void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs) {
+ fbb_.AddOffset(SubGraph::VT_INPUTS, inputs);
+ }
+ void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs) {
+ fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs);
+ }
+ void add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators) {
+ fbb_.AddOffset(SubGraph::VT_OPERATORS, operators);
+ }
+ void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+ fbb_.AddOffset(SubGraph::VT_NAME, name);
+ }
+ explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ SubGraphBuilder &operator=(const SubGraphBuilder &);
+ flatbuffers::Offset<SubGraph> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SubGraph>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SubGraph> CreateSubGraph(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators = 0,
+ flatbuffers::Offset<flatbuffers::String> name = 0) {
+ SubGraphBuilder builder_(_fbb);
+ builder_.add_name(name);
+ builder_.add_operators(operators);
+ builder_.add_outputs(outputs);
+ builder_.add_inputs(inputs);
+ builder_.add_tensors(tensors);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<SubGraph> CreateSubGraphDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<flatbuffers::Offset<Tensor>> *tensors = nullptr,
+ const std::vector<int32_t> *inputs = nullptr,
+ const std::vector<int32_t> *outputs = nullptr,
+ const std::vector<flatbuffers::Offset<Operator>> *operators = nullptr,
+ const char *name = nullptr) {
+ return tflite::CreateSubGraph(
+ _fbb,
+ tensors ? _fbb.CreateVector<flatbuffers::Offset<Tensor>>(*tensors) : 0,
+ inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
+ outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0,
+ operators ? _fbb.CreateVector<flatbuffers::Offset<Operator>>(*operators) : 0,
+ name ? _fbb.CreateString(name) : 0);
+}
+
+flatbuffers::Offset<SubGraph> CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct BufferT : public flatbuffers::NativeTable {
+ typedef Buffer TableType;
+ std::vector<uint8_t> data;
+ BufferT() {
+ }
+};
+
+struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef BufferT NativeTableType;
+ enum {
+ VT_DATA = 4
+ };
+ const flatbuffers::Vector<uint8_t> *data() const {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_DATA) &&
+ verifier.Verify(data()) &&
+ verifier.EndTable();
+ }
+ BufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Buffer> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct BufferBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data) {
+ fbb_.AddOffset(Buffer::VT_DATA, data);
+ }
+ explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ BufferBuilder &operator=(const BufferBuilder &);
+ flatbuffers::Offset<Buffer> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Buffer>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Buffer> CreateBuffer(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0) {
+ BufferBuilder builder_(_fbb);
+ builder_.add_data(data);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Buffer> CreateBufferDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<uint8_t> *data = nullptr) {
+ return tflite::CreateBuffer(
+ _fbb,
+ data ? _fbb.CreateVector<uint8_t>(*data) : 0);
+}
+
+flatbuffers::Offset<Buffer> CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ModelT : public flatbuffers::NativeTable {
+ typedef Model TableType;
+ uint32_t version;
+ std::vector<std::unique_ptr<OperatorCodeT>> operator_codes;
+ std::vector<std::unique_ptr<SubGraphT>> subgraphs;
+ std::string description;
+ std::vector<std::unique_ptr<BufferT>> buffers;
+ ModelT()
+ : version(0) {
+ }
+};
+
+struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef ModelT NativeTableType;
+ enum {
+ VT_VERSION = 4,
+ VT_OPERATOR_CODES = 6,
+ VT_SUBGRAPHS = 8,
+ VT_DESCRIPTION = 10,
+ VT_BUFFERS = 12
+ };
+ uint32_t version() const {
+ return GetField<uint32_t>(VT_VERSION, 0);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *operator_codes() const {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *>(VT_OPERATOR_CODES);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *subgraphs() const {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *>(VT_SUBGRAPHS);
+ }
+ const flatbuffers::String *description() const {
+ return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *buffers() const {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *>(VT_BUFFERS);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint32_t>(verifier, VT_VERSION) &&
+ VerifyOffset(verifier, VT_OPERATOR_CODES) &&
+ verifier.Verify(operator_codes()) &&
+ verifier.VerifyVectorOfTables(operator_codes()) &&
+ VerifyOffset(verifier, VT_SUBGRAPHS) &&
+ verifier.Verify(subgraphs()) &&
+ verifier.VerifyVectorOfTables(subgraphs()) &&
+ VerifyOffset(verifier, VT_DESCRIPTION) &&
+ verifier.Verify(description()) &&
+ VerifyOffset(verifier, VT_BUFFERS) &&
+ verifier.Verify(buffers()) &&
+ verifier.VerifyVectorOfTables(buffers()) &&
+ verifier.EndTable();
+ }
+ ModelT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Model> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ModelBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_version(uint32_t version) {
+ fbb_.AddElement<uint32_t>(Model::VT_VERSION, version, 0);
+ }
+ void add_operator_codes(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes) {
+ fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes);
+ }
+ void add_subgraphs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs) {
+ fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs);
+ }
+ void add_description(flatbuffers::Offset<flatbuffers::String> description) {
+ fbb_.AddOffset(Model::VT_DESCRIPTION, description);
+ }
+ void add_buffers(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers) {
+ fbb_.AddOffset(Model::VT_BUFFERS, buffers);
+ }
+ explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ModelBuilder &operator=(const ModelBuilder &);
+ flatbuffers::Offset<Model> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Model>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Model> CreateModel(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ uint32_t version = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs = 0,
+ flatbuffers::Offset<flatbuffers::String> description = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers = 0) {
+ ModelBuilder builder_(_fbb);
+ builder_.add_buffers(buffers);
+ builder_.add_description(description);
+ builder_.add_subgraphs(subgraphs);
+ builder_.add_operator_codes(operator_codes);
+ builder_.add_version(version);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Model> CreateModelDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ uint32_t version = 0,
+ const std::vector<flatbuffers::Offset<OperatorCode>> *operator_codes = nullptr,
+ const std::vector<flatbuffers::Offset<SubGraph>> *subgraphs = nullptr,
+ const char *description = nullptr,
+ const std::vector<flatbuffers::Offset<Buffer>> *buffers = nullptr) {
+ return tflite::CreateModel(
+ _fbb,
+ version,
+ operator_codes ? _fbb.CreateVector<flatbuffers::Offset<OperatorCode>>(*operator_codes) : 0,
+ subgraphs ? _fbb.CreateVector<flatbuffers::Offset<SubGraph>>(*subgraphs) : 0,
+ description ? _fbb.CreateString(description) : 0,
+ buffers ? _fbb.CreateVector<flatbuffers::Offset<Buffer>>(*buffers) : 0);
+}
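+
+// Usage sketch: one possible way to serialize a minimal, empty model with the
+// direct builder above. The builder, vector names, and the version value are
+// illustrative assumptions, not part of any converter output.
+//
+//   flatbuffers::FlatBufferBuilder fbb;
+//   std::vector<flatbuffers::Offset<tflite::OperatorCode>> op_codes;
+//   std::vector<flatbuffers::Offset<tflite::SubGraph>> subgraphs;
+//   std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+//   auto model = tflite::CreateModelDirect(
+//       fbb, /*version=*/3, &op_codes, &subgraphs, "empty model", &buffers);
+//   tflite::FinishModelBuffer(fbb, model);  // prepends the "TFL3" identifier
+//   // fbb.GetBufferPointer() / fbb.GetSize() now describe the serialized model.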
+
+flatbuffers::Offset<Model> CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+inline QuantizationParametersT *QuantizationParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new QuantizationParametersT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } };
+ { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } };
+ { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } };
+ { auto _e = zero_point(); if (_e) { _o->zero_point.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zero_point[_i] = _e->Get(_i); } } };
+}
+
+inline flatbuffers::Offset<QuantizationParameters> QuantizationParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateQuantizationParameters(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizationParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0;
+ auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0;
+ auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0;
+ auto _zero_point = _o->zero_point.size() ? _fbb.CreateVector(_o->zero_point) : 0;
+ return tflite::CreateQuantizationParameters(
+ _fbb,
+ _min,
+ _max,
+ _scale,
+ _zero_point);
+}
+
+inline TensorT *Tensor::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new TensorT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void Tensor::UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } };
+ { auto _e = type(); _o->type = _e; };
+ { auto _e = buffer(); _o->buffer = _e; };
+ { auto _e = name(); if (_e) _o->name = _e->str(); };
+ { auto _e = quantization(); if (_e) _o->quantization = std::unique_ptr<QuantizationParametersT>(_e->UnPack(_resolver)); };
+}
+
+inline flatbuffers::Offset<Tensor> Tensor::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateTensor(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Tensor> CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0;
+ auto _type = _o->type;
+ auto _buffer = _o->buffer;
+ auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
+ auto _quantization = _o->quantization ? CreateQuantizationParameters(_fbb, _o->quantization.get(), _rehasher) : 0;
+ return tflite::CreateTensor(
+ _fbb,
+ _shape,
+ _type,
+ _buffer,
+ _name,
+ _quantization);
+}
+
+inline Conv2DOptionsT *Conv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new Conv2DOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = padding(); _o->padding = _e; };
+ { auto _e = stride_w(); _o->stride_w = _e; };
+ { auto _e = stride_h(); _o->stride_h = _e; };
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<Conv2DOptions> Conv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateConv2DOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _padding = _o->padding;
+ auto _stride_w = _o->stride_w;
+ auto _stride_h = _o->stride_h;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreateConv2DOptions(
+ _fbb,
+ _padding,
+ _stride_w,
+ _stride_h,
+ _fused_activation_function);
+}
+
+inline Pool2DOptionsT *Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new Pool2DOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = padding(); _o->padding = _e; };
+ { auto _e = stride_w(); _o->stride_w = _e; };
+ { auto _e = stride_h(); _o->stride_h = _e; };
+ { auto _e = filter_width(); _o->filter_width = _e; };
+ { auto _e = filter_height(); _o->filter_height = _e; };
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<Pool2DOptions> Pool2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreatePool2DOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _padding = _o->padding;
+ auto _stride_w = _o->stride_w;
+ auto _stride_h = _o->stride_h;
+ auto _filter_width = _o->filter_width;
+ auto _filter_height = _o->filter_height;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreatePool2DOptions(
+ _fbb,
+ _padding,
+ _stride_w,
+ _stride_h,
+ _filter_width,
+ _filter_height,
+ _fused_activation_function);
+}
+
+inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new DepthwiseConv2DOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = padding(); _o->padding = _e; };
+ { auto _e = stride_w(); _o->stride_w = _e; };
+ { auto _e = stride_h(); _o->stride_h = _e; };
+ { auto _e = depth_multiplier(); _o->depth_multiplier = _e; };
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<DepthwiseConv2DOptions> DepthwiseConv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _padding = _o->padding;
+ auto _stride_w = _o->stride_w;
+ auto _stride_h = _o->stride_h;
+ auto _depth_multiplier = _o->depth_multiplier;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreateDepthwiseConv2DOptions(
+ _fbb,
+ _padding,
+ _stride_w,
+ _stride_h,
+ _depth_multiplier,
+ _fused_activation_function);
+}
+
+inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new ConcatEmbeddingsOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = num_channels(); _o->num_channels = _e; };
+ { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } };
+ { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } };
+}
+
+inline flatbuffers::Offset<ConcatEmbeddingsOptions> ConcatEmbeddingsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _num_channels = _o->num_channels;
+ auto _num_columns_per_channel = _o->num_columns_per_channel.size() ? _fbb.CreateVector(_o->num_columns_per_channel) : 0;
+ auto _embedding_dim_per_channel = _o->embedding_dim_per_channel.size() ? _fbb.CreateVector(_o->embedding_dim_per_channel) : 0;
+ return tflite::CreateConcatEmbeddingsOptions(
+ _fbb,
+ _num_channels,
+ _num_columns_per_channel,
+ _embedding_dim_per_channel);
+}
+
+inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new LSHProjectionOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = type(); _o->type = _e; };
+}
+
+inline flatbuffers::Offset<LSHProjectionOptions> LSHProjectionOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateLSHProjectionOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _type = _o->type;
+ return tflite::CreateLSHProjectionOptions(
+ _fbb,
+ _type);
+}
+
+inline SVDFOptionsT *SVDFOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new SVDFOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = rank(); _o->rank = _e; };
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<SVDFOptions> SVDFOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateSVDFOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _rank = _o->rank;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreateSVDFOptions(
+ _fbb,
+ _rank,
+ _fused_activation_function);
+}
+
+inline RNNOptionsT *RNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new RNNOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void RNNOptions::UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<RNNOptions> RNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateRNNOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<RNNOptions> CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreateRNNOptions(
+ _fbb,
+ _fused_activation_function);
+}
+
+inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new FullyConnectedOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<FullyConnectedOptions> FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateFullyConnectedOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreateFullyConnectedOptions(
+ _fbb,
+ _fused_activation_function);
+}
+
+inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new SoftmaxOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = beta(); _o->beta = _e; };
+}
+
+inline flatbuffers::Offset<SoftmaxOptions> SoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateSoftmaxOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _beta = _o->beta;
+ return tflite::CreateSoftmaxOptions(
+ _fbb,
+ _beta);
+}
+
+inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new ConcatenationOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = axis(); _o->axis = _e; };
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<ConcatenationOptions> ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateConcatenationOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _axis = _o->axis;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreateConcatenationOptions(
+ _fbb,
+ _axis,
+ _fused_activation_function);
+}
+
+inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new AddOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void AddOptions::UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<AddOptions> AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateAddOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<AddOptions> CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreateAddOptions(
+ _fbb,
+ _fused_activation_function);
+}
+
+inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new MulOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void MulOptions::UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<MulOptions> MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateMulOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<MulOptions> CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreateMulOptions(
+ _fbb,
+ _fused_activation_function);
+}
+
+inline L2NormOptionsT *L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new L2NormOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+}
+
+inline flatbuffers::Offset<L2NormOptions> L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateL2NormOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return tflite::CreateL2NormOptions(
+ _fbb,
+ _fused_activation_function);
+}
+
+inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new LocalResponseNormalizationOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = radius(); _o->radius = _e; };
+ { auto _e = bias(); _o->bias = _e; };
+ { auto _e = alpha(); _o->alpha = _e; };
+ { auto _e = beta(); _o->beta = _e; };
+}
+
+inline flatbuffers::Offset<LocalResponseNormalizationOptions> LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _radius = _o->radius;
+ auto _bias = _o->bias;
+ auto _alpha = _o->alpha;
+ auto _beta = _o->beta;
+ return tflite::CreateLocalResponseNormalizationOptions(
+ _fbb,
+ _radius,
+ _bias,
+ _alpha,
+ _beta);
+}
+
+inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new LSTMOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = fused_activation_function(); _o->fused_activation_function = _e; };
+ { auto _e = cell_clip(); _o->cell_clip = _e; };
+ { auto _e = proj_clip(); _o->proj_clip = _e; };
+}
+
+inline flatbuffers::Offset<LSTMOptions> LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateLSTMOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _cell_clip = _o->cell_clip;
+ auto _proj_clip = _o->proj_clip;
+ return tflite::CreateLSTMOptions(
+ _fbb,
+ _fused_activation_function,
+ _cell_clip,
+ _proj_clip);
+}
+
+inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new ResizeBilinearOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = new_height(); _o->new_height = _e; };
+ { auto _e = new_width(); _o->new_width = _e; };
+}
+
+inline flatbuffers::Offset<ResizeBilinearOptions> ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateResizeBilinearOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _new_height = _o->new_height;
+ auto _new_width = _o->new_width;
+ return tflite::CreateResizeBilinearOptions(
+ _fbb,
+ _new_height,
+ _new_width);
+}
+
+inline CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new CallOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void CallOptions::UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = subgraph(); _o->subgraph = _e; };
+}
+
+inline flatbuffers::Offset<CallOptions> CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateCallOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _subgraph = _o->subgraph;
+ return tflite::CreateCallOptions(
+ _fbb,
+ _subgraph);
+}
+
+inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new ReshapeOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } };
+}
+
+inline flatbuffers::Offset<ReshapeOptions> ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateReshapeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _new_shape = _o->new_shape.size() ? _fbb.CreateVector(_o->new_shape) : 0;
+ return tflite::CreateReshapeOptions(
+ _fbb,
+ _new_shape);
+}
+
+inline SkipGramOptionsT *SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new SkipGramOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = ngram_size(); _o->ngram_size = _e; };
+ { auto _e = max_skip_size(); _o->max_skip_size = _e; };
+ { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; };
+}
+
+inline flatbuffers::Offset<SkipGramOptions> SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateSkipGramOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _ngram_size = _o->ngram_size;
+ auto _max_skip_size = _o->max_skip_size;
+ auto _include_all_ngrams = _o->include_all_ngrams;
+ return tflite::CreateSkipGramOptions(
+ _fbb,
+ _ngram_size,
+ _max_skip_size,
+ _include_all_ngrams);
+}
+
+inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new SpaceToDepthOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = block_size(); _o->block_size = _e; };
+}
+
+inline flatbuffers::Offset<SpaceToDepthOptions> SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateSpaceToDepthOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _block_size = _o->block_size;
+ return tflite::CreateSpaceToDepthOptions(
+ _fbb,
+ _block_size);
+}
+
+inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new EmbeddingLookupSparseOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = combiner(); _o->combiner = _e; };
+}
+
+inline flatbuffers::Offset<EmbeddingLookupSparseOptions> EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _combiner = _o->combiner;
+ return tflite::CreateEmbeddingLookupSparseOptions(
+ _fbb,
+ _combiner);
+}
+
+inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new OperatorCodeT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = builtin_code(); _o->builtin_code = _e; };
+ { auto _e = custom_code(); if (_e) _o->custom_code = _e->str(); };
+}
+
+inline flatbuffers::Offset<OperatorCode> OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateOperatorCode(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<OperatorCode> CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _builtin_code = _o->builtin_code;
+ auto _custom_code = _o->custom_code.empty() ? 0 : _fbb.CreateString(_o->custom_code);
+ return tflite::CreateOperatorCode(
+ _fbb,
+ _builtin_code,
+ _custom_code);
+}
+
+inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new OperatorT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void Operator::UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = opcode_index(); _o->opcode_index = _e; };
+ { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } };
+ { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } };
+ { auto _e = builtin_options_type(); _o->builtin_options.type = _e; };
+ { auto _e = builtin_options(); if (_e) _o->builtin_options.value = BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver); };
+ { auto _e = custom_options(); if (_e) { _o->custom_options.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->custom_options[_i] = _e->Get(_i); } } };
+ { auto _e = custom_options_format(); _o->custom_options_format = _e; };
+}
+
+inline flatbuffers::Offset<Operator> Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateOperator(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Operator> CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _opcode_index = _o->opcode_index;
+ auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0;
+ auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0;
+ auto _builtin_options_type = _o->builtin_options.type;
+ auto _builtin_options = _o->builtin_options.Pack(_fbb);
+ auto _custom_options = _o->custom_options.size() ? _fbb.CreateVector(_o->custom_options) : 0;
+ auto _custom_options_format = _o->custom_options_format;
+ return tflite::CreateOperator(
+ _fbb,
+ _opcode_index,
+ _inputs,
+ _outputs,
+ _builtin_options_type,
+ _builtin_options,
+ _custom_options,
+ _custom_options_format);
+}
+
+inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new SubGraphT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void SubGraph::UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->tensors[_i] = std::unique_ptr<TensorT>(_e->Get(_i)->UnPack(_resolver)); } } };
+ { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } };
+ { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } };
+ { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operators[_i] = std::unique_ptr<OperatorT>(_e->Get(_i)->UnPack(_resolver)); } } };
+ { auto _e = name(); if (_e) _o->name = _e->str(); };
+}
+
+inline flatbuffers::Offset<SubGraph> SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateSubGraph(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SubGraph> CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _tensors = _o->tensors.size() ? _fbb.CreateVector<flatbuffers::Offset<Tensor>> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0;
+ auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0;
+ auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0;
+ auto _operators = _o->operators.size() ? _fbb.CreateVector<flatbuffers::Offset<Operator>> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0;
+ auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
+ return tflite::CreateSubGraph(
+ _fbb,
+ _tensors,
+ _inputs,
+ _outputs,
+ _operators,
+ _name);
+}
+
+inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new BufferT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = data(); if (_e) { _o->data.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->data[_i] = _e->Get(_i); } } };
+}
+
+inline flatbuffers::Offset<Buffer> Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateBuffer(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Buffer> CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0;
+ return tflite::CreateBuffer(
+ _fbb,
+ _data);
+}
+
+inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new ModelT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = version(); _o->version = _e; };
+ { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operator_codes[_i] = std::unique_ptr<OperatorCodeT>(_e->Get(_i)->UnPack(_resolver)); } } };
+ { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->subgraphs[_i] = std::unique_ptr<SubGraphT>(_e->Get(_i)->UnPack(_resolver)); } } };
+ { auto _e = description(); if (_e) _o->description = _e->str(); };
+ { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->buffers[_i] = std::unique_ptr<BufferT>(_e->Get(_i)->UnPack(_resolver)); } } };
+}
+
+inline flatbuffers::Offset<Model> Model::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateModel(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Model> CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _version = _o->version;
+ auto _operator_codes = _o->operator_codes.size() ? _fbb.CreateVector<flatbuffers::Offset<OperatorCode>> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0;
+ auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector<flatbuffers::Offset<SubGraph>> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0;
+ auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description);
+ auto _buffers = _o->buffers.size() ? _fbb.CreateVector<flatbuffers::Offset<Buffer>> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0;
+ return tflite::CreateModel(
+ _fbb,
+ _version,
+ _operator_codes,
+ _subgraphs,
+ _description,
+ _buffers);
+}
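+
+// Usage sketch for the object API above: unpack a serialized model into a
+// mutable ModelT, edit it, and pack it back into a fresh buffer. The buffer
+// name and the edited field are illustrative assumptions.
+//
+//   const tflite::Model *model = tflite::GetModel(buffer_data);
+//   std::unique_ptr<tflite::ModelT> mutable_model(model->UnPack());
+//   mutable_model->description = "edited copy";
+//   flatbuffers::FlatBufferBuilder fbb;
+//   tflite::FinishModelBuffer(fbb, tflite::Model::Pack(fbb, mutable_model.get()));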
+
+inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) {
+ switch (type) {
+ case BuiltinOptions_NONE: {
+ return true;
+ }
+ case BuiltinOptions_Conv2DOptions: {
+ auto ptr = reinterpret_cast<const Conv2DOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions: {
+ auto ptr = reinterpret_cast<const DepthwiseConv2DOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions: {
+ auto ptr = reinterpret_cast<const ConcatEmbeddingsOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LSHProjectionOptions: {
+ auto ptr = reinterpret_cast<const LSHProjectionOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_Pool2DOptions: {
+ auto ptr = reinterpret_cast<const Pool2DOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SVDFOptions: {
+ auto ptr = reinterpret_cast<const SVDFOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_RNNOptions: {
+ auto ptr = reinterpret_cast<const RNNOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_FullyConnectedOptions: {
+ auto ptr = reinterpret_cast<const FullyConnectedOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SoftmaxOptions: {
+ auto ptr = reinterpret_cast<const SoftmaxOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ConcatenationOptions: {
+ auto ptr = reinterpret_cast<const ConcatenationOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_AddOptions: {
+ auto ptr = reinterpret_cast<const AddOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_L2NormOptions: {
+ auto ptr = reinterpret_cast<const L2NormOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions: {
+ auto ptr = reinterpret_cast<const LocalResponseNormalizationOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LSTMOptions: {
+ auto ptr = reinterpret_cast<const LSTMOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ResizeBilinearOptions: {
+ auto ptr = reinterpret_cast<const ResizeBilinearOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_CallOptions: {
+ auto ptr = reinterpret_cast<const CallOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ReshapeOptions: {
+ auto ptr = reinterpret_cast<const ReshapeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SkipGramOptions: {
+ auto ptr = reinterpret_cast<const SkipGramOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SpaceToDepthOptions: {
+ auto ptr = reinterpret_cast<const SpaceToDepthOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions: {
+ auto ptr = reinterpret_cast<const EmbeddingLookupSparseOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_MulOptions: {
+ auto ptr = reinterpret_cast<const MulOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ default: return false;
+ }
+}
+
+inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
+ if (values->size() != types->size()) return false;
+ for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
+ if (!VerifyBuiltinOptions(
+ verifier, values->Get(i), types->GetEnum<BuiltinOptions>(i))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver) {
+ switch (type) {
+ case BuiltinOptions_Conv2DOptions: {
+ auto ptr = reinterpret_cast<const Conv2DOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions: {
+ auto ptr = reinterpret_cast<const DepthwiseConv2DOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions: {
+ auto ptr = reinterpret_cast<const ConcatEmbeddingsOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LSHProjectionOptions: {
+ auto ptr = reinterpret_cast<const LSHProjectionOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_Pool2DOptions: {
+ auto ptr = reinterpret_cast<const Pool2DOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SVDFOptions: {
+ auto ptr = reinterpret_cast<const SVDFOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_RNNOptions: {
+ auto ptr = reinterpret_cast<const RNNOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_FullyConnectedOptions: {
+ auto ptr = reinterpret_cast<const FullyConnectedOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SoftmaxOptions: {
+ auto ptr = reinterpret_cast<const SoftmaxOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ConcatenationOptions: {
+ auto ptr = reinterpret_cast<const ConcatenationOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_AddOptions: {
+ auto ptr = reinterpret_cast<const AddOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_L2NormOptions: {
+ auto ptr = reinterpret_cast<const L2NormOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions: {
+ auto ptr = reinterpret_cast<const LocalResponseNormalizationOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LSTMOptions: {
+ auto ptr = reinterpret_cast<const LSTMOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ResizeBilinearOptions: {
+ auto ptr = reinterpret_cast<const ResizeBilinearOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_CallOptions: {
+ auto ptr = reinterpret_cast<const CallOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ReshapeOptions: {
+ auto ptr = reinterpret_cast<const ReshapeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SkipGramOptions: {
+ auto ptr = reinterpret_cast<const SkipGramOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SpaceToDepthOptions: {
+ auto ptr = reinterpret_cast<const SpaceToDepthOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions: {
+ auto ptr = reinterpret_cast<const EmbeddingLookupSparseOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_MulOptions: {
+ auto ptr = reinterpret_cast<const MulOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ default: return nullptr;
+ }
+}
+
+inline flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const {
+ switch (type) {
+ case BuiltinOptions_Conv2DOptions: {
+ auto ptr = reinterpret_cast<const Conv2DOptionsT *>(value);
+ return CreateConv2DOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions: {
+ auto ptr = reinterpret_cast<const DepthwiseConv2DOptionsT *>(value);
+ return CreateDepthwiseConv2DOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions: {
+ auto ptr = reinterpret_cast<const ConcatEmbeddingsOptionsT *>(value);
+ return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LSHProjectionOptions: {
+ auto ptr = reinterpret_cast<const LSHProjectionOptionsT *>(value);
+ return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_Pool2DOptions: {
+ auto ptr = reinterpret_cast<const Pool2DOptionsT *>(value);
+ return CreatePool2DOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SVDFOptions: {
+ auto ptr = reinterpret_cast<const SVDFOptionsT *>(value);
+ return CreateSVDFOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_RNNOptions: {
+ auto ptr = reinterpret_cast<const RNNOptionsT *>(value);
+ return CreateRNNOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_FullyConnectedOptions: {
+ auto ptr = reinterpret_cast<const FullyConnectedOptionsT *>(value);
+ return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SoftmaxOptions: {
+ auto ptr = reinterpret_cast<const SoftmaxOptionsT *>(value);
+ return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ConcatenationOptions: {
+ auto ptr = reinterpret_cast<const ConcatenationOptionsT *>(value);
+ return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_AddOptions: {
+ auto ptr = reinterpret_cast<const AddOptionsT *>(value);
+ return CreateAddOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_L2NormOptions: {
+ auto ptr = reinterpret_cast<const L2NormOptionsT *>(value);
+ return CreateL2NormOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions: {
+ auto ptr = reinterpret_cast<const LocalResponseNormalizationOptionsT *>(value);
+ return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LSTMOptions: {
+ auto ptr = reinterpret_cast<const LSTMOptionsT *>(value);
+ return CreateLSTMOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ResizeBilinearOptions: {
+ auto ptr = reinterpret_cast<const ResizeBilinearOptionsT *>(value);
+ return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_CallOptions: {
+ auto ptr = reinterpret_cast<const CallOptionsT *>(value);
+ return CreateCallOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ReshapeOptions: {
+ auto ptr = reinterpret_cast<const ReshapeOptionsT *>(value);
+ return CreateReshapeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SkipGramOptions: {
+ auto ptr = reinterpret_cast<const SkipGramOptionsT *>(value);
+ return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SpaceToDepthOptions: {
+ auto ptr = reinterpret_cast<const SpaceToDepthOptionsT *>(value);
+ return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions: {
+ auto ptr = reinterpret_cast<const EmbeddingLookupSparseOptionsT *>(value);
+ return CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_MulOptions: {
+ auto ptr = reinterpret_cast<const MulOptionsT *>(value);
+ return CreateMulOptions(_fbb, ptr, _rehasher).Union();
+ }
+ default: return 0;
+ }
+}
+
+inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) {
+ switch (type) {
+ case BuiltinOptions_Conv2DOptions: {
+ value = new Conv2DOptionsT(*reinterpret_cast<Conv2DOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions: {
+ value = new DepthwiseConv2DOptionsT(*reinterpret_cast<DepthwiseConv2DOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions: {
+ value = new ConcatEmbeddingsOptionsT(*reinterpret_cast<ConcatEmbeddingsOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LSHProjectionOptions: {
+ value = new LSHProjectionOptionsT(*reinterpret_cast<LSHProjectionOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_Pool2DOptions: {
+ value = new Pool2DOptionsT(*reinterpret_cast<Pool2DOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SVDFOptions: {
+ value = new SVDFOptionsT(*reinterpret_cast<SVDFOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_RNNOptions: {
+ value = new RNNOptionsT(*reinterpret_cast<RNNOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_FullyConnectedOptions: {
+ value = new FullyConnectedOptionsT(*reinterpret_cast<FullyConnectedOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SoftmaxOptions: {
+ value = new SoftmaxOptionsT(*reinterpret_cast<SoftmaxOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ConcatenationOptions: {
+ value = new ConcatenationOptionsT(*reinterpret_cast<ConcatenationOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_AddOptions: {
+ value = new AddOptionsT(*reinterpret_cast<AddOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_L2NormOptions: {
+ value = new L2NormOptionsT(*reinterpret_cast<L2NormOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions: {
+ value = new LocalResponseNormalizationOptionsT(*reinterpret_cast<LocalResponseNormalizationOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LSTMOptions: {
+ value = new LSTMOptionsT(*reinterpret_cast<LSTMOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ResizeBilinearOptions: {
+ value = new ResizeBilinearOptionsT(*reinterpret_cast<ResizeBilinearOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_CallOptions: {
+ value = new CallOptionsT(*reinterpret_cast<CallOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ReshapeOptions: {
+ value = new ReshapeOptionsT(*reinterpret_cast<ReshapeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SkipGramOptions: {
+ value = new SkipGramOptionsT(*reinterpret_cast<SkipGramOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SpaceToDepthOptions: {
+ value = new SpaceToDepthOptionsT(*reinterpret_cast<SpaceToDepthOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions: {
+ value = new EmbeddingLookupSparseOptionsT(*reinterpret_cast<EmbeddingLookupSparseOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_MulOptions: {
+ value = new MulOptionsT(*reinterpret_cast<MulOptionsT *>(u.value));
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+inline void BuiltinOptionsUnion::Reset() {
+ switch (type) {
+ case BuiltinOptions_Conv2DOptions: {
+ auto ptr = reinterpret_cast<Conv2DOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions: {
+ auto ptr = reinterpret_cast<DepthwiseConv2DOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions: {
+ auto ptr = reinterpret_cast<ConcatEmbeddingsOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LSHProjectionOptions: {
+ auto ptr = reinterpret_cast<LSHProjectionOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_Pool2DOptions: {
+ auto ptr = reinterpret_cast<Pool2DOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SVDFOptions: {
+ auto ptr = reinterpret_cast<SVDFOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_RNNOptions: {
+ auto ptr = reinterpret_cast<RNNOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_FullyConnectedOptions: {
+ auto ptr = reinterpret_cast<FullyConnectedOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SoftmaxOptions: {
+ auto ptr = reinterpret_cast<SoftmaxOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ConcatenationOptions: {
+ auto ptr = reinterpret_cast<ConcatenationOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_AddOptions: {
+ auto ptr = reinterpret_cast<AddOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_L2NormOptions: {
+ auto ptr = reinterpret_cast<L2NormOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions: {
+ auto ptr = reinterpret_cast<LocalResponseNormalizationOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LSTMOptions: {
+ auto ptr = reinterpret_cast<LSTMOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ResizeBilinearOptions: {
+ auto ptr = reinterpret_cast<ResizeBilinearOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_CallOptions: {
+ auto ptr = reinterpret_cast<CallOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ReshapeOptions: {
+ auto ptr = reinterpret_cast<ReshapeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SkipGramOptions: {
+ auto ptr = reinterpret_cast<SkipGramOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SpaceToDepthOptions: {
+ auto ptr = reinterpret_cast<SpaceToDepthOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions: {
+ auto ptr = reinterpret_cast<EmbeddingLookupSparseOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_MulOptions: {
+ auto ptr = reinterpret_cast<MulOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ default: break;
+ }
+ value = nullptr;
+ type = BuiltinOptions_NONE;
+}
+
+inline const tflite::Model *GetModel(const void *buf) {
+ return flatbuffers::GetRoot<tflite::Model>(buf);
+}
+
+inline const char *ModelIdentifier() {
+ return "TFL3";
+}
+
+inline bool ModelBufferHasIdentifier(const void *buf) {
+ return flatbuffers::BufferHasIdentifier(
+ buf, ModelIdentifier());
+}
+
+inline bool VerifyModelBuffer(
+ flatbuffers::Verifier &verifier) {
+ return verifier.VerifyBuffer<tflite::Model>(ModelIdentifier());
+}
+
+inline const char *ModelExtension() {
+ return "tflite";
+}
+
+inline void FinishModelBuffer(
+ flatbuffers::FlatBufferBuilder &fbb,
+ flatbuffers::Offset<tflite::Model> root) {
+ fbb.Finish(root, ModelIdentifier());
+}
+
+inline std::unique_ptr<ModelT> UnPackModel(
+ const void *buf,
+ const flatbuffers::resolver_function_t *res = nullptr) {
+ return std::unique_ptr<ModelT>(GetModel(buf)->UnPack(res));
+}
+
+} // namespace tflite
+
+#endif // FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_
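
The helpers that close the generated header above — GetModel(), ModelBufferHasIdentifier(), VerifyModelBuffer(), and UnPackModel() — are the usual entry points for consuming a serialized .tflite file. A minimal sketch of how they fit together; LoadAndUnpack and the file-reading code are illustrative only and are not part of this patch:

    #include <cstdint>
    #include <fstream>
    #include <iterator>
    #include <memory>
    #include <string>
    #include <vector>

    #include "tensorflow/contrib/lite/schema/schema_generated.h"

    // Illustrative helper (not in the patch): reads a .tflite file, checks the
    // "TFL3" identifier, verifies the buffer, then unpacks it into the mutable
    // ModelT object tree.
    std::unique_ptr<tflite::ModelT> LoadAndUnpack(const std::string& path) {
      std::ifstream file(path, std::ios::binary);
      std::vector<char> buffer((std::istreambuf_iterator<char>(file)),
                               std::istreambuf_iterator<char>());
      if (!tflite::ModelBufferHasIdentifier(buffer.data())) return nullptr;
      flatbuffers::Verifier verifier(
          reinterpret_cast<const uint8_t*>(buffer.data()), buffer.size());
      if (!tflite::VerifyModelBuffer(verifier)) return nullptr;
      return tflite::UnPackModel(buffer.data());
    }
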
diff --git a/tensorflow/contrib/lite/tools/benchmark_model.cc b/tensorflow/contrib/lite/tools/benchmark_model.cc
new file mode 100644
index 0000000000..f80949b23e
--- /dev/null
+++ b/tensorflow/contrib/lite/tools/benchmark_model.cc
@@ -0,0 +1,91 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <cstdarg>
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/string_util.h"
+#include "tensorflow/contrib/lite/tools/mutable_op_resolver.h"
+
+#ifdef TFLITE_CUSTOM_OPS_HEADER
+void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
+#endif
+
+#define LOG(x) std::cerr
+#define CHECK(x) if (!(x)) { LOG(ERROR) << #x << " failed"; exit(1); }
+
+namespace tensorflow {
+namespace benchmark_tflite_model {
+
+std::unique_ptr<tflite::FlatBufferModel> model;
+std::unique_ptr<tflite::Interpreter> interpreter;
+
+void InitImpl(const std::string& graph, const std::vector<int>& sizes,
+ const std::string& input_layer_type, int num_threads) {
+ CHECK(graph.c_str());
+
+ model = tflite::FlatBufferModel::BuildFromFile(graph.c_str());
+ if (!model) {
+ LOG(FATAL) << "Failed to mmap model " << graph;
+ }
+ LOG(INFO) << "Loaded model " << graph;
+ model->error_reporter();
+ LOG(INFO) << "resolved reporter";
+
+#ifdef TFLITE_CUSTOM_OPS_HEADER
+ tflite::MutableOpResolver resolver;
+ RegisterSelectedOps(&resolver);
+#else
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+#endif
+
+ tflite::InterpreterBuilder(*model, resolver)(&interpreter);
+ if (!interpreter) {
+ LOG(FATAL) << "Failed to construct interpreter";
+ }
+
+ if (num_threads != -1) {
+ interpreter->SetNumThreads(num_threads);
+ }
+
+ int input = interpreter->inputs()[0];
+
+ if (input_layer_type != "string") {
+ interpreter->ResizeInputTensor(input, sizes);
+ }
+
+ if (interpreter->AllocateTensors() != kTfLiteOk) {
+ LOG(FATAL) << "Failed to allocate tensors!";
+ }
+}
+
+int Main(int argc, char** argv) {
+ InitImpl("", {}, "", 1);
+ return 0;
+}
+
+} // namespace benchmark_tflite_model
+} // namespace tensorflow
+
+int main(int argc, char** argv) {
+ return tensorflow::benchmark_tflite_model::Main(argc, argv);
+}
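
InitImpl() above stops once AllocateTensors() succeeds, and Main() is only a stub. A rough sketch of the measurement loop that would follow, assuming a float input tensor and reusing the interpreter, LOG, and CHECK defined in this file; RunBenchmark and its iteration count are illustrative names, not part of the patch:

    #include <algorithm>
    #include <chrono>

    // Illustrative continuation, placed in the same namespace as InitImpl():
    // fills the (assumed float) input with zeros and times repeated Invoke() calls.
    void RunBenchmark(int iterations) {
      int input = interpreter->inputs()[0];
      TfLiteTensor* input_tensor = interpreter->tensor(input);
      float* input_data = interpreter->typed_tensor<float>(input);
      std::fill(input_data, input_data + input_tensor->bytes / sizeof(float), 0.f);

      auto start = std::chrono::high_resolution_clock::now();
      for (int i = 0; i < iterations; ++i) {
        CHECK(interpreter->Invoke() == kTfLiteOk);
      }
      auto end = std::chrono::high_resolution_clock::now();
      double total_ms =
          std::chrono::duration<double, std::milli>(end - start).count();
      LOG(INFO) << "Average inference time: " << total_ms / iterations << " ms";
    }
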
diff --git a/tensorflow/contrib/lite/tools/mutable_op_resolver.h b/tensorflow/contrib/lite/tools/mutable_op_resolver.h
index 9546c32427..a51fdaee19 100644
--- a/tensorflow/contrib/lite/tools/mutable_op_resolver.h
+++ b/tensorflow/contrib/lite/tools/mutable_op_resolver.h
@@ -19,6 +19,17 @@ limitations under the License.
#include "tensorflow/contrib/lite/context.h"
#include "tensorflow/contrib/lite/model.h"
+// Older compilers don't provide a std::hash specialization for enums, so
+// define one explicitly for BuiltinOperator to allow its use as an
+// unordered_set/unordered_map key.
+namespace std {
+
+template <>
+struct hash<tflite::BuiltinOperator> {
+  size_t operator()(const tflite::BuiltinOperator& op) const {
+    return std::hash<int>()(static_cast<int>(op));
+  }
+};
+}  // namespace std
+
namespace tflite {
// An OpResolver that is mutable, also used as the op in gen_op_registration.
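
The std::hash specialization added a few lines up exists so that unordered containers keyed on the builtin-op enum compile on pre-C++14 standard libraries, which lack the generic enum hash. A tiny usage sketch; ops_seen and NoteOp are illustrative names only:

    #include <unordered_set>

    #include "tensorflow/contrib/lite/tools/mutable_op_resolver.h"

    // Compiles on older toolchains only because of the specialization above.
    std::unordered_set<tflite::BuiltinOperator> ops_seen;

    void NoteOp(tflite::BuiltinOperator op) { ops_seen.insert(op); }
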