 tensorflow/contrib/lite/build_def.bzl                                             | 408
 tensorflow/contrib/lite/builtin_op_data.h                                         |   5
 tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md                             |  12
 tensorflow/contrib/lite/kernels/BUILD                                             |  15
 tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h                |  20
 tensorflow/contrib/lite/kernels/pack.cc                                           | 131
 tensorflow/contrib/lite/kernels/pack_test.cc                                      | 119
 tensorflow/contrib/lite/kernels/register.cc                                       |   2
 tensorflow/contrib/lite/model.cc                                                  |  10
 tensorflow/contrib/lite/testing/generate_examples.py                              |  38
 tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc  |   8
 tensorflow/contrib/lite/toco/import_tensorflow.cc                                 |   1
 tensorflow/contrib/lite/toco/model.h                                              |   1
 tensorflow/contrib/lite/toco/tflite/operator.cc                                   |  22
 tensorflow/contrib/lite/toco/tflite/operator_test.cc                              |  10
 15 files changed, 599 insertions(+), 203 deletions(-)
diff --git a/tensorflow/contrib/lite/build_def.bzl b/tensorflow/contrib/lite/build_def.bzl
index 79f7455ad8..7c13f9011e 100644
--- a/tensorflow/contrib/lite/build_def.bzl
+++ b/tensorflow/contrib/lite/build_def.bzl
@@ -1,4 +1,5 @@
"""Generate Flatbuffer binary from json."""
+
load(
"//tensorflow:tensorflow.bzl",
"tf_cc_test",
@@ -6,118 +7,120 @@ load(
)
def tflite_copts():
- """Defines compile time flags."""
- copts = [
- "-DFARMHASH_NO_CXX_STRING",
- ] + select({
- str(Label("//tensorflow:android_arm64")): [
- "-std=c++11",
- "-O3",
- ],
- str(Label("//tensorflow:android_arm")): [
- "-mfpu=neon",
- "-mfloat-abi=softfp",
- "-std=c++11",
- "-O3",
- ],
- str(Label("//tensorflow:android_x86")): [
- "-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK",
- ],
- str(Label("//tensorflow:ios_x86_64")): [
- "-msse4.1",
- ],
- "//conditions:default": [],
- }) + select({
- str(Label("//tensorflow:with_default_optimizations")): [],
- "//conditions:default": ["-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK"],
- })
+ """Defines compile time flags."""
+ copts = [
+ "-DFARMHASH_NO_CXX_STRING",
+ ] + select({
+ str(Label("//tensorflow:android_arm64")): [
+ "-std=c++11",
+ "-O3",
+ ],
+ str(Label("//tensorflow:android_arm")): [
+ "-mfpu=neon",
+ "-mfloat-abi=softfp",
+ "-std=c++11",
+ "-O3",
+ ],
+ str(Label("//tensorflow:android_x86")): [
+ "-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK",
+ ],
+ str(Label("//tensorflow:ios_x86_64")): [
+ "-msse4.1",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ str(Label("//tensorflow:with_default_optimizations")): [],
+ "//conditions:default": ["-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK"],
+ })
- return copts
+ return copts
LINKER_SCRIPT = "//tensorflow/contrib/lite/java/src/main/native:version_script.lds"
def tflite_linkopts_unstripped():
- """Defines linker flags to reduce size of TFLite binary.
+ """Defines linker flags to reduce size of TFLite binary.
- These are useful when trying to investigate the relative size of the
- symbols in TFLite.
+ These are useful when trying to investigate the relative size of the
+ symbols in TFLite.
- Returns:
- a select object with proper linkopts
- """
- return select({
- "//tensorflow:android": [
- "-Wl,--no-export-dynamic", # Only inc syms referenced by dynamic obj.
- "-Wl,--exclude-libs,ALL", # Exclude syms in all libs from auto export.
- "-Wl,--gc-sections", # Eliminate unused code and data.
- "-Wl,--as-needed", # Don't link unused libs.
- ],
- "//tensorflow/contrib/lite:mips": [],
- "//tensorflow/contrib/lite:mips64": [],
- "//conditions:default": [
- "-Wl,--icf=all", # Identical code folding.
- ],
- })
+ Returns:
+ a select object with proper linkopts
+ """
+ return select({
+ "//tensorflow:android": [
+ "-Wl,--no-export-dynamic", # Only inc syms referenced by dynamic obj.
+ "-Wl,--exclude-libs,ALL", # Exclude syms in all libs from auto export.
+ "-Wl,--gc-sections", # Eliminate unused code and data.
+ "-Wl,--as-needed", # Don't link unused libs.
+ ],
+ "//tensorflow/contrib/lite:mips": [],
+ "//tensorflow/contrib/lite:mips64": [],
+ "//conditions:default": [
+ "-Wl,--icf=all", # Identical code folding.
+ ],
+ })
def tflite_jni_linkopts_unstripped():
- """Defines linker flags to reduce size of TFLite binary with JNI.
+ """Defines linker flags to reduce size of TFLite binary with JNI.
- These are useful when trying to investigate the relative size of the
- symbols in TFLite.
+ These are useful when trying to investigate the relative size of the
+ symbols in TFLite.
- Returns:
- a select object with proper linkopts
- """
- return select({
- "//tensorflow:android": [
- "-Wl,--gc-sections", # Eliminate unused code and data.
- "-Wl,--as-needed", # Don't link unused libs.
- ],
- "//tensorflow/contrib/lite:mips": [],
- "//tensorflow/contrib/lite:mips64": [],
- "//conditions:default": [
- "-Wl,--icf=all", # Identical code folding.
- ],
- })
+ Returns:
+ a select object with proper linkopts
+ """
+ return select({
+ "//tensorflow:android": [
+ "-Wl,--gc-sections", # Eliminate unused code and data.
+ "-Wl,--as-needed", # Don't link unused libs.
+ ],
+ "//tensorflow/contrib/lite:mips": [],
+ "//tensorflow/contrib/lite:mips64": [],
+ "//conditions:default": [
+ "-Wl,--icf=all", # Identical code folding.
+ ],
+ })
def tflite_linkopts():
- """Defines linker flags to reduce size of TFLite binary."""
- return tflite_linkopts_unstripped() + select({
- "//tensorflow:android": [
- "-s", # Omit symbol table.
- ],
- "//conditions:default": [],
- })
+ """Defines linker flags to reduce size of TFLite binary."""
+ return tflite_linkopts_unstripped() + select({
+ "//tensorflow:android": [
+ "-s", # Omit symbol table.
+ ],
+ "//conditions:default": [],
+ })
def tflite_jni_linkopts():
- """Defines linker flags to reduce size of TFLite binary with JNI."""
- return tflite_jni_linkopts_unstripped() + select({
- "//tensorflow:android": [
- "-s", # Omit symbol table.
- "-latomic", # Required for some uses of ISO C++11 <atomic> in x86.
- ],
- "//conditions:default": [],
- })
+ """Defines linker flags to reduce size of TFLite binary with JNI."""
+ return tflite_jni_linkopts_unstripped() + select({
+ "//tensorflow:android": [
+ "-s", # Omit symbol table.
+ "-latomic", # Required for some uses of ISO C++11 <atomic> in x86.
+ ],
+ "//conditions:default": [],
+ })
-def tflite_jni_binary(name,
- copts=tflite_copts(),
- linkopts=tflite_jni_linkopts(),
- linkscript=LINKER_SCRIPT,
- linkshared=1,
- linkstatic=1,
- deps=[]):
- """Builds a jni binary for TFLite."""
- linkopts = linkopts + [
- "-Wl,--version-script", # Export only jni functions & classes.
- "$(location {})".format(linkscript),
- ]
- native.cc_binary(
- name=name,
- copts=copts,
- linkshared=linkshared,
- linkstatic=linkstatic,
- deps= deps + [linkscript],
- linkopts=linkopts)
+def tflite_jni_binary(
+ name,
+ copts = tflite_copts(),
+ linkopts = tflite_jni_linkopts(),
+ linkscript = LINKER_SCRIPT,
+ linkshared = 1,
+ linkstatic = 1,
+ deps = []):
+ """Builds a jni binary for TFLite."""
+ linkopts = linkopts + [
+ "-Wl,--version-script", # Export only jni functions & classes.
+ "$(location {})".format(linkscript),
+ ]
+ native.cc_binary(
+ name = name,
+ copts = copts,
+ linkshared = linkshared,
+ linkstatic = linkstatic,
+ deps = deps + [linkscript],
+ linkopts = linkopts,
+ )
def tflite_cc_shared_object(name,
copts=tflite_copts(),
@@ -134,75 +137,75 @@ def tflite_cc_shared_object(name,
deps=deps)
def tf_to_tflite(name, src, options, out):
- """Convert a frozen tensorflow graphdef to TF Lite's flatbuffer.
+ """Convert a frozen tensorflow graphdef to TF Lite's flatbuffer.
- Args:
- name: Name of rule.
- src: name of the input graphdef file.
- options: options passed to TOCO.
- out: name of the output flatbuffer file.
- """
+ Args:
+ name: Name of rule.
+ src: name of the input graphdef file.
+ options: options passed to TOCO.
+ out: name of the output flatbuffer file.
+ """
- toco_cmdline = " ".join([
- "//tensorflow/contrib/lite/toco:toco",
- "--input_format=TENSORFLOW_GRAPHDEF",
- "--output_format=TFLITE",
- ("--input_file=$(location %s)" % src),
- ("--output_file=$(location %s)" % out),
- ] + options )
- native.genrule(
- name = name,
- srcs=[src],
- outs=[out],
- cmd = toco_cmdline,
- tools= ["//tensorflow/contrib/lite/toco:toco"],
- )
+ toco_cmdline = " ".join([
+ "//tensorflow/contrib/lite/toco:toco",
+ "--input_format=TENSORFLOW_GRAPHDEF",
+ "--output_format=TFLITE",
+ ("--input_file=$(location %s)" % src),
+ ("--output_file=$(location %s)" % out),
+ ] + options)
+ native.genrule(
+ name = name,
+ srcs = [src],
+ outs = [out],
+ cmd = toco_cmdline,
+ tools = ["//tensorflow/contrib/lite/toco:toco"],
+ )
def tflite_to_json(name, src, out):
- """Convert a TF Lite flatbuffer to JSON.
+ """Convert a TF Lite flatbuffer to JSON.
- Args:
- name: Name of rule.
- src: name of the input flatbuffer file.
- out: name of the output JSON file.
- """
+ Args:
+ name: Name of rule.
+ src: name of the input flatbuffer file.
+ out: name of the output JSON file.
+ """
- flatc = "@flatbuffers//:flatc"
- schema = "//tensorflow/contrib/lite/schema:schema.fbs"
- native.genrule(
- name = name,
- srcs = [schema, src],
- outs = [out],
- cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.bin &&" +
- "$(location %s) --raw-binary --strict-json -t" +
- " -o /tmp $(location %s) -- $${TMP}.bin &&" +
- "cp $${TMP}.json $(location %s)")
- % (src, flatc, schema, out),
- tools = [flatc],
- )
+ flatc = "@flatbuffers//:flatc"
+ schema = "//tensorflow/contrib/lite/schema:schema.fbs"
+ native.genrule(
+ name = name,
+ srcs = [schema, src],
+ outs = [out],
+ cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.bin &&" +
+ "$(location %s) --raw-binary --strict-json -t" +
+ " -o /tmp $(location %s) -- $${TMP}.bin &&" +
+ "cp $${TMP}.json $(location %s)") %
+ (src, flatc, schema, out),
+ tools = [flatc],
+ )
def json_to_tflite(name, src, out):
- """Convert a JSON file to TF Lite's flatbuffer.
+ """Convert a JSON file to TF Lite's flatbuffer.
- Args:
- name: Name of rule.
- src: name of the input JSON file.
- out: name of the output flatbuffer file.
- """
+ Args:
+ name: Name of rule.
+ src: name of the input JSON file.
+ out: name of the output flatbuffer file.
+ """
- flatc = "@flatbuffers//:flatc"
- schema = "//tensorflow/contrib/lite/schema:schema_fbs"
- native.genrule(
- name = name,
- srcs = [schema, src],
- outs = [out],
- cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.json &&" +
- "$(location %s) --raw-binary --unknown-json --allow-non-utf8 -b" +
- " -o /tmp $(location %s) $${TMP}.json &&" +
- "cp $${TMP}.bin $(location %s)")
- % (src, flatc, schema, out),
- tools = [flatc],
- )
+ flatc = "@flatbuffers//:flatc"
+ schema = "//tensorflow/contrib/lite/schema:schema_fbs"
+ native.genrule(
+ name = name,
+ srcs = [schema, src],
+ outs = [out],
+ cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.json &&" +
+ "$(location %s) --raw-binary --unknown-json --allow-non-utf8 -b" +
+ " -o /tmp $(location %s) $${TMP}.json &&" +
+ "cp $${TMP}.bin $(location %s)") %
+ (src, flatc, schema, out),
+ tools = [flatc],
+ )
# This is the master list of generated examples that will be made into tests. A
# function called make_XXX_tests() must also appear in generate_examples.py.
@@ -245,6 +248,7 @@ def generated_test_models():
"mul",
"neg",
"not_equal",
+ "pack",
"pad",
"padv2",
"prelu",
@@ -279,58 +283,58 @@ def generated_test_models():
]
def gen_zip_test(name, test_name, **kwargs):
- """Generate a zipped-example test and its dependent zip files.
+ """Generate a zipped-example test and its dependent zip files.
- Args:
- name: Resulting cc_test target name
- test_name: Test targets this model. Comes from the list above.
- **kwargs: tf_cc_test kwargs.
- """
- gen_zipped_test_file(
- name = "zip_%s" % test_name,
- file = "%s.zip" % test_name,
- )
- tf_cc_test(name, **kwargs)
+ Args:
+ name: Resulting cc_test target name
+ test_name: Test targets this model. Comes from the list above.
+ **kwargs: tf_cc_test kwargs.
+ """
+ gen_zipped_test_file(
+ name = "zip_%s" % test_name,
+ file = "%s.zip" % test_name,
+ )
+ tf_cc_test(name, **kwargs)
def gen_zipped_test_file(name, file):
- """Generate a zip file of tests by using :generate_examples.
+ """Generate a zip file of tests by using :generate_examples.
- Args:
- name: Name of output. We will produce "`file`.files" as a target.
- file: The name of one of the generated_examples targets, e.g. "transpose"
- """
- toco = "//tensorflow/contrib/lite/toco:toco"
- native.genrule(
- name = file + ".files",
- cmd = ("$(locations :generate_examples) --toco $(locations %s) " % toco
- + " --zip_to_output " + file + " $(@D)"),
- outs = [file],
- tools = [
- ":generate_examples",
- toco,
- ],
- )
+ Args:
+ name: Name of output. We will produce "`file`.files" as a target.
+ file: The name of one of the generated_examples targets, e.g. "transpose"
+ """
+ toco = "//tensorflow/contrib/lite/toco:toco"
+ native.genrule(
+ name = file + ".files",
+ cmd = ("$(locations :generate_examples) --toco $(locations %s) " % toco +
+ " --zip_to_output " + file + " $(@D)"),
+ outs = [file],
+ tools = [
+ ":generate_examples",
+ toco,
+ ],
+ )
- native.filegroup(
- name = name,
- srcs = [file],
- )
+ native.filegroup(
+ name = name,
+ srcs = [file],
+ )
def gen_selected_ops(name, model):
- """Generate the library that includes only used ops.
+ """Generate the library that includes only used ops.
- Args:
- name: Name of the generated library.
- model: TFLite model to interpret.
- """
- out = name + "_registration.cc"
- tool = "//tensorflow/contrib/lite/tools:generate_op_registrations"
- tflite_path = "//tensorflow/contrib/lite"
- native.genrule(
- name = name,
- srcs = [model],
- outs = [out],
- cmd = ("$(location %s) --input_model=$(location %s) --output_registration=$(location %s) --tflite_path=%s")
- % (tool, model, out, tflite_path[2:]),
- tools = [tool],
- )
+ Args:
+ name: Name of the generated library.
+ model: TFLite model to interpret.
+ """
+ out = name + "_registration.cc"
+ tool = "//tensorflow/contrib/lite/tools:generate_op_registrations"
+ tflite_path = "//tensorflow/contrib/lite"
+ native.genrule(
+ name = name,
+ srcs = [model],
+ outs = [out],
+ cmd = ("$(location %s) --input_model=$(location %s) --output_registration=$(location %s) --tflite_path=%s") %
+ (tool, model, out, tflite_path[2:]),
+ tools = [tool],
+ )
diff --git a/tensorflow/contrib/lite/builtin_op_data.h b/tensorflow/contrib/lite/builtin_op_data.h
index a24aaad7dd..fd16aa1063 100644
--- a/tensorflow/contrib/lite/builtin_op_data.h
+++ b/tensorflow/contrib/lite/builtin_op_data.h
@@ -277,6 +277,11 @@ typedef struct {
bool narrow_range;
} TfLiteFakeQuantParams;
+typedef struct {
+ int values_count;
+ int axis;
+} TfLitePackParams;
+
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
diff --git a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
index 49d00a66ba..967259b7a6 100644
--- a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
+++ b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
@@ -815,6 +815,18 @@ Outputs {
}
```
+**PACK**
+
+```
+Inputs {
+ 0: a list of tensors.
+ 1: an integer.
+}
+Outputs {
+ 0: A tensor of stacked tensors.
+}
+```
+
And these are TensorFlow Lite operations that are present but not ready for
custom models yet:
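
Note: the new PACK entry corresponds to TensorFlow's `tf.stack`. Every input must have the same shape and type, and the output gains one extra dimension of size `values_count` at position `axis` (in the TFLite kernel, `values_count` and `axis` come from `TfLitePackParams` rather than from extra tensors). A minimal illustrative sketch of the equivalent TensorFlow call, using the same values as the kernel tests added below:

```python
import tensorflow as tf

# Stacking three shape-[2] tensors along axis 0 yields a shape-[3, 2] tensor.
a, b, c = tf.constant([1, 4]), tf.constant([2, 5]), tf.constant([3, 6])
stacked = tf.stack([a, b, c], axis=0)  # [[1, 4], [2, 5], [3, 6]]
```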
diff --git a/tensorflow/contrib/lite/kernels/BUILD b/tensorflow/contrib/lite/kernels/BUILD
index 9549b4445d..c224132cae 100644
--- a/tensorflow/contrib/lite/kernels/BUILD
+++ b/tensorflow/contrib/lite/kernels/BUILD
@@ -176,6 +176,7 @@ cc_library(
"mfcc.cc",
"mul.cc",
"neg.cc",
+ "pack.cc",
"pad.cc",
"pooling.cc",
"pow.cc",
@@ -1156,6 +1157,20 @@ tf_cc_test(
],
)
+tf_cc_test(
+ name = "pack_test",
+ size = "small",
+ srcs = ["pack_test.cc"],
+ tags = ["tflite_not_portable_ios"],
+ deps = [
+ ":builtin_ops",
+ "//tensorflow/contrib/lite:builtin_op_data",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite/kernels:test_util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
filegroup(
name = "all_files",
srcs = glob(
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index ef39be3f91..31a54c2b62 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -1860,6 +1860,26 @@ void Concatenation(int concat_dim, const Scalar* const* input_data,
}
}
+template <typename Scalar>
+void Pack(int dim, const Scalar* const* input_data,
+ const Dims<4>* const* input_dims, int inputs_count,
+ Scalar* output_data, const Dims<4>& output_dims) {
+ TFLITE_DCHECK(IsPackedWithoutStrides(output_dims));
+ int outer_size = 1;
+ for (int i = dim + 1; i < 4; i++) {
+ outer_size *= output_dims.sizes[i];
+ }
+ Scalar* output_ptr = output_data;
+ const int copy_size = FlatSize(**input_dims) / outer_size;
+ for (int k = 0; k < outer_size; k++) {
+ for (int i = 0; i < inputs_count; ++i) {
+ memcpy(output_ptr, input_data[i] + k * copy_size,
+ copy_size * sizeof(Scalar));
+ output_ptr += copy_size;
+ }
+ }
+}
+
// TODO(prabhumk): This is the same as the optimized implementation.
// TODO(prabhumk): The quantized implementation of concatentation isn't fully
// quantized as it takes scale as a floating point value. This should be fixed
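
Note on the Pack reference implementation above: `outer_size` is the product of the output dimensions that precede the packing axis (recall that `Dims<4>` stores dimensions in reverse order), `copy_size` is the flat size of one input divided by `outer_size`, and each outer step copies one contiguous slice from every input in turn. A standalone NumPy sketch of the same interleaving, checked against `np.stack` (illustrative only, no TFLite types):

```python
import numpy as np

def pack_reference(inputs, axis):
    """Interleave contiguous slices of each input, mirroring the reference Pack."""
    inputs = [np.asarray(x) for x in inputs]
    out_shape = list(inputs[0].shape)
    out_shape.insert(axis, len(inputs))
    outer_size = int(np.prod(out_shape[:axis]))  # dims before the packing axis
    copy_size = inputs[0].size // outer_size     # contiguous elements per copy
    flat = []
    for k in range(outer_size):
        for x in inputs:
            flat.extend(x.ravel()[k * copy_size:(k + 1) * copy_size])
    return np.asarray(flat).reshape(out_shape)

a = np.array([[1, 2, 3], [4, 5, 6]])
b = np.array([[7, 8, 9], [10, 11, 12]])
# Reproduces the FloatMultilDimensions expectation in pack_test.cc.
assert (pack_reference([a, b], axis=1) == np.stack([a, b], axis=1)).all()
```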
diff --git a/tensorflow/contrib/lite/kernels/pack.cc b/tensorflow/contrib/lite/kernels/pack.cc
new file mode 100644
index 0000000000..bb3416f6a6
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/pack.cc
@@ -0,0 +1,131 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/tensor.h"
+#include "tensorflow/contrib/lite/kernels/kernel_util.h"
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+namespace pack {
+namespace {
+
+constexpr int kOutputTensor = 0;
+
+// Op data for pack op.
+struct OpData {
+ int values_count;
+ int axis;
+};
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+ auto* data = new OpData;
+ data->axis = 0;
+ return data;
+}
+
+void Free(TfLiteContext* context, void* buffer) {
+ delete reinterpret_cast<OpData*>(buffer);
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ const OpData* data = reinterpret_cast<OpData*>(node->builtin_data);
+
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), data->values_count);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ const TfLiteTensor* input0 = GetInput(context, node, 0);
+ TF_LITE_ENSURE(context, NumDimensions(input0) < 4);
+ TF_LITE_ENSURE(context, NumDimensions(input0) >= data->axis);
+ // TODO(renjieliu): Support negative axis.
+ TF_LITE_ENSURE(context, data->axis >= 0);
+ if (input0->type != kTfLiteInt32 && input0->type != kTfLiteFloat32) {
+ context->ReportError(context,
+ "Currently pack only supports int32 and float32.");
+ return kTfLiteError;
+ }
+ // Make sure all inputs have the same shape and type.
+ for (int i = 1; i < data->values_count; ++i) {
+ const TfLiteTensor* input = GetInput(context, node, i);
+ TF_LITE_ENSURE(context, HaveSameShapes(input0, input));
+ TF_LITE_ENSURE_EQ(context, input0->type, input->type);
+ }
+
+ // Resize output. rank R will become rank R + 1
+ const int dimension_size = NumDimensions(input0) + 1;
+ const TfLiteIntArray* input_shape = input0->dims;
+ TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dimension_size);
+ int i = 0;
+ for (int index = 0; index < dimension_size; ++index) {
+ if (index == data->axis) {
+ output_shape->data[index] = data->values_count;
+ } else {
+ output_shape->data[index] = input_shape->data[i++];
+ }
+ }
+
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+ TF_LITE_ENSURE_EQ(context, output->type, input0->type);
+
+ return context->ResizeTensor(context, output, output_shape);
+}
+
+template <typename T>
+void PackImpl(TfLiteContext* context, TfLiteNode* node, TfLiteTensor* output,
+ int values_count, int axis) {
+ VectorOfTensors<T> all_inputs(*context, *node->inputs);
+ reference_ops::Pack<T>(RemapDim(NumDimensions(output), axis),
+ all_inputs.data(), all_inputs.dims(), values_count,
+ GetTensorData<T>(output), GetTensorDims(output));
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ const OpData* data = reinterpret_cast<OpData*>(node->builtin_data);
+
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+ switch (output->type) {
+ case kTfLiteFloat32: {
+ PackImpl<float>(context, node, output, data->values_count, data->axis);
+ break;
+ }
+ case kTfLiteInt32: {
+ PackImpl<int32_t>(context, node, output, data->values_count, data->axis);
+ break;
+ }
+ default: {
+ context->ReportError(context,
+ "Currently pack only supports int32 and float32.");
+ return kTfLiteError;
+ }
+ }
+
+ return kTfLiteOk;
+}
+
+} // namespace
+} // namespace pack
+
+TfLiteRegistration* Register_PACK() {
+ static TfLiteRegistration r = {pack::Init, pack::Free, pack::Prepare,
+ pack::Eval};
+ return &r;
+}
+
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
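
Note: `Prepare` above enforces that all inputs share the first input's shape and type (only `float32` and `int32` are handled), limits inputs to rank 3 or less, requires `axis >= 0`, and then resizes the output to rank R+1 by inserting `values_count` at the `axis` position. A tiny sketch of that shape rule (the helper name is made up for illustration):

```python
def pack_output_shape(input_shape, axis, values_count):
    # Rank-R inputs become a rank R+1 output with values_count inserted at `axis`.
    out = list(input_shape)
    out.insert(axis, values_count)
    return out

assert pack_output_shape([2], axis=0, values_count=3) == [3, 2]
assert pack_output_shape([2, 3], axis=1, values_count=2) == [2, 2, 3]
```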
diff --git a/tensorflow/contrib/lite/kernels/pack_test.cc b/tensorflow/contrib/lite/kernels/pack_test.cc
new file mode 100644
index 0000000000..cb9fed69b1
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/pack_test.cc
@@ -0,0 +1,119 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/kernels/test_util.h"
+#include "tensorflow/contrib/lite/model.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAre;
+
+template <typename T>
+class PackOpModel : public SingleOpModel {
+ public:
+ PackOpModel(const TensorData& input_template, int axis, int values_count) {
+ std::vector<std::vector<int>> all_input_shapes;
+ for (int i = 0; i < values_count; ++i) {
+ all_input_shapes.push_back(input_template.shape);
+ AddInput(input_template);
+ }
+ output_ = AddOutput({input_template.type, /*shape=*/{}, input_template.min,
+ input_template.max});
+ SetBuiltinOp(BuiltinOperator_PACK, BuiltinOptions_PackOptions,
+ CreatePackOptions(builder_, values_count, axis).Union());
+ BuildInterpreter(all_input_shapes);
+ }
+
+ void SetInput(int index, std::initializer_list<T> data) {
+ PopulateTensor(index, data);
+ }
+
+ std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+
+ private:
+ int output_;
+};
+
+TEST(PackOpTest, FloatThreeInputs) {
+ PackOpModel<float> model({TensorType_FLOAT32, {2}}, 0, 3);
+ model.SetInput(0, {1, 4});
+ model.SetInput(1, {2, 5});
+ model.SetInput(2, {3, 6});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
+ EXPECT_THAT(model.GetOutput(), ElementsAre(1, 4, 2, 5, 3, 6));
+}
+
+TEST(PackOpTest, FloatThreeInputsDifferentAxis) {
+ PackOpModel<float> model({TensorType_FLOAT32, {2}}, 1, 3);
+ model.SetInput(0, {1, 4});
+ model.SetInput(1, {2, 5});
+ model.SetInput(2, {3, 6});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
+ EXPECT_THAT(model.GetOutput(), ElementsAre(1, 2, 3, 4, 5, 6));
+}
+
+TEST(PackOpTest, FloatMultilDimensions) {
+ PackOpModel<float> model({TensorType_FLOAT32, {2, 3}}, 1, 2);
+ model.SetInput(0, {1, 2, 3, 4, 5, 6});
+ model.SetInput(1, {7, 8, 9, 10, 11, 12});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2, 3));
+ EXPECT_THAT(model.GetOutput(),
+ ElementsAre(1, 2, 3, 7, 8, 9, 4, 5, 6, 10, 11, 12));
+}
+
+TEST(PackOpTest, IntThreeInputs) {
+ PackOpModel<int32_t> model({TensorType_INT32, {2}}, 0, 3);
+ model.SetInput(0, {1, 4});
+ model.SetInput(1, {2, 5});
+ model.SetInput(2, {3, 6});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
+ EXPECT_THAT(model.GetOutput(), ElementsAre(1, 4, 2, 5, 3, 6));
+}
+
+TEST(PackOpTest, IntThreeInputsDifferentAxis) {
+ PackOpModel<int32_t> model({TensorType_INT32, {2}}, 1, 3);
+ model.SetInput(0, {1, 4});
+ model.SetInput(1, {2, 5});
+ model.SetInput(2, {3, 6});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
+ EXPECT_THAT(model.GetOutput(), ElementsAre(1, 2, 3, 4, 5, 6));
+}
+
+TEST(PackOpTest, IntMultilDimensions) {
+ PackOpModel<int32_t> model({TensorType_INT32, {2, 3}}, 1, 2);
+ model.SetInput(0, {1, 2, 3, 4, 5, 6});
+ model.SetInput(1, {7, 8, 9, 10, 11, 12});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2, 3));
+ EXPECT_THAT(model.GetOutput(),
+ ElementsAre(1, 2, 3, 7, 8, 9, 4, 5, 6, 10, 11, 12));
+}
+} // namespace
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/kernels/register.cc b/tensorflow/contrib/lite/kernels/register.cc
index f0f2757277..0b70bed308 100644
--- a/tensorflow/contrib/lite/kernels/register.cc
+++ b/tensorflow/contrib/lite/kernels/register.cc
@@ -106,6 +106,7 @@ TfLiteRegistration* Register_RSQRT();
TfLiteRegistration* Register_SHAPE();
TfLiteRegistration* Register_POW();
TfLiteRegistration* Register_FAKE_QUANT();
+TfLiteRegistration* Register_PACK();
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_RELU, Register_RELU());
@@ -195,6 +196,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
AddBuiltin(BuiltinOperator_POW, Register_POW());
AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
+ AddBuiltin(BuiltinOperator_PACK, Register_PACK());
// TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
// custom ops aren't always included by default.
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index d318591b49..ad9a7de39c 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -705,6 +705,15 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = static_cast<void*>(params);
break;
}
+ case BuiltinOperator_PACK: {
+ TfLitePackParams* params = MallocPOD<TfLitePackParams>();
+ if (auto* pack_params = op->builtin_options_as_PackOptions()) {
+ params->values_count = pack_params->values_count();
+ params->axis = pack_params->axis();
+ }
+ *builtin_data = reinterpret_cast<void*>(params);
+ break;
+ }
case BuiltinOperator_DELEGATE: {
// TODO(ycling): Revisit when supporting saving delegated models.
error_reporter->Report("DELEGATE op shouldn't exist in model.");
@@ -763,7 +772,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_TOPK_V2:
case BuiltinOperator_TRANSPOSE:
case BuiltinOperator_POW:
- case BuiltinOperator_PACK:
break;
}
return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py
index b3ccc65e85..41ece94237 100644
--- a/tensorflow/contrib/lite/testing/generate_examples.py
+++ b/tensorflow/contrib/lite/testing/generate_examples.py
@@ -2880,6 +2880,44 @@ def make_sparse_to_dense_tests(zip_path):
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+def make_pack_tests(zip_path):
+ """Make a set of tests to do stack."""
+
+ test_parameters = [{
+ "base_shape": [[3, 4, 3], [3, 4], [5]],
+ "num_tensors": [1, 2, 3, 4, 5, 6],
+ "axis": [0, 1, 2, 3],
+ "additional_shape": [1, 2, 3],
+ }]
+
+ def get_shape(parameters):
+ """Return a tweaked version of 'base_shape'."""
+ axis = parameters["axis"]
+ shape = parameters["base_shape"][:]
+ if axis < len(shape):
+ shape[axis] += parameters["additional_shape"]
+ return shape
+
+ def build_graph(parameters):
+ all_tensors = []
+ for n in range(0, parameters["num_tensors"]):
+ input_tensor = tf.placeholder(
+ dtype=tf.float32, name=("input%d" % n), shape=get_shape(parameters))
+ all_tensors.append(input_tensor)
+ out = tf.stack(all_tensors, parameters["axis"])
+ return all_tensors, [out]
+
+ def build_inputs(parameters, sess, inputs, outputs):
+ all_values = []
+ for _ in range(0, parameters["num_tensors"]):
+ input_values = create_tensor_data(np.float32, get_shape(parameters))
+ all_values.append(input_values)
+ return all_values, sess.run(
+ outputs, feed_dict=dict(zip(inputs, all_values)))
+
+ make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+
+
# Toco binary path provided by the generate rule.
bin_path = None
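
Note on the generated pack tests above: `get_shape` copies the chosen `base_shape` and, when `axis` indexes an existing dimension, widens that dimension by `additional_shape`; `tf.stack` then adds the new packing dimension on top. An illustrative parameter combination (values picked from the lists in `test_parameters`):

```python
parameters = {"base_shape": [3, 4], "num_tensors": 2,
              "axis": 1, "additional_shape": 2}

shape = parameters["base_shape"][:]
if parameters["axis"] < len(shape):
    shape[parameters["axis"]] += parameters["additional_shape"]
assert shape == [3, 6]  # two such tensors stacked on axis 1 -> output shape [3, 2, 6]
```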
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
index 3dda536ef7..9848d55c83 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
@@ -193,6 +193,14 @@ bool PropagateArrayDataTypes::Run(Model* model, std::size_t op_index) {
SetDataTypeForAllOutputs(model, op, data_type);
break;
}
+ case OperatorType::kPack: {
+ const ArrayDataType data_type = model->GetArray(op->inputs[0]).data_type;
+ for (const auto& input : op->inputs) {
+ CHECK(data_type == model->GetArray(input).data_type);
+ }
+ SetDataTypeForAllOutputs(model, op, data_type);
+ break;
+ }
default: {
// These operators produce outputs with the same type as their 1st input
CHECK_GT(op->inputs.size(), 0);
diff --git a/tensorflow/contrib/lite/toco/import_tensorflow.cc b/tensorflow/contrib/lite/toco/import_tensorflow.cc
index 8bb797fe0f..032c863945 100644
--- a/tensorflow/contrib/lite/toco/import_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/import_tensorflow.cc
@@ -1529,6 +1529,7 @@ tensorflow::Status ConvertPackOperator(
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
+ op->values_count = HasAttr(node, "N") ? GetIntAttr(node, "N") : num_inputs;
op->axis = HasAttr(node, "axis") ? GetIntAttr(node, "axis") : 0;
op->dtype = ConvertDataType(toco::GetDataTypeAttr(node, "T"));
op->outputs.push_back(node.name());
diff --git a/tensorflow/contrib/lite/toco/model.h b/tensorflow/contrib/lite/toco/model.h
index 6fe194516d..d629787939 100644
--- a/tensorflow/contrib/lite/toco/model.h
+++ b/tensorflow/contrib/lite/toco/model.h
@@ -1164,6 +1164,7 @@ struct TensorFlowRsqrtOperator : Operator {
// TensorFlow equivalent: Pack
struct PackOperator : Operator {
PackOperator() : Operator(OperatorType::kPack) {}
+ int values_count;
int axis = 0;
ArrayDataType dtype = ArrayDataType::kNone;
};
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index 1a1c4b8944..4b2ef756cc 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -1013,6 +1013,26 @@ class ExpandDims
int GetVersion(const Operator& op) const override { return 1; }
};
+class Pack : public BuiltinOperator<PackOperator, ::tflite::PackOptions,
+ ::tflite::BuiltinOptions_PackOptions> {
+ public:
+ using BuiltinOperator::BuiltinOperator;
+
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ return ::tflite::CreatePackOptions(*builder, op.values_count, op.axis);
+ }
+
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
+ op->values_count = options.values_count();
+ op->axis = options.axis();
+ }
+
+ int GetVersion(const Operator& op) const override { return 1; }
+};
+
class Shape
: public BuiltinOperator<TensorFlowShapeOperator, ::tflite::ShapeOptions,
::tflite::BuiltinOptions_ShapeOptions> {
@@ -1256,6 +1276,8 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
new Shape(::tflite::BuiltinOperator_SHAPE, OperatorType::kShape));
ops.emplace_back(new FakeQuant(::tflite::BuiltinOperator_FAKE_QUANT,
OperatorType::kFakeQuant));
+ ops.emplace_back(
+ new Pack(::tflite::BuiltinOperator_PACK, OperatorType::kPack));
// Custom Operators.
ops.emplace_back(
diff --git a/tensorflow/contrib/lite/toco/tflite/operator_test.cc b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
index ff2d35b1f5..44de6fbf64 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
@@ -452,6 +452,16 @@ TEST_F(OperatorTest, BuiltinSparseToDense) {
EXPECT_EQ(op.validate_indices, output_toco_op->validate_indices);
}
+TEST_F(OperatorTest, BuiltinPack) {
+ PackOperator op;
+ op.values_count = 3;
+ op.axis = 1;
+ std::unique_ptr<toco::PackOperator> output_toco_op =
+ SerializeAndDeserialize(GetOperator("PACK", OperatorType::kPack), op);
+ EXPECT_EQ(op.values_count, output_toco_op->values_count);
+ EXPECT_EQ(op.axis, output_toco_op->axis);
+}
+
TEST_F(OperatorTest, TensorFlowUnsupported) {
TensorFlowUnsupportedOperator op;
op.tensorflow_op = "MyCustomUnsupportedOp";