author     Nupur Garg <nupurgarg@google.com>   2018-08-02 15:27:08 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-08-02 15:37:23 -0700
commit  a95e7b83e6ef21e66e557591a0debe0174b72cef (patch)
tree    1ada09126e50d060101859042fec418140ca3fe9
parent  daaaab25fb8aa981fdf76740763714c9ea3f2879 (diff)
Refactored TFLite Eager delegate kernel_test.cc.
PiperOrigin-RevId: 207176261
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/BUILD           |  14
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/kernel_test.cc  | 197
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/test_util.cc    | 164
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/test_util.h     |  95
4 files changed, 308 insertions(+), 162 deletions(-)
diff --git a/tensorflow/contrib/lite/delegates/eager/BUILD b/tensorflow/contrib/lite/delegates/eager/BUILD
index a28707382e..40afa0df41 100644
--- a/tensorflow/contrib/lite/delegates/eager/BUILD
+++ b/tensorflow/contrib/lite/delegates/eager/BUILD
@@ -96,10 +96,20 @@ cc_test(
deps = [
":delegate_data",
":kernel",
+ ":test_util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "test_util",
+ testonly = True,
+ srcs = ["test_util.cc"],
+ hdrs = ["test_util.h"],
+ deps = [
+ "//tensorflow/c:c_api_internal",
"//tensorflow/contrib/lite/kernels:test_util",
- "//tensorflow/contrib/lite/testing:util",
"@com_google_absl//absl/memory",
- "@com_google_googletest//:gtest",
"@flatbuffers",
],
)
diff --git a/tensorflow/contrib/lite/delegates/eager/kernel_test.cc b/tensorflow/contrib/lite/delegates/eager/kernel_test.cc
index 7d9dddef93..b7bfbb34e4 100644
--- a/tensorflow/contrib/lite/delegates/eager/kernel_test.cc
+++ b/tensorflow/contrib/lite/delegates/eager/kernel_test.cc
@@ -16,26 +16,16 @@ limitations under the License.
#include <gmock/gmock.h>
#include <gtest/gtest.h>
-#include "absl/memory/memory.h"
-#include "third_party/flatbuffers/include/flatbuffers/flexbuffers.h"
#include "tensorflow/contrib/lite/delegates/eager/delegate_data.h"
-#include "tensorflow/contrib/lite/kernels/test_util.h"
-#include "tensorflow/contrib/lite/testing/util.h"
+#include "tensorflow/contrib/lite/delegates/eager/test_util.h"
namespace tflite {
namespace eager {
namespace {
-using tensorflow::protobuf::TextFormat;
using ::testing::ContainsRegex;
using ::testing::ElementsAre;
-// We will use these are custom_names, so they need to be static.
-static const char kIdentity[] = "Identity";
-static const char kUnpack[] = "Unpack";
-static const char kAdd[] = "Add";
-static const char kMul[] = "Mul";
-
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteDelegate* delegate,
const std::vector<int>& supported_nodes) {
TfLiteIntArray* size_and_nodes =
@@ -46,39 +36,18 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteDelegate* delegate,
return kTfLiteOk;
}
-class KernelTest : public ::testing::Test {
+class KernelTest : public testing::EagerModelTest {
public:
KernelTest() {
CHECK(DelegateData::Create(&delegate_data_).ok());
interpreter_.reset(new Interpreter(&error_reporter_));
}
- bool Invoke() { return interpreter_->Invoke() == kTfLiteOk; }
-
- void SetValues(int tensor_index, const std::vector<float>& values) {
- float* v = interpreter_->typed_tensor<float>(tensor_index);
- for (float f : values) {
- *v++ = f;
- }
- }
-
- std::vector<float> GetValues(int tensor_index) {
- TfLiteTensor* o = interpreter_->tensor(tensor_index);
- return std::vector<float>(o->data.f, o->data.f + o->bytes / sizeof(float));
- }
-
- void SetShape(int tensor_index, const std::vector<int>& values) {
- ASSERT_EQ(interpreter_->ResizeInputTensor(tensor_index, values), kTfLiteOk);
- ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
- }
-
- std::vector<int> GetShape(int tensor_index) {
- std::vector<int> result;
- auto* dims = interpreter_->tensor(tensor_index)->dims;
- for (int i = 0; i < dims->size; ++i) {
- result.push_back(dims->data[i]);
- }
- return result;
+ ~KernelTest() override {
+ // The data needs to be released before the interpreter because the
+ // interpreter references the data.
+ delegate_data_.reset();
+ interpreter_.reset();
}
template <typename T>
@@ -99,112 +68,20 @@ class KernelTest : public ::testing::Test {
&delegate_, /*allow_dynamic_tensors=*/true) == kTfLiteOk);
}
- void AddOp(const char* name, const std::vector<int>& inputs,
- const std::vector<int>& outputs) {
- auto attr = [](const string& key, const string& value) {
- return " attr{ key: '" + key + "' value {" + value + "}}";
- };
-
- string attributes;
- if (name == string(kUnpack)) {
- attributes = attr("T", "type: DT_FLOAT") + attr("num", "i: 2") +
- attr("axis", "i: 0");
- } else if (name == string(kIdentity)) {
- attributes = attr("T", "type: DT_FLOAT");
- } else if (name == string(kAdd)) {
- attributes = attr("T", "type: DT_FLOAT");
- } else if (name == string(kMul)) {
- attributes = attr("T", "type: DT_FLOAT");
- }
- AddTfOp(name, attributes, inputs, outputs);
- }
-
- void AddTensors(int num_tensors, const std::vector<int>& inputs,
- const std::vector<int>& outputs) {
- interpreter_->AddTensors(num_tensors);
- for (int i = 0; i < num_tensors; ++i) {
- TfLiteQuantizationParams quant;
- CHECK_EQ(interpreter_->SetTensorParametersReadWrite(i, kTfLiteFloat32,
- /*name=*/"",
- /*dims=*/{3}, quant),
- kTfLiteOk);
- }
-
- CHECK_EQ(interpreter_->SetInputs(inputs), kTfLiteOk);
- CHECK_EQ(interpreter_->SetOutputs(outputs), kTfLiteOk);
- }
-
- const TestErrorReporter& error_reporter() const { return error_reporter_; }
-
- void AddTfLiteOp(const char* name, const std::vector<int>& inputs,
- const std::vector<int>& outputs) {
- CHECK_EQ(string(name), kMul); // can only add MUL
- static TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
- reg.builtin_code = BuiltinOperator_MUL;
- reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
- auto* i0 = &context->tensors[node->inputs->data[0]];
- auto* o = &context->tensors[node->outputs->data[0]];
- return context->ResizeTensor(context, o, TfLiteIntArrayCopy(i0->dims));
- };
- reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
- auto* i0 = &context->tensors[node->inputs->data[0]];
- auto* i1 = &context->tensors[node->inputs->data[1]];
- auto* o = &context->tensors[node->outputs->data[0]];
- for (int i = 0; i < o->bytes / sizeof(float); ++i) {
- o->data.f[i] = i0->data.f[i] * i1->data.f[i];
- }
- return kTfLiteOk;
- };
-
- CHECK_EQ(interpreter_->AddNodeWithParameters(inputs, outputs, nullptr, 0,
- nullptr, &reg),
- kTfLiteOk);
- }
-
private:
- void AddTfOp(const char* name, const string& nodedef_str,
- const std::vector<int>& inputs,
- const std::vector<int>& outputs) {
- static TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
- reg.builtin_code = BuiltinOperator_CUSTOM;
- reg.custom_name = name;
-
- tensorflow::NodeDef nodedef;
- CHECK(TextFormat::ParseFromString(nodedef_str + " op: '" + name + "'",
- &nodedef));
- string serialized_nodedef;
- CHECK(nodedef.SerializeToString(&serialized_nodedef));
- flexbuffers::Builder fbb;
- fbb.Vector([&]() {
- fbb.String(nodedef.op());
- fbb.String(serialized_nodedef);
- });
- fbb.Finish();
-
- flexbuffers_.push_back(fbb.GetBuffer());
- auto& buffer = flexbuffers_.back();
- CHECK_EQ(interpreter_->AddNodeWithParameters(
- inputs, outputs, reinterpret_cast<const char*>(buffer.data()),
- buffer.size(), nullptr, &reg),
- kTfLiteOk);
- }
-
- std::unique_ptr<Interpreter> interpreter_;
std::unique_ptr<DelegateData> delegate_data_;
TfLiteDelegate delegate_;
- std::vector<std::vector<uint8_t>> flexbuffers_;
- TestErrorReporter error_reporter_;
};
TEST_F(KernelTest, FullGraph) {
// Define the graph.
- AddTensors(9, {0, 3}, {8});
+ AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
- AddOp(kUnpack, {0}, {1, 2});
- AddOp(kUnpack, {3}, {4, 5});
- AddOp(kAdd, {1, 4}, {6});
- AddOp(kAdd, {2, 5}, {7});
- AddOp(kMul, {6, 7}, {8});
+ AddTfOp(testing::kUnpack, {0}, {1, 2});
+ AddTfOp(testing::kUnpack, {3}, {4, 5});
+ AddTfOp(testing::kAdd, {1, 4}, {6});
+ AddTfOp(testing::kAdd, {2, 5}, {7});
+ AddTfOp(testing::kMul, {6, 7}, {8});
// Apply Delegate.
ConfigureDelegate([](TfLiteContext* context, TfLiteDelegate* delegate) {
@@ -224,8 +101,8 @@ TEST_F(KernelTest, FullGraph) {
}
TEST_F(KernelTest, BadTensorFlowOp) {
- AddTensors(2, {0}, {1});
- AddOp("NonExistentOp", {0}, {1});
+ AddTensors(2, {0}, {1}, kTfLiteFloat32, {3});
+ AddTfOp(testing::kNonExistent, {0}, {1});
ConfigureDelegate([](TfLiteContext* context, TfLiteDelegate* delegate) {
return GenericPrepare(context, delegate, {0});
@@ -240,8 +117,8 @@ TEST_F(KernelTest, BadTensorFlowOp) {
}
TEST_F(KernelTest, BadNumberOfOutputs) {
- AddTensors(3, {0}, {1, 2});
- AddOp(kIdentity, {0}, {1, 2});
+ AddTensors(3, {0}, {1, 2}, kTfLiteFloat32, {3});
+ AddTfOp(testing::kIdentity, {0}, {1, 2});
ConfigureDelegate([](TfLiteContext* context, TfLiteDelegate* delegate) {
return GenericPrepare(context, delegate, {0});
@@ -256,10 +133,10 @@ TEST_F(KernelTest, BadNumberOfOutputs) {
}
TEST_F(KernelTest, IncompatibleNodeDef) {
- AddTensors(2, {0}, {1});
+ AddTensors(2, {0}, {1}, kTfLiteFloat32, {3});
- // Cast is a TF op, but we don't add the proper nodedef to it in AddOp.
- AddOp("Cast", {0}, {1});
+ // Cast is a TF op, but we don't add the proper nodedef to it in AddTfOp.
+ AddTfOp(testing::kIncompatibleNodeDef, {0}, {1});
ConfigureDelegate([](TfLiteContext* context, TfLiteDelegate* delegate) {
return GenericPrepare(context, delegate, {0});
@@ -274,11 +151,11 @@ TEST_F(KernelTest, IncompatibleNodeDef) {
}
TEST_F(KernelTest, WrongSetOfNodes) {
- AddTensors(4, {0}, {3});
- AddOp(kUnpack, {0}, {1, 2});
- AddTfLiteOp(kMul, {1, 2}, {3});
+ AddTensors(4, {0}, {3}, kTfLiteFloat32, {3});
+ AddTfOp(testing::kUnpack, {0}, {1, 2});
+ AddTfLiteMulOp({1, 2}, {3});
- // Specify that kMul (#1) is supported when it actually isn't.
+ // Specify that testing::kMul (#1) is supported when it actually isn't.
ConfigureDelegate([](TfLiteContext* context, TfLiteDelegate* delegate) {
return GenericPrepare(context, delegate, {0, 1});
});
@@ -292,13 +169,13 @@ TEST_F(KernelTest, WrongSetOfNodes) {
}
TEST_F(KernelTest, MixedGraph) {
- AddTensors(9, {0, 3}, {8});
+ AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
- AddOp(kUnpack, {0}, {1, 2});
- AddOp(kUnpack, {3}, {4, 5});
- AddOp(kAdd, {1, 4}, {6});
- AddOp(kAdd, {2, 5}, {7});
- AddTfLiteOp(kMul, {6, 7}, {8});
+ AddTfOp(testing::kUnpack, {0}, {1, 2});
+ AddTfOp(testing::kUnpack, {3}, {4, 5});
+ AddTfOp(testing::kAdd, {1, 4}, {6});
+ AddTfOp(testing::kAdd, {2, 5}, {7});
+ AddTfLiteMulOp({6, 7}, {8});
ConfigureDelegate([](TfLiteContext* context, TfLiteDelegate* delegate) {
return GenericPrepare(context, delegate, {0, 1, 2, 3});
@@ -316,16 +193,16 @@ TEST_F(KernelTest, MixedGraph) {
}
TEST_F(KernelTest, SplitGraph) {
- AddTensors(10, {0}, {9});
+ AddTensors(10, {0}, {9}, kTfLiteFloat32, {3});
- AddOp(kUnpack, {0}, {1, 2});
- AddOp(kAdd, {1, 2}, {3});
- AddOp(kUnpack, {3}, {4, 5});
+ AddTfOp(testing::kUnpack, {0}, {1, 2});
+ AddTfOp(testing::kAdd, {1, 2}, {3});
+ AddTfOp(testing::kUnpack, {3}, {4, 5});
- AddTfLiteOp(kMul, {4, 5}, {6});
+ AddTfLiteMulOp({4, 5}, {6});
- AddOp(kUnpack, {6}, {7, 8});
- AddOp(kAdd, {7, 8}, {9});
+ AddTfOp(testing::kUnpack, {6}, {7, 8});
+ AddTfOp(testing::kAdd, {7, 8}, {9});
ConfigureDelegate([](TfLiteContext* context, TfLiteDelegate* delegate) {
return GenericPrepare(context, delegate, {0, 1, 2, 4, 5});
diff --git a/tensorflow/contrib/lite/delegates/eager/test_util.cc b/tensorflow/contrib/lite/delegates/eager/test_util.cc
new file mode 100644
index 0000000000..9ea802e34b
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/test_util.cc
@@ -0,0 +1,164 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/lite/delegates/eager/test_util.h"
+
+#include "absl/memory/memory.h"
+#include "third_party/flatbuffers/include/flatbuffers/flexbuffers.h"
+
+namespace tflite {
+namespace eager {
+namespace testing {
+
+namespace {
+// These are used as custom_names in TFLite ops, so they need to be static.
+// Names need to start with "Eager" in order to work with the Eager delegate
+// requirements.
+static constexpr char kIdentityName[] = "EagerIdentity";
+static constexpr char kUnpackName[] = "EagerUnpack";
+static constexpr char kAddName[] = "EagerAdd";
+static constexpr char kMulName[] = "EagerMul";
+} // namespace
+
+bool EagerModelTest::Invoke() { return interpreter_->Invoke() == kTfLiteOk; }
+
+void EagerModelTest::SetValues(int tensor_index,
+ const std::vector<float>& values) {
+ float* v = interpreter_->typed_tensor<float>(tensor_index);
+ for (float f : values) {
+ *v++ = f;
+ }
+}
+
+std::vector<float> EagerModelTest::GetValues(int tensor_index) {
+ TfLiteTensor* o = interpreter_->tensor(tensor_index);
+ return std::vector<float>(o->data.f, o->data.f + o->bytes / sizeof(float));
+}
+
+void EagerModelTest::SetShape(int tensor_index,
+ const std::vector<int>& values) {
+ ASSERT_EQ(interpreter_->ResizeInputTensor(tensor_index, values), kTfLiteOk);
+ ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
+}
+
+std::vector<int> EagerModelTest::GetShape(int tensor_index) {
+ std::vector<int> result;
+ auto* dims = interpreter_->tensor(tensor_index)->dims;
+ result.reserve(dims->size);
+ for (int i = 0; i < dims->size; ++i) {
+ result.push_back(dims->data[i]);
+ }
+ return result;
+}
+
+void EagerModelTest::AddTensors(int num_tensors, const std::vector<int>& inputs,
+ const std::vector<int>& outputs,
+ const TfLiteType& type,
+ const std::vector<int>& dims) {
+ interpreter_->AddTensors(num_tensors);
+ for (int i = 0; i < num_tensors; ++i) {
+ TfLiteQuantizationParams quant;
+ CHECK_EQ(interpreter_->SetTensorParametersReadWrite(i, type,
+ /*name=*/"",
+ /*dims=*/dims, quant),
+ kTfLiteOk);
+ }
+
+ CHECK_EQ(interpreter_->SetInputs(inputs), kTfLiteOk);
+ CHECK_EQ(interpreter_->SetOutputs(outputs), kTfLiteOk);
+}
+
+void EagerModelTest::AddTfLiteMulOp(const std::vector<int>& inputs,
+ const std::vector<int>& outputs) {
+ static TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
+ reg.builtin_code = BuiltinOperator_MUL;
+ reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
+ auto* i0 = &context->tensors[node->inputs->data[0]];
+ auto* o = &context->tensors[node->outputs->data[0]];
+ return context->ResizeTensor(context, o, TfLiteIntArrayCopy(i0->dims));
+ };
+ reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
+ auto* i0 = &context->tensors[node->inputs->data[0]];
+ auto* i1 = &context->tensors[node->inputs->data[1]];
+ auto* o = &context->tensors[node->outputs->data[0]];
+ for (int i = 0; i < o->bytes / sizeof(float); ++i) {
+ o->data.f[i] = i0->data.f[i] * i1->data.f[i];
+ }
+ return kTfLiteOk;
+ };
+
+ CHECK_EQ(interpreter_->AddNodeWithParameters(inputs, outputs, nullptr, 0,
+ nullptr, &reg),
+ kTfLiteOk);
+}
+
+void EagerModelTest::AddTfOp(TfOpType op, const std::vector<int>& inputs,
+ const std::vector<int>& outputs) {
+ auto attr = [](const string& key, const string& value) {
+ return " attr{ key: '" + key + "' value {" + value + "}}";
+ };
+
+ if (op == kUnpack) {
+ string attributes = attr("T", "type: DT_FLOAT") + attr("num", "i: 2") +
+ attr("axis", "i: 0");
+ AddTfOp(kUnpackName, "Unpack", attributes, inputs, outputs);
+ } else if (op == kIdentity) {
+ string attributes = attr("T", "type: DT_FLOAT");
+ AddTfOp(kIdentityName, "Identity", attributes, inputs, outputs);
+ } else if (op == kAdd) {
+ string attributes = attr("T", "type: DT_FLOAT");
+ AddTfOp(kAddName, "Add", attributes, inputs, outputs);
+ } else if (op == kMul) {
+ string attributes = attr("T", "type: DT_FLOAT");
+ AddTfOp(kMulName, "Mul", attributes, inputs, outputs);
+ } else if (op == kNonExistent) {
+ AddTfOp("NonExistentOp", "NonExistentOp", "", inputs, outputs);
+ } else if (op == kIncompatibleNodeDef) {
+ // "Cast" op is created without attributes - making it incompatible.
+ AddTfOp("EagerCast", "Cast", "", inputs, outputs);
+ }
+}
+
+void EagerModelTest::AddTfOp(const char* tflite_name, const string& tf_name,
+ const string& nodedef_str,
+ const std::vector<int>& inputs,
+ const std::vector<int>& outputs) {
+ static TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
+ reg.builtin_code = BuiltinOperator_CUSTOM;
+ reg.custom_name = tflite_name;
+
+ tensorflow::NodeDef nodedef;
+ CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
+ nodedef_str + " op: '" + tf_name + "'", &nodedef));
+ string serialized_nodedef;
+ CHECK(nodedef.SerializeToString(&serialized_nodedef));
+ flexbuffers::Builder fbb;
+ fbb.Vector([&]() {
+ fbb.String(nodedef.op());
+ fbb.String(serialized_nodedef);
+ });
+ fbb.Finish();
+
+ flexbuffers_.push_back(fbb.GetBuffer());
+ auto& buffer = flexbuffers_.back();
+ CHECK_EQ(interpreter_->AddNodeWithParameters(
+ inputs, outputs, reinterpret_cast<const char*>(buffer.data()),
+ buffer.size(), nullptr, &reg),
+ kTfLiteOk);
+}
+
+} // namespace testing
+} // namespace eager
+} // namespace tflite
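
Note on the custom-options payload built by AddTfOp above: it is a two-element flexbuffer vector holding the TF op name followed by the serialized NodeDef. As a rough sketch of what a consumer (such as the Eager delegate kernel) is expected to do with that payload, a decode might look like the following; the function name DecodeTfOpPayload and the exact include set are illustrative only and not part of this commit.

#include <cstdint>
#include <string>

#include "third_party/flatbuffers/include/flatbuffers/flexbuffers.h"
#include "tensorflow/core/framework/node_def.pb.h"

// Decodes the [op_name, serialized NodeDef] pair packed by
// EagerModelTest::AddTfOp. Returns false if the payload is malformed.
bool DecodeTfOpPayload(const uint8_t* data, size_t size,
                       std::string* op_name, tensorflow::NodeDef* nodedef) {
  const auto payload = flexbuffers::GetRoot(data, size).AsVector();
  if (payload.size() != 2) return false;
  *op_name = payload[0].AsString().str();
  return nodedef->ParseFromString(payload[1].AsString().str());
}
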
diff --git a/tensorflow/contrib/lite/delegates/eager/test_util.h b/tensorflow/contrib/lite/delegates/eager/test_util.h
new file mode 100644
index 0000000000..369009715e
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/test_util.h
@@ -0,0 +1,95 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_TEST_UTIL_H_
+#define TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_TEST_UTIL_H_
+
+#include "tensorflow/c/c_api_internal.h"
+#include "tensorflow/contrib/lite/kernels/test_util.h"
+
+namespace tflite {
+namespace eager {
+namespace testing {
+
+enum TfOpType {
+ kUnpack,
+ kIdentity,
+ kAdd,
+ kMul,
+ // Represents an op that does not exist in TensorFlow.
+ kNonExistent,
+ // Represents a valid TensorFlow op where the NodeDef is incompatible.
+ kIncompatibleNodeDef,
+};
+
+// This class creates models with TF and TFLite ops. In order to use this class
+// to test the Eager delegate, implement a function that calls
+// interpreter->ModifyGraphWithDelegate.
+class EagerModelTest : public ::testing::Test {
+ public:
+ EagerModelTest() {}
+ ~EagerModelTest() {}
+
+ bool Invoke();
+
+ // Sets the tensor's values at the given index.
+ void SetValues(int tensor_index, const std::vector<float>& values);
+
+ // Returns the tensor's values at the given index.
+ std::vector<float> GetValues(int tensor_index);
+
+ // Sets the tensor's shape at the given index.
+ void SetShape(int tensor_index, const std::vector<int>& values);
+
+ // Returns the tensor's shape at the given index.
+ std::vector<int> GetShape(int tensor_index);
+
+ const TestErrorReporter& error_reporter() const { return error_reporter_; }
+
+ // Adds `num_tensors` tensors to the model. `inputs` contains the indices of
+ // the input tensors and `outputs` contains the indices of the output
+ // tensors. All tensors are set to have `type` and `dims`.
+ void AddTensors(int num_tensors, const std::vector<int>& inputs,
+ const std::vector<int>& outputs, const TfLiteType& type,
+ const std::vector<int>& dims);
+
+ // Adds a TFLite Mul op. `inputs` contains the indices of the input tensors
+ // and `outputs` contains the indices of the output tensors.
+ void AddTfLiteMulOp(const std::vector<int>& inputs,
+ const std::vector<int>& outputs);
+
+ // Adds a TensorFlow op. `inputs` contains the indices of the
+ // input tensors and `outputs` contains the indices of the output tensors.
+ // This function is limited to the set of ops defined in TfOpType.
+ void AddTfOp(TfOpType op, const std::vector<int>& inputs,
+ const std::vector<int>& outputs);
+
+ protected:
+ std::unique_ptr<Interpreter> interpreter_;
+ TestErrorReporter error_reporter_;
+
+ private:
+ void AddTfOp(const char* tflite_name, const string& tf_name,
+ const string& nodedef_str, const std::vector<int>& inputs,
+ const std::vector<int>& outputs);
+
+ std::vector<std::vector<uint8_t>> flexbuffers_;
+};
+
+} // namespace testing
+} // namespace eager
+} // namespace tflite
+
+#endif // TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_TEST_UTIL_H_
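
Usage illustration (not part of the commit): because AddTfLiteMulOp registers a self-contained prepare/invoke pair, a test that exercises only the TFLite Mul helper can run without applying the Eager delegate at all. The fixture and test names below are hypothetical; delegate-backed tests additionally need the ModifyGraphWithDelegate plumbing shown in KernelTest above.

#include <vector>

#include <gtest/gtest.h>
#include "tensorflow/contrib/lite/delegates/eager/test_util.h"

namespace tflite {
namespace eager {
namespace {

// Hypothetical fixture: EagerModelTest leaves interpreter_ unset, so a
// subclass creates it, mirroring KernelTest in kernel_test.cc.
class MulOnlyModelTest : public testing::EagerModelTest {
 public:
  MulOnlyModelTest() { interpreter_.reset(new Interpreter(&error_reporter_)); }
};

TEST_F(MulOnlyModelTest, MulWithoutDelegate) {
  // Tensors 0 and 1 are inputs, tensor 2 is the output; all float32.
  AddTensors(3, /*inputs=*/{0, 1}, /*outputs=*/{2}, kTfLiteFloat32, {3});
  AddTfLiteMulOp(/*inputs=*/{0, 1}, /*outputs=*/{2});

  // SetShape resizes the input tensors and (re)allocates all tensors.
  SetShape(0, {3});
  SetShape(1, {3});
  SetValues(0, {1.0f, 2.0f, 3.0f});
  SetValues(1, {4.0f, 5.0f, 6.0f});

  ASSERT_TRUE(Invoke());
  EXPECT_EQ(GetValues(2), std::vector<float>({4.0f, 10.0f, 18.0f}));
}

}  // namespace
}  // namespace eager
}  // namespace tflite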