/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/contrib/lite/delegates/flex/test_util.h"

#include "absl/memory/memory.h"
#include "flatbuffers/flexbuffers.h"  // TF:flatbuffers
#include "tensorflow/contrib/lite/string.h"

namespace tflite {
namespace flex {
namespace testing {

bool FlexModelTest::Invoke() { return interpreter_->Invoke() == kTfLiteOk; }

void FlexModelTest::SetShape(int tensor_index,
                             const std::vector<int>& values) {
  ASSERT_EQ(interpreter_->ResizeInputTensor(tensor_index, values), kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
}

std::vector<int> FlexModelTest::GetShape(int tensor_index) {
  std::vector<int> result;
  auto* dims = interpreter_->tensor(tensor_index)->dims;
  result.reserve(dims->size);
  for (int i = 0; i < dims->size; ++i) {
    result.push_back(dims->data[i]);
  }
  return result;
}

TfLiteType FlexModelTest::GetType(int tensor_index) {
  return interpreter_->tensor(tensor_index)->type;
}

void FlexModelTest::AddTensors(int num_tensors, const std::vector<int>& inputs,
                               const std::vector<int>& outputs, TfLiteType type,
                               const std::vector<int>& dims) {
  interpreter_->AddTensors(num_tensors);
  for (int i = 0; i < num_tensors; ++i) {
    TfLiteQuantizationParams quant;
    // Suppress explicit output type specification to ensure type inference
    // works properly.
    if (std::find(outputs.begin(), outputs.end(), i) != outputs.end()) {
      type = kTfLiteFloat32;
    }
    CHECK_EQ(interpreter_->SetTensorParametersReadWrite(i, type, /*name=*/"",
                                                        /*dims=*/dims, quant),
             kTfLiteOk);
  }

  CHECK_EQ(interpreter_->SetInputs(inputs), kTfLiteOk);
  CHECK_EQ(interpreter_->SetOutputs(outputs), kTfLiteOk);
}

void FlexModelTest::AddTfLiteMulOp(const std::vector<int>& inputs,
                                   const std::vector<int>& outputs) {
  static TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  reg.builtin_code = BuiltinOperator_MUL;
  reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    // The output takes the shape of the first input.
    auto* i0 = &context->tensors[node->inputs->data[0]];
    auto* o = &context->tensors[node->outputs->data[0]];
    return context->ResizeTensor(context, o, TfLiteIntArrayCopy(i0->dims));
  };
  reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    // Element-wise float multiplication of the two inputs.
    auto* i0 = &context->tensors[node->inputs->data[0]];
    auto* i1 = &context->tensors[node->inputs->data[1]];
    auto* o = &context->tensors[node->outputs->data[0]];
    for (int i = 0; i < o->bytes / sizeof(float); ++i) {
      o->data.f[i] = i0->data.f[i] * i1->data.f[i];
    }
    return kTfLiteOk;
  };

  CHECK_EQ(interpreter_->AddNodeWithParameters(inputs, outputs, nullptr, 0,
                                               nullptr, &reg),
           kTfLiteOk);
}

void FlexModelTest::AddTfOp(TfOpType op, const std::vector<int>& inputs,
                            const std::vector<int>& outputs) {
  auto attr = [](const string& key, const string& value) {
    return " attr{ key: '" + key + "' value {" + value + "}}";
  };

  // Crude type attribution, will need fleshing out as more tests are added.
  // TODO(b/113613439): Use nodedef string utilities to properly handle
  // all types.
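  // For example, attr("T", "type: DT_FLOAT") produces the text-format
  // fragment " attr{ key: 'T' value {type: DT_FLOAT}}"; the AddTfOp()
  // overload below appends "op: '<name>'" to such fragments and parses the
  // result into a tensorflow::NodeDef.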
  string type_attribute = attr("T", "type: DT_FLOAT");
  if (interpreter_->tensor(inputs[0])->type == kTfLiteInt32) {
    type_attribute = attr("T", "type: DT_INT32");
  }

  if (op == kUnpack) {
    string attributes =
        type_attribute + attr("num", "i: 2") + attr("axis", "i: 0");
    AddTfOp("FlexUnpack", "Unpack", attributes, inputs, outputs);
  } else if (op == kIdentity) {
    string attributes = type_attribute;
    AddTfOp("FlexIdentity", "Identity", attributes, inputs, outputs);
  } else if (op == kAdd) {
    string attributes = type_attribute;
    AddTfOp("FlexAdd", "Add", attributes, inputs, outputs);
  } else if (op == kMul) {
    string attributes = type_attribute;
    AddTfOp("FlexMul", "Mul", attributes, inputs, outputs);
  } else if (op == kNonExistent) {
    AddTfOp("NonExistentOp", "NonExistentOp", "", inputs, outputs);
  } else if (op == kIncompatibleNodeDef) {
    // The "Cast" op is created without attributes, making it incompatible.
    AddTfOp("FlexCast", "Cast", "", inputs, outputs);
  }
}

void FlexModelTest::AddTfOp(const char* tflite_name, const string& tf_name,
                            const string& nodedef_str,
                            const std::vector<int>& inputs,
                            const std::vector<int>& outputs) {
  static TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  reg.builtin_code = BuiltinOperator_CUSTOM;
  reg.custom_name = tflite_name;

  // Parse the text-format NodeDef (attributes plus op name) and serialize it.
  tensorflow::NodeDef nodedef;
  CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
      nodedef_str + " op: '" + tf_name + "'", &nodedef));
  string serialized_nodedef;
  CHECK(nodedef.SerializeToString(&serialized_nodedef));

  // Pack the op name and the serialized NodeDef into a flexbuffer, the
  // custom-op data format consumed by the flex delegate.
  flexbuffers::Builder fbb;
  fbb.Vector([&]() {
    fbb.String(nodedef.op());
    fbb.String(serialized_nodedef);
  });
  fbb.Finish();

  flexbuffers_.push_back(fbb.GetBuffer());
  auto& buffer = flexbuffers_.back();
  CHECK_EQ(interpreter_->AddNodeWithParameters(
               inputs, outputs, reinterpret_cast<const char*>(buffer.data()),
               buffer.size(), nullptr, &reg),
           kTfLiteOk);
}

}  // namespace testing
}  // namespace flex
}  // namespace tflite
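// A minimal usage sketch, assuming a FlexModelTest subclass whose setup
// builds an interpreter with the flex delegate applied (the tensor indices
// and shapes below are hypothetical, not taken from an actual test):
//
//   AddTensors(3, /*inputs=*/{0, 1}, /*outputs=*/{2}, kTfLiteFloat32, {2});
//   AddTfOp(kAdd, /*inputs=*/{0, 1}, /*outputs=*/{2});
//   SetShape(0, {2});
//   SetShape(1, {2});
//   // ...fill the input buffers, then:
//   ASSERT_TRUE(Invoke());
//   EXPECT_EQ(GetShape(2), std::vector<int>({2}));
//   EXPECT_EQ(GetType(2), kTfLiteFloat32);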