diff options
Diffstat (limited to 'tensorflow/contrib/lite/interpreter_test.cc')
-rw-r--r-- | tensorflow/contrib/lite/interpreter_test.cc | 82 |
1 file changed, 82 insertions, 0 deletions
diff --git a/tensorflow/contrib/lite/interpreter_test.cc b/tensorflow/contrib/lite/interpreter_test.cc index 6f13b43ebf..4f7fb36696 100644 --- a/tensorflow/contrib/lite/interpreter_test.cc +++ b/tensorflow/contrib/lite/interpreter_test.cc @@ -276,6 +276,57 @@ TEST(BasicInterpreter, NoOpInterpreter) { ASSERT_EQ(interpreter.Invoke(), kTfLiteOk); } +TEST(BasicInterpreter, RedundantAllocateTensors) { + Interpreter interpreter; + ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk); + ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk); + + ASSERT_EQ(interpreter.SetTensorParametersReadWrite( + 0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()), + kTfLiteOk); + + ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk); + const auto data_raw = interpreter.tensor(0)->data.raw; + ASSERT_NE(data_raw, nullptr); + + // A redundant allocation request should have no impact. + ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk); + ASSERT_EQ(interpreter.tensor(0)->data.raw, data_raw); +} + +TEST(BasicInterpreter, RedundantAllocateTensorsWithDynamicInputs) { + Interpreter interpreter; + TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr}; + ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk); + interpreter.SetInputs({0}); + interpreter.SetOutputs({1}); + interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, ®); + + ASSERT_EQ(interpreter.SetTensorParametersReadWrite( + 0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()), + kTfLiteOk); + ASSERT_EQ(interpreter.SetTensorParametersReadWrite( + 1, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()), + kTfLiteOk); + + // Configure the input tensor as dynamic. + interpreter.tensor(0)->data.raw = nullptr; + interpreter.tensor(0)->allocation_type = kTfLiteDynamic; + + ASSERT_EQ(interpreter.ResizeInputTensor(interpreter.inputs()[0], {1, 2, 3}), + kTfLiteOk); + ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk); + ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr); + + // Reset the output tensor's buffer. 
+ interpreter.tensor(1)->data.raw = nullptr; + + // A redundant allocation request should be honored, as the input tensor + // was marked dynamic. + ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk); + ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr); +} + TEST(BasicInterpreter, ResizingTensors) { Interpreter interpreter; ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk); @@ -333,6 +384,37 @@ TEST(BasicInterpreter, ResizingTensors) { tensor->data.f[15] = 0.123f; } +TEST(BasicInterpreter, NoopResizingTensors) { + Interpreter interpreter; + ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk); + ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk); + ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk); + + ASSERT_EQ(interpreter.SetTensorParametersReadWrite( + 0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()), + kTfLiteOk); + + int t = interpreter.inputs()[0]; + TfLiteTensor* tensor = interpreter.tensor(t); + + ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk); + EXPECT_EQ(tensor->bytes, 6 * sizeof(float)); + ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk); + tensor->data.f[5] = 0.123f; + + // Resizing to the same size should not trigger re-allocation. + ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk); + EXPECT_EQ(tensor->bytes, 6 * sizeof(float)); + ASSERT_NE(tensor->data.raw, nullptr); + ASSERT_EQ(tensor->data.f[5], 0.123f); + + // Explicitly allocating should be a no-op, as no resize was performed. + ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk); + EXPECT_EQ(tensor->bytes, 6 * sizeof(float)); + ASSERT_NE(tensor->data.raw, nullptr); + ASSERT_EQ(tensor->data.f[5], 0.123f); +} + TEST(BasicInterpreter, OneOpInterpreter) { Interpreter interpreter; ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk); |