about summary refs log tree commit diff homepage
path: root/tensorflow/contrib/lite/testing/generate_testspec.cc
diff options
context:
space:
mode:
Diffstat (limited to 'tensorflow/contrib/lite/testing/generate_testspec.cc')
-rw-r--r-- tensorflow/contrib/lite/testing/generate_testspec.cc | 93
1 file changed, 57 insertions, 36 deletions
diff --git a/tensorflow/contrib/lite/testing/generate_testspec.cc b/tensorflow/contrib/lite/testing/generate_testspec.cc
index c0c861ff6d..f29c188e6c 100644
--- a/tensorflow/contrib/lite/testing/generate_testspec.cc
+++ b/tensorflow/contrib/lite/testing/generate_testspec.cc
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include <iostream>
+
#include "tensorflow/contrib/lite/testing/generate_testspec.h"
#include "tensorflow/contrib/lite/testing/join.h"
#include "tensorflow/contrib/lite/testing/split.h"
@@ -25,7 +27,7 @@ namespace testing {
template <typename T>
void GenerateCsv(const std::vector<int>& shape, float min, float max,
string* out) {
- auto random_float = [](int min, int max) {
+ auto random_float = [](float min, float max) {
static unsigned int seed;
return min + (max - min) * static_cast<float>(rand_r(&seed)) / RAND_MAX;
};
@@ -37,16 +39,10 @@ void GenerateCsv(const std::vector<int>& shape, float min, float max,
*out = Join(data.data(), data.size(), ",");
}
-bool GenerateTestSpecFromTensorflowModel(
- std::iostream& stream, const string& tensorflow_model_path,
- const string& tflite_model_path, const std::vector<string>& input_layer,
+std::vector<string> GenerateInputValues(
+ const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
- const std::vector<string>& input_layer_shape,
- const std::vector<string>& output_layer) {
- CHECK_EQ(input_layer.size(), input_layer_type.size());
- CHECK_EQ(input_layer.size(), input_layer_shape.size());
-
- // Generate inputs.
+ const std::vector<string>& input_layer_shape) {
std::vector<string> input_values;
input_values.resize(input_layer.size());
for (int i = 0; i < input_layer.size(); i++) {
@@ -73,57 +69,82 @@ bool GenerateTestSpecFromTensorflowModel(
default:
fprintf(stderr, "Unsupported type %d (%s) when generating testspec.\n",
type, input_layer_type[i].c_str());
- return false;
+ input_values.clear();
+ return input_values;
}
}
+ return input_values;
+}
+
+bool GenerateTestSpecFromTensorflowModel(
+ std::iostream& stream, const string& tensorflow_model_path,
+ const string& tflite_model_path, int num_invocations,
+ const std::vector<string>& input_layer,
+ const std::vector<string>& input_layer_type,
+ const std::vector<string>& input_layer_shape,
+ const std::vector<string>& output_layer) {
+ CHECK_EQ(input_layer.size(), input_layer_type.size());
+ CHECK_EQ(input_layer.size(), input_layer_shape.size());
// Invoke tensorflow model.
TfDriver runner(input_layer, input_layer_type, input_layer_shape,
output_layer);
if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
+ std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
runner.LoadModel(tensorflow_model_path);
if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
+ std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
- for (int i = 0; i < input_values.size(); i++) {
- runner.SetInput(i, input_values[i]);
- if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
- return false;
- }
- }
-
- runner.Invoke();
- if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
- return false;
- }
-
- // Write test spec.
+ // Write first part of test spec, defining model and input shapes.
stream << "load_model: " << tflite_model_path << "\n";
stream << "reshape {\n";
for (const auto& shape : input_layer_shape) {
stream << " input: \"" << shape << "\"\n";
}
stream << "}\n";
- stream << "invoke {\n";
- for (const auto& value : input_values) {
- stream << " input: \"" << value << "\"\n";
- }
- for (int i = 0; i < output_layer.size(); i++) {
- stream << " output: \"" << runner.ReadOutput(i) << "\"\n";
+
+ // Generate inputs.
+ for (int i = 0; i < num_invocations; ++i) {
+ // Note that the input values are random, so each invocation will have a
+ // different set.
+ std::vector<string> input_values =
+ GenerateInputValues(input_layer, input_layer_type, input_layer_shape);
+ if (input_values.empty()) return false;
+
+ // Run TensorFlow.
+ for (int j = 0; j < input_values.size(); j++) {
+ runner.SetInput(j, input_values[j]);
+ if (!runner.IsValid()) {
+ std::cerr << runner.GetErrorMessage() << std::endl;
+ return false;
+ }
+ }
+
+ runner.Invoke();
if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
+ std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
+
+ // Write second part of test spec, with inputs and outputs.
+ stream << "invoke {\n";
+ for (const auto& value : input_values) {
+ stream << " input: \"" << value << "\"\n";
+ }
+ for (int j = 0; j < output_layer.size(); j++) {
+ stream << " output: \"" << runner.ReadOutput(j) << "\"\n";
+ if (!runner.IsValid()) {
+ std::cerr << runner.GetErrorMessage() << std::endl;
+ return false;
+ }
+ }
+ stream << "}\n";
}
- stream << "}\n";
return true;
}