diff options
author | 2018-07-02 10:09:16 -0700 | |
---|---|---|
committer | 2018-07-02 10:09:16 -0700 | |
commit | e35d9ae50c5bb9ebc6e8e52ab937410fba2030fd (patch) | |
tree | 8fbadbee72dba3cdf4b94fa16d6878fb4e5b3fde /tensorflow/contrib/lite/testing/generate_testspec.cc | |
parent | a7b7aa856f34bf2e44fbeb91d817742c61483618 (diff) | |
parent | 28b8525b417d5b0a1d0a4905e5e3237ef5b502ef (diff) |
Merge commit for internal changes
Diffstat (limited to 'tensorflow/contrib/lite/testing/generate_testspec.cc')
-rw-r--r-- | tensorflow/contrib/lite/testing/generate_testspec.cc | 85 |
1 file changed, 52 insertions, 33 deletions
diff --git a/tensorflow/contrib/lite/testing/generate_testspec.cc b/tensorflow/contrib/lite/testing/generate_testspec.cc index c0c861ff6d..c1092e4d25 100644 --- a/tensorflow/contrib/lite/testing/generate_testspec.cc +++ b/tensorflow/contrib/lite/testing/generate_testspec.cc @@ -25,7 +25,7 @@ namespace testing { template <typename T> void GenerateCsv(const std::vector<int>& shape, float min, float max, string* out) { - auto random_float = [](int min, int max) { + auto random_float = [](float min, float max) { static unsigned int seed; return min + (max - min) * static_cast<float>(rand_r(&seed)) / RAND_MAX; }; @@ -37,16 +37,10 @@ void GenerateCsv(const std::vector<int>& shape, float min, float max, *out = Join(data.data(), data.size(), ","); } -bool GenerateTestSpecFromTensorflowModel( - std::iostream& stream, const string& tensorflow_model_path, - const string& tflite_model_path, const std::vector<string>& input_layer, +std::vector<string> GenerateInputValues( + const std::vector<string>& input_layer, const std::vector<string>& input_layer_type, - const std::vector<string>& input_layer_shape, - const std::vector<string>& output_layer) { - CHECK_EQ(input_layer.size(), input_layer_type.size()); - CHECK_EQ(input_layer.size(), input_layer_shape.size()); - - // Generate inputs. 
+ const std::vector<string>& input_layer_shape) { std::vector<string> input_values; input_values.resize(input_layer.size()); for (int i = 0; i < input_layer.size(); i++) { @@ -73,9 +67,22 @@ bool GenerateTestSpecFromTensorflowModel( default: fprintf(stderr, "Unsupported type %d (%s) when generating testspec.\n", type, input_layer_type[i].c_str()); - return false; + input_values.clear(); + return input_values; } } + return input_values; +} + +bool GenerateTestSpecFromTensorflowModel( + std::iostream& stream, const string& tensorflow_model_path, + const string& tflite_model_path, int num_invocations, + const std::vector<string>& input_layer, + const std::vector<string>& input_layer_type, + const std::vector<string>& input_layer_shape, + const std::vector<string>& output_layer) { + CHECK_EQ(input_layer.size(), input_layer_type.size()); + CHECK_EQ(input_layer.size(), input_layer_shape.size()); // Invoke tensorflow model. TfDriver runner(input_layer, input_layer_type, input_layer_shape, @@ -91,39 +98,51 @@ bool GenerateTestSpecFromTensorflowModel( return false; } - for (int i = 0; i < input_values.size(); i++) { - runner.SetInput(i, input_values[i]); - if (!runner.IsValid()) { - cerr << runner.GetErrorMessage() << endl; - return false; - } - } - - runner.Invoke(); - if (!runner.IsValid()) { - cerr << runner.GetErrorMessage() << endl; - return false; - } - - // Write test spec. + // Write first part of test spec, defining model and input shapes. stream << "load_model: " << tflite_model_path << "\n"; stream << "reshape {\n"; for (const auto& shape : input_layer_shape) { stream << " input: \"" << shape << "\"\n"; } stream << "}\n"; - stream << "invoke {\n"; - for (const auto& value : input_values) { - stream << " input: \"" << value << "\"\n"; - } - for (int i = 0; i < output_layer.size(); i++) { - stream << " output: \"" << runner.ReadOutput(i) << "\"\n"; + + // Generate inputs. 
+ for (int i = 0; i < num_invocations; ++i) { + // Note that the input values are random, so each invocation will have a + // different set. + std::vector<string> input_values = + GenerateInputValues(input_layer, input_layer_type, input_layer_shape); + if (input_values.empty()) return false; + + // Run TensorFlow. + for (int j = 0; j < input_values.size(); j++) { + runner.SetInput(j, input_values[j]); + if (!runner.IsValid()) { + cerr << runner.GetErrorMessage() << endl; + return false; + } + } + + runner.Invoke(); if (!runner.IsValid()) { cerr << runner.GetErrorMessage() << endl; return false; } + + // Write second part of test spec, with inputs and outputs. + stream << "invoke {\n"; + for (const auto& value : input_values) { + stream << " input: \"" << value << "\"\n"; + } + for (int j = 0; j < output_layer.size(); j++) { + stream << " output: \"" << runner.ReadOutput(j) << "\"\n"; + if (!runner.IsValid()) { + cerr << runner.GetErrorMessage() << endl; + return false; + } + } + stream << "}\n"; } - stream << "}\n"; return true; } |