Diffstat (limited to 'test/cpp/qps/driver.cc')
-rw-r--r--  test/cpp/qps/driver.cc | 61
1 file changed, 52 insertions(+), 9 deletions(-)
diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc
index c87ad6461d..2583ceb819 100644
--- a/test/cpp/qps/driver.cc
+++ b/test/cpp/qps/driver.cc
@@ -52,6 +52,7 @@
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/histogram.h"
#include "test/cpp/qps/qps_worker.h"
+#include "test/cpp/qps/stats.h"
using std::list;
using std::thread;
@@ -115,6 +116,47 @@ static deque<string> get_workers(const string& name) {
}
}
+// helpers for postprocess_scenario_result
+static double WallTime(ClientStats s) { return s.time_elapsed(); }
+static double SystemTime(ClientStats s) { return s.time_system(); }
+static double UserTime(ClientStats s) { return s.time_user(); }
+static double ServerWallTime(ServerStats s) { return s.time_elapsed(); }
+static double ServerSystemTime(ServerStats s) { return s.time_system(); }
+static double ServerUserTime(ServerStats s) { return s.time_user(); }
+static int Cores(int n) { return n; }
+
+// Postprocess ScenarioResult and populate result summary.
+static void postprocess_scenario_result(ScenarioResult* result) {
+ Histogram histogram;
+ histogram.MergeProto(result->latencies());
+
+ auto qps = histogram.Count() / average(result->client_stats(), WallTime);
+ auto qps_per_server_core = qps / sum(result->server_cores(), Cores);
+
+ result->mutable_summary()->set_qps(qps);
+ result->mutable_summary()->set_qps_per_server_core(qps_per_server_core);
+ result->mutable_summary()->set_latency_50(histogram.Percentile(50));
+ result->mutable_summary()->set_latency_90(histogram.Percentile(90));
+ result->mutable_summary()->set_latency_95(histogram.Percentile(95));
+ result->mutable_summary()->set_latency_99(histogram.Percentile(99));
+ result->mutable_summary()->set_latency_999(histogram.Percentile(99.9));
+
+ auto server_system_time = 100.0 *
+ sum(result->server_stats(), ServerSystemTime) /
+ sum(result->server_stats(), ServerWallTime);
+ auto server_user_time = 100.0 * sum(result->server_stats(), ServerUserTime) /
+ sum(result->server_stats(), ServerWallTime);
+ auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) /
+ sum(result->client_stats(), WallTime);
+ auto client_user_time = 100.0 * sum(result->client_stats(), UserTime) /
+ sum(result->client_stats(), WallTime);
+
+ result->mutable_summary()->set_server_system_time(server_system_time);
+ result->mutable_summary()->set_server_user_time(server_user_time);
+ result->mutable_summary()->set_client_system_time(client_system_time);
+ result->mutable_summary()->set_client_user_time(client_user_time);
+}
+
// Namespace for classes and functions used only in RunScenario
// Using this rather than local definitions to workaround gcc-4.4 limitations
// regarding using templates without linkage
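
Note: the accessor helpers added above (WallTime, SystemTime, UserTime, the Server* variants, and Cores) exist only to be passed into the sum()/average() templates provided by the newly included test/cpp/qps/stats.h. That header is not part of this diff; the following is a minimal sketch of the container-plus-functor shape implied by the call sites, not the actual header contents.

// Sketch only: sum()/average() as implied by their use in
// postprocess_scenario_result(); the real definitions live in
// test/cpp/qps/stats.h, which this diff does not show.
template <class T, class F>
double sum(const T& container, F functor) {
  double r = 0;
  for (auto v : container) r += functor(v);  // apply the accessor to each element
  return r;
}

template <class T, class F>
double average(const T& container, F functor) {
  return sum(container, functor) / container.size();
}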
@@ -343,8 +385,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
// Finish a run
std::unique_ptr<ScenarioResult> result(new ScenarioResult);
- result->client_config = result_client_config;
- result->server_config = result_server_config;
+ Histogram merged_latencies;
+
gpr_log(GPR_INFO, "Finishing clients");
for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
GPR_ASSERT(client->stream->Write(client_mark));
@@ -353,9 +395,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
GPR_ASSERT(client->stream->Read(&client_status));
const auto& stats = client_status.stats();
- result->latencies.MergeProto(stats.latencies());
- result->client_resources.emplace_back(
- stats.time_elapsed(), stats.time_user(), stats.time_system(), -1);
+ merged_latencies.MergeProto(stats.latencies());
+ result->add_client_stats()->CopyFrom(stats);
GPR_ASSERT(!client->stream->Read(&client_status));
}
for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
@@ -363,6 +404,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
}
delete[] clients;
+ merged_latencies.FillProto(result->mutable_latencies());
+
gpr_log(GPR_INFO, "Finishing servers");
for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
GPR_ASSERT(server->stream->Write(server_mark));
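
Note: the client-side hunks above replace the old client_resources vector. Each client's latency histogram is folded into a local Histogram via MergeProto(), the raw ClientStats proto is copied into the result, and once every client has reported, the merged histogram is written into result->mutable_latencies() via FillProto(). A condensed sketch of that round trip, with hypothetical names standing in for the driver's local state:

// Illustrative only: how the driver now aggregates latencies.
// 'clients_stats' and 'scenario_result' are made-up names.
static void MergeLatencies(const std::vector<ClientStats>& clients_stats,
                           ScenarioResult* scenario_result) {
  Histogram merged;
  for (const auto& stats : clients_stats) {
    merged.MergeProto(stats.latencies());  // fold one client's histogram in
  }
  merged.FillProto(scenario_result->mutable_latencies());  // store combined data
}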
@@ -370,10 +413,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
}
for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
GPR_ASSERT(server->stream->Read(&server_status));
- const auto& stats = server_status.stats();
- result->server_resources.emplace_back(
- stats.time_elapsed(), stats.time_user(), stats.time_system(),
- server_status.cores());
+ result->add_server_stats()->CopyFrom(server_status.stats());
+ result->add_server_cores(server_status.cores());
GPR_ASSERT(!server->stream->Read(&server_status));
}
for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
@@ -381,6 +422,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
}
delete[] servers;
+
+ postprocess_scenario_result(result.get());
return result;
}
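
Note: with this change RunScenario() returns a ScenarioResult whose summary is fully populated by postprocess_scenario_result(). A caller might read it back roughly as below; this is a sketch, the getters simply mirror standard protobuf naming for the set_* calls above, and ReportSummary is a made-up function name.

// Hypothetical consumer of the populated summary; getter names mirror the
// set_* calls in postprocess_scenario_result().
static void ReportSummary(const ScenarioResult& result) {
  const auto& s = result.summary();
  gpr_log(GPR_INFO, "QPS: %.1f (%.1f per server core)", s.qps(),
          s.qps_per_server_core());
  gpr_log(GPR_INFO, "Latency percentiles (50/90/95/99/99.9): %.1f %.1f %.1f %.1f %.1f",
          s.latency_50(), s.latency_90(), s.latency_95(), s.latency_99(),
          s.latency_999());
  gpr_log(GPR_INFO, "Server system/user time: %.2f%% / %.2f%%",
          s.server_system_time(), s.server_user_time());
  gpr_log(GPR_INFO, "Client system/user time: %.2f%% / %.2f%%",
          s.client_system_time(), s.client_user_time());
}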