author     yang-g <yangg@google.com>   2016-04-20 16:38:26 -0700
committer  yang-g <yangg@google.com>   2016-04-20 16:38:26 -0700
commit     e2914023068da2ded0f825cd1790b1c70f14e0a5 (patch)
tree       abbade79a6e2b15dbe3748129d80ab3c18b19b9d /test/cpp
parent     25df28ef75ba99e5d16743be7310c2920ddd8a32 (diff)
parent     2aec20120020741ee64fcd22042c2e56d4cf0a5b (diff)
Merge remote-tracking branch 'upstream/master' into proto_comments
Diffstat (limited to 'test/cpp')
-rw-r--r--  test/cpp/qps/client.h                  |  14
-rw-r--r--  test/cpp/qps/driver.cc                 |  61
-rw-r--r--  test/cpp/qps/driver.h                  |  23
-rw-r--r--  test/cpp/qps/interarrival.h            |  56
-rw-r--r--  test/cpp/qps/qps_driver.cc             |  17
-rw-r--r--  test/cpp/qps/qps_interarrival_test.cc  |   6
-rw-r--r--  test/cpp/qps/qps_json_driver.cc        |   6
-rw-r--r--  test/cpp/qps/report.cc                 | 117
-rw-r--r--  test/cpp/qps/report.h                  |  26
-rw-r--r--  test/cpp/util/benchmark_config.cc      |   9
10 files changed, 106 insertions, 229 deletions
diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h
index e958141d4e..5a9027a4a2 100644
--- a/test/cpp/qps/client.h
+++ b/test/cpp/qps/client.h
@@ -173,20 +173,6 @@ class Client {
         random_dist.reset(
             new ExpDist(load.poisson().offered_load() / num_threads));
         break;
-      case LoadParams::kUniform:
-        random_dist.reset(
-            new UniformDist(load.uniform().interarrival_lo() * num_threads,
-                            load.uniform().interarrival_hi() * num_threads));
-        break;
-      case LoadParams::kDeterm:
-        random_dist.reset(
-            new DetDist(num_threads / load.determ().offered_load()));
-        break;
-      case LoadParams::kPareto:
-        random_dist.reset(
-            new ParetoDist(load.pareto().interarrival_base() * num_threads,
-                           load.pareto().alpha()));
-        break;
       default:
         GPR_ASSERT(false);
     }
diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc
index c87ad6461d..2583ceb819 100644
--- a/test/cpp/qps/driver.cc
+++ b/test/cpp/qps/driver.cc
@@ -52,6 +52,7 @@
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/histogram.h"
 #include "test/cpp/qps/qps_worker.h"
+#include "test/cpp/qps/stats.h"
 
 using std::list;
 using std::thread;
@@ -115,6 +116,47 @@ static deque<string> get_workers(const string& name) {
   }
 }
 
+// helpers for postprocess_scenario_result
+static double WallTime(ClientStats s) { return s.time_elapsed(); }
+static double SystemTime(ClientStats s) { return s.time_system(); }
+static double UserTime(ClientStats s) { return s.time_user(); }
+static double ServerWallTime(ServerStats s) { return s.time_elapsed(); }
+static double ServerSystemTime(ServerStats s) { return s.time_system(); }
+static double ServerUserTime(ServerStats s) { return s.time_user(); }
+static int Cores(int n) { return n; }
+
+// Postprocess ScenarioResult and populate result summary.
+static void postprocess_scenario_result(ScenarioResult* result) {
+  Histogram histogram;
+  histogram.MergeProto(result->latencies());
+
+  auto qps = histogram.Count() / average(result->client_stats(), WallTime);
+  auto qps_per_server_core = qps / sum(result->server_cores(), Cores);
+
+  result->mutable_summary()->set_qps(qps);
+  result->mutable_summary()->set_qps_per_server_core(qps_per_server_core);
+  result->mutable_summary()->set_latency_50(histogram.Percentile(50));
+  result->mutable_summary()->set_latency_90(histogram.Percentile(90));
+  result->mutable_summary()->set_latency_95(histogram.Percentile(95));
+  result->mutable_summary()->set_latency_99(histogram.Percentile(99));
+  result->mutable_summary()->set_latency_999(histogram.Percentile(99.9));
+
+  auto server_system_time = 100.0 *
+                            sum(result->server_stats(), ServerSystemTime) /
+                            sum(result->server_stats(), ServerWallTime);
+  auto server_user_time = 100.0 * sum(result->server_stats(), ServerUserTime) /
+                          sum(result->server_stats(), ServerWallTime);
+  auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) /
+                            sum(result->client_stats(), WallTime);
+  auto client_user_time = 100.0 * sum(result->client_stats(), UserTime) /
+                          sum(result->client_stats(), WallTime);
+
+  result->mutable_summary()->set_server_system_time(server_system_time);
+  result->mutable_summary()->set_server_user_time(server_user_time);
+  result->mutable_summary()->set_client_system_time(client_system_time);
+  result->mutable_summary()->set_client_user_time(client_user_time);
+}
+
 // Namespace for classes and functions used only in RunScenario
 // Using this rather than local definitions to workaround gcc-4.4 limitations
 // regarding using templates without linkage
@@ -343,8 +385,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
 
   // Finish a run
   std::unique_ptr<ScenarioResult> result(new ScenarioResult);
-  result->client_config = result_client_config;
-  result->server_config = result_server_config;
+  Histogram merged_latencies;
+
   gpr_log(GPR_INFO, "Finishing clients");
   for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
     GPR_ASSERT(client->stream->Write(client_mark));
@@ -353,9 +395,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
   for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
     GPR_ASSERT(client->stream->Read(&client_status));
     const auto& stats = client_status.stats();
-    result->latencies.MergeProto(stats.latencies());
-    result->client_resources.emplace_back(
-        stats.time_elapsed(), stats.time_user(), stats.time_system(), -1);
+    merged_latencies.MergeProto(stats.latencies());
+    result->add_client_stats()->CopyFrom(stats);
     GPR_ASSERT(!client->stream->Read(&client_status));
   }
   for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
@@ -363,6 +404,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
   }
   delete[] clients;
 
+  merged_latencies.FillProto(result->mutable_latencies());
+
   gpr_log(GPR_INFO, "Finishing servers");
   for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
     GPR_ASSERT(server->stream->Write(server_mark));
@@ -370,10 +413,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
   }
   for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
     GPR_ASSERT(server->stream->Read(&server_status));
-    const auto& stats = server_status.stats();
-    result->server_resources.emplace_back(
-        stats.time_elapsed(), stats.time_user(), stats.time_system(),
-        server_status.cores());
+    result->add_server_stats()->CopyFrom(server_status.stats());
+    result->add_server_cores(server_status.cores());
     GPR_ASSERT(!server->stream->Read(&server_status));
   }
   for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
@@ -381,6 +422,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
   }
   delete[] servers;
+
+  postprocess_scenario_result(result.get());
   return result;
 }
diff --git a/test/cpp/qps/driver.h b/test/cpp/qps/driver.h
index 21e51529d5..3a5cf138f1 100644
--- a/test/cpp/qps/driver.h
+++ b/test/cpp/qps/driver.h
@@ -41,29 +41,6 @@
 namespace grpc {
 namespace testing {
 
-class ResourceUsage {
- public:
-  ResourceUsage(double w, double u, double s, int c)
-      : wall_time_(w), user_time_(u), system_time_(s), cores_(c) {}
-  double wall_time() const { return wall_time_; }
-  double user_time() const { return user_time_; }
-  double system_time() const { return system_time_; }
-  int cores() const { return cores_; }
-
- private:
-  double wall_time_;
-  double user_time_;
-  double system_time_;
-  int cores_;
-};
-
-struct ScenarioResult {
-  Histogram latencies;
-  std::vector<ResourceUsage> client_resources;
-  std::vector<ResourceUsage> server_resources;
-  ClientConfig client_config;
-  ServerConfig server_config;
-};
 
 std::unique_ptr<ScenarioResult> RunScenario(
     const grpc::testing::ClientConfig& client_config, size_t num_clients,
diff --git a/test/cpp/qps/interarrival.h b/test/cpp/qps/interarrival.h
index 0cc78533ce..0980d5e8ba 100644
--- a/test/cpp/qps/interarrival.h
+++ b/test/cpp/qps/interarrival.h
@@ -82,62 +82,6 @@ class ExpDist GRPC_FINAL : public RandomDistInterface {
   double lambda_recip_;
 };
 
-// UniformDist implements a random distribution that has
-// interarrival time uniformly spread between [lo,hi). The
-// mean interarrival time is (lo+hi)/2. For more information,
-// see http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29
-
-class UniformDist GRPC_FINAL : public RandomDistInterface {
- public:
-  UniformDist(double lo, double hi) : lo_(lo), range_(hi - lo) {}
-  ~UniformDist() GRPC_OVERRIDE {}
-  double transform(double uni) const GRPC_OVERRIDE {
-    return uni * range_ + lo_;
-  }
-
- private:
-  double lo_;
-  double range_;
-};
-
-// DetDist provides a random distribution with interarrival time
-// of val. Note that this is not additive, so using this on multiple
-// flows of control (threads within the same client or separate
-// clients) will not preserve any deterministic interarrival gap across
-// requests.
-
-class DetDist GRPC_FINAL : public RandomDistInterface {
- public:
-  explicit DetDist(double val) : val_(val) {}
-  ~DetDist() GRPC_OVERRIDE {}
-  double transform(double uni) const GRPC_OVERRIDE { return val_; }
-
- private:
-  double val_;
-};
-
-// ParetoDist provides a random distribution with interarrival time
-// spread according to a Pareto (heavy-tailed) distribution. In this
-// model, many interarrival times are close to the base, but a sufficient
-// number will be high (up to infinity) as to disturb the mean. It is a
-// good representation of the response times of data center jobs. See
-// http://en.wikipedia.org/wiki/Pareto_distribution
-
-class ParetoDist GRPC_FINAL : public RandomDistInterface {
- public:
-  ParetoDist(double base, double alpha)
-      : base_(base), alpha_recip_(1.0 / alpha) {}
-  ~ParetoDist() GRPC_OVERRIDE {}
-  double transform(double uni) const GRPC_OVERRIDE {
-    // Note: Use 1.0-uni above to avoid div by zero if uni is 0
-    return base_ / pow(1.0 - uni, alpha_recip_);
-  }
-
- private:
-  double base_;
-  double alpha_recip_;
-};
-
 // A class library for generating pseudo-random interarrival times
 // in an efficient re-entrant way. The random table is built at construction
 // time, and each call must include the thread id of the invoker
diff --git a/test/cpp/qps/qps_driver.cc b/test/cpp/qps/qps_driver.cc
index e412c6919a..e4683e475f 100644
--- a/test/cpp/qps/qps_driver.cc
+++ b/test/cpp/qps/qps_driver.cc
@@ -68,11 +68,6 @@ DEFINE_string(client_type, "SYNC_CLIENT", "Client type");
 DEFINE_int32(async_client_threads, 1, "Async client threads");
 
 DEFINE_double(poisson_load, -1.0, "Poisson offered load (qps)");
-DEFINE_double(uniform_lo, -1.0, "Uniform low interarrival time (us)");
-DEFINE_double(uniform_hi, -1.0, "Uniform high interarrival time (us)");
-DEFINE_double(determ_load, -1.0, "Deterministic offered load (qps)");
-DEFINE_double(pareto_base, -1.0, "Pareto base interarrival time (us)");
-DEFINE_double(pareto_alpha, -1.0, "Pareto alpha value");
 
 DEFINE_int32(client_core_limit, -1, "Limit on client cores to use");
 
@@ -85,7 +80,6 @@ using grpc::testing::ServerConfig;
 using grpc::testing::ClientType;
 using grpc::testing::ServerType;
 using grpc::testing::RpcType;
-using grpc::testing::ResourceUsage;
 using grpc::testing::SecurityParams;
 
 namespace grpc {
@@ -138,17 +132,6 @@ static void QpsDriver() {
   if (FLAGS_poisson_load > 0.0) {
     auto poisson = client_config.mutable_load_params()->mutable_poisson();
     poisson->set_offered_load(FLAGS_poisson_load);
-  } else if (FLAGS_uniform_lo > 0.0) {
-    auto uniform = client_config.mutable_load_params()->mutable_uniform();
-    uniform->set_interarrival_lo(FLAGS_uniform_lo / 1e6);
-    uniform->set_interarrival_hi(FLAGS_uniform_hi / 1e6);
-  } else if (FLAGS_determ_load > 0.0) {
-    auto determ = client_config.mutable_load_params()->mutable_determ();
-    determ->set_offered_load(FLAGS_determ_load);
-  } else if (FLAGS_pareto_base > 0.0) {
-    auto pareto = client_config.mutable_load_params()->mutable_pareto();
-    pareto->set_interarrival_base(FLAGS_pareto_base / 1e6);
-    pareto->set_alpha(FLAGS_pareto_alpha);
   } else {
     client_config.mutable_load_params()->mutable_closed_loop();
     // No further load parameters to set up for closed loop
diff --git a/test/cpp/qps/qps_interarrival_test.cc b/test/cpp/qps/qps_interarrival_test.cc
index 48585af756..4055c8a718 100644
--- a/test/cpp/qps/qps_interarrival_test.cc
+++ b/test/cpp/qps/qps_interarrival_test.cc
@@ -63,14 +63,8 @@ static void RunTest(RandomDistInterface &&r, int threads, std::string title) {
 }
 
 using grpc::testing::ExpDist;
-using grpc::testing::DetDist;
-using grpc::testing::UniformDist;
-using grpc::testing::ParetoDist;
 
 int main(int argc, char **argv) {
   RunTest(ExpDist(10.0), 5, std::string("Exponential(10)"));
-  RunTest(DetDist(5.0), 5, std::string("Det(5)"));
-  RunTest(UniformDist(0.0, 10.0), 5, std::string("Uniform(0,10)"));
-  RunTest(ParetoDist(1.0, 1.0), 5, std::string("Pareto(1,1)"));
   return 0;
 }
diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc
index 8943a43ba8..e9266a5711 100644
--- a/test/cpp/qps/qps_json_driver.cc
+++ b/test/cpp/qps/qps_json_driver.cc
@@ -102,12 +102,16 @@ static void QpsDriver() {
   for (int i = 0; i < scenarios.scenarios_size(); i++) {
     const Scenario &scenario = scenarios.scenarios(i);
     std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n";
-    const auto result =
+    auto result =
         RunScenario(scenario.client_config(), scenario.num_clients(),
                     scenario.server_config(), scenario.num_servers(),
                     scenario.warmup_seconds(), scenario.benchmark_seconds(),
                     scenario.spawn_local_worker_count());
+    // Amend the result with scenario config. Eventually we should adjust
+    // RunScenario contract so we don't need to touch the result here.
+    result->mutable_scenario()->CopyFrom(scenario);
+
     GetReporter()->ReportQPS(*result);
     GetReporter()->ReportQPSPerCore(*result);
     GetReporter()->ReportLatency(*result);
diff --git a/test/cpp/qps/report.cc b/test/cpp/qps/report.cc
index b230eb441e..3ae41399cf 100644
--- a/test/cpp/qps/report.cc
+++ b/test/cpp/qps/report.cc
@@ -33,6 +33,11 @@
 
 #include "test/cpp/qps/report.h"
 
+#include <fstream>
+
+#include <google/protobuf/util/json_util.h>
+#include <google/protobuf/util/type_resolver_util.h>
+
 #include <grpc/support/log.h>
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/stats.h"
@@ -40,11 +45,6 @@
 namespace grpc {
 namespace testing {
 
-static double WallTime(ResourceUsage u) { return u.wall_time(); }
-static double UserTime(ResourceUsage u) { return u.user_time(); }
-static double SystemTime(ResourceUsage u) { return u.system_time(); }
-static int Cores(ResourceUsage u) { return u.cores(); }
-
 void CompositeReporter::add(std::unique_ptr<Reporter> reporter) {
   reporters_.emplace_back(std::move(reporter));
 }
@@ -74,102 +74,63 @@ void CompositeReporter::ReportTimes(const ScenarioResult& result) {
 }
 
 void GprLogReporter::ReportQPS(const ScenarioResult& result) {
-  gpr_log(
-      GPR_INFO, "QPS: %.1f",
-      result.latencies.Count() / average(result.client_resources, WallTime));
+  gpr_log(GPR_INFO, "QPS: %.1f", result.summary().qps());
 }
 
 void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) {
-  auto qps =
-      result.latencies.Count() / average(result.client_resources, WallTime);
-
-  gpr_log(GPR_INFO, "QPS: %.1f (%.1f/server core)", qps,
-          qps / sum(result.server_resources, Cores));
+  gpr_log(GPR_INFO, "QPS: %.1f (%.1f/server core)", result.summary().qps(),
+          result.summary().qps_per_server_core());
 }
 
 void GprLogReporter::ReportLatency(const ScenarioResult& result) {
   gpr_log(GPR_INFO,
           "Latencies (50/90/95/99/99.9%%-ile): %.1f/%.1f/%.1f/%.1f/%.1f us",
-          result.latencies.Percentile(50) / 1000,
-          result.latencies.Percentile(90) / 1000,
-          result.latencies.Percentile(95) / 1000,
-          result.latencies.Percentile(99) / 1000,
-          result.latencies.Percentile(99.9) / 1000);
+          result.summary().latency_50() / 1000,
+          result.summary().latency_90() / 1000,
+          result.summary().latency_95() / 1000,
+          result.summary().latency_99() / 1000,
+          result.summary().latency_999() / 1000);
 }
 
 void GprLogReporter::ReportTimes(const ScenarioResult& result) {
   gpr_log(GPR_INFO, "Server system time: %.2f%%",
-          100.0 * sum(result.server_resources, SystemTime) /
-              sum(result.server_resources, WallTime));
+          result.summary().server_system_time());
   gpr_log(GPR_INFO, "Server user time: %.2f%%",
-          100.0 * sum(result.server_resources, UserTime) /
-              sum(result.server_resources, WallTime));
+          result.summary().server_user_time());
   gpr_log(GPR_INFO, "Client system time: %.2f%%",
-          100.0 * sum(result.client_resources, SystemTime) /
-              sum(result.client_resources, WallTime));
+          result.summary().client_system_time());
   gpr_log(GPR_INFO, "Client user time: %.2f%%",
-          100.0 * sum(result.client_resources, UserTime) /
-              sum(result.client_resources, WallTime));
+          result.summary().client_user_time());
 }
 
-void PerfDbReporter::ReportQPS(const ScenarioResult& result) {
-  auto qps =
-      result.latencies.Count() / average(result.client_resources, WallTime);
-
-  perf_db_client_.setQps(qps);
-  perf_db_client_.setConfigs(result.client_config, result.server_config);
+void JsonReporter::ReportQPS(const ScenarioResult& result) {
+  std::unique_ptr<google::protobuf::util::TypeResolver> type_resolver(
+      google::protobuf::util::NewTypeResolverForDescriptorPool(
+          "type.googleapis.com",
+          google::protobuf::DescriptorPool::generated_pool()));
+  grpc::string binary;
+  grpc::string json_string;
+  result.SerializeToString(&binary);
+  auto status = BinaryToJsonString(
+      type_resolver.get(), "type.googleapis.com/grpc.testing.ScenarioResult",
+      binary, &json_string);
+  GPR_ASSERT(status.ok());
+
+  std::ofstream output_file(report_file_);
+  output_file << json_string;
+  output_file.close();
 }
 
-void PerfDbReporter::ReportQPSPerCore(const ScenarioResult& result) {
-  auto qps =
-      result.latencies.Count() / average(result.client_resources, WallTime);
-
-  auto qps_per_core = qps / sum(result.server_resources, Cores);
-
-  perf_db_client_.setQps(qps);
-  perf_db_client_.setQpsPerCore(qps_per_core);
-  perf_db_client_.setConfigs(result.client_config, result.server_config);
-}
-
-void PerfDbReporter::ReportLatency(const ScenarioResult& result) {
-  perf_db_client_.setLatencies(result.latencies.Percentile(50) / 1000,
-                               result.latencies.Percentile(90) / 1000,
-                               result.latencies.Percentile(95) / 1000,
-                               result.latencies.Percentile(99) / 1000,
-                               result.latencies.Percentile(99.9) / 1000);
-  perf_db_client_.setConfigs(result.client_config, result.server_config);
+void JsonReporter::ReportQPSPerCore(const ScenarioResult& result) {
+  // NOP - all reporting is handled by ReportQPS.
 }
 
-void PerfDbReporter::ReportTimes(const ScenarioResult& result) {
-  const double server_system_time = 100.0 *
-                                    sum(result.server_resources, SystemTime) /
-                                    sum(result.server_resources, WallTime);
-  const double server_user_time = 100.0 *
-                                  sum(result.server_resources, UserTime) /
-                                  sum(result.server_resources, WallTime);
-  const double client_system_time = 100.0 *
-                                    sum(result.client_resources, SystemTime) /
-                                    sum(result.client_resources, WallTime);
-  const double client_user_time = 100.0 *
-                                  sum(result.client_resources, UserTime) /
-                                  sum(result.client_resources, WallTime);
-
-  perf_db_client_.setTimes(server_system_time, server_user_time,
-                           client_system_time, client_user_time);
-  perf_db_client_.setConfigs(result.client_config, result.server_config);
+void JsonReporter::ReportLatency(const ScenarioResult& result) {
+  // NOP - all reporting is handled by ReportQPS.
 }
 
-void PerfDbReporter::SendData() {
-  // send data to performance database
-  bool data_state =
-      perf_db_client_.sendData(hashed_id_, test_name_, sys_info_, tag_);
-
-  // check state of data sending
-  if (data_state) {
-    gpr_log(GPR_INFO, "Data sent to performance database successfully");
-  } else {
-    gpr_log(GPR_INFO, "Data could not be sent to performance database");
-  }
+void JsonReporter::ReportTimes(const ScenarioResult& result) {
+  // NOP - all reporting is handled by ReportQPS.
 }
 
 }  // namespace testing
diff --git a/test/cpp/qps/report.h b/test/cpp/qps/report.h
index 5caf3fe69a..8f04d84124 100644
--- a/test/cpp/qps/report.h
+++ b/test/cpp/qps/report.h
@@ -104,33 +104,19 @@ class GprLogReporter : public Reporter {
   void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE;
 };
 
-/** Reporter for performance database tool */
-class PerfDbReporter : public Reporter {
+/** Dumps the report to a JSON file. */
+class JsonReporter : public Reporter {
  public:
-  PerfDbReporter(const string& name, const string& hashed_id,
-                 const string& test_name, const string& sys_info,
-                 const string& server_address, const string& tag)
-      : Reporter(name),
-        hashed_id_(hashed_id),
-        test_name_(test_name),
-        sys_info_(sys_info),
-        tag_(tag) {
-    perf_db_client_.init(grpc::CreateChannel(
-        server_address, grpc::InsecureChannelCredentials()));
-  }
-  ~PerfDbReporter() GRPC_OVERRIDE { SendData(); };
+  JsonReporter(const string& name, const string& report_file)
+      : Reporter(name), report_file_(report_file) {}
 
  private:
-  PerfDbClient perf_db_client_;
-  std::string hashed_id_;
-  std::string test_name_;
-  std::string sys_info_;
-  std::string tag_;
   void ReportQPS(const ScenarioResult& result) GRPC_OVERRIDE;
   void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE;
   void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE;
   void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE;
-  void SendData();
+
+  const string report_file_;
 };
 
 }  // namespace testing
diff --git a/test/cpp/util/benchmark_config.cc b/test/cpp/util/benchmark_config.cc
index 746d3d7ae6..6fc864069e 100644
--- a/test/cpp/util/benchmark_config.cc
+++ b/test/cpp/util/benchmark_config.cc
@@ -37,8 +37,8 @@
 DEFINE_bool(enable_log_reporter, true,
             "Enable reporting of benchmark results through GprLog");
 
-DEFINE_bool(report_metrics_db, false,
-            "True if metrics to be reported to performance database");
+DEFINE_string(scenario_result_file, "",
+              "Write JSON benchmark report to the file specified.");
 
 DEFINE_string(hashed_id, "", "Hash of the user id");
 
@@ -71,10 +71,9 @@ static std::shared_ptr<Reporter> InitBenchmarkReporters() {
     composite_reporter->add(
         std::unique_ptr<Reporter>(new GprLogReporter("LogReporter")));
   }
-  if (FLAGS_report_metrics_db) {
+  if (FLAGS_scenario_result_file != "") {
     composite_reporter->add(std::unique_ptr<Reporter>(
-        new PerfDbReporter("PerfDbReporter", FLAGS_hashed_id, FLAGS_test_name,
-                           FLAGS_sys_info, FLAGS_server_address, FLAGS_tag)));
+        new JsonReporter("JsonReporter", FLAGS_scenario_result_file)));
  }
 
   return std::shared_ptr<Reporter>(composite_reporter);
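With this change, passing --scenario_result_file to the benchmark binary makes the new JsonReporter dump the full grpc.testing.ScenarioResult message as JSON via BinaryToJsonString. A minimal sketch of reading such a report back is shown below; it is not part of this commit, the helper name ReadJsonReport and the generic protobuf Message target are illustrative assumptions, and it simply inverts the conversion used by JsonReporter::ReportQPS.

#include <fstream>
#include <memory>
#include <sstream>
#include <string>

#include <google/protobuf/descriptor.h>
#include <google/protobuf/message.h>
#include <google/protobuf/util/json_util.h>
#include <google/protobuf/util/type_resolver.h>
#include <google/protobuf/util/type_resolver_util.h>

// Hypothetical helper: parse a JSON report written by JsonReporter back into
// `msg`. `type_url` names the message type, e.g.
// "type.googleapis.com/grpc.testing.ScenarioResult".
bool ReadJsonReport(const std::string& path, const std::string& type_url,
                    google::protobuf::Message* msg) {
  std::ifstream in(path);
  std::stringstream json;
  json << in.rdbuf();

  std::unique_ptr<google::protobuf::util::TypeResolver> resolver(
      google::protobuf::util::NewTypeResolverForDescriptorPool(
          "type.googleapis.com",
          google::protobuf::DescriptorPool::generated_pool()));

  // Inverse of the BinaryToJsonString call in JsonReporter::ReportQPS:
  // JSON -> binary wire format, then parse into the concrete message.
  std::string binary;
  auto status = google::protobuf::util::JsonToBinaryString(
      resolver.get(), type_url, json.str(), &binary);
  return status.ok() && msg->ParseFromString(binary);
}

Because the reporter serializes the entire ScenarioResult (scenario config, per-client and per-server stats, latencies, and the summary), a round trip like this should recover everything the run produced, not just the summary fields.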