author    | David Garcia Quintas <dgq@google.com> | 2017-09-18 20:01:27 -0700
committer | David Garcia Quintas <dgq@google.com> | 2017-09-18 20:01:27 -0700
commit    | 588e36a3e3b43276f573c466d88c3dd60bd5be16 (patch)
tree      | 0df5700f866fb816b0830cada3edcb09977e93ed /test
parent    | 7d764673ef9a18503339747ab8ee002739f6e329 (diff)
Get rid of magic numbers
Diffstat (limited to 'test')
-rw-r--r-- | test/cpp/end2end/grpclb_end2end_test.cc | 45
1 file changed, 33 insertions, 12 deletions
diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc
index 6896b54f1b..77ed155292 100644
--- a/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/test/cpp/end2end/grpclb_end2end_test.cc
@@ -890,15 +890,22 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
 
 TEST_F(SingleBalancerTest, Drop) {
   const size_t kNumRpcsPerAddress = 100;
+  const int num_of_drop_by_rate_limiting_addresses = 1;
+  const int num_of_drop_by_load_balancing_addresses = 2;
+  const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
+                                    num_of_drop_by_load_balancing_addresses;
+  const int num_total_addresses = num_backends_ + num_of_drop_addresses;
   ScheduleResponseForBalancer(
       0, BalancerServiceImpl::BuildResponseForBackends(
-             GetBackendPorts(), {{"rate_limiting", 1}, {"load_balancing", 2}}),
+             GetBackendPorts(),
+             {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+              {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
       0);
   // Wait until all backends are ready.
   WaitForAllBackends();
   // Send kNumRpcsPerAddress RPCs for each server and drop address.
   size_t num_drops = 0;
-  for (size_t i = 0; i < kNumRpcsPerAddress * (num_backends_ + 3); ++i) {
+  for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
     EchoResponse response;
     const Status status = SendRpc(&response);
     if (!status.ok() &&
@@ -910,7 +917,7 @@ TEST_F(SingleBalancerTest, Drop) {
       EXPECT_EQ(response.message(), kMessage_);
     }
   }
-  EXPECT_EQ(kNumRpcsPerAddress * 3, num_drops);
+  EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
 
   // Each backend should have gotten 100 requests.
   for (size_t i = 0; i < backends_.size(); ++i) {
@@ -925,9 +932,12 @@ TEST_F(SingleBalancerTest, Drop) {
 
 TEST_F(SingleBalancerTest, DropAllFirst) {
   // All registered addresses are marked as "drop".
+  const int num_of_drop_by_rate_limiting_addresses = 1;
+  const int num_of_drop_by_load_balancing_addresses = 1;
   ScheduleResponseForBalancer(
       0, BalancerServiceImpl::BuildResponseForBackends(
-             {}, {{"rate_limiting", 1}, {"load_balancing", 1}}),
+             {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+                  {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
       0);
   const Status status = SendRpc();
   EXPECT_FALSE(status.ok());
@@ -938,9 +948,12 @@ TEST_F(SingleBalancerTest, DropAll) {
   ScheduleResponseForBalancer(
       0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
+  const int num_of_drop_by_rate_limiting_addresses = 1;
+  const int num_of_drop_by_load_balancing_addresses = 1;
   ScheduleResponseForBalancer(
       0, BalancerServiceImpl::BuildResponseForBackends(
-             {}, {{"rate_limiting", 1}, {"load_balancing", 1}}),
+             {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+                  {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
       1000);
 
   // First call succeeds.
@@ -996,20 +1009,27 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
 
 TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
   const size_t kNumRpcsPerAddress = 3;
+  const int num_of_drop_by_rate_limiting_addresses = 2;
+  const int num_of_drop_by_load_balancing_addresses = 1;
+  const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
+                                    num_of_drop_by_load_balancing_addresses;
+  const int num_total_addresses = num_backends_ + num_of_drop_addresses;
   ScheduleResponseForBalancer(
       0, BalancerServiceImpl::BuildResponseForBackends(
-             GetBackendPorts(), {{"rate_limiting", 2}, {"load_balancing", 1}}),
+             GetBackendPorts(),
+             {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+              {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
       0);
   // Wait until all backends are ready.
   int num_warmup_ok = 0;
   int num_warmup_failure = 0;
   int num_warmup_drops = 0;
   std::tie(num_warmup_ok, num_warmup_failure, num_warmup_drops) =
-      WaitForAllBackends(num_backends_ + 3 /* num_requests_multiple_of */);
+      WaitForAllBackends(num_total_addresses /* num_requests_multiple_of */);
   const int num_total_warmup_requests =
       num_warmup_ok + num_warmup_failure + num_warmup_drops;
   size_t num_drops = 0;
-  for (size_t i = 0; i < kNumRpcsPerAddress * (num_backends_ + 3); ++i) {
+  for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
     EchoResponse response;
     const Status status = SendRpc(&response);
     if (!status.ok() &&
@@ -1021,7 +1041,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
       EXPECT_EQ(response.message(), kMessage_);
     }
   }
-  EXPECT_EQ(kNumRpcsPerAddress * 3, num_drops);
+  EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
   // Each backend should have gotten 100 requests.
   for (size_t i = 0; i < backends_.size(); ++i) {
     EXPECT_EQ(kNumRpcsPerAddress,
@@ -1035,10 +1055,10 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
 
   const ClientStats client_stats = WaitForLoadReports();
   EXPECT_EQ(
-      kNumRpcsPerAddress * (num_backends_ + 3) + num_total_warmup_requests,
+      kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
       client_stats.num_calls_started);
   EXPECT_EQ(
-      kNumRpcsPerAddress * (num_backends_ + 3) + num_total_warmup_requests,
+      kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
       client_stats.num_calls_finished);
   EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
   EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_warmup_ok,
@@ -1046,7 +1066,8 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
   // The number of warmup request is a multiple of the number of addresses.
   // Therefore, all addresses in the scheduled balancer response are hit the
   // same number of times.
-  const int num_times_drop_addresses_hit = num_warmup_drops / 3;
+  const int num_times_drop_addresses_hit =
+      num_warmup_drops / num_of_drop_addresses;
   EXPECT_THAT(
       client_stats.drop_token_counts,
       ::testing::ElementsAre(
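For context (not part of the commit itself): the change replaces the hard-coded drop counts (1, 2, and the derived 3) with named constants, and then derives every loop bound and expectation from those constants. Below is a minimal, standalone C++ sketch of that pattern; num_backends and the arithmetic check are illustrative placeholders, not the real gRPC test fixture (which uses num_backends_, SendRpc, and gtest assertions).

```cpp
// Illustrative sketch only: named constants replacing magic numbers, with
// all derived quantities computed from the same inputs the "balancer
// response" would be built from.
#include <cassert>
#include <cstddef>

int main() {
  // Named inputs replace the literals 1 and 2 (and the derived literal 3).
  const int num_of_drop_by_rate_limiting_addresses = 1;
  const int num_of_drop_by_load_balancing_addresses = 2;
  const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
                                    num_of_drop_by_load_balancing_addresses;

  // Placeholder standing in for the fixture's num_backends_.
  const int num_backends = 4;
  const int num_total_addresses = num_backends + num_of_drop_addresses;

  // Loop bound and expected drop count are derived from the same constants,
  // so changing a drop token count can no longer desynchronize them the way
  // the hard-coded "+ 3" and "* 3" could.
  const std::size_t kNumRpcsPerAddress = 100;
  const std::size_t expected_total_rpcs =
      kNumRpcsPerAddress * num_total_addresses;
  const std::size_t expected_drops =
      kNumRpcsPerAddress * num_of_drop_addresses;

  // Sanity check: total RPCs = dropped RPCs + RPCs served by real backends.
  assert(expected_total_rpcs ==
         expected_drops + kNumRpcsPerAddress * num_backends);
  return 0;
}
```

The point of the refactor is that a later edit to either drop token count now only touches one declaration; every bound and EXPECT_* in the tests follows automatically.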