about summary refs log tree commit diff homepage
path: root/test
diff options
context:
space:
mode:
author    Juanli Shen <aspirinsjl@gmail.com>  2018-02-07 14:51:20 -0800
committer Juanli Shen <juanlishen@google.com> 2018-02-12 11:35:40 -0800
commit    776490af0eae820fb8919b2ea6f245aff197a1ab (patch)
tree      aa4c439abd4774e2c34a06a573d4ce47f14c14e9 /test
parent    86accb1768cc860a7579bb5fff9bd3c2ba7274af (diff)
Revert "Revert "grpclb re-resolution""
Diffstat (limited to 'test')
-rw-r--r--  test/cpp/end2end/grpclb_end2end_test.cc | 161
 1 file changed, 148 insertions(+), 13 deletions(-)
diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc
index 89c9c2bf76..d84ec60a7b 100644
--- a/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/test/cpp/end2end/grpclb_end2end_test.cc
@@ -192,6 +192,7 @@ class BalancerServiceImpl : public BalancerService {
gpr_log(GPR_INFO, "LB[%p]: recv msg '%s'", this,
request.DebugString().c_str());
+ // TODO(juanlishen): Initial response should always be the first response.
if (client_load_reporting_interval_seconds_ > 0) {
LoadBalanceResponse initial_response;
initial_response.mutable_initial_response()
@@ -444,7 +445,7 @@ class GrpclbEnd2endTest : public ::testing::Test {
void WaitForBackend(size_t backend_idx) {
do {
- CheckRpcSendOk();
+ SendRpc();
} while (backends_[backend_idx]->request_count() == 0);
ResetBackendCounters();
}
@@ -663,9 +664,6 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
// and sent two responses.
EXPECT_EQ(2U, balancer_servers_[0].service_->response_count());
-
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}
TEST_F(SingleBalancerTest, Fallback) {
@@ -874,8 +872,6 @@ TEST_F(SingleBalancerTest, BackendsRestart) {
// machinery to either update the LB responses "on the fly" or instruct
// backends which ports to restart on.
CheckRpcSendFailure();
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}
class UpdatesTest : public GrpclbEnd2endTest {
@@ -939,8 +935,6 @@ TEST_F(UpdatesTest, UpdateBalancers) {
EXPECT_EQ(1U, balancer_servers_[1].service_->response_count());
EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}
// Send an update with the same set of LBs as the one in SetUp() in order to
@@ -1012,9 +1006,6 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
// doesn't assign the second backend.
EXPECT_EQ(0U, backend_servers_[1].service_->request_count());
balancers_[0]->NotifyDoneWithServerlists();
-
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}
TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
@@ -1097,8 +1088,152 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
EXPECT_LE(balancer_servers_[1].service_->response_count(), 2U);
EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
+}
+
+TEST_F(UpdatesTest, ReresolveDeadBalancer) {
+ std::vector<AddressData> addresses;
+ addresses.emplace_back(AddressData{balancer_servers_[0].port_, true, ""});
+ SetNextResolution(addresses);
+ addresses.clear();
+ addresses.emplace_back(AddressData{balancer_servers_[1].port_, true, ""});
+ SetNextResolutionUponError(addresses);
+ const std::vector<int> first_backend{GetBackendPorts()[0]};
+ const std::vector<int> second_backend{GetBackendPorts()[1]};
+
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
+ ScheduleResponseForBalancer(
+ 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+
+ // Start servers and send 10 RPCs per server.
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+ // All 10 requests should have gone to the first backend.
+ EXPECT_EQ(10U, backend_servers_[0].service_->request_count());
+
+ // Kill backend 0.
+ gpr_log(GPR_INFO, "********** ABOUT TO KILL BACKEND 0 *************");
+ if (backends_[0]->Shutdown()) backend_servers_[0].Shutdown();
+ gpr_log(GPR_INFO, "********** KILLED BACKEND 0 *************");
+
+ CheckRpcSendFailure();
+
+ balancers_[1]->NotifyDoneWithServerlists();
+ balancers_[2]->NotifyDoneWithServerlists();
+ EXPECT_EQ(0U, balancer_servers_[1].service_->request_count());
+ EXPECT_EQ(0U, balancer_servers_[1].service_->response_count());
+ EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
+ EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
+
+ // Kill balancer 0.
+ gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
+ balancers_[0]->NotifyDoneWithServerlists();
+ if (balancers_[0]->Shutdown()) balancer_servers_[0].Shutdown();
+ gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
+
+ balancers_[0]->NotifyDoneWithServerlists();
+ balancers_[1]->NotifyDoneWithServerlists();
+ balancers_[2]->NotifyDoneWithServerlists();
+ // Balancer 0 got a single request.
+ EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancer_servers_[0].service_->response_count());
+ // Balancer 1 may have received a request if re-resolution is done quickly
+ // enough.
+ EXPECT_GE(balancer_servers_[1].service_->request_count(), 0U);
+ EXPECT_GE(balancer_servers_[1].service_->response_count(), 0U);
+ EXPECT_LE(balancer_servers_[1].service_->request_count(), 1U);
+ EXPECT_LE(balancer_servers_[1].service_->response_count(), 1U);
+ EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
+ EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
+
+ // Wait until re-resolution has finished, as signaled by the second backend
+ // receiving a request.
+ WaitForBackend(1);
+
+ // This is serviced by the new serverlist.
+ gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
+ // All 10 requests should have gone to the second backend.
+ EXPECT_EQ(10U, backend_servers_[1].service_->request_count());
+
+ balancers_[0]->NotifyDoneWithServerlists();
+ balancers_[1]->NotifyDoneWithServerlists();
+ balancers_[2]->NotifyDoneWithServerlists();
+ EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
+ EXPECT_EQ(1U, balancer_servers_[0].service_->response_count());
+ EXPECT_EQ(1U, balancer_servers_[1].service_->request_count());
+ EXPECT_EQ(1U, balancer_servers_[1].service_->response_count());
+ EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
+ EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
+}
+
+TEST_F(UpdatesTest, ReresolveDeadBackend) {
+ ResetStub(500);
+ // The first resolution contains the addresses of a balancer that never
+ // responds, and a fallback backend.
+ std::vector<AddressData> addresses;
+ addresses.emplace_back(AddressData{balancer_servers_[0].port_, true, ""});
+ addresses.emplace_back(AddressData{backend_servers_[0].port_, false, ""});
+ SetNextResolution(addresses);
+ // The re-resolution result will contain a balancer address.
+ addresses.clear();
+ addresses.emplace_back(AddressData{balancer_servers_[1].port_, true, ""});
+ SetNextResolutionUponError(addresses);
+ const std::vector<int> second_backend{backend_servers_[1].port_};
+
+ ScheduleResponseForBalancer(
+ 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+
+ // Start servers and send 10 RPCs per server.
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+ // All 10 requests should have gone to the fallback backend.
+ EXPECT_EQ(10U, backend_servers_[0].service_->request_count());
+
+ // Kill backend 0.
+ gpr_log(GPR_INFO, "********** ABOUT TO KILL BACKEND 0 *************");
+ if (backends_[0]->Shutdown()) backend_servers_[0].Shutdown();
+ gpr_log(GPR_INFO, "********** KILLED BACKEND 0 *************");
+
+ balancers_[0]->NotifyDoneWithServerlists();
+ balancers_[1]->NotifyDoneWithServerlists();
+ balancers_[2]->NotifyDoneWithServerlists();
+ // Balancer 0 got a single request.
+ EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
+ // but didn't send any response.
+ EXPECT_EQ(0U, balancer_servers_[0].service_->response_count());
+ // Balancer 1 may have received a request if re-resolution is done quickly
+ // enough.
+ EXPECT_GE(balancer_servers_[1].service_->request_count(), 0U);
+ EXPECT_GE(balancer_servers_[1].service_->response_count(), 0U);
+ EXPECT_LE(balancer_servers_[1].service_->request_count(), 1U);
+ EXPECT_LE(balancer_servers_[1].service_->response_count(), 1U);
+ EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
+ EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
+
+ // Wait until re-resolution has finished, as signaled by the second backend
+ // receiving a request.
+ WaitForBackend(1);
+
+ gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
+ // All 10 requests should have gone to the second backend.
+ EXPECT_EQ(10U, backend_servers_[1].service_->request_count());
+
+ balancers_[0]->NotifyDoneWithServerlists();
+ balancers_[1]->NotifyDoneWithServerlists();
+ balancers_[2]->NotifyDoneWithServerlists();
+ EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
+ EXPECT_EQ(0U, balancer_servers_[0].service_->response_count());
+ EXPECT_EQ(1U, balancer_servers_[1].service_->request_count());
+ EXPECT_EQ(1U, balancer_servers_[1].service_->response_count());
+ EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
+ EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
}
TEST_F(SingleBalancerTest, Drop) {