Diffstat (limited to 'test/cpp')
-rw-r--r--  test/cpp/end2end/client_lb_end2end_test.cc | 21
-rw-r--r--  test/cpp/end2end/grpclb_end2end_test.cc    | 40
-rw-r--r--  test/cpp/grpclb/grpclb_test.cc             |  7
3 files changed, 53 insertions(+), 15 deletions(-)
diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc
index f8bb12fde1..c25f65d37a 100644
--- a/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/test/cpp/end2end/client_lb_end2end_test.cc
@@ -573,15 +573,28 @@ TEST_F(ClientLbEnd2endTest, RoundRobinReresolve) {
CheckRpcSendOk();
}
// Kill all servers
+ gpr_log(GPR_INFO, "****** ABOUT TO KILL SERVERS *******");
for (size_t i = 0; i < servers_.size(); ++i) {
servers_[i]->Shutdown(false);
}
- // Client request should fail.
- CheckRpcSendFailure();
+ gpr_log(GPR_INFO, "****** SERVERS KILLED *******");
+ gpr_log(GPR_INFO, "****** SENDING DOOMED REQUESTS *******");
+ // Client requests should fail. Send enough to tickle all subchannels.
+ for (size_t i = 0; i < servers_.size(); ++i) CheckRpcSendFailure();
+ gpr_log(GPR_INFO, "****** DOOMED REQUESTS SENT *******");
// Bring servers back up on the same port (we aren't recreating the channel).
+ gpr_log(GPR_INFO, "****** RESTARTING SERVERS *******");
StartServers(kNumServers, ports);
- // Client request should succeed.
- CheckRpcSendOk();
+ gpr_log(GPR_INFO, "****** SERVERS RESTARTED *******");
+ gpr_log(GPR_INFO, "****** SENDING REQUEST TO SUCCEED *******");
+ // Client request should eventually (but still fairly soon) succeed.
+ const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ while (gpr_time_cmp(deadline, now) > 0) {
+ if (SendRpc().ok()) break;
+ now = gpr_now(GPR_CLOCK_MONOTONIC);
+ }
+ GPR_ASSERT(gpr_time_cmp(deadline, now) > 0);
}
} // namespace
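
The loop added at the end of RoundRobinReresolve polls SendRpc() against a wall-clock budget instead of asserting that the very first request after the restart succeeds, since re-resolution and reconnection take a nondeterministic amount of time. Below is a minimal standalone sketch of the same deadline-bounded retry idiom, using std::chrono in place of gpr_timespec/gpr_now() and a caller-supplied attempt callback in place of the fixture's SendRpc(); it is an illustration, not the test's actual helper.

#include <chrono>
#include <functional>
#include <thread>

// Retry `attempt` until it succeeds or `budget` elapses; returns whether it
// ever succeeded. Mirrors the loop added to RoundRobinReresolve above.
bool RetryUntilDeadline(const std::function<bool()>& attempt,
                        std::chrono::milliseconds budget) {
  const auto deadline = std::chrono::steady_clock::now() + budget;
  while (std::chrono::steady_clock::now() < deadline) {
    if (attempt()) return true;  // e.g. SendRpc().ok() in the test above
    // Brief pause so the loop does not spin while subchannels reconnect.
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
  }
  return false;  // budget exhausted; the caller would then fail the test
}

The test itself expresses the same check by asserting with GPR_ASSERT that the deadline has not been reached when the loop exits.
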
diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc
index c15ab88da1..bbf3da4663 100644
--- a/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/test/cpp/end2end/grpclb_end2end_test.cc
@@ -353,11 +353,6 @@ class GrpclbEnd2endTest : public ::testing::Test {
"balancer", server_host_, balancers_.back().get()));
}
ResetStub();
- std::vector<AddressData> addresses;
- for (size_t i = 0; i < balancer_servers_.size(); ++i) {
- addresses.emplace_back(AddressData{balancer_servers_[i].port_, true, ""});
- }
- SetNextResolution(addresses);
}
void TearDown() override {
@@ -370,6 +365,14 @@ class GrpclbEnd2endTest : public ::testing::Test {
grpc_fake_resolver_response_generator_unref(response_generator_);
}
+ void SetNextResolutionAllBalancers() {
+ std::vector<AddressData> addresses;
+ for (size_t i = 0; i < balancer_servers_.size(); ++i) {
+ addresses.emplace_back(AddressData{balancer_servers_[i].port_, true, ""});
+ }
+ SetNextResolution(addresses);
+ }
+
void ResetStub(int fallback_timeout = 0) {
ChannelArguments args;
args.SetGrpclbFallbackTimeout(fallback_timeout);
@@ -581,6 +584,7 @@ class SingleBalancerTest : public GrpclbEnd2endTest {
};
TEST_F(SingleBalancerTest, Vanilla) {
+ SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
@@ -608,6 +612,7 @@ TEST_F(SingleBalancerTest, Vanilla) {
}
TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
+ SetNextResolutionAllBalancers();
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
const int kCallDeadlineMs = 1000 * grpc_test_slowdown_factor();
@@ -645,6 +650,7 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
}
TEST_F(SingleBalancerTest, Fallback) {
+ SetNextResolutionAllBalancers();
const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
const size_t kNumBackendInResolution = backends_.size() / 2;
@@ -711,6 +717,7 @@ TEST_F(SingleBalancerTest, Fallback) {
}
TEST_F(SingleBalancerTest, FallbackUpdate) {
+ SetNextResolutionAllBalancers();
const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
const size_t kNumBackendInResolution = backends_.size() / 3;
@@ -818,6 +825,7 @@ TEST_F(SingleBalancerTest, FallbackUpdate) {
}
TEST_F(SingleBalancerTest, BackendsRestart) {
+ SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
@@ -857,6 +865,7 @@ class UpdatesTest : public GrpclbEnd2endTest {
};
TEST_F(UpdatesTest, UpdateBalancers) {
+ SetNextResolutionAllBalancers();
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
ScheduleResponseForBalancer(
@@ -919,6 +928,7 @@ TEST_F(UpdatesTest, UpdateBalancers) {
// verify that the LB channel inside grpclb keeps the initial connection (which
// by definition is also present in the update).
TEST_F(UpdatesTest, UpdateBalancersRepeated) {
+ SetNextResolutionAllBalancers();
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[0]};
@@ -989,6 +999,9 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
}
TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
+ std::vector<AddressData> addresses;
+ addresses.emplace_back(AddressData{balancer_servers_[0].port_, true, ""});
+ SetNextResolution(addresses);
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
@@ -1030,7 +1043,7 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
- std::vector<AddressData> addresses;
+ addresses.clear();
addresses.emplace_back(AddressData{balancer_servers_[1].port_, true, ""});
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolution(addresses);
@@ -1055,8 +1068,14 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
balancers_[2]->NotifyDoneWithServerlists();
EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
EXPECT_EQ(1U, balancer_servers_[0].service_->response_count());
- EXPECT_EQ(1U, balancer_servers_[1].service_->request_count());
- EXPECT_EQ(1U, balancer_servers_[1].service_->response_count());
+ // The second balancer, published as part of the first update, may end up
+ // getting two requests (that is, 1 <= #req <= 2) if the LB call retry timer
+ // firing races with the arrival of the update containing the second
+ // balancer.
+ EXPECT_GE(balancer_servers_[1].service_->request_count(), 1U);
+ EXPECT_GE(balancer_servers_[1].service_->response_count(), 1U);
+ EXPECT_LE(balancer_servers_[1].service_->request_count(), 2U);
+ EXPECT_LE(balancer_servers_[1].service_->response_count(), 2U);
EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
// Check LB policy name for the channel.
@@ -1064,6 +1083,7 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
}
TEST_F(SingleBalancerTest, Drop) {
+ SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
const int num_of_drop_by_rate_limiting_addresses = 1;
const int num_of_drop_by_load_balancing_addresses = 2;
@@ -1107,6 +1127,7 @@ TEST_F(SingleBalancerTest, Drop) {
}
TEST_F(SingleBalancerTest, DropAllFirst) {
+ SetNextResolutionAllBalancers();
// All registered addresses are marked as "drop".
const int num_of_drop_by_rate_limiting_addresses = 1;
const int num_of_drop_by_load_balancing_addresses = 1;
@@ -1122,6 +1143,7 @@ TEST_F(SingleBalancerTest, DropAllFirst) {
}
TEST_F(SingleBalancerTest, DropAll) {
+ SetNextResolutionAllBalancers();
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
0);
@@ -1152,6 +1174,7 @@ class SingleBalancerWithClientLoadReportingTest : public GrpclbEnd2endTest {
};
TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
+ SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
@@ -1186,6 +1209,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
}
TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
+ SetNextResolutionAllBalancers();
const size_t kNumRpcsPerAddress = 3;
const int num_of_drop_by_rate_limiting_addresses = 2;
const int num_of_drop_by_load_balancing_addresses = 1;
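
Most of this file's changes simply move the balancer-address resolution out of SetUp() into the explicit SetNextResolutionAllBalancers() helper, so that a test such as UpdateBalancersDeadUpdate can publish a narrower initial address list of its own. Separately, the end of UpdateBalancersDeadUpdate now bounds the second balancer's request and response counts to the range [1, 2] instead of requiring exactly 1, because the LB call retry timer can race with the arrival of the update naming that balancer. A small gtest-style sketch of that race-tolerant assertion pattern follows; RequestCount() is a hypothetical stand-in for balancer_servers_[1].service_->request_count().

#include <gtest/gtest.h>
#include <cstddef>

// Hypothetical stand-in for the balancer service's request counter; in the
// real test it reflects how many times the client reached the balancer.
size_t RequestCount() { return 1; }

TEST(RaceTolerantCounts, AcceptsEitherOutcomeOfTheRetryRace) {
  // Depending on whether the retry timer fires before the update arrives,
  // the balancer legitimately sees one or two requests, so assert a range
  // rather than an exact value.
  const size_t requests = RequestCount();
  EXPECT_GE(requests, 1U);
  EXPECT_LE(requests, 2U);
}
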
diff --git a/test/cpp/grpclb/grpclb_test.cc b/test/cpp/grpclb/grpclb_test.cc
index ca846c72fd..a469fbb7e3 100644
--- a/test/cpp/grpclb/grpclb_test.cc
+++ b/test/cpp/grpclb/grpclb_test.cc
@@ -703,14 +703,15 @@ static test_fixture setup_test_fixture(int lb_server_update_delay_ms) {
tf.lb_backends[i].lb_token_prefix = "";
}
setup_server("127.0.0.1", &tf.lb_backends[i]);
- gpr_thd_new(&tf.lb_backends[i].tid, fork_backend_server, &tf.lb_backends[i],
- &options);
+ gpr_thd_new(&tf.lb_backends[i].tid, "grpclb_backend", fork_backend_server,
+ &tf.lb_backends[i], &options);
}
tf.lb_server.lb_token_prefix = LB_TOKEN_PREFIX;
tf.lb_server.balancer_name = BALANCERS_NAME;
setup_server("127.0.0.1", &tf.lb_server);
- gpr_thd_new(&tf.lb_server.tid, fork_lb_server, &tf.lb_server, &options);
+ gpr_thd_new(&tf.lb_server.tid, "grpclb_server", fork_lb_server, &tf.lb_server,
+ &options);
setup_client(&tf.lb_server, tf.lb_backends, &tf.client);
return tf;
}
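
The grpclb_test.cc hunks only adapt to gpr_thd_new() now taking a thread name as its second argument, between the thread id and the body function. A minimal sketch of a caller under that signature is shown below; the include path and the joinable/join helpers are assumptions based on the gpr thread API of this vintage, not part of this change.

#include <grpc/support/log.h>
#include <grpc/support/thd.h>  // assumed location of gpr_thd_new in this tree

static void worker(void* arg) {
  gpr_log(GPR_INFO, "named worker thread running, arg=%p", arg);
}

int main(void) {
  gpr_thd_id tid;
  gpr_thd_options options = gpr_thd_options_default();
  gpr_thd_options_set_joinable(&options);
  // The thread name ("example_worker") is the new second argument.
  GPR_ASSERT(gpr_thd_new(&tid, "example_worker", worker, nullptr, &options));
  gpr_thd_join(tid);
  return 0;
}
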