Diffstat (limited to 'test/cpp')
-rw-r--r--  test/cpp/end2end/async_end2end_test.cc             6
-rw-r--r--  test/cpp/end2end/client_lb_end2end_test.cc        51
-rw-r--r--  test/cpp/end2end/end2end_test.cc                  16
-rw-r--r--  test/cpp/end2end/grpclb_end2end_test.cc          310
-rw-r--r--  test/cpp/microbenchmarks/bm_chttp2_transport.cc  166
-rw-r--r--  test/cpp/microbenchmarks/bm_fullstack_trickle.cc   5
6 files changed, 255 insertions, 299 deletions
diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc
index e841a702d4..41090d161a 100644
--- a/test/cpp/end2end/async_end2end_test.cc
+++ b/test/cpp/end2end/async_end2end_test.cc
@@ -266,6 +266,7 @@ class AsyncEnd2endTest : public ::testing::TestWithParam<TestScenario> {
}
void TearDown() override {
+ gpr_tls_set(&g_is_async_end2end_test, 0);
server_->Shutdown();
void* ignored_tag;
bool ignored_ok;
@@ -274,7 +275,6 @@ class AsyncEnd2endTest : public ::testing::TestWithParam<TestScenario> {
while (cq_->Next(&ignored_tag, &ignored_ok))
  ;
stub_.reset();
poll_overrider_.reset();
- gpr_tls_set(&g_is_async_end2end_test, 0);
grpc_recycle_unused_port(port_);
}
@@ -396,6 +396,7 @@ TEST_P(AsyncEnd2endTest, WaitAndShutdownTest) {
ResetStub();
SendRpc(1);
EXPECT_EQ(0, notify);
+ gpr_tls_set(&g_is_async_end2end_test, 0);
server_->Shutdown();
wait_thread.join();
EXPECT_EQ(1, notify);
@@ -404,8 +405,9 @@ TEST_P(AsyncEnd2endTest, WaitAndShutdownTest) {
TEST_P(AsyncEnd2endTest, ShutdownThenWait) {
ResetStub();
SendRpc(1);
- server_->Shutdown();
+ std::thread t([this]() { server_->Shutdown(); });
server_->Wait();
+ t.join();
}
// Test a simple RPC using the async version of Next
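
Note on the ShutdownThenWait change above: grpc::Server::Wait() blocks until the server has shut down, so the updated test issues Shutdown() from a helper thread while Wait() is pending, exercising the two calls concurrently rather than sequentially. A minimal sketch of the pattern, assuming a running grpc::Server (not the test's exact fixture):

    #include <thread>

    #include <grpc++/server.h>

    void ShutdownWhileWaiting(grpc::Server* server) {
      // Shutdown() may block while in-flight RPCs drain, so run it on its
      // own thread; Wait() returns once that shutdown has completed.
      std::thread shutdown_thread([server] { server->Shutdown(); });
      server->Wait();
      shutdown_thread.join();
    }
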
diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc
index 54408db600..c236f76e89 100644
--- a/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/test/cpp/end2end/client_lb_end2end_test.cc
@@ -226,6 +226,31 @@ class ClientLbEnd2endTest : public ::testing::Test {
ResetCounters();
}
+ bool SeenAllServers() {
+ for (const auto& server : servers_) {
+ if (server->service_.request_count() == 0) return false;
+ }
+ return true;
+ }
+
+ // Updates \a connection_order by appending to it the index of the newly
+ // connected server. Must be called after every single RPC.
+ void UpdateConnectionOrder(
+ const std::vector<std::unique_ptr<ServerData>>& servers,
+ std::vector<int>* connection_order) {
+ for (size_t i = 0; i < servers.size(); ++i) {
+ if (servers[i]->service_.request_count() == 1) {
+ // Was the server index known? If not, update connection_order.
+ const auto it =
+ std::find(connection_order->begin(), connection_order->end(), i);
+ if (it == connection_order->end()) {
+ connection_order->push_back(i);
+ return;
+ }
+ }
+ }
+ }
+
const grpc::string server_host_;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
@@ -370,13 +395,23 @@ TEST_F(ClientLbEnd2endTest, RoundRobin) {
ports.emplace_back(server->port_);
}
SetNextResolution(ports);
- for (size_t i = 0; i < servers_.size(); ++i) {
+ // Wait until all backends are ready.
+ do {
CheckRpcSendOk();
- }
- // One request should have gone to each server.
+ } while (!SeenAllServers());
+ ResetCounters();
+ // "Sync" to the end of the list. Next sequence of picks will start at the
+ // first server (index 0).
+ WaitForServer(servers_.size() - 1);
+ std::vector<int> connection_order;
for (size_t i = 0; i < servers_.size(); ++i) {
- EXPECT_EQ(1, servers_[i]->service_.request_count());
+ CheckRpcSendOk();
+ UpdateConnectionOrder(servers_, &connection_order);
}
+ // Backends should be iterated over in the order in which the addresses were
+ // given.
+ const auto expected = std::vector<int>{0, 1, 2};
+ EXPECT_EQ(expected, connection_order);
// Check LB policy name for the channel.
EXPECT_EQ("round_robin", channel_->GetLoadBalancingPolicyName());
}
@@ -529,13 +564,9 @@ TEST_F(ClientLbEnd2endTest, RoundRobinReresolve) {
StartServers(kNumServers, ports);
ResetStub("round_robin");
SetNextResolution(ports);
- // Send one RPC per backend and make sure they are used in order.
- // Note: This relies on the fact that the subchannels are reported in
- // state READY in the order in which the addresses are specified,
- // which is only true because the backends are all local.
- for (size_t i = 0; i < servers_.size(); ++i) {
+ // Send a number of RPCs, which succeed.
+ for (size_t i = 0; i < 100; ++i) {
CheckRpcSendOk();
- EXPECT_EQ(1, servers_[i]->service_.request_count()) << "for backend #" << i;
}
// Kill all servers
for (size_t i = 0; i < servers_.size(); ++i) {
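
The RoundRobin test above no longer assumes subchannels turn READY in address order: it pumps RPCs until every backend has served at least one request (SeenAllServers), resets the counters, waits for the last server so the next pick sequence starts at index 0, and only then asserts the connection order. A self-contained sketch of that wait-then-reset loop, with a simulated round-robin pick standing in for real RPCs (Backend, SendOneRpc, and SeenAllBackends are illustrative names, not the test's helpers):

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for the test's per-server request counters.
    struct Backend { int request_count = 0; };

    // Simulates one successful round-robin RPC, for illustration only.
    void SendOneRpc(std::vector<Backend>& backends, size_t& next_pick) {
      backends[next_pick].request_count++;
      next_pick = (next_pick + 1) % backends.size();
    }

    bool SeenAllBackends(const std::vector<Backend>& backends) {
      for (const auto& b : backends) {
        if (b.request_count == 0) return false;  // not yet seen
      }
      return true;
    }

    int main() {
      std::vector<Backend> backends(3);
      size_t next_pick = 0;
      do {
        SendOneRpc(backends, next_pick);      // keep issuing RPCs...
      } while (!SeenAllBackends(backends));   // ...until all have been seen
      for (auto& b : backends) b.request_count = 0;  // reset before asserting
    }
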
diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc
index 1f4861a7e6..e54cd03ca2 100644
--- a/test/cpp/end2end/end2end_test.cc
+++ b/test/cpp/end2end/end2end_test.cc
@@ -757,6 +757,22 @@ TEST_P(End2endTest, RequestStreamTwoRequests) {
EXPECT_TRUE(s.ok());
}
+TEST_P(End2endTest, RequestStreamTwoRequestsWithWriteThrough) {
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ auto stream = stub_->RequestStream(&context, &response);
+ request.set_message("hello");
+ EXPECT_TRUE(stream->Write(request, WriteOptions().set_write_through()));
+ EXPECT_TRUE(stream->Write(request, WriteOptions().set_write_through()));
+ stream->WritesDone();
+ Status s = stream->Finish();
+ EXPECT_EQ(response.message(), "hellohello");
+ EXPECT_TRUE(s.ok());
+}
+
TEST_P(End2endTest, RequestStreamTwoRequestsWithCoalescingApi) {
ResetStub();
EchoRequest request;
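
The new RequestStreamTwoRequestsWithWriteThrough test drives the client-streaming path with grpc::WriteOptions::set_write_through(), which sets the GRPC_WRITE_THROUGH flag: the write is then forced out to the transport before being reported complete, instead of completing as soon as it is buffered. A sketch of the usage, assuming the EchoTestService stub and messages from these tests:

    grpc::Status SendTwoWriteThrough(
        grpc::testing::EchoTestService::Stub* stub) {
      grpc::ClientContext context;
      grpc::testing::EchoResponse response;
      auto stream = stub->RequestStream(&context, &response);
      grpc::testing::EchoRequest request;
      request.set_message("hello");
      // GRPC_WRITE_THROUGH: completion is deferred until the message has
      // been written out by the transport, not merely queued.
      stream->Write(request, grpc::WriteOptions().set_write_through());
      stream->Write(request, grpc::WriteOptions().set_write_through());
      stream->WritesDone();
      return stream->Finish();  // response.message() should be "hellohello"
    }
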
diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc
index b5cff664f6..570a3d1067 100644
--- a/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/test/cpp/end2end/grpclb_end2end_test.cc
@@ -332,7 +332,8 @@ class GrpclbEnd2endTest : public ::testing::Test {
num_backends_(num_backends),
num_balancers_(num_balancers),
client_load_reporting_interval_seconds_(
- client_load_reporting_interval_seconds) {}
+ client_load_reporting_interval_seconds),
+ kRequestMessage_("Live long and prosper.") {}
void SetUp() override {
response_generator_ = grpc_fake_resolver_response_generator_create();
@@ -378,6 +379,10 @@ class GrpclbEnd2endTest : public ::testing::Test {
stub_ = grpc::testing::EchoTestService::NewStub(channel_);
}
+ void ResetBackendCounters() {
+ for (const auto& backend : backends_) backend->ResetCounters();
+ }
+
ClientStats WaitForLoadReports() {
ClientStats client_stats;
for (const auto& balancer : balancers_) {
@@ -386,6 +391,27 @@ class GrpclbEnd2endTest : public ::testing::Test {
return client_stats;
}
+ bool SeenAllBackends() {
+ for (const auto& backend : backends_) {
+ if (backend->request_count() == 0) return false;
+ }
+ return true;
+ }
+
+ void WaitForAllBackends() {
+ while (!SeenAllBackends()) {
+ CheckRpcSendOk();
+ }
+ ResetBackendCounters();
+ }
+
+ void WaitForBackend(size_t backend_idx) {
+ do {
+ CheckRpcSendOk();
+ } while (backends_[backend_idx]->request_count() == 0);
+ ResetBackendCounters();
+ }
+
struct AddressData {
int port;
bool is_balancer;
@@ -429,20 +455,31 @@ class GrpclbEnd2endTest : public ::testing::Test {
balancers_.at(i)->add_response(response, delay_ms);
}
- std::vector<std::pair<Status, EchoResponse>> SendRpc(const string& message,
- int num_rpcs,
- int timeout_ms = 1000) {
- std::vector<std::pair<Status, EchoResponse>> results;
+ Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000) {
+ const bool local_response = (response == nullptr);
+ if (local_response) response = new EchoResponse;
EchoRequest request;
- EchoResponse response;
- request.set_message(message);
- for (int i = 0; i < num_rpcs; i++) {
- ClientContext context;
- context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
- Status status = stub_->Echo(&context, request, &response);
- results.push_back(std::make_pair(status, response));
+ request.set_message(kRequestMessage_);
+ ClientContext context;
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
+ Status status = stub_->Echo(&context, request, response);
+ if (local_response) delete response;
+ return status;
+ }
+
+ void CheckRpcSendOk(const size_t times = 1) {
+ for (size_t i = 0; i < times; ++i) {
+ EchoResponse response;
+ const Status status = SendRpc(&response);
+ EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+ << " message=" << status.error_message();
+ EXPECT_EQ(response.message(), kRequestMessage_);
}
- return results;
+ }
+
+ void CheckRpcSendFailure() {
+ const Status status = SendRpc();
+ EXPECT_FALSE(status.ok());
}
template <typename T>
@@ -499,14 +536,12 @@ class GrpclbEnd2endTest : public ::testing::Test {
const int client_load_reporting_interval_seconds_;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
-
std::vector<std::unique_ptr<BackendServiceImpl>> backends_;
std::vector<std::unique_ptr<BalancerServiceImpl>> balancers_;
-
std::vector<ServerThread<BackendService>> backend_servers_;
std::vector<ServerThread<BalancerService>> balancer_servers_;
-
grpc_fake_resolver_response_generator* response_generator_;
+ const grpc::string kRequestMessage_;
};
class SingleBalancerTest : public GrpclbEnd2endTest {
@@ -521,17 +556,12 @@ TEST_F(SingleBalancerTest, Vanilla) {
0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
- // Send 100 RPCs per server.
- const auto& statuses_and_responses =
- SendRpc(kMessage_, kNumRpcsPerAddress * num_backends_);
-
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- const EchoResponse& response = status_and_response.second;
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kMessage_);
- }
+
+ // We need to wait for all backends to come online.
+ WaitForAllBackends();
+
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
// Each backend should have gotten 100 requests.
for (size_t i = 0; i < backends_.size(); ++i) {
@@ -561,8 +591,7 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
const auto t0 = system_clock::now();
// Client will block: LB will initially send empty serverlist.
- const auto& statuses_and_responses =
- SendRpc(kMessage_, num_backends_, kCallDeadlineMs);
+ CheckRpcSendOk(num_backends_);
const auto ellapsed_ms =
std::chrono::duration_cast<std::chrono::milliseconds>(
system_clock::now() - t0);
@@ -576,13 +605,6 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(1U, backend_servers_[i].service_->request_count());
}
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- const EchoResponse& response = status_and_response.second;
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kMessage_);
- }
balancers_[0]->NotifyDoneWithServerlists();
// The balancer got a single request.
EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
@@ -593,70 +615,6 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}
-TEST_F(SingleBalancerTest, RepeatedServerlist) {
- constexpr int kServerlistDelayMs = 100;
-
- // Send a serverlist right away.
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- // ... and the same one a bit later.
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- kServerlistDelayMs);
-
- // Send num_backends/2 requests.
- auto statuses_and_responses = SendRpc(kMessage_, num_backends_ / 2);
- // only the first half of the backends will receive them.
- for (size_t i = 0; i < backends_.size(); ++i) {
- if (i < backends_.size() / 2)
- EXPECT_EQ(1U, backend_servers_[i].service_->request_count())
- << "for backend #" << i;
- else
- EXPECT_EQ(0U, backend_servers_[i].service_->request_count())
- << "for backend #" << i;
- }
- EXPECT_EQ(statuses_and_responses.size(), num_backends_ / 2);
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- const EchoResponse& response = status_and_response.second;
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kMessage_);
- }
-
- // Wait for the (duplicated) serverlist update.
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(kServerlistDelayMs * 1.1, GPR_TIMESPAN)));
-
- // Verify the LB has sent two responses.
- EXPECT_EQ(2U, balancer_servers_[0].service_->response_count());
-
- // Some more calls to complete the total number of backends.
- statuses_and_responses = SendRpc(
- kMessage_,
- num_backends_ / 2 + (num_backends_ & 0x1) /* extra one if num_bes odd */);
- // Because a duplicated serverlist should have no effect, all backends must
- // have been hit once now.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(1U, backend_servers_[i].service_->request_count());
- }
- EXPECT_EQ(statuses_and_responses.size(), num_backends_ / 2);
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- const EchoResponse& response = status_and_response.second;
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kMessage_);
- }
- balancers_[0]->NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
-}
-
TEST_F(SingleBalancerTest, BackendsRestart) {
const size_t kNumRpcsPerAddress = 100;
ScheduleResponseForBalancer(
@@ -664,21 +622,8 @@ TEST_F(SingleBalancerTest, BackendsRestart) {
0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
- // Send 100 RPCs per server.
- auto statuses_and_responses =
- SendRpc(kMessage_, kNumRpcsPerAddress * num_backends_);
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- const EchoResponse& response = status_and_response.second;
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kMessage_);
- }
- // Each backend should have gotten 100 requests.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(kNumRpcsPerAddress,
- backend_servers_[i].service_->request_count());
- }
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
balancers_[0]->NotifyDoneWithServerlists();
// The balancer got a single request.
EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
@@ -687,11 +632,7 @@ TEST_F(SingleBalancerTest, BackendsRestart) {
for (size_t i = 0; i < backends_.size(); ++i) {
if (backends_[i]->Shutdown()) backend_servers_[i].Shutdown();
}
- statuses_and_responses = SendRpc(kMessage_, 1);
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- EXPECT_FALSE(status.ok());
- }
+ CheckRpcSendFailure();
for (size_t i = 0; i < num_backends_; ++i) {
backends_.emplace_back(new BackendServiceImpl());
backend_servers_.emplace_back(ServerThread<BackendService>(
@@ -703,11 +644,7 @@ TEST_F(SingleBalancerTest, BackendsRestart) {
// TODO(dgq): implement the "backend restart" component as well. We need extra
// machinery to either update the LB responses "on the fly" or instruct
// backends which ports to restart on.
- statuses_and_responses = SendRpc(kMessage_, 1);
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- EXPECT_FALSE(status.ok());
- }
+ CheckRpcSendFailure();
// Check LB policy name for the channel.
EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}
@@ -727,13 +664,9 @@ TEST_F(UpdatesTest, UpdateBalancers) {
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- auto statuses_and_responses = SendRpc(kMessage_, 10);
+ CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backend_servers_[0].service_->request_count());
@@ -758,22 +691,12 @@ TEST_F(UpdatesTest, UpdateBalancers) {
// Wait until update has been processed, as signaled by the second backend
// receiving a request.
EXPECT_EQ(0U, backend_servers_[1].service_->request_count());
- do {
- auto statuses_and_responses = SendRpc(kMessage_, 1);
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
- } while (backend_servers_[1].service_->request_count() == 0);
+ WaitForBackend(1);
backend_servers_[1].service_->ResetCounters();
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- statuses_and_responses = SendRpc(kMessage_, 10);
+ CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
// All 10 requests should have gone to the second backend.
EXPECT_EQ(10U, backend_servers_[1].service_->request_count());
@@ -804,13 +727,9 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- auto statuses_and_responses = SendRpc(kMessage_, 10);
+ CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backend_servers_[0].service_->request_count());
@@ -837,11 +756,7 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
// Send 10 seconds worth of RPCs
do {
- statuses_and_responses = SendRpc(kMessage_, 1);
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
+ CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// grpclb continued using the original LB call to the first balancer, which
// doesn't assign the second backend.
@@ -860,11 +775,7 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
gpr_time_from_millis(10000, GPR_TIMESPAN));
// Send 10 seconds worth of RPCs
do {
- statuses_and_responses = SendRpc(kMessage_, 1);
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
+ CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// grpclb continued using the original LB call to the first balancer, which
// doesn't assign the second backend.
@@ -886,12 +797,8 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- auto statuses_and_responses = SendRpc(kMessage_, 10);
+ CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backend_servers_[0].service_->request_count());
@@ -903,12 +810,8 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
// This is serviced by the existing RR policy
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- statuses_and_responses = SendRpc(kMessage_, 10);
+ CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
// All 10 requests should again have gone to the first backend.
EXPECT_EQ(20U, backend_servers_[0].service_->request_count());
EXPECT_EQ(0U, backend_servers_[1].service_->request_count());
@@ -935,23 +838,13 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
// receiving a request. In the meantime, the client continues to be serviced
// (by the first backend) without interruption.
EXPECT_EQ(0U, backend_servers_[1].service_->request_count());
- do {
- auto statuses_and_responses = SendRpc(kMessage_, 1);
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
- } while (backend_servers_[1].service_->request_count() == 0);
+ WaitForBackend(1);
// This is serviced by the existing RR policy
backend_servers_[1].service_->ResetCounters();
gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
- statuses_and_responses = SendRpc(kMessage_, 10);
+ CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
- for (const auto& status_and_response : statuses_and_responses) {
- EXPECT_TRUE(status_and_response.first.ok());
- EXPECT_EQ(status_and_response.second.message(), kMessage_);
- }
// All 10 requests should have gone to the second backend.
EXPECT_EQ(10U, backend_servers_[1].service_->request_count());
@@ -974,14 +867,11 @@ TEST_F(SingleBalancerTest, Drop) {
0, BalancerServiceImpl::BuildResponseForBackends(
GetBackendPorts(), {{"rate_limiting", 1}, {"load_balancing", 2}}),
0);
- // Send 100 RPCs for each server and drop address.
- const auto& statuses_and_responses =
- SendRpc(kMessage_, kNumRpcsPerAddress * (num_backends_ + 3));
-
+ // Send kNumRpcsPerAddress RPCs for each server and drop address.
size_t num_drops = 0;
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- const EchoResponse& response = status_and_response.second;
+ for (size_t i = 0; i < kNumRpcsPerAddress * (num_backends_ + 3); ++i) {
+ EchoResponse response;
+ const Status status = SendRpc(&response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
@@ -1010,12 +900,9 @@ TEST_F(SingleBalancerTest, DropAllFirst) {
0, BalancerServiceImpl::BuildResponseForBackends(
{}, {{"rate_limiting", 1}, {"load_balancing", 1}}),
0);
- const auto& statuses_and_responses = SendRpc(kMessage_, 1);
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- EXPECT_FALSE(status.ok());
- EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
- }
+ const Status status = SendRpc();
+ EXPECT_FALSE(status.ok());
+ EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
}
TEST_F(SingleBalancerTest, DropAll) {
@@ -1028,21 +915,13 @@ TEST_F(SingleBalancerTest, DropAll) {
1000);
// First call succeeds.
- auto statuses_and_responses = SendRpc(kMessage_, 1);
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- const EchoResponse& response = status_and_response.second;
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kMessage_);
- }
+ CheckRpcSendOk();
// But eventually, the update with only dropped servers is processed and calls
// fail.
+ Status status;
do {
- statuses_and_responses = SendRpc(kMessage_, 1);
- ASSERT_EQ(statuses_and_responses.size(), 1UL);
- } while (statuses_and_responses[0].first.ok());
- const Status& status = statuses_and_responses[0].first;
+ status = SendRpc();
+ } while (status.ok());
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
}
@@ -1057,18 +936,8 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
ScheduleResponseForBalancer(
0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
0);
- // Send 100 RPCs per server.
- const auto& statuses_and_responses =
- SendRpc(kMessage_, kNumRpcsPerAddress * num_backends_);
-
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- const EchoResponse& response = status_and_response.second;
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kMessage_);
- }
-
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
// Each backend should have gotten 100 requests.
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress,
@@ -1096,14 +965,11 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
0, BalancerServiceImpl::BuildResponseForBackends(
GetBackendPorts(), {{"rate_limiting", 2}, {"load_balancing", 1}}),
0);
- // Send 100 RPCs for each server and drop address.
- const auto& statuses_and_responses =
- SendRpc(kMessage_, kNumRpcsPerAddress * (num_backends_ + 3));
size_t num_drops = 0;
- for (const auto& status_and_response : statuses_and_responses) {
- const Status& status = status_and_response.first;
- const EchoResponse& response = status_and_response.second;
+ for (size_t i = 0; i < kNumRpcsPerAddress * (num_backends_ + 3); ++i) {
+ EchoResponse response;
+ const Status status = SendRpc(&response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
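
Throughout grpclb_end2end_test.cc, the batch SendRpc(message, num_rpcs) that returned a vector of (Status, EchoResponse) pairs is replaced by a single-RPC SendRpc() plus the CheckRpcSendOk()/CheckRpcSendFailure() wrappers, and the hand-rolled wait loops become WaitForBackend()/WaitForAllBackends(). The optional out-parameter idiom in the new SendRpc() could also be written without the manual new/delete; a sketch under the same fixture assumptions (stub_, kRequestMessage_):

    // Same contract as the new SendRpc(), but the fallback response lives
    // on the stack instead of being heap-allocated and freed by hand.
    Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000) {
      EchoResponse local_response;
      if (response == nullptr) response = &local_response;
      EchoRequest request;
      request.set_message(kRequestMessage_);
      ClientContext context;
      context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
      return stub_->Echo(&context, request, response);
    }

Either form keeps one RPC per call, which is what lets the tests interleave sends with per-backend counter checks.
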
diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
index a08837f0a1..6f9dee7822 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
@@ -29,6 +29,7 @@
extern "C" {
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
+#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -154,23 +155,59 @@ class Fixture {
grpc_transport *t_;
};
-static void DoNothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
+class Closure : public grpc_closure {
+ public:
+ virtual ~Closure() {}
+};
+
+template <class F>
+std::unique_ptr<Closure> MakeClosure(
+ F f, grpc_closure_scheduler *sched = grpc_schedule_on_exec_ctx) {
+ struct C : public Closure {
+ C(const F &f, grpc_closure_scheduler *sched) : f_(f) {
+ GRPC_CLOSURE_INIT(this, Execute, this, sched);
+ }
+ F f_;
+ static void Execute(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ static_cast<C *>(arg)->f_(exec_ctx, error);
+ }
+ };
+ return std::unique_ptr<Closure>(new C(f, sched));
+}
+
+template <class F>
+grpc_closure *MakeOnceClosure(
+ F f, grpc_closure_scheduler *sched = grpc_schedule_on_exec_ctx) {
+ struct C : public grpc_closure {
+ C(const F &f) : f_(f) {}
+ F f_;
+ static void Execute(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ static_cast<C *>(arg)->f_(exec_ctx, error);
+ delete static_cast<C *>(arg);
+ }
+ };
+ auto *c = new C{f};
+ return GRPC_CLOSURE_INIT(c, C::Execute, c, sched);
+}
class Stream {
public:
Stream(Fixture *f) : f_(f) {
- GRPC_STREAM_REF_INIT(&refcount_, 1, DoNothing, nullptr, "test_stream");
stream_size_ = grpc_transport_stream_size(f->transport());
stream_ = gpr_malloc(stream_size_);
arena_ = gpr_arena_create(4096);
}
~Stream() {
+ gpr_event_wait(&done_, gpr_inf_future(GPR_CLOCK_REALTIME));
gpr_free(stream_);
gpr_arena_destroy(arena_);
}
void Init(benchmark::State &state) {
+ GRPC_STREAM_REF_INIT(&refcount_, 1, &Stream::FinishDestroy, this,
+ "test_stream");
+ gpr_event_init(&done_);
memset(stream_, 0, stream_size_);
if ((state.iterations() & 0xffff) == 0) {
gpr_arena_destroy(arena_);
@@ -181,13 +218,17 @@ class Stream {
NULL, arena_);
}
- void DestroyThen(grpc_closure *closure) {
- grpc_transport_destroy_stream(f_->exec_ctx(), f_->transport(),
- static_cast<grpc_stream *>(stream_), closure);
+ void DestroyThen(grpc_exec_ctx *exec_ctx, grpc_closure *closure) {
+ destroy_closure_ = closure;
+#ifndef NDEBUG
+ grpc_stream_unref(exec_ctx, &refcount_, "DestroyThen");
+#else
+ grpc_stream_unref(exec_ctx, &refcount_);
+#endif
}
- void Op(grpc_transport_stream_op_batch *op) {
- grpc_transport_perform_stream_op(f_->exec_ctx(), f_->transport(),
+ void Op(grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op) {
+ grpc_transport_perform_stream_op(exec_ctx, f_->transport(),
static_cast<grpc_stream *>(stream_), op);
}
@@ -196,48 +237,24 @@ class Stream {
}
private:
+ static void FinishDestroy(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ auto stream = static_cast<Stream *>(arg);
+ grpc_transport_destroy_stream(exec_ctx, stream->f_->transport(),
+ static_cast<grpc_stream *>(stream->stream_),
+ stream->destroy_closure_);
+ gpr_event_set(&stream->done_, (void *)1);
+ }
+
Fixture *f_;
grpc_stream_refcount refcount_;
gpr_arena *arena_;
size_t stream_size_;
void *stream_;
+ grpc_closure *destroy_closure_ = nullptr;
+ gpr_event done_;
};
-class Closure : public grpc_closure {
- public:
- virtual ~Closure() {}
-};
-
-template <class F>
-std::unique_ptr<Closure> MakeClosure(
- F f, grpc_closure_scheduler *sched = grpc_schedule_on_exec_ctx) {
- struct C : public Closure {
- C(const F &f, grpc_closure_scheduler *sched) : f_(f) {
- GRPC_CLOSURE_INIT(this, Execute, this, sched);
- }
- F f_;
- static void Execute(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- static_cast<C *>(arg)->f_(exec_ctx, error);
- }
- };
- return std::unique_ptr<Closure>(new C(f, sched));
-}
-
-template <class F>
-grpc_closure *MakeOnceClosure(
- F f, grpc_closure_scheduler *sched = grpc_schedule_on_exec_ctx) {
- struct C : public grpc_closure {
- C(const F &f) : f_(f) {}
- F f_;
- static void Execute(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- static_cast<C *>(arg)->f_(exec_ctx, error);
- delete static_cast<C *>(arg);
- }
- };
- auto *c = new C{f};
- return GRPC_CLOSURE_INIT(c, C::Execute, c, sched);
-}
-
////////////////////////////////////////////////////////////////////////////////
// Benchmarks
//
@@ -246,11 +263,18 @@ static void BM_StreamCreateDestroy(benchmark::State &state) {
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
Stream s(&f);
+ grpc_transport_stream_op_batch op;
+ grpc_transport_stream_op_batch_payload op_payload;
+ memset(&op, 0, sizeof(op));
+ op.cancel_stream = true;
+ op.payload = &op_payload;
+ op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
std::unique_ptr<Closure> next =
MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
if (!state.KeepRunning()) return;
s.Init(state);
- s.DestroyThen(next.get());
+ s.Op(exec_ctx, &op);
+ s.DestroyThen(exec_ctx, next.get());
});
GRPC_CLOSURE_RUN(f.exec_ctx(), next.get(), GRPC_ERROR_NONE);
f.FlushExecCtx();
@@ -314,14 +338,14 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
op.on_complete = done.get();
op.send_initial_metadata = true;
op.payload->send_initial_metadata.send_initial_metadata = &b;
- s.Op(&op);
+ s.Op(exec_ctx, &op);
});
done = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
reset_op();
op.cancel_stream = true;
op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
- s.Op(&op);
- s.DestroyThen(start.get());
+ s.Op(exec_ctx, &op);
+ s.DestroyThen(exec_ctx, start.get());
});
GRPC_CLOSURE_SCHED(f.exec_ctx(), start.get(), GRPC_ERROR_NONE);
f.FlushExecCtx();
@@ -348,22 +372,28 @@ static void BM_TransportEmptyOp(benchmark::State &state) {
if (!state.KeepRunning()) return;
reset_op();
op.on_complete = c.get();
- s.Op(&op);
+ s.Op(exec_ctx, &op);
});
GRPC_CLOSURE_SCHED(f.exec_ctx(), c.get(), GRPC_ERROR_NONE);
f.FlushExecCtx();
- s.DestroyThen(
- MakeOnceClosure([](grpc_exec_ctx *exec_ctx, grpc_error *error) {}));
+ reset_op();
+ op.cancel_stream = true;
+ op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
+ s.Op(f.exec_ctx(), &op);
+ s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx *exec_ctx,
+ grpc_error *error) {}));
f.FlushExecCtx();
track_counters.Finish(state);
}
BENCHMARK(BM_TransportEmptyOp);
+std::vector<std::unique_ptr<gpr_event>> done_events;
+
static void BM_TransportStreamSend(benchmark::State &state) {
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
- Stream s(&f);
- s.Init(state);
+ auto s = std::unique_ptr<Stream>(new Stream(&f));
+ s->Init(state);
grpc_transport_stream_op_batch op;
grpc_transport_stream_op_batch_payload op_payload;
memset(&op_payload, 0, sizeof(op_payload));
@@ -390,11 +420,17 @@ static void BM_TransportStreamSend(benchmark::State &state) {
grpc_metadata_batch_add_tail(f.exec_ctx(), &b, &storage[i], elems[i])));
}
+ gpr_event *bm_done = new gpr_event;
+ gpr_event_init(bm_done);
+
std::unique_ptr<Closure> c =
MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) {
- if (!state.KeepRunning()) return;
+ if (!state.KeepRunning()) {
+ gpr_event_set(bm_done, (void *)1);
+ return;
+ }
// force outgoing window to be yuge
- s.chttp2_stream()->flow_control.remote_window_delta =
+ s->chttp2_stream()->flow_control.remote_window_delta =
1024 * 1024 * 1024;
f.chttp2_transport()->flow_control.remote_window = 1024 * 1024 * 1024;
grpc_slice_buffer_stream_init(&send_stream, &send_buffer, 0);
@@ -402,23 +438,27 @@ static void BM_TransportStreamSend(benchmark::State &state) {
op.on_complete = c.get();
op.send_message = true;
op.payload->send_message.send_message = &send_stream.base;
- s.Op(&op);
+ s->Op(exec_ctx, &op);
});
reset_op();
op.send_initial_metadata = true;
op.payload->send_initial_metadata.send_initial_metadata = &b;
op.on_complete = c.get();
- s.Op(&op);
+ s->Op(f.exec_ctx(), &op);
f.FlushExecCtx();
+ gpr_event_wait(bm_done, gpr_inf_future(GPR_CLOCK_REALTIME));
+ done_events.emplace_back(bm_done);
+
reset_op();
op.cancel_stream = true;
op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
- s.Op(&op);
- s.DestroyThen(
- MakeOnceClosure([](grpc_exec_ctx *exec_ctx, grpc_error *error) {}));
+ s->Op(f.exec_ctx(), &op);
+ s->DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx *exec_ctx,
+ grpc_error *error) {}));
f.FlushExecCtx();
+ s.reset();
track_counters.Finish(state);
grpc_metadata_batch_destroy(f.exec_ctx(), &b);
grpc_slice_buffer_destroy(&send_buffer);
@@ -535,7 +575,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
op.recv_message = true;
op.payload->recv_message.recv_message = &recv_stream;
op.payload->recv_message.recv_message_ready = drain_start.get();
- s.Op(&op);
+ s.Op(exec_ctx, &op);
f.PushInput(grpc_slice_ref(incoming_data));
});
@@ -578,7 +618,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
op.payload->recv_initial_metadata.recv_initial_metadata_ready =
do_nothing.get();
op.on_complete = c.get();
- s.Op(&op);
+ s.Op(f.exec_ctx(), &op);
f.PushInput(SLICE_FROM_BUFFER(
"\x00\x00\x00\x04\x00\x00\x00\x00\x00"
// Generated using:
@@ -596,9 +636,9 @@ static void BM_TransportStreamRecv(benchmark::State &state) {
reset_op();
op.cancel_stream = true;
op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
- s.Op(&op);
- s.DestroyThen(
- MakeOnceClosure([](grpc_exec_ctx *exec_ctx, grpc_error *error) {}));
+ s.Op(f.exec_ctx(), &op);
+ s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx *exec_ctx,
+ grpc_error *error) {}));
f.FlushExecCtx();
track_counters.Finish(state);
grpc_metadata_batch_destroy(f.exec_ctx(), &b);
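
In bm_chttp2_transport.cc, stream destruction now rides the stream refcount: DestroyThen() records the continuation and drops the last ref, and FinishDestroy() (invoked when the refcount reaches zero) calls grpc_transport_destroy_stream and signals done_, which the destructor waits on. The relocated MakeClosure/MakeOnceClosure helpers wrap a C++ lambda behind gRPC's C callback interface; a simplified, self-contained sketch of that trampoline idea, with plain function pointers standing in for grpc_closure and GRPC_CLOSURE_INIT:

    #include <utility>

    typedef void (*callback_fn)(void* arg);

    // The static Run member gives a C-style callback API something to
    // invoke; "once" callbacks delete themselves after the first run.
    template <class F>
    std::pair<callback_fn, void*> MakeOnceCallback(F f) {
      struct C {
        explicit C(F f) : f_(std::move(f)) {}
        F f_;
        static void Run(void* arg) {
          C* c = static_cast<C*>(arg);
          c->f_();    // run the wrapped callable
          delete c;   // self-delete: this callback fires exactly once
        }
      };
      C* c = new C(std::move(f));
      return {&C::Run, c};
    }
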
diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
index 135b4710ce..59fb29dd60 100644
--- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
+++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
@@ -105,7 +105,7 @@ class TrickledCHTTP2 : public EndpointPairFixture {
(double)state.iterations());
}
- void Log(int64_t iteration) {
+ void Log(int64_t iteration) GPR_ATTRIBUTE_NO_TSAN {
auto now = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), start_);
grpc_chttp2_transport* client =
reinterpret_cast<grpc_chttp2_transport*>(client_transport_);
@@ -193,7 +193,8 @@ class TrickledCHTTP2 : public EndpointPairFixture {
return p;
}
- void UpdateStats(grpc_chttp2_transport* t, Stats* s, size_t backlog) {
+ void UpdateStats(grpc_chttp2_transport* t, Stats* s,
+ size_t backlog) GPR_ATTRIBUTE_NO_TSAN {
if (backlog == 0) {
if (t->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != NULL) {
s->streams_stalled_due_to_stream_flow_control++;
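
The GPR_ATTRIBUTE_NO_TSAN annotations on Log() and UpdateStats() exempt these benchmark-only readers from ThreadSanitizer instrumentation: they peek at transport internals without taking the transport's locks, and the resulting races are deliberate. A sketch of how such a macro is commonly defined (the real definition lives in gRPC's port headers and may differ):

    // Apply the attribute only when building under ThreadSanitizer.
    #if defined(__has_feature)
    #if __has_feature(thread_sanitizer)
    #define GPR_ATTRIBUTE_NO_TSAN __attribute__((no_sanitize("thread")))
    #endif
    #endif
    #ifndef GPR_ATTRIBUTE_NO_TSAN
    #define GPR_ATTRIBUTE_NO_TSAN
    #endif
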