path: root/test/cpp
author     yang-g <yangg@google.com>  2017-02-17 09:55:03 -0800
committer  yang-g <yangg@google.com>  2017-02-17 09:55:03 -0800
commit     55c6ebfd7a9f8077557a43dee9bf67d5c60528cb (patch)
tree       bac81627fcc53203050c4362b7036e7c486c44d0 /test/cpp
parent     076bac017b200accb8071e569b471f118fc3ba32 (diff)
parent     13e185419cd177b7fb552601665e43820321a96b (diff)
Merge branch 'master' into health
Diffstat (limited to 'test/cpp')
-rw-r--r--  test/cpp/codegen/codegen_test_full.cc           2
-rw-r--r--  test/cpp/codegen/proto_utils_test.cc            93
-rw-r--r--  test/cpp/grpclb/grpclb_test.cc                  89
-rw-r--r--  test/cpp/interop/client.cc                      2
-rw-r--r--  test/cpp/interop/http2_client.cc                2
-rw-r--r--  test/cpp/interop/reconnect_interop_client.cc    2
-rw-r--r--  test/cpp/microbenchmarks/bm_closure.cc          356
-rw-r--r--  test/cpp/microbenchmarks/bm_fullstack.cc        244
8 files changed, 740 insertions(+), 50 deletions(-)
diff --git a/test/cpp/codegen/codegen_test_full.cc b/test/cpp/codegen/codegen_test_full.cc
index d6e2416b55..bc19fc9669 100644
--- a/test/cpp/codegen/codegen_test_full.cc
+++ b/test/cpp/codegen/codegen_test_full.cc
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2016, Google Inc.
+ * Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/test/cpp/codegen/proto_utils_test.cc b/test/cpp/codegen/proto_utils_test.cc
new file mode 100644
index 0000000000..1daa142b50
--- /dev/null
+++ b/test/cpp/codegen/proto_utils_test.cc
@@ -0,0 +1,93 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/impl/codegen/proto_utils.h>
+#include <grpc++/impl/grpc_library.h>
+#include <gtest/gtest.h>
+
+namespace grpc {
+namespace internal {
+
+static GrpcLibraryInitializer g_gli_initializer;
+
+// Provide access to GrpcBufferWriter internals.
+class GrpcBufferWriterPeer {
+ public:
+ explicit GrpcBufferWriterPeer(internal::GrpcBufferWriter* writer)
+ : writer_(writer) {}
+ bool have_backup() const { return writer_->have_backup_; }
+ const grpc_slice& backup_slice() const { return writer_->backup_slice_; }
+ const grpc_slice& slice() const { return writer_->slice_; }
+
+ private:
+ GrpcBufferWriter* writer_;
+};
+
+class ProtoUtilsTest : public ::testing::Test {};
+
+// Regression test for a memory corruption bug where a series of
+// GrpcBufferWriter Next()/Backup() invocations could result in a dangling
+// pointer returned by Next() due to the interaction between grpc_slice inlining
+// and GRPC_SLICE_START_PTR.
+TEST_F(ProtoUtilsTest, BackupNext) {
+ // Ensure the GrpcBufferWriter internals are initialized.
+ g_gli_initializer.summon();
+
+ grpc_byte_buffer* bp;
+ GrpcBufferWriter writer(&bp, 8192);
+ GrpcBufferWriterPeer peer(&writer);
+
+ void* data;
+ int size;
+ // Allocate a slice.
+ ASSERT_TRUE(writer.Next(&data, &size));
+ EXPECT_EQ(8192, size);
+ // Return a single byte. Before the fix that this test acts as a regression
+ // for, this would have resulted in an inlined backup slice.
+ writer.BackUp(1);
+ EXPECT_TRUE(!peer.have_backup());
+ // On the next allocation, the slice is non-inlined.
+ ASSERT_TRUE(writer.Next(&data, &size));
+ EXPECT_TRUE(peer.slice().refcount != NULL);
+
+ // Cleanup.
+ g_core_codegen_interface->grpc_byte_buffer_destroy(bp);
+}
+
+} // namespace internal
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
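
For context, GrpcBufferWriter implements protobuf's ZeroCopyOutputStream interface, and the regression test above exercises that interface's Next()/BackUp() contract. The following standalone sketch is not part of this commit; it uses protobuf's ArrayOutputStream purely to illustrate the same contract in its simplest form:

#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
#include <cassert>
#include <cstring>

int main() {
  char storage[64];
  google::protobuf::io::ArrayOutputStream out(storage, sizeof(storage));
  void* data;
  int size;
  // Next() hands the caller a writable buffer and its size...
  assert(out.Next(&data, &size));
  memcpy(data, "hi", 2);
  // ...and BackUp() returns the unused tail, so ByteCount() only reflects
  // the bytes actually written. The test above checks that this sequence
  // does not leave GrpcBufferWriter holding a dangling inlined slice.
  out.BackUp(size - 2);
  assert(out.ByteCount() == 2);
  return 0;
}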
diff --git a/test/cpp/grpclb/grpclb_test.cc b/test/cpp/grpclb/grpclb_test.cc
index 4b8a434c78..89ed9249ad 100644
--- a/test/cpp/grpclb/grpclb_test.cc
+++ b/test/cpp/grpclb/grpclb_test.cc
@@ -52,8 +52,10 @@
#include <grpc++/impl/codegen/config.h>
extern "C" {
#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/support/tmpfile.h"
#include "src/core/lib/surface/channel.h"
@@ -110,6 +112,7 @@ typedef struct server_fixture {
grpc_call *server_call;
grpc_completion_queue *cq;
char *servers_hostport;
+ const char *balancer_name;
int port;
const char *lb_token_prefix;
gpr_thd_id tid;
@@ -201,10 +204,12 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
&request_metadata_recv, sf->cq, sf->cq,
tag(200));
GPR_ASSERT(GRPC_CALL_OK == error);
- gpr_log(GPR_INFO, "LB Server[%s] up", sf->servers_hostport);
+ gpr_log(GPR_INFO, "LB Server[%s](%s) up", sf->servers_hostport,
+ sf->balancer_name);
CQ_EXPECT_COMPLETION(cqv, tag(200), 1);
cq_verify(cqv);
- gpr_log(GPR_INFO, "LB Server[%s] after tag 200", sf->servers_hostport);
+ gpr_log(GPR_INFO, "LB Server[%s](%s) after tag 200", sf->servers_hostport,
+ sf->balancer_name);
// make sure we've received the initial metadata from the grpclb request.
GPR_ASSERT(request_metadata_recv.count > 0);
@@ -221,7 +226,8 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(202), 1);
cq_verify(cqv);
- gpr_log(GPR_INFO, "LB Server[%s] after RECV_MSG", sf->servers_hostport);
+ gpr_log(GPR_INFO, "LB Server[%s](%s) after RECV_MSG", sf->servers_hostport,
+ sf->balancer_name);
// validate initial request.
grpc_byte_buffer_reader bbr;
@@ -250,7 +256,8 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(201), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
- gpr_log(GPR_INFO, "LB Server[%s] after tag 201", sf->servers_hostport);
+ gpr_log(GPR_INFO, "LB Server[%s](%s) after tag 201", sf->servers_hostport,
+ sf->balancer_name);
for (int i = 0; i < 2; i++) {
if (i == 0) {
@@ -276,13 +283,14 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(203), 1);
cq_verify(cqv);
- gpr_log(GPR_INFO, "LB Server[%s] after SEND_MESSAGE, iter %d",
- sf->servers_hostport, i);
+ gpr_log(GPR_INFO, "LB Server[%s](%s) after SEND_MESSAGE, iter %d",
+ sf->servers_hostport, sf->balancer_name, i);
grpc_byte_buffer_destroy(response_payload);
grpc_slice_unref(response_payload_slice);
}
- gpr_log(GPR_INFO, "LB Server[%s] shutting down", sf->servers_hostport);
+ gpr_log(GPR_INFO, "LB Server[%s](%s) shutting down", sf->servers_hostport,
+ sf->balancer_name);
op = ops;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
@@ -299,8 +307,8 @@ static void start_lb_server(server_fixture *sf, int *ports, size_t nports,
CQ_EXPECT_COMPLETION(cqv, tag(201), 1);
CQ_EXPECT_COMPLETION(cqv, tag(204), 1);
cq_verify(cqv);
- gpr_log(GPR_INFO, "LB Server[%s] after tag 204. All done. LB server out",
- sf->servers_hostport);
+ gpr_log(GPR_INFO, "LB Server[%s](%s) after tag 204. All done. LB server out",
+ sf->servers_hostport, sf->balancer_name);
grpc_call_destroy(s);
@@ -561,10 +569,38 @@ static void perform_request(client_fixture *cf) {
gpr_free(peer);
}
-static void setup_client(const char *server_hostport, client_fixture *cf) {
+#define BALANCERS_NAME "lb.name"
+static void setup_client(const server_fixture *lb_server,
+ const server_fixture *backends, client_fixture *cf) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ char *lb_uri;
+ // The grpclb LB policy will be automatically selected by virtue of
+ // the fact that the returned addresses are balancer addresses.
+ gpr_asprintf(&lb_uri, "test:///%s?lb_enabled=1&balancer_names=%s",
+ lb_server->servers_hostport, lb_server->balancer_name);
+
+ grpc_arg expected_target_arg;
+ expected_target_arg.type = GRPC_ARG_STRING;
+ expected_target_arg.key =
+ const_cast<char *>(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS);
+
+ char *expected_target_names = NULL;
+ const char *backends_name = lb_server->servers_hostport;
+ gpr_asprintf(&expected_target_names, "%s;%s", backends_name, BALANCERS_NAME);
+
+ expected_target_arg.value.string = const_cast<char *>(expected_target_names);
+ grpc_channel_args *args =
+ grpc_channel_args_copy_and_add(NULL, &expected_target_arg, 1);
+ gpr_free(expected_target_names);
+
cf->cq = grpc_completion_queue_create(NULL);
- cf->server_uri = gpr_strdup(server_hostport);
- cf->client = grpc_insecure_channel_create(cf->server_uri, NULL, NULL);
+ cf->server_uri = lb_uri;
+ grpc_channel_credentials *fake_creds =
+ grpc_fake_transport_security_credentials_create();
+ cf->client =
+ grpc_secure_channel_create(fake_creds, cf->server_uri, args, NULL);
+ grpc_channel_credentials_unref(&exec_ctx, fake_creds);
+ grpc_channel_args_destroy(&exec_ctx, args);
}
static void teardown_client(client_fixture *cf) {
@@ -591,10 +627,14 @@ static void setup_server(const char *host, server_fixture *sf) {
gpr_join_host_port(&sf->servers_hostport, host, sf->port);
}
+ grpc_server_credentials *server_creds =
+ grpc_fake_transport_security_server_credentials_create();
+
sf->server = grpc_server_create(NULL, NULL);
grpc_server_register_completion_queue(sf->server, sf->cq, NULL);
- GPR_ASSERT((assigned_port = grpc_server_add_insecure_http2_port(
- sf->server, sf->servers_hostport)) > 0);
+ GPR_ASSERT((assigned_port = grpc_server_add_secure_http2_port(
+ sf->server, sf->servers_hostport, server_creds)) > 0);
+ grpc_server_credentials_release(server_creds);
GPR_ASSERT(sf->port == assigned_port);
grpc_server_start(sf->server);
}
@@ -656,17 +696,10 @@ static test_fixture setup_test_fixture(int lb_server_update_delay_ms) {
}
tf.lb_server.lb_token_prefix = LB_TOKEN_PREFIX;
+ tf.lb_server.balancer_name = BALANCERS_NAME;
setup_server("127.0.0.1", &tf.lb_server);
gpr_thd_new(&tf.lb_server.tid, fork_lb_server, &tf.lb_server, &options);
-
- char *server_uri;
- // The grpclb LB policy will be automatically selected by virtue of
- // the fact that the returned addresses are balancer addresses.
- gpr_asprintf(&server_uri, "test:///%s?lb_enabled=1",
- tf.lb_server.servers_hostport);
- setup_client(server_uri, &tf.client);
- gpr_free(server_uri);
-
+ setup_client(&tf.lb_server, tf.lb_backends, &tf.client);
return tf;
}
@@ -711,8 +744,9 @@ TEST(GrpclbTest, Updates) {
// batch 1. All subsequent picks will come from the second half of the
// backends, those coming in the LB update.
tf_result = grpc::test_update(800);
- GPR_ASSERT(tf_result.lb_backends[0].num_calls_serviced == 1);
- GPR_ASSERT(tf_result.lb_backends[1].num_calls_serviced == 0);
+ GPR_ASSERT(tf_result.lb_backends[0].num_calls_serviced +
+ tf_result.lb_backends[1].num_calls_serviced ==
+ 1);
GPR_ASSERT(tf_result.lb_backends[2].num_calls_serviced +
tf_result.lb_backends[3].num_calls_serviced >
0);
@@ -728,8 +762,9 @@ TEST(GrpclbTest, Updates) {
// update. In any case, the total number of serviced calls must again be equal
// to four across all the backends.
tf_result = grpc::test_update(2500);
- GPR_ASSERT(tf_result.lb_backends[0].num_calls_serviced >= 1);
- GPR_ASSERT(tf_result.lb_backends[1].num_calls_serviced == 1);
+ GPR_ASSERT(tf_result.lb_backends[0].num_calls_serviced +
+ tf_result.lb_backends[1].num_calls_serviced >=
+ 2);
GPR_ASSERT(tf_result.lb_backends[2].num_calls_serviced +
tf_result.lb_backends[3].num_calls_serviced >
0);
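
The client setup above switches from an insecure channel to fake transport security plus an expected-targets channel argument. Below is a minimal sketch of assembling such a string-valued channel argument; it is not taken from the commit, but it reuses the same C-core helpers the diff includes, with the signatures of this era:

extern "C" {
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
}
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>

// Builds channel args containing "<backends>;<balancer>" under the
// fake-security expected-targets key, mirroring setup_client() above.
static grpc_channel_args* make_expected_targets_args(const char* backends,
                                                     const char* balancer) {
  char* expected = NULL;
  gpr_asprintf(&expected, "%s;%s", backends, balancer);
  grpc_arg arg;
  arg.type = GRPC_ARG_STRING;
  arg.key = const_cast<char*>(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS);
  arg.value.string = expected;
  // grpc_channel_args_copy_and_add() copies the strings, so the temporary
  // buffer can be freed right away.
  grpc_channel_args* args = grpc_channel_args_copy_and_add(NULL, &arg, 1);
  gpr_free(expected);
  return args;
}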
diff --git a/test/cpp/interop/client.cc b/test/cpp/interop/client.cc
index 8a00b61cef..5688ab7971 100644
--- a/test/cpp/interop/client.cc
+++ b/test/cpp/interop/client.cc
@@ -51,7 +51,7 @@ DEFINE_bool(use_tls, false, "Whether to use tls.");
DEFINE_string(custom_credentials_type, "", "User provided credentials type.");
DEFINE_bool(use_test_ca, false, "False to use SSL roots for google");
DEFINE_int32(server_port, 0, "Server port.");
-DEFINE_string(server_host, "127.0.0.1", "Server host to connect to");
+DEFINE_string(server_host, "localhost", "Server host to connect to");
DEFINE_string(server_host_override, "foo.test.google.fr",
"Override the server host which is sent in HTTP header");
DEFINE_string(
diff --git a/test/cpp/interop/http2_client.cc b/test/cpp/interop/http2_client.cc
index 38aee43b26..b96e9fac36 100644
--- a/test/cpp/interop/http2_client.cc
+++ b/test/cpp/interop/http2_client.cc
@@ -223,7 +223,7 @@ bool Http2Client::DoMaxStreams() {
} // namespace grpc
DEFINE_int32(server_port, 0, "Server port.");
-DEFINE_string(server_host, "127.0.0.1", "Server host to connect to");
+DEFINE_string(server_host, "localhost", "Server host to connect to");
DEFINE_string(test_case, "rst_after_header",
"Configure different test cases. Valid options are:\n\n"
"goaway\n"
diff --git a/test/cpp/interop/reconnect_interop_client.cc b/test/cpp/interop/reconnect_interop_client.cc
index 797e52c744..1c2f606637 100644
--- a/test/cpp/interop/reconnect_interop_client.cc
+++ b/test/cpp/interop/reconnect_interop_client.cc
@@ -48,7 +48,7 @@
DEFINE_int32(server_control_port, 0, "Server port for control rpcs.");
DEFINE_int32(server_retry_port, 0, "Server port for testing reconnection.");
-DEFINE_string(server_host, "127.0.0.1", "Server host to connect to");
+DEFINE_string(server_host, "localhost", "Server host to connect to");
DEFINE_int32(max_reconnect_backoff_ms, 0,
"Maximum backoff time, or 0 for default.");
diff --git a/test/cpp/microbenchmarks/bm_closure.cc b/test/cpp/microbenchmarks/bm_closure.cc
new file mode 100644
index 0000000000..80d6610e13
--- /dev/null
+++ b/test/cpp/microbenchmarks/bm_closure.cc
@@ -0,0 +1,356 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* Test various closure related operations */
+
+#include <grpc/grpc.h>
+
+extern "C" {
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+}
+
+#include "third_party/benchmark/include/benchmark/benchmark.h"
+
+static class InitializeStuff {
+ public:
+ InitializeStuff() { grpc_init(); }
+ ~InitializeStuff() { grpc_shutdown(); }
+} initialize_stuff;
+
+static void BM_NoOpExecCtx(benchmark::State& state) {
+ while (state.KeepRunning()) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_exec_ctx_finish(&exec_ctx);
+ }
+}
+BENCHMARK(BM_NoOpExecCtx);
+
+static void BM_WellFlushed(benchmark::State& state) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_WellFlushed);
+
+static void DoNothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+
+static void BM_ClosureInitAgainstExecCtx(benchmark::State& state) {
+ grpc_closure c;
+ while (state.KeepRunning()) {
+ benchmark::DoNotOptimize(
+ grpc_closure_init(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx));
+ }
+}
+BENCHMARK(BM_ClosureInitAgainstExecCtx);
+
+static void BM_ClosureInitAgainstCombiner(benchmark::State& state) {
+ grpc_combiner* combiner = grpc_combiner_create(NULL);
+ grpc_closure c;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ benchmark::DoNotOptimize(grpc_closure_init(
+ &c, DoNothing, NULL, grpc_combiner_scheduler(combiner, false)));
+ }
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureInitAgainstCombiner);
+
+static void BM_ClosureRunOnExecCtx(benchmark::State& state) {
+ grpc_closure c;
+ grpc_closure_init(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_run(&exec_ctx, &c, GRPC_ERROR_NONE);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureRunOnExecCtx);
+
+static void BM_ClosureCreateAndRun(benchmark::State& state) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_run(&exec_ctx, grpc_closure_create(DoNothing, NULL,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureCreateAndRun);
+
+static void BM_ClosureInitAndRun(benchmark::State& state) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_closure c;
+ while (state.KeepRunning()) {
+ grpc_closure_run(&exec_ctx, grpc_closure_init(&c, DoNothing, NULL,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureInitAndRun);
+
+static void BM_ClosureSchedOnExecCtx(benchmark::State& state) {
+ grpc_closure c;
+ grpc_closure_init(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_sched(&exec_ctx, &c, GRPC_ERROR_NONE);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureSchedOnExecCtx);
+
+static void BM_ClosureSched2OnExecCtx(benchmark::State& state) {
+ grpc_closure c1;
+ grpc_closure c2;
+ grpc_closure_init(&c1, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&c2, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureSched2OnExecCtx);
+
+static void BM_ClosureSched3OnExecCtx(benchmark::State& state) {
+ grpc_closure c1;
+ grpc_closure c2;
+ grpc_closure c3;
+ grpc_closure_init(&c1, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&c2, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&c3, DoNothing, NULL, grpc_schedule_on_exec_ctx);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c3, GRPC_ERROR_NONE);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureSched3OnExecCtx);
+
+static void BM_AcquireMutex(benchmark::State& state) {
+ // for comparison with the combiner stuff below
+ gpr_mu mu;
+ gpr_mu_init(&mu);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ gpr_mu_lock(&mu);
+ DoNothing(&exec_ctx, NULL, GRPC_ERROR_NONE);
+ gpr_mu_unlock(&mu);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_AcquireMutex);
+
+static void BM_ClosureSchedOnCombiner(benchmark::State& state) {
+ grpc_combiner* combiner = grpc_combiner_create(NULL);
+ grpc_closure c;
+ grpc_closure_init(&c, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner, false));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_sched(&exec_ctx, &c, GRPC_ERROR_NONE);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureSchedOnCombiner);
+
+static void BM_ClosureSched2OnCombiner(benchmark::State& state) {
+ grpc_combiner* combiner = grpc_combiner_create(NULL);
+ grpc_closure c1;
+ grpc_closure c2;
+ grpc_closure_init(&c1, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner, false));
+ grpc_closure_init(&c2, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner, false));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureSched2OnCombiner);
+
+static void BM_ClosureSched3OnCombiner(benchmark::State& state) {
+ grpc_combiner* combiner = grpc_combiner_create(NULL);
+ grpc_closure c1;
+ grpc_closure c2;
+ grpc_closure c3;
+ grpc_closure_init(&c1, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner, false));
+ grpc_closure_init(&c2, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner, false));
+ grpc_closure_init(&c3, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner, false));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c3, GRPC_ERROR_NONE);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureSched3OnCombiner);
+
+static void BM_ClosureSched2OnTwoCombiners(benchmark::State& state) {
+ grpc_combiner* combiner1 = grpc_combiner_create(NULL);
+ grpc_combiner* combiner2 = grpc_combiner_create(NULL);
+ grpc_closure c1;
+ grpc_closure c2;
+ grpc_closure_init(&c1, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner1, false));
+ grpc_closure_init(&c2, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner2, false));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner1, "finished");
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner2, "finished");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureSched2OnTwoCombiners);
+
+static void BM_ClosureSched4OnTwoCombiners(benchmark::State& state) {
+ grpc_combiner* combiner1 = grpc_combiner_create(NULL);
+ grpc_combiner* combiner2 = grpc_combiner_create(NULL);
+ grpc_closure c1;
+ grpc_closure c2;
+ grpc_closure c3;
+ grpc_closure c4;
+ grpc_closure_init(&c1, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner1, false));
+ grpc_closure_init(&c2, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner2, false));
+ grpc_closure_init(&c3, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner1, false));
+ grpc_closure_init(&c4, DoNothing, NULL,
+ grpc_combiner_scheduler(combiner2, false));
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ while (state.KeepRunning()) {
+ grpc_closure_sched(&exec_ctx, &c1, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c2, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c3, GRPC_ERROR_NONE);
+ grpc_closure_sched(&exec_ctx, &c4, GRPC_ERROR_NONE);
+ grpc_exec_ctx_flush(&exec_ctx);
+ }
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner1, "finished");
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner2, "finished");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureSched4OnTwoCombiners);
+
+// Helper that continuously reschedules the same closure against something until
+// the benchmark is complete
+class Rescheduler {
+ public:
+ Rescheduler(benchmark::State& state, grpc_closure_scheduler* scheduler)
+ : state_(state) {
+ grpc_closure_init(&closure_, Step, this, scheduler);
+ }
+
+ void ScheduleFirst(grpc_exec_ctx* exec_ctx) {
+ grpc_closure_sched(exec_ctx, &closure_, GRPC_ERROR_NONE);
+ }
+
+ void ScheduleFirstAgainstDifferentScheduler(
+ grpc_exec_ctx* exec_ctx, grpc_closure_scheduler* scheduler) {
+ grpc_closure_sched(exec_ctx, grpc_closure_create(Step, this, scheduler),
+ GRPC_ERROR_NONE);
+ }
+
+ private:
+ benchmark::State& state_;
+ grpc_closure closure_;
+
+ static void Step(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ Rescheduler* self = static_cast<Rescheduler*>(arg);
+ if (self->state_.KeepRunning()) {
+ grpc_closure_sched(exec_ctx, &self->closure_, GRPC_ERROR_NONE);
+ }
+ }
+};
+
+static void BM_ClosureReschedOnExecCtx(benchmark::State& state) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ Rescheduler r(state, grpc_schedule_on_exec_ctx);
+ r.ScheduleFirst(&exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureReschedOnExecCtx);
+
+static void BM_ClosureReschedOnCombiner(benchmark::State& state) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_combiner* combiner = grpc_combiner_create(NULL);
+ Rescheduler r(state, grpc_combiner_scheduler(combiner, false));
+ r.ScheduleFirst(&exec_ctx);
+ grpc_exec_ctx_flush(&exec_ctx);
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureReschedOnCombiner);
+
+static void BM_ClosureReschedOnCombinerFinally(benchmark::State& state) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_combiner* combiner = grpc_combiner_create(NULL);
+ Rescheduler r(state, grpc_combiner_finally_scheduler(combiner, false));
+ r.ScheduleFirstAgainstDifferentScheduler(
+ &exec_ctx, grpc_combiner_scheduler(combiner, false));
+ grpc_exec_ctx_flush(&exec_ctx);
+ GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+BENCHMARK(BM_ClosureReschedOnCombinerFinally);
+
+BENCHMARK_MAIN();
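
All of these closure benchmarks follow the same Google Benchmark pattern: fixture setup sits outside the loop, the measured operation sits inside state.KeepRunning(), and registration happens via BENCHMARK(). A self-contained sketch of just that pattern (not part of the commit; BM_VectorPushBack is an invented example, and the include path assumes an installed benchmark library rather than the third_party path used above):

#include <benchmark/benchmark.h>
#include <vector>

static void BM_VectorPushBack(benchmark::State& state) {
  while (state.KeepRunning()) {          // each pass is one timed iteration
    std::vector<int> v;
    v.push_back(42);
    benchmark::DoNotOptimize(v.data());  // keep the compiler from eliding the work
  }
}
BENCHMARK(BM_VectorPushBack);

BENCHMARK_MAIN();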
diff --git a/test/cpp/microbenchmarks/bm_fullstack.cc b/test/cpp/microbenchmarks/bm_fullstack.cc
index dd180de48a..c63de0ce0a 100644
--- a/test/cpp/microbenchmarks/bm_fullstack.cc
+++ b/test/cpp/microbenchmarks/bm_fullstack.cc
@@ -46,6 +46,7 @@
extern "C" {
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
+#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/endpoint_pair.h"
@@ -54,9 +55,12 @@ extern "C" {
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
+#include "test/core/util/memory_counters.h"
#include "test/core/util/passthru_endpoint.h"
#include "test/core/util/port.h"
+#include "test/core/util/trickle_endpoint.h"
}
+#include "src/core/lib/profiling/timers.h"
#include "src/cpp/client/create_channel_internal.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "third_party/benchmark/include/benchmark/benchmark.h"
@@ -67,6 +71,7 @@ namespace testing {
static class InitializeStuff {
public:
InitializeStuff() {
+ grpc_memory_counters_init();
init_lib_.init();
rq_ = grpc_resource_quota_create("bm");
}
@@ -94,7 +99,42 @@ static void ApplyCommonChannelArguments(ChannelArguments* c) {
c->SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, INT_MAX);
}
-class FullstackFixture {
+#ifdef GPR_MU_COUNTERS
+extern "C" gpr_atm grpc_mu_locks;
+#endif
+
+class BaseFixture {
+ public:
+ void Finish(benchmark::State& s) {
+ std::ostringstream out;
+ this->AddToLabel(out, s);
+#ifdef GPR_MU_COUNTERS
+ out << " locks/iter:" << ((double)(gpr_atm_no_barrier_load(&grpc_mu_locks) -
+ mu_locks_at_start_) /
+ (double)s.iterations());
+#endif
+ grpc_memory_counters counters_at_end = grpc_memory_counters_snapshot();
+ out << " allocs/iter:"
+ << ((double)(counters_at_end.total_allocs_absolute -
+ counters_at_start_.total_allocs_absolute) /
+ (double)s.iterations());
+ auto label = out.str();
+ if (label.length() && label[0] == ' ') {
+ label = label.substr(1);
+ }
+ s.SetLabel(label);
+ }
+
+ virtual void AddToLabel(std::ostream& out, benchmark::State& s) = 0;
+
+ private:
+#ifdef GPR_MU_COUNTERS
+ const size_t mu_locks_at_start_ = gpr_atm_no_barrier_load(&grpc_mu_locks);
+#endif
+ grpc_memory_counters counters_at_start_ = grpc_memory_counters_snapshot();
+};
+
+class FullstackFixture : public BaseFixture {
public:
FullstackFixture(Service* service, const grpc::string& address) {
ServerBuilder b;
@@ -130,7 +170,7 @@ class TCP : public FullstackFixture {
public:
TCP(Service* service) : FullstackFixture(service, MakeAddress()) {}
- void Finish(benchmark::State& state) {}
+ void AddToLabel(std::ostream& out, benchmark::State& state) {}
private:
static grpc::string MakeAddress() {
@@ -145,7 +185,7 @@ class UDS : public FullstackFixture {
public:
UDS(Service* service) : FullstackFixture(service, MakeAddress()) {}
- void Finish(benchmark::State& state) {}
+ void AddToLabel(std::ostream& out, benchmark::State& state) override {}
private:
static grpc::string MakeAddress() {
@@ -157,9 +197,10 @@ class UDS : public FullstackFixture {
}
};
-class EndpointPairFixture {
+class EndpointPairFixture : public BaseFixture {
public:
- EndpointPairFixture(Service* service, grpc_endpoint_pair endpoints) {
+ EndpointPairFixture(Service* service, grpc_endpoint_pair endpoints)
+ : endpoint_pair_(endpoints) {
ServerBuilder b;
cq_ = b.AddCompletionQueue(true);
b.RegisterService(service);
@@ -172,7 +213,7 @@ class EndpointPairFixture {
{
const grpc_channel_args* server_args =
grpc_server_get_channel_args(server_->c_server());
- grpc_transport* transport = grpc_create_chttp2_transport(
+ server_transport_ = grpc_create_chttp2_transport(
&exec_ctx, server_args, endpoints.server, 0 /* is_client */);
grpc_pollset** pollsets;
@@ -183,9 +224,9 @@ class EndpointPairFixture {
grpc_endpoint_add_to_pollset(&exec_ctx, endpoints.server, pollsets[i]);
}
- grpc_server_setup_transport(&exec_ctx, server_->c_server(), transport,
- NULL, server_args);
- grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL);
+ grpc_server_setup_transport(&exec_ctx, server_->c_server(),
+ server_transport_, NULL, server_args);
+ grpc_chttp2_transport_start_reading(&exec_ctx, server_transport_, NULL);
}
/* create channel */
@@ -195,12 +236,13 @@ class EndpointPairFixture {
ApplyCommonChannelArguments(&args);
grpc_channel_args c_args = args.c_channel_args();
- grpc_transport* transport =
+ client_transport_ =
grpc_create_chttp2_transport(&exec_ctx, &c_args, endpoints.client, 1);
- GPR_ASSERT(transport);
- grpc_channel* channel = grpc_channel_create(
- &exec_ctx, "target", &c_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
- grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL);
+ GPR_ASSERT(client_transport_);
+ grpc_channel* channel =
+ grpc_channel_create(&exec_ctx, "target", &c_args,
+ GRPC_CLIENT_DIRECT_CHANNEL, client_transport_);
+ grpc_chttp2_transport_start_reading(&exec_ctx, client_transport_, NULL);
channel_ = CreateChannelInternal("", channel);
}
@@ -220,6 +262,11 @@ class EndpointPairFixture {
ServerCompletionQueue* cq() { return cq_.get(); }
std::shared_ptr<Channel> channel() { return channel_; }
+ protected:
+ grpc_endpoint_pair endpoint_pair_;
+ grpc_transport* client_transport_;
+ grpc_transport* server_transport_;
+
private:
std::unique_ptr<Server> server_;
std::unique_ptr<ServerCompletionQueue> cq_;
@@ -233,7 +280,7 @@ class SockPair : public EndpointPairFixture {
"test", initialize_stuff.rq(), 8192)) {
}
- void Finish(benchmark::State& state) {}
+ void AddToLabel(std::ostream& out, benchmark::State& state) {}
};
class InProcessCHTTP2 : public EndpointPairFixture {
@@ -241,11 +288,9 @@ class InProcessCHTTP2 : public EndpointPairFixture {
InProcessCHTTP2(Service* service)
: EndpointPairFixture(service, MakeEndpoints()) {}
- void Finish(benchmark::State& state) {
- std::ostringstream out;
- out << "writes/iteration:"
+ void AddToLabel(std::ostream& out, benchmark::State& state) {
+ out << " writes/iter:"
<< ((double)stats_.num_writes / (double)state.iterations());
- state.SetLabel(out.str());
}
private:
@@ -259,6 +304,75 @@ class InProcessCHTTP2 : public EndpointPairFixture {
}
};
+class TrickledCHTTP2 : public EndpointPairFixture {
+ public:
+ TrickledCHTTP2(Service* service, size_t megabits_per_second)
+ : EndpointPairFixture(service, MakeEndpoints(megabits_per_second)) {}
+
+ void AddToLabel(std::ostream& out, benchmark::State& state) {
+ out << " writes/iter:"
+ << ((double)stats_.num_writes / (double)state.iterations())
+ << " cli_transport_stalls/iter:"
+ << ((double)
+ client_stats_.streams_stalled_due_to_transport_flow_control /
+ (double)state.iterations())
+ << " cli_stream_stalls/iter:"
+ << ((double)client_stats_.streams_stalled_due_to_stream_flow_control /
+ (double)state.iterations())
+ << " svr_transport_stalls/iter:"
+ << ((double)
+ server_stats_.streams_stalled_due_to_transport_flow_control /
+ (double)state.iterations())
+ << " svr_stream_stalls/iter:"
+ << ((double)server_stats_.streams_stalled_due_to_stream_flow_control /
+ (double)state.iterations());
+ }
+
+ void Step() {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ size_t client_backlog =
+ grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.client);
+ size_t server_backlog =
+ grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.server);
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ UpdateStats((grpc_chttp2_transport*)client_transport_, &client_stats_,
+ client_backlog);
+ UpdateStats((grpc_chttp2_transport*)server_transport_, &server_stats_,
+ server_backlog);
+ }
+
+ private:
+ grpc_passthru_endpoint_stats stats_;
+ struct Stats {
+ int streams_stalled_due_to_stream_flow_control = 0;
+ int streams_stalled_due_to_transport_flow_control = 0;
+ };
+ Stats client_stats_;
+ Stats server_stats_;
+
+ grpc_endpoint_pair MakeEndpoints(size_t kilobits) {
+ grpc_endpoint_pair p;
+ grpc_passthru_endpoint_create(&p.client, &p.server, initialize_stuff.rq(),
+ &stats_);
+ double bytes_per_second = 125.0 * kilobits;
+ p.client = grpc_trickle_endpoint_create(p.client, bytes_per_second);
+ p.server = grpc_trickle_endpoint_create(p.server, bytes_per_second);
+ return p;
+ }
+
+ void UpdateStats(grpc_chttp2_transport* t, Stats* s, size_t backlog) {
+ if (backlog == 0) {
+ if (t->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != NULL) {
+ s->streams_stalled_due_to_stream_flow_control++;
+ }
+ if (t->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != NULL) {
+ s->streams_stalled_due_to_transport_flow_control++;
+ }
+ }
+ }
+};
+
/*******************************************************************************
* CONTEXT MUTATORS
*/
@@ -402,6 +516,7 @@ static void BM_UnaryPingPong(benchmark::State& state) {
std::unique_ptr<EchoTestService::Stub> stub(
EchoTestService::NewStub(fixture->channel()));
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
recv_response.Clear();
ClientContext cli_ctx;
ClientContextMutator cli_ctx_mut(&cli_ctx);
@@ -583,6 +698,7 @@ static void BM_StreamingPingPongMsgs(benchmark::State& state) {
}
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
request_rw->Write(send_request, tag(0)); // Start client send
response_rw.Read(&recv_request, tag(1)); // Start server recv
request_rw->Read(&recv_response, tag(2)); // Start client recv
@@ -655,6 +771,7 @@ static void BM_PumpStreamClientToServer(benchmark::State& state) {
}
response_rw.Read(&recv_request, tag(0));
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
request_rw->Write(send_request, tag(1));
while (true) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
@@ -712,6 +829,7 @@ static void BM_PumpStreamServerToClient(benchmark::State& state) {
}
request_rw->Read(&recv_response, tag(0));
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
response_rw.Write(send_response, tag(1));
while (true) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
@@ -738,6 +856,81 @@ static void BM_PumpStreamServerToClient(benchmark::State& state) {
state.SetBytesProcessed(state.range(0) * state.iterations());
}
+static void TrickleCQNext(TrickledCHTTP2* fixture, void** t, bool* ok) {
+ while (true) {
+ switch (fixture->cq()->AsyncNext(
+ t, ok, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_micros(100, GPR_TIMESPAN)))) {
+ case CompletionQueue::TIMEOUT:
+ fixture->Step();
+ break;
+ case CompletionQueue::SHUTDOWN:
+ GPR_ASSERT(false);
+ break;
+ case CompletionQueue::GOT_EVENT:
+ return;
+ }
+ }
+}
+
+static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
+ EchoTestService::AsyncService service;
+ std::unique_ptr<TrickledCHTTP2> fixture(
+ new TrickledCHTTP2(&service, state.range(1)));
+ {
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ if (state.range(0) > 0) {
+ send_response.set_message(std::string(state.range(0), 'a'));
+ }
+ Status recv_status;
+ ServerContext svr_ctx;
+ ServerAsyncReaderWriter<EchoResponse, EchoRequest> response_rw(&svr_ctx);
+ service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(),
+ fixture->cq(), tag(0));
+ std::unique_ptr<EchoTestService::Stub> stub(
+ EchoTestService::NewStub(fixture->channel()));
+ ClientContext cli_ctx;
+ auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1));
+ int need_tags = (1 << 0) | (1 << 1);
+ void* t;
+ bool ok;
+ while (need_tags) {
+ TrickleCQNext(fixture.get(), &t, &ok);
+ GPR_ASSERT(ok);
+ int i = (int)(intptr_t)t;
+ GPR_ASSERT(need_tags & (1 << i));
+ need_tags &= ~(1 << i);
+ }
+ request_rw->Read(&recv_response, tag(0));
+ while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
+ response_rw.Write(send_response, tag(1));
+ while (true) {
+ TrickleCQNext(fixture.get(), &t, &ok);
+ if (t == tag(0)) {
+ request_rw->Read(&recv_response, tag(0));
+ } else if (t == tag(1)) {
+ break;
+ } else {
+ GPR_ASSERT(false);
+ }
+ }
+ }
+ response_rw.Finish(Status::OK, tag(1));
+ need_tags = (1 << 0) | (1 << 1);
+ while (need_tags) {
+ TrickleCQNext(fixture.get(), &t, &ok);
+ int i = (int)(intptr_t)t;
+ GPR_ASSERT(need_tags & (1 << i));
+ need_tags &= ~(1 << i);
+ }
+ }
+ fixture->Finish(state);
+ fixture.reset();
+ state.SetBytesProcessed(state.range(0) * state.iterations());
+}
+
/*******************************************************************************
* CONFIGURATIONS
*/
@@ -827,6 +1020,19 @@ BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, SockPair)
BENCHMARK_TEMPLATE(BM_PumpStreamServerToClient, InProcessCHTTP2)
->Range(0, 128 * 1024 * 1024);
+static void TrickleArgs(benchmark::internal::Benchmark* b) {
+ for (int i = 1; i <= 128 * 1024 * 1024; i *= 8) {
+ for (int j = 1; j <= 128 * 1024 * 1024; j *= 8) {
+ double expected_time =
+ static_cast<double>(14 + i) / (125.0 * static_cast<double>(j));
+ if (expected_time > 0.01) continue;
+ b->Args({i, j});
+ }
+ }
+}
+
+BENCHMARK(BM_PumpStreamServerToClient_Trickle)->Apply(TrickleArgs);
+
// Generate Args for StreamingPingPong benchmarks. Currently generates args for
// only "small streams" (i.e streams with 0, 1 or 2 messages)
static void StreamingPingPongArgs(benchmark::internal::Benchmark* b) {
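
The TrickleArgs generator above sweeps message size (first arg, bytes) against trickle bandwidth (second arg, kilobits/second, at 125 bytes per kilobit) and drops any pair where a single write would take longer than roughly 10ms, assuming about 14 bytes of per-message framing overhead. A standalone sketch (the 14-byte overhead and the 0.01s cutoff are simply lifted from the code above) that prints the surviving pairs:

#include <cstdio>

int main() {
  for (int i = 1; i <= 128 * 1024 * 1024; i *= 8) {    // message size, bytes
    for (int j = 1; j <= 128 * 1024 * 1024; j *= 8) {  // bandwidth, kilobits/s
      double seconds_per_write =
          static_cast<double>(14 + i) / (125.0 * static_cast<double>(j));
      if (seconds_per_write <= 0.01) {
        std::printf("size=%d bw=%d kbit/s -> %.5f s/write\n", i, j,
                    seconds_per_write);
      }
    }
  }
  return 0;
}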