Diffstat (limited to 'test/cpp/microbenchmarks')
 test/cpp/microbenchmarks/bm_arena.cc                      |   2
 test/cpp/microbenchmarks/bm_call_create.cc                | 276
 test/cpp/microbenchmarks/bm_chttp2_hpack.cc               |  84
 test/cpp/microbenchmarks/bm_chttp2_transport.cc           | 186
 test/cpp/microbenchmarks/bm_closure.cc                    |  16
 test/cpp/microbenchmarks/bm_cq.cc                         |   2
 test/cpp/microbenchmarks/bm_cq_multiple_threads.cc        |   2
 test/cpp/microbenchmarks/bm_error.cc                      |   2
 test/cpp/microbenchmarks/bm_fullstack_trickle.cc          |   9
 test/cpp/microbenchmarks/bm_metadata.cc                   |   2
 test/cpp/microbenchmarks/bm_pollset.cc                    |   2
 test/cpp/microbenchmarks/fullstack_fixtures.h             |   2
 test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h  |   2
 test/cpp/microbenchmarks/helpers.cc                       |  15
 test/cpp/microbenchmarks/helpers.h                        |   2
 15 files changed, 298 insertions(+), 306 deletions(-)
diff --git a/test/cpp/microbenchmarks/bm_arena.cc b/test/cpp/microbenchmarks/bm_arena.cc index 165b74670d..5b7c611919 100644 --- a/test/cpp/microbenchmarks/bm_arena.cc +++ b/test/cpp/microbenchmarks/bm_arena.cc @@ -18,9 +18,7 @@ /* Benchmark arenas */ -extern "C" { #include "src/core/lib/support/arena.h" -} #include "test/cpp/microbenchmarks/helpers.h" #include "third_party/benchmark/include/benchmark/benchmark.h" diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc index cf9a42e8c6..ec5c1275d1 100644 --- a/test/cpp/microbenchmarks/bm_call_create.cc +++ b/test/cpp/microbenchmarks/bm_call_create.cc @@ -29,7 +29,6 @@ #include <grpc/support/alloc.h> #include <grpc/support/string_util.h> -extern "C" { #include "src/core/ext/filters/client_channel/client_channel.h" #include "src/core/ext/filters/deadline/deadline_filter.h" #include "src/core/ext/filters/http/client/http_client_filter.h" @@ -43,15 +42,14 @@ extern "C" { #include "src/core/lib/profiling/timers.h" #include "src/core/lib/surface/channel.h" #include "src/core/lib/transport/transport_impl.h" -} #include "src/cpp/client/create_channel_internal.h" #include "src/proto/grpc/testing/echo.grpc.pb.h" #include "test/cpp/microbenchmarks/helpers.h" -auto &force_library_initialization = Library::get(); +auto& force_library_initialization = Library::get(); -void BM_Zalloc(benchmark::State &state) { +void BM_Zalloc(benchmark::State& state) { // speed of light for call creation is zalloc, so benchmark a few interesting // sizes TrackCounters track_counters; @@ -80,13 +78,13 @@ BENCHMARK(BM_Zalloc) class BaseChannelFixture { public: - BaseChannelFixture(grpc_channel *channel) : channel_(channel) {} + BaseChannelFixture(grpc_channel* channel) : channel_(channel) {} ~BaseChannelFixture() { grpc_channel_destroy(channel_); } - grpc_channel *channel() const { return channel_; } + grpc_channel* channel() const { return channel_; } private: - grpc_channel *const channel_; + grpc_channel* const channel_; }; class InsecureChannel : public BaseChannelFixture { @@ -104,12 +102,12 @@ class LameChannel : public BaseChannelFixture { }; template <class Fixture> -static void BM_CallCreateDestroy(benchmark::State &state) { +static void BM_CallCreateDestroy(benchmark::State& state) { TrackCounters track_counters; Fixture fixture; - grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL); + grpc_completion_queue* cq = grpc_completion_queue_create_for_next(NULL); gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - void *method_hdl = + void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar", NULL, NULL); while (state.KeepRunning()) { grpc_call_unref(grpc_channel_create_registered_call( @@ -126,11 +124,11 @@ BENCHMARK_TEMPLATE(BM_CallCreateDestroy, LameChannel); //////////////////////////////////////////////////////////////////////////////// // Benchmarks isolating individual filters -static void *tag(int i) { - return reinterpret_cast<void *>(static_cast<intptr_t>(i)); +static void* tag(int i) { + return reinterpret_cast<void*>(static_cast<intptr_t>(i)); } -static void BM_LameChannelCallCreateCpp(benchmark::State &state) { +static void BM_LameChannelCallCreateCpp(benchmark::State& state) { TrackCounters track_counters; auto stub = grpc::testing::EchoTestService::NewStub(grpc::CreateChannelInternal( @@ -145,7 +143,7 @@ static void BM_LameChannelCallCreateCpp(benchmark::State &state) { grpc::ClientContext cli_ctx; auto reader = stub->AsyncEcho(&cli_ctx, send_request, &cq); 
reader->Finish(&recv_response, &recv_status, tag(0)); - void *t; + void* t; bool ok; GPR_ASSERT(cq.Next(&t, &ok)); GPR_ASSERT(ok); @@ -154,16 +152,16 @@ static void BM_LameChannelCallCreateCpp(benchmark::State &state) { } BENCHMARK(BM_LameChannelCallCreateCpp); -static void do_nothing(void *ignored) {} +static void do_nothing(void* ignored) {} -static void BM_LameChannelCallCreateCore(benchmark::State &state) { +static void BM_LameChannelCallCreateCore(benchmark::State& state) { TrackCounters track_counters; - grpc_channel *channel; - grpc_completion_queue *cq; + grpc_channel* channel; + grpc_completion_queue* cq; grpc_metadata_array initial_metadata_recv; grpc_metadata_array trailing_metadata_recv; - grpc_byte_buffer *response_payload_recv = NULL; + grpc_byte_buffer* response_payload_recv = NULL; grpc_status_code status; grpc_slice details; grpc::testing::EchoRequest send_request; @@ -173,22 +171,22 @@ static void BM_LameChannelCallCreateCore(benchmark::State &state) { channel = grpc_lame_client_channel_create( "localhost:1234", GRPC_STATUS_UNAUTHENTICATED, "blah"); cq = grpc_completion_queue_create_for_next(NULL); - void *rc = grpc_channel_register_call( + void* rc = grpc_channel_register_call( channel, "/grpc.testing.EchoTestService/Echo", NULL, NULL); while (state.KeepRunning()) { GPR_TIMER_SCOPE("BenchmarkCycle", 0); - grpc_call *call = grpc_channel_create_registered_call( + grpc_call* call = grpc_channel_create_registered_call( channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, rc, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); grpc_metadata_array_init(&initial_metadata_recv); grpc_metadata_array_init(&trailing_metadata_recv); - grpc_byte_buffer *request_payload_send = + grpc_byte_buffer* request_payload_send = grpc_raw_byte_buffer_create(&send_request_slice, 1); // Fill in call ops grpc_op ops[6]; memset(ops, 0, sizeof(ops)); - grpc_op *op = ops; + grpc_op* op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; op++; @@ -212,7 +210,7 @@ static void BM_LameChannelCallCreateCore(benchmark::State &state) { GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops, (size_t)(op - ops), - (void *)1, NULL)); + (void*)1, NULL)); grpc_event ev = grpc_completion_queue_next( cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); GPR_ASSERT(ev.type != GRPC_QUEUE_SHUTDOWN); @@ -230,14 +228,14 @@ static void BM_LameChannelCallCreateCore(benchmark::State &state) { } BENCHMARK(BM_LameChannelCallCreateCore); -static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) { +static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State& state) { TrackCounters track_counters; - grpc_channel *channel; - grpc_completion_queue *cq; + grpc_channel* channel; + grpc_completion_queue* cq; grpc_metadata_array initial_metadata_recv; grpc_metadata_array trailing_metadata_recv; - grpc_byte_buffer *response_payload_recv = NULL; + grpc_byte_buffer* response_payload_recv = NULL; grpc_status_code status; grpc_slice details; grpc::testing::EchoRequest send_request; @@ -247,22 +245,22 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) { channel = grpc_lame_client_channel_create( "localhost:1234", GRPC_STATUS_UNAUTHENTICATED, "blah"); cq = grpc_completion_queue_create_for_next(NULL); - void *rc = grpc_channel_register_call( + void* rc = grpc_channel_register_call( channel, "/grpc.testing.EchoTestService/Echo", NULL, NULL); while (state.KeepRunning()) { GPR_TIMER_SCOPE("BenchmarkCycle", 0); - grpc_call *call = grpc_channel_create_registered_call( 
+ grpc_call* call = grpc_channel_create_registered_call( channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, rc, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); grpc_metadata_array_init(&initial_metadata_recv); grpc_metadata_array_init(&trailing_metadata_recv); - grpc_byte_buffer *request_payload_send = + grpc_byte_buffer* request_payload_send = grpc_raw_byte_buffer_create(&send_request_slice, 1); // Fill in call ops grpc_op ops[3]; memset(ops, 0, sizeof(ops)); - grpc_op *op = ops; + grpc_op* op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; op++; @@ -273,7 +271,7 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) { op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops, (size_t)(op - ops), - (void *)0, NULL)); + (void*)0, NULL)); memset(ops, 0, sizeof(ops)); op = ops; op->op = GRPC_OP_RECV_INITIAL_METADATA; @@ -291,7 +289,7 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) { GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops, (size_t)(op - ops), - (void *)1, NULL)); + (void*)1, NULL)); grpc_event ev = grpc_completion_queue_next( cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); GPR_ASSERT(ev.type != GRPC_QUEUE_SHUTDOWN); @@ -313,31 +311,31 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State &state) { } BENCHMARK(BM_LameChannelCallCreateCoreSeparateBatch); -static void FilterDestroy(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void FilterDestroy(grpc_exec_ctx* exec_ctx, void* arg, + grpc_error* error) { gpr_free(arg); } -static void DoNothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {} +static void DoNothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {} class FakeClientChannelFactory : public grpc_client_channel_factory { public: FakeClientChannelFactory() { vtable = &vtable_; } private: - static void NoRef(grpc_client_channel_factory *factory) {} - static void NoUnref(grpc_exec_ctx *exec_ctx, - grpc_client_channel_factory *factory) {} - static grpc_subchannel *CreateSubchannel(grpc_exec_ctx *exec_ctx, - grpc_client_channel_factory *factory, - const grpc_subchannel_args *args) { + static void NoRef(grpc_client_channel_factory* factory) {} + static void NoUnref(grpc_exec_ctx* exec_ctx, + grpc_client_channel_factory* factory) {} + static grpc_subchannel* CreateSubchannel(grpc_exec_ctx* exec_ctx, + grpc_client_channel_factory* factory, + const grpc_subchannel_args* args) { return nullptr; } - static grpc_channel *CreateClientChannel(grpc_exec_ctx *exec_ctx, - grpc_client_channel_factory *factory, - const char *target, + static grpc_channel* CreateClientChannel(grpc_exec_ctx* exec_ctx, + grpc_client_channel_factory* factory, + const char* target, grpc_client_channel_type type, - const grpc_channel_args *args) { + const grpc_channel_args* args) { return nullptr; } @@ -347,11 +345,11 @@ class FakeClientChannelFactory : public grpc_client_channel_factory { const grpc_client_channel_factory_vtable FakeClientChannelFactory::vtable_ = { NoRef, NoUnref, CreateSubchannel, CreateClientChannel}; -static grpc_arg StringArg(const char *key, const char *value) { +static grpc_arg StringArg(const char* key, const char* value) { grpc_arg a; a.type = GRPC_ARG_STRING; - a.key = const_cast<char *>(key); - a.value.string = const_cast<char *>(value); + a.key = const_cast<char*>(key); + a.value.string = const_cast<char*>(value); return a; } @@ -360,45 +358,45 @@ enum FixtureFlags : uint32_t { REQUIRES_TRANSPORT = 2, }; -template <const 
grpc_channel_filter *kFilter, uint32_t kFlags> +template <const grpc_channel_filter* kFilter, uint32_t kFlags> struct Fixture { - const grpc_channel_filter *filter = kFilter; + const grpc_channel_filter* filter = kFilter; const uint32_t flags = kFlags; }; namespace dummy_filter { -static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op_batch *op) {} +static void StartTransportStreamOp(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + grpc_transport_stream_op_batch* op) {} -static void StartTransportOp(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_transport_op *op) {} +static void StartTransportOp(grpc_exec_ctx* exec_ctx, + grpc_channel_element* elem, + grpc_transport_op* op) {} -static grpc_error *InitCallElem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { +static grpc_error* InitCallElem(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + const grpc_call_element_args* args) { return GRPC_ERROR_NONE; } -static void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_polling_entity *pollent) {} +static void SetPollsetOrPollsetSet(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + grpc_polling_entity* pollent) {} -static void DestroyCallElem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *then_sched_closure) {} +static void DestroyCallElem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* then_sched_closure) {} -grpc_error *InitChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_channel_element_args *args) { +grpc_error* InitChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, + grpc_channel_element_args* args) { return GRPC_ERROR_NONE; } -void DestroyChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} +void DestroyChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem) {} -void GetChannelInfo(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - const grpc_channel_info *channel_info) {} +void GetChannelInfo(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, + const grpc_channel_info* channel_info) {} static const grpc_channel_filter dummy_filter = {StartTransportStreamOp, StartTransportOp, @@ -421,42 +419,42 @@ namespace dummy_transport { size_t sizeof_stream; /* = sizeof(transport stream) */ /* name of this transport implementation */ -const char *name; +const char* name; /* implementation of grpc_transport_init_stream */ -int InitStream(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_stream_refcount *refcount, - const void *server_data, gpr_arena *arena) { +int InitStream(grpc_exec_ctx* exec_ctx, grpc_transport* self, + grpc_stream* stream, grpc_stream_refcount* refcount, + const void* server_data, gpr_arena* arena) { return 0; } /* implementation of grpc_transport_set_pollset */ -void SetPollset(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_pollset *pollset) {} +void SetPollset(grpc_exec_ctx* exec_ctx, grpc_transport* self, + grpc_stream* stream, grpc_pollset* pollset) {} /* implementation of grpc_transport_set_pollset */ -void SetPollsetSet(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_pollset_set *pollset_set) {} +void SetPollsetSet(grpc_exec_ctx* exec_ctx, grpc_transport* self, + grpc_stream* stream, grpc_pollset_set* pollset_set) {} /* implementation of 
grpc_transport_perform_stream_op */ -void PerformStreamOp(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_transport_stream_op_batch *op) { +void PerformStreamOp(grpc_exec_ctx* exec_ctx, grpc_transport* self, + grpc_stream* stream, grpc_transport_stream_op_batch* op) { GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_NONE); } /* implementation of grpc_transport_perform_op */ -void PerformOp(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_transport_op *op) {} +void PerformOp(grpc_exec_ctx* exec_ctx, grpc_transport* self, + grpc_transport_op* op) {} /* implementation of grpc_transport_destroy_stream */ -void DestroyStream(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_closure *then_sched_closure) {} +void DestroyStream(grpc_exec_ctx* exec_ctx, grpc_transport* self, + grpc_stream* stream, grpc_closure* then_sched_closure) {} /* implementation of grpc_transport_destroy */ -void Destroy(grpc_exec_ctx *exec_ctx, grpc_transport *self) {} +void Destroy(grpc_exec_ctx* exec_ctx, grpc_transport* self) {} /* implementation of grpc_transport_get_endpoint */ -grpc_endpoint *GetEndpoint(grpc_exec_ctx *exec_ctx, grpc_transport *self) { +grpc_endpoint* GetEndpoint(grpc_exec_ctx* exec_ctx, grpc_transport* self) { return nullptr; } @@ -474,8 +472,8 @@ class NoOp { public: class Op { public: - Op(grpc_exec_ctx *exec_ctx, NoOp *p, grpc_call_stack *s) {} - void Finish(grpc_exec_ctx *exec_ctx) {} + Op(grpc_exec_ctx* exec_ctx, NoOp* p, grpc_call_stack* s) {} + void Finish(grpc_exec_ctx* exec_ctx) {} }; }; @@ -491,11 +489,11 @@ class SendEmptyMetadata { class Op { public: - Op(grpc_exec_ctx *exec_ctx, SendEmptyMetadata *p, grpc_call_stack *s) { + Op(grpc_exec_ctx* exec_ctx, SendEmptyMetadata* p, grpc_call_stack* s) { grpc_metadata_batch_init(&batch_); p->op_payload_.send_initial_metadata.send_initial_metadata = &batch_; } - void Finish(grpc_exec_ctx *exec_ctx) { + void Finish(grpc_exec_ctx* exec_ctx) { grpc_metadata_batch_destroy(exec_ctx, &batch_); } @@ -516,7 +514,7 @@ class SendEmptyMetadata { // Fixture<> template to specify this), and TestOp defines some unit of work to // perform on said filter. template <class Fixture, class TestOp> -static void BM_IsolatedFilter(benchmark::State &state) { +static void BM_IsolatedFilter(benchmark::State& state) { TrackCounters track_counters; Fixture fixture; std::ostringstream label; @@ -529,7 +527,7 @@ static void BM_IsolatedFilter(benchmark::State &state) { grpc_channel_args channel_args = {args.size(), &args[0]}; - std::vector<const grpc_channel_filter *> filters; + std::vector<const grpc_channel_filter*> filters; if (fixture.filter != nullptr) { filters.push_back(fixture.filter); } @@ -541,8 +539,8 @@ static void BM_IsolatedFilter(benchmark::State &state) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; size_t channel_size = grpc_channel_stack_size( filters.size() == 0 ? 
NULL : &filters[0], filters.size()); - grpc_channel_stack *channel_stack = - static_cast<grpc_channel_stack *>(gpr_zalloc(channel_size)); + grpc_channel_stack* channel_stack = + static_cast<grpc_channel_stack*>(gpr_zalloc(channel_size)); GPR_ASSERT(GRPC_LOG_IF_ERROR( "channel_stack_init", grpc_channel_stack_init(&exec_ctx, 1, FilterDestroy, channel_stack, @@ -552,8 +550,8 @@ static void BM_IsolatedFilter(benchmark::State &state) { : nullptr, "CHANNEL", channel_stack))); grpc_exec_ctx_flush(&exec_ctx); - grpc_call_stack *call_stack = static_cast<grpc_call_stack *>( - gpr_zalloc(channel_stack->call_stack_size)); + grpc_call_stack* call_stack = + static_cast<grpc_call_stack*>(gpr_zalloc(channel_stack->call_stack_size)); grpc_millis deadline = GRPC_MILLIS_INF_FUTURE; gpr_timespec start_time = gpr_now(GPR_CLOCK_MONOTONIC); grpc_slice method = grpc_slice_from_static_string("/foo/bar"); @@ -630,12 +628,14 @@ BENCHMARK_TEMPLATE(BM_IsolatedFilter, LoadReportingFilter, SendEmptyMetadata); namespace isolated_call_filter { -typedef struct { grpc_call_combiner *call_combiner; } call_data; +typedef struct { + grpc_call_combiner* call_combiner; +} call_data; -static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - call_data *calld = static_cast<call_data *>(elem->call_data); +static void StartTransportStreamOp(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + grpc_transport_stream_op_batch* op) { + call_data* calld = static_cast<call_data*>(elem->call_data); if (op->recv_initial_metadata) { GRPC_CALL_COMBINER_START( exec_ctx, calld->call_combiner, @@ -650,42 +650,42 @@ static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx, GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_NONE); } -static void StartTransportOp(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_transport_op *op) { +static void StartTransportOp(grpc_exec_ctx* exec_ctx, + grpc_channel_element* elem, + grpc_transport_op* op) { if (op->disconnect_with_error != GRPC_ERROR_NONE) { GRPC_ERROR_UNREF(op->disconnect_with_error); } GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); } -static grpc_error *InitCallElem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = static_cast<call_data *>(elem->call_data); +static grpc_error* InitCallElem(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast<call_data*>(elem->call_data); calld->call_combiner = args->call_combiner; return GRPC_ERROR_NONE; } -static void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_polling_entity *pollent) {} +static void SetPollsetOrPollsetSet(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + grpc_polling_entity* pollent) {} -static void DestroyCallElem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *then_sched_closure) { +static void DestroyCallElem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* then_sched_closure) { GRPC_CLOSURE_SCHED(exec_ctx, then_sched_closure, GRPC_ERROR_NONE); } -grpc_error *InitChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_channel_element_args *args) { +grpc_error* InitChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, + grpc_channel_element_args* args) { return GRPC_ERROR_NONE; } -void 
DestroyChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} +void DestroyChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem) {} -void GetChannelInfo(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - const grpc_channel_info *channel_info) {} +void GetChannelInfo(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, + const grpc_channel_info* channel_info) {} static const grpc_channel_filter isolated_call_filter = { StartTransportStreamOp, @@ -704,7 +704,7 @@ static const grpc_channel_filter isolated_call_filter = { class IsolatedCallFixture : public TrackCounters { public: IsolatedCallFixture() { - grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create(); + grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create(); grpc_channel_stack_builder_set_name(builder, "dummy"); grpc_channel_stack_builder_set_target(builder, "dummy_target"); GPR_ASSERT(grpc_channel_stack_builder_append_filter( @@ -718,24 +718,24 @@ class IsolatedCallFixture : public TrackCounters { cq_ = grpc_completion_queue_create_for_next(NULL); } - void Finish(benchmark::State &state) { + void Finish(benchmark::State& state) { grpc_completion_queue_destroy(cq_); grpc_channel_destroy(channel_); TrackCounters::Finish(state); } - grpc_channel *channel() const { return channel_; } - grpc_completion_queue *cq() const { return cq_; } + grpc_channel* channel() const { return channel_; } + grpc_completion_queue* cq() const { return cq_; } private: - grpc_completion_queue *cq_; - grpc_channel *channel_; + grpc_completion_queue* cq_; + grpc_channel* channel_; }; -static void BM_IsolatedCall_NoOp(benchmark::State &state) { +static void BM_IsolatedCall_NoOp(benchmark::State& state) { IsolatedCallFixture fixture; gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - void *method_hdl = + void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar", NULL, NULL); while (state.KeepRunning()) { GPR_TIMER_SCOPE("BenchmarkCycle", 0); @@ -747,14 +747,14 @@ static void BM_IsolatedCall_NoOp(benchmark::State &state) { } BENCHMARK(BM_IsolatedCall_NoOp); -static void BM_IsolatedCall_Unary(benchmark::State &state) { +static void BM_IsolatedCall_Unary(benchmark::State& state) { IsolatedCallFixture fixture; gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - void *method_hdl = + void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar", NULL, NULL); grpc_slice slice = grpc_slice_from_static_string("hello world"); - grpc_byte_buffer *send_message = grpc_raw_byte_buffer_create(&slice, 1); - grpc_byte_buffer *recv_message = NULL; + grpc_byte_buffer* send_message = grpc_raw_byte_buffer_create(&slice, 1); + grpc_byte_buffer* recv_message = NULL; grpc_status_code status_code; grpc_slice status_details = grpc_empty_slice(); grpc_metadata_array recv_initial_metadata; @@ -778,7 +778,7 @@ static void BM_IsolatedCall_Unary(benchmark::State &state) { ops[5].data.recv_status_on_client.trailing_metadata = &recv_trailing_metadata; while (state.KeepRunning()) { GPR_TIMER_SCOPE("BenchmarkCycle", 0); - grpc_call *call = grpc_channel_create_registered_call( + grpc_call* call = grpc_channel_create_registered_call( fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(), method_hdl, deadline, NULL); grpc_call_start_batch(call, ops, 6, tag(1), NULL); @@ -793,13 +793,13 @@ static void BM_IsolatedCall_Unary(benchmark::State &state) { } BENCHMARK(BM_IsolatedCall_Unary); -static void BM_IsolatedCall_StreamingSend(benchmark::State &state) { +static void 
BM_IsolatedCall_StreamingSend(benchmark::State& state) { IsolatedCallFixture fixture; gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - void *method_hdl = + void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar", NULL, NULL); grpc_slice slice = grpc_slice_from_static_string("hello world"); - grpc_byte_buffer *send_message = grpc_raw_byte_buffer_create(&slice, 1); + grpc_byte_buffer* send_message = grpc_raw_byte_buffer_create(&slice, 1); grpc_metadata_array recv_initial_metadata; grpc_metadata_array_init(&recv_initial_metadata); grpc_metadata_array recv_trailing_metadata; @@ -810,7 +810,7 @@ static void BM_IsolatedCall_StreamingSend(benchmark::State &state) { ops[1].op = GRPC_OP_RECV_INITIAL_METADATA; ops[1].data.recv_initial_metadata.recv_initial_metadata = &recv_initial_metadata; - grpc_call *call = grpc_channel_create_registered_call( + grpc_call* call = grpc_channel_create_registered_call( fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(), method_hdl, deadline, NULL); grpc_call_start_batch(call, ops, 2, tag(1), NULL); diff --git a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc index bc2157b9f1..45f5382fde 100644 --- a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc +++ b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc @@ -22,22 +22,22 @@ #include <grpc/support/log.h> #include <string.h> #include <sstream> -extern "C" { + #include "src/core/ext/transport/chttp2/transport/hpack_encoder.h" #include "src/core/ext/transport/chttp2/transport/hpack_parser.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" #include "src/core/lib/transport/static_metadata.h" #include "src/core/lib/transport/timeout_encoding.h" -} + #include "test/cpp/microbenchmarks/helpers.h" #include "third_party/benchmark/include/benchmark/benchmark.h" -auto &force_library_initialization = Library::get(); +auto& force_library_initialization = Library::get(); static grpc_slice MakeSlice(std::vector<uint8_t> bytes) { grpc_slice s = grpc_slice_malloc(bytes.size()); - uint8_t *p = GRPC_SLICE_START_PTR(s); + uint8_t* p = GRPC_SLICE_START_PTR(s); for (auto b : bytes) { *p++ = b; } @@ -48,7 +48,7 @@ static grpc_slice MakeSlice(std::vector<uint8_t> bytes) { // HPACK encoder // -static void BM_HpackEncoderInitDestroy(benchmark::State &state) { +static void BM_HpackEncoderInitDestroy(benchmark::State& state) { TrackCounters track_counters; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_chttp2_hpack_compressor c; @@ -62,7 +62,7 @@ static void BM_HpackEncoderInitDestroy(benchmark::State &state) { } BENCHMARK(BM_HpackEncoderInitDestroy); -static void BM_HpackEncoderEncodeDeadline(benchmark::State &state) { +static void BM_HpackEncoderEncodeDeadline(benchmark::State& state) { TrackCounters track_counters; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_millis saved_now = grpc_exec_ctx_now(&exec_ctx); @@ -95,17 +95,19 @@ static void BM_HpackEncoderEncodeDeadline(benchmark::State &state) { grpc_exec_ctx_finish(&exec_ctx); std::ostringstream label; - label << "framing_bytes/iter:" << (static_cast<double>(stats.framing_bytes) / - static_cast<double>(state.iterations())) - << " header_bytes/iter:" << (static_cast<double>(stats.header_bytes) / - static_cast<double>(state.iterations())); + label << "framing_bytes/iter:" + << (static_cast<double>(stats.framing_bytes) / + static_cast<double>(state.iterations())) + << " header_bytes/iter:" + << (static_cast<double>(stats.header_bytes) / + 
static_cast<double>(state.iterations())); track_counters.AddLabel(label.str()); track_counters.Finish(state); } BENCHMARK(BM_HpackEncoderEncodeDeadline); template <class Fixture> -static void BM_HpackEncoderEncodeHeader(benchmark::State &state) { +static void BM_HpackEncoderEncodeHeader(benchmark::State& state) { TrackCounters track_counters; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; static bool logged_representative_output = false; @@ -138,7 +140,7 @@ static void BM_HpackEncoderEncodeHeader(benchmark::State &state) { if (!logged_representative_output && state.iterations() > 3) { logged_representative_output = true; for (size_t i = 0; i < outbuf.count; i++) { - char *s = grpc_dump_slice(outbuf.slices[i], GPR_DUMP_HEX); + char* s = grpc_dump_slice(outbuf.slices[i], GPR_DUMP_HEX); gpr_log(GPR_DEBUG, "%" PRIdPTR ": %s", i, s); gpr_free(s); } @@ -152,10 +154,12 @@ static void BM_HpackEncoderEncodeHeader(benchmark::State &state) { grpc_exec_ctx_finish(&exec_ctx); std::ostringstream label; - label << "framing_bytes/iter:" << (static_cast<double>(stats.framing_bytes) / - static_cast<double>(state.iterations())) - << " header_bytes/iter:" << (static_cast<double>(stats.header_bytes) / - static_cast<double>(state.iterations())); + label << "framing_bytes/iter:" + << (static_cast<double>(stats.framing_bytes) / + static_cast<double>(state.iterations())) + << " header_bytes/iter:" + << (static_cast<double>(stats.header_bytes) / + static_cast<double>(state.iterations())); track_counters.AddLabel(label.str()); track_counters.Finish(state); } @@ -165,7 +169,7 @@ namespace hpack_encoder_fixtures { class EmptyBatch { public: static constexpr bool kEnableTrueBinary = false; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return {}; } }; @@ -173,7 +177,7 @@ class EmptyBatch { class SingleStaticElem { public: static constexpr bool kEnableTrueBinary = false; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return {GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE}; } }; @@ -181,7 +185,7 @@ class SingleStaticElem { class SingleInternedElem { public: static constexpr bool kEnableTrueBinary = false; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return {grpc_mdelem_from_slices( exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc")), grpc_slice_intern(grpc_slice_from_static_string("def")))}; @@ -192,7 +196,7 @@ template <int kLength, bool kTrueBinary> class SingleInternedBinaryElem { public: static constexpr bool kEnableTrueBinary = kTrueBinary; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { grpc_slice bytes = MakeBytes(); std::vector<grpc_mdelem> out = {grpc_mdelem_from_slices( exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc-bin")), @@ -214,7 +218,7 @@ class SingleInternedBinaryElem { class SingleInternedKeyElem { public: static constexpr bool kEnableTrueBinary = false; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return {grpc_mdelem_from_slices( exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc")), grpc_slice_from_static_string("def"))}; @@ -224,7 +228,7 @@ class SingleInternedKeyElem { class 
SingleNonInternedElem { public: static constexpr bool kEnableTrueBinary = false; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return {grpc_mdelem_from_slices(exec_ctx, grpc_slice_from_static_string("abc"), grpc_slice_from_static_string("def"))}; @@ -235,7 +239,7 @@ template <int kLength, bool kTrueBinary> class SingleNonInternedBinaryElem { public: static constexpr bool kEnableTrueBinary = kTrueBinary; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return {grpc_mdelem_from_slices( exec_ctx, grpc_slice_from_static_string("abc-bin"), MakeBytes())}; } @@ -253,9 +257,10 @@ class SingleNonInternedBinaryElem { class RepresentativeClientInitialMetadata { public: static constexpr bool kEnableTrueBinary = true; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return { - GRPC_MDELEM_SCHEME_HTTP, GRPC_MDELEM_METHOD_POST, + GRPC_MDELEM_SCHEME_HTTP, + GRPC_MDELEM_METHOD_POST, grpc_mdelem_from_slices( exec_ctx, GRPC_MDSTR_PATH, grpc_slice_intern(grpc_slice_from_static_string("/foo/bar"))), @@ -278,9 +283,10 @@ class RepresentativeClientInitialMetadata { class MoreRepresentativeClientInitialMetadata { public: static constexpr bool kEnableTrueBinary = true; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return { - GRPC_MDELEM_SCHEME_HTTP, GRPC_MDELEM_METHOD_POST, + GRPC_MDELEM_SCHEME_HTTP, + GRPC_MDELEM_METHOD_POST, grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH, grpc_slice_intern(grpc_slice_from_static_string( "/grpc.test.FooService/BarMethod"))), @@ -314,7 +320,7 @@ class MoreRepresentativeClientInitialMetadata { class RepresentativeServerInitialMetadata { public: static constexpr bool kEnableTrueBinary = true; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return {GRPC_MDELEM_STATUS_200, GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC, GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP}; @@ -324,7 +330,7 @@ class RepresentativeServerInitialMetadata { class RepresentativeServerTrailingMetadata { public: static constexpr bool kEnableTrueBinary = true; - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return {GRPC_MDELEM_GRPC_STATUS_0}; } }; @@ -423,7 +429,7 @@ BENCHMARK_TEMPLATE(BM_HpackEncoderEncodeHeader, // HPACK parser // -static void BM_HpackParserInitDestroy(benchmark::State &state) { +static void BM_HpackParserInitDestroy(benchmark::State& state) { TrackCounters track_counters; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_chttp2_hpack_parser p; @@ -437,13 +443,13 @@ static void BM_HpackParserInitDestroy(benchmark::State &state) { } BENCHMARK(BM_HpackParserInitDestroy); -static void UnrefHeader(grpc_exec_ctx *exec_ctx, void *user_data, +static void UnrefHeader(grpc_exec_ctx* exec_ctx, void* user_data, grpc_mdelem md) { GRPC_MDELEM_UNREF(exec_ctx, md); } -template <class Fixture, void (*OnHeader)(grpc_exec_ctx *, void *, grpc_mdelem)> -static void BM_HpackParserParseHeader(benchmark::State &state) { +template <class Fixture, void (*OnHeader)(grpc_exec_ctx*, void*, grpc_mdelem)> +static void 
BM_HpackParserParseHeader(benchmark::State& state) { TrackCounters track_counters; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; std::vector<grpc_slice> init_slices = Fixture::GetInitSlices(); @@ -760,20 +766,20 @@ class RepresentativeServerTrailingMetadata { } }; -static void free_timeout(void *p) { gpr_free(p); } +static void free_timeout(void* p) { gpr_free(p); } // New implementation. -static void OnHeaderNew(grpc_exec_ctx *exec_ctx, void *user_data, +static void OnHeaderNew(grpc_exec_ctx* exec_ctx, void* user_data, grpc_mdelem md) { if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) { - grpc_millis *cached_timeout = - static_cast<grpc_millis *>(grpc_mdelem_get_user_data(md, free_timeout)); + grpc_millis* cached_timeout = + static_cast<grpc_millis*>(grpc_mdelem_get_user_data(md, free_timeout)); grpc_millis timeout; if (cached_timeout != NULL) { timeout = *cached_timeout; } else { if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), &timeout)) { - char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); + char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val); gpr_free(val); timeout = GRPC_MILLIS_INF_FUTURE; @@ -781,7 +787,7 @@ static void OnHeaderNew(grpc_exec_ctx *exec_ctx, void *user_data, if (GRPC_MDELEM_IS_INTERNED(md)) { /* not already parsed: parse it now, and store the * result away */ - cached_timeout = (grpc_millis *)gpr_malloc(sizeof(grpc_millis)); + cached_timeout = (grpc_millis*)gpr_malloc(sizeof(grpc_millis)); *cached_timeout = timeout; grpc_mdelem_set_user_data(md, free_timeout, cached_timeout); } diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc index e9f537faa4..154cc91778 100644 --- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc +++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc @@ -35,7 +35,7 @@ #include "test/cpp/microbenchmarks/helpers.h" #include "third_party/benchmark/include/benchmark/benchmark.h" -auto &force_library_initialization = Library::get(); +auto& force_library_initialization = Library::get(); //////////////////////////////////////////////////////////////////////////////// // Helper classes @@ -58,7 +58,7 @@ class DummyEndpoint : public grpc_endpoint { ru_ = grpc_resource_user_create(Library::get().rq(), "dummy_endpoint"); } - void PushInput(grpc_exec_ctx *exec_ctx, grpc_slice slice) { + void PushInput(grpc_exec_ctx* exec_ctx, grpc_slice slice) { if (read_cb_ == nullptr) { GPR_ASSERT(!have_slice_); buffered_slice_ = slice; @@ -71,14 +71,14 @@ class DummyEndpoint : public grpc_endpoint { } private: - grpc_resource_user *ru_; - grpc_closure *read_cb_ = nullptr; - grpc_slice_buffer *slices_ = nullptr; + grpc_resource_user* ru_; + grpc_closure* read_cb_ = nullptr; + grpc_slice_buffer* slices_ = nullptr; bool have_slice_ = false; grpc_slice buffered_slice_; - void QueueRead(grpc_exec_ctx *exec_ctx, grpc_slice_buffer *slices, - grpc_closure *cb) { + void QueueRead(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* slices, + grpc_closure* cb) { GPR_ASSERT(read_cb_ == nullptr); if (have_slice_) { have_slice_ = false; @@ -90,51 +90,50 @@ class DummyEndpoint : public grpc_endpoint { slices_ = slices; } - static void read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb) { - static_cast<DummyEndpoint *>(ep)->QueueRead(exec_ctx, slices, cb); + static void read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, + grpc_slice_buffer* slices, grpc_closure* cb) { + static_cast<DummyEndpoint*>(ep)->QueueRead(exec_ctx, 
slices, cb); } - static void write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb) { + static void write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, + grpc_slice_buffer* slices, grpc_closure* cb) { GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE); } - static grpc_workqueue *get_workqueue(grpc_endpoint *ep) { return NULL; } + static grpc_workqueue* get_workqueue(grpc_endpoint* ep) { return NULL; } - static void add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *pollset) {} + static void add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, + grpc_pollset* pollset) {} - static void add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset_set *pollset) {} + static void add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, + grpc_pollset_set* pollset) {} - static void delete_from_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_endpoint *ep, - grpc_pollset_set *pollset) {} + static void delete_from_pollset_set(grpc_exec_ctx* exec_ctx, + grpc_endpoint* ep, + grpc_pollset_set* pollset) {} - static void shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_error *why) { - grpc_resource_user_shutdown(exec_ctx, - static_cast<DummyEndpoint *>(ep)->ru_); - GRPC_CLOSURE_SCHED(exec_ctx, static_cast<DummyEndpoint *>(ep)->read_cb_, + static void shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, + grpc_error* why) { + grpc_resource_user_shutdown(exec_ctx, static_cast<DummyEndpoint*>(ep)->ru_); + GRPC_CLOSURE_SCHED(exec_ctx, static_cast<DummyEndpoint*>(ep)->read_cb_, why); } - static void destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { - grpc_resource_user_unref(exec_ctx, static_cast<DummyEndpoint *>(ep)->ru_); - delete static_cast<DummyEndpoint *>(ep); + static void destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) { + grpc_resource_user_unref(exec_ctx, static_cast<DummyEndpoint*>(ep)->ru_); + delete static_cast<DummyEndpoint*>(ep); } - static grpc_resource_user *get_resource_user(grpc_endpoint *ep) { - return static_cast<DummyEndpoint *>(ep)->ru_; + static grpc_resource_user* get_resource_user(grpc_endpoint* ep) { + return static_cast<DummyEndpoint*>(ep)->ru_; } - static char *get_peer(grpc_endpoint *ep) { return gpr_strdup("test"); } - static int get_fd(grpc_endpoint *ep) { return 0; } + static char* get_peer(grpc_endpoint* ep) { return gpr_strdup("test"); } + static int get_fd(grpc_endpoint* ep) { return 0; } }; class Fixture { public: - Fixture(const grpc::ChannelArguments &args, bool client) { + Fixture(const grpc::ChannelArguments& args, bool client) { grpc_channel_args c_args = args.c_channel_args(); ep_ = new DummyEndpoint; t_ = grpc_create_chttp2_transport(exec_ctx(), &c_args, ep_, client); @@ -149,18 +148,18 @@ class Fixture { grpc_exec_ctx_finish(&exec_ctx_); } - grpc_chttp2_transport *chttp2_transport() { - return reinterpret_cast<grpc_chttp2_transport *>(t_); + grpc_chttp2_transport* chttp2_transport() { + return reinterpret_cast<grpc_chttp2_transport*>(t_); } - grpc_transport *transport() { return t_; } - grpc_exec_ctx *exec_ctx() { return &exec_ctx_; } + grpc_transport* transport() { return t_; } + grpc_exec_ctx* exec_ctx() { return &exec_ctx_; } void PushInput(grpc_slice slice) { ep_->PushInput(exec_ctx(), slice); } private: - DummyEndpoint *ep_; + DummyEndpoint* ep_; grpc_exec_ctx exec_ctx_ = GRPC_EXEC_CTX_INIT; - grpc_transport *t_; + grpc_transport* t_; }; class Closure : public grpc_closure { @@ -170,37 +169,37 @@ class Closure : public grpc_closure { template <class F> 
std::unique_ptr<Closure> MakeClosure( - F f, grpc_closure_scheduler *sched = grpc_schedule_on_exec_ctx) { + F f, grpc_closure_scheduler* sched = grpc_schedule_on_exec_ctx) { struct C : public Closure { - C(const F &f, grpc_closure_scheduler *sched) : f_(f) { + C(const F& f, grpc_closure_scheduler* sched) : f_(f) { GRPC_CLOSURE_INIT(this, Execute, this, sched); } F f_; - static void Execute(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - static_cast<C *>(arg)->f_(exec_ctx, error); + static void Execute(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { + static_cast<C*>(arg)->f_(exec_ctx, error); } }; return std::unique_ptr<Closure>(new C(f, sched)); } template <class F> -grpc_closure *MakeOnceClosure( - F f, grpc_closure_scheduler *sched = grpc_schedule_on_exec_ctx) { +grpc_closure* MakeOnceClosure( + F f, grpc_closure_scheduler* sched = grpc_schedule_on_exec_ctx) { struct C : public grpc_closure { - C(const F &f) : f_(f) {} + C(const F& f) : f_(f) {} F f_; - static void Execute(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - static_cast<C *>(arg)->f_(exec_ctx, error); - delete static_cast<C *>(arg); + static void Execute(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { + static_cast<C*>(arg)->f_(exec_ctx, error); + delete static_cast<C*>(arg); } }; - auto *c = new C{f}; + auto* c = new C{f}; return GRPC_CLOSURE_INIT(c, C::Execute, c, sched); } class Stream { public: - Stream(Fixture *f) : f_(f) { + Stream(Fixture* f) : f_(f) { stream_size_ = grpc_transport_stream_size(f->transport()); stream_ = gpr_malloc(stream_size_); arena_ = gpr_arena_create(4096); @@ -212,7 +211,7 @@ class Stream { gpr_arena_destroy(arena_); } - void Init(benchmark::State &state) { + void Init(benchmark::State& state) { GRPC_STREAM_REF_INIT(&refcount_, 1, &Stream::FinishDestroy, this, "test_stream"); gpr_event_init(&done_); @@ -222,11 +221,11 @@ class Stream { arena_ = gpr_arena_create(4096); } grpc_transport_init_stream(f_->exec_ctx(), f_->transport(), - static_cast<grpc_stream *>(stream_), &refcount_, + static_cast<grpc_stream*>(stream_), &refcount_, NULL, arena_); } - void DestroyThen(grpc_exec_ctx *exec_ctx, grpc_closure *closure) { + void DestroyThen(grpc_exec_ctx* exec_ctx, grpc_closure* closure) { destroy_closure_ = closure; #ifndef NDEBUG grpc_stream_unref(exec_ctx, &refcount_, "DestroyThen"); @@ -235,31 +234,31 @@ class Stream { #endif } - void Op(grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op) { + void Op(grpc_exec_ctx* exec_ctx, grpc_transport_stream_op_batch* op) { grpc_transport_perform_stream_op(exec_ctx, f_->transport(), - static_cast<grpc_stream *>(stream_), op); + static_cast<grpc_stream*>(stream_), op); } - grpc_chttp2_stream *chttp2_stream() { - return static_cast<grpc_chttp2_stream *>(stream_); + grpc_chttp2_stream* chttp2_stream() { + return static_cast<grpc_chttp2_stream*>(stream_); } private: - static void FinishDestroy(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - auto stream = static_cast<Stream *>(arg); + static void FinishDestroy(grpc_exec_ctx* exec_ctx, void* arg, + grpc_error* error) { + auto stream = static_cast<Stream*>(arg); grpc_transport_destroy_stream(exec_ctx, stream->f_->transport(), - static_cast<grpc_stream *>(stream->stream_), + static_cast<grpc_stream*>(stream->stream_), stream->destroy_closure_); - gpr_event_set(&stream->done_, (void *)1); + gpr_event_set(&stream->done_, (void*)1); } - Fixture *f_; + Fixture* f_; grpc_stream_refcount refcount_; - gpr_arena *arena_; + gpr_arena* arena_; size_t stream_size_; - void 
*stream_; - grpc_closure *destroy_closure_ = nullptr; + void* stream_; + grpc_closure* destroy_closure_ = nullptr; gpr_event done_; }; @@ -267,7 +266,7 @@ class Stream { // Benchmarks // -static void BM_StreamCreateDestroy(benchmark::State &state) { +static void BM_StreamCreateDestroy(benchmark::State& state) { TrackCounters track_counters; Fixture f(grpc::ChannelArguments(), true); Stream s(&f); @@ -278,7 +277,7 @@ static void BM_StreamCreateDestroy(benchmark::State &state) { op.payload = &op_payload; op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED; std::unique_ptr<Closure> next = - MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) { + MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) { if (!state.KeepRunning()) return; s.Init(state); s.Op(exec_ctx, &op); @@ -292,9 +291,10 @@ BENCHMARK(BM_StreamCreateDestroy); class RepresentativeClientInitialMetadata { public: - static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx *exec_ctx) { + static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) { return { - GRPC_MDELEM_SCHEME_HTTP, GRPC_MDELEM_METHOD_POST, + GRPC_MDELEM_SCHEME_HTTP, + GRPC_MDELEM_METHOD_POST, grpc_mdelem_from_slices( exec_ctx, GRPC_MDSTR_PATH, grpc_slice_intern(grpc_slice_from_static_string("/foo/bar"))), @@ -312,7 +312,7 @@ class RepresentativeClientInitialMetadata { }; template <class Metadata> -static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) { +static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State& state) { TrackCounters track_counters; Fixture f(grpc::ChannelArguments(), true); Stream s(&f); @@ -339,7 +339,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) { } f.FlushExecCtx(); - start = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) { + start = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) { if (!state.KeepRunning()) return; s.Init(state); reset_op(); @@ -348,7 +348,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) { op.payload->send_initial_metadata.send_initial_metadata = &b; s.Op(exec_ctx, &op); }); - done = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) { + done = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) { reset_op(); op.cancel_stream = true; op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED; @@ -363,7 +363,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) { BENCHMARK_TEMPLATE(BM_StreamCreateSendInitialMetadataDestroy, RepresentativeClientInitialMetadata); -static void BM_TransportEmptyOp(benchmark::State &state) { +static void BM_TransportEmptyOp(benchmark::State& state) { TrackCounters track_counters; Fixture f(grpc::ChannelArguments(), true); Stream s(&f); @@ -376,7 +376,7 @@ static void BM_TransportEmptyOp(benchmark::State &state) { op.payload = &op_payload; }; std::unique_ptr<Closure> c = - MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) { + MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) { if (!state.KeepRunning()) return; reset_op(); op.on_complete = c.get(); @@ -388,8 +388,8 @@ static void BM_TransportEmptyOp(benchmark::State &state) { op.cancel_stream = true; op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED; s.Op(f.exec_ctx(), &op); - s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx *exec_ctx, - grpc_error *error) {})); + s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx* exec_ctx, + grpc_error* error) {})); f.FlushExecCtx(); 
track_counters.Finish(state); } @@ -397,7 +397,7 @@ BENCHMARK(BM_TransportEmptyOp); std::vector<std::unique_ptr<gpr_event>> done_events; -static void BM_TransportStreamSend(benchmark::State &state) { +static void BM_TransportStreamSend(benchmark::State& state) { TrackCounters track_counters; Fixture f(grpc::ChannelArguments(), true); auto s = std::unique_ptr<Stream>(new Stream(&f)); @@ -428,13 +428,13 @@ static void BM_TransportStreamSend(benchmark::State &state) { grpc_metadata_batch_add_tail(f.exec_ctx(), &b, &storage[i], elems[i]))); } - gpr_event *bm_done = new gpr_event; + gpr_event* bm_done = new gpr_event; gpr_event_init(bm_done); std::unique_ptr<Closure> c = - MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) { + MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) { if (!state.KeepRunning()) { - gpr_event_set(bm_done, (void *)1); + gpr_event_set(bm_done, (void*)1); return; } // force outgoing window to be yuge @@ -462,8 +462,8 @@ static void BM_TransportStreamSend(benchmark::State &state) { op.cancel_stream = true; op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED; s->Op(f.exec_ctx(), &op); - s->DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx *exec_ctx, - grpc_error *error) {})); + s->DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx* exec_ctx, + grpc_error* error) {})); f.FlushExecCtx(); s.reset(); track_counters.Finish(state); @@ -529,7 +529,7 @@ static grpc_slice CreateIncomingDataSlice(size_t length, size_t frame_size) { return grpc_slice_from_copied_buffer(framed.data(), framed.size()); } -static void BM_TransportStreamRecv(benchmark::State &state) { +static void BM_TransportStreamRecv(benchmark::State& state) { TrackCounters track_counters; Fixture f(grpc::ChannelArguments(), true); Stream s(&f); @@ -537,7 +537,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) { grpc_transport_stream_op_batch_payload op_payload; memset(&op_payload, 0, sizeof(op_payload)); grpc_transport_stream_op_batch op; - grpc_byte_stream *recv_stream; + grpc_byte_stream* recv_stream; grpc_slice incoming_data = CreateIncomingDataSlice(state.range(0), 16384); auto reset_op = [&]() { @@ -560,7 +560,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) { } std::unique_ptr<Closure> do_nothing = - MakeClosure([](grpc_exec_ctx *exec_ctx, grpc_error *error) {}); + MakeClosure([](grpc_exec_ctx* exec_ctx, grpc_error* error) {}); uint32_t received; @@ -570,7 +570,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) { grpc_slice recv_slice; std::unique_ptr<Closure> c = - MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) { + MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) { if (!state.KeepRunning()) return; // force outgoing window to be yuge s.chttp2_stream()->flow_control->TestOnlyForceHugeWindow(); @@ -585,7 +585,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) { f.PushInput(grpc_slice_ref(incoming_data)); }); - drain_start = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) { + drain_start = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) { if (recv_stream == NULL) { GPR_ASSERT(!state.KeepRunning()); return; @@ -593,7 +593,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) { GRPC_CLOSURE_RUN(exec_ctx, drain.get(), GRPC_ERROR_NONE); }); - drain = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) { + drain = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) { do { if (received == recv_stream->length) { grpc_byte_stream_destroy(exec_ctx, 
recv_stream); @@ -609,7 +609,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) { grpc_slice_unref_internal(exec_ctx, recv_slice), true)); }); - drain_continue = MakeClosure([&](grpc_exec_ctx *exec_ctx, grpc_error *error) { + drain_continue = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) { grpc_byte_stream_pull(exec_ctx, recv_stream, &recv_slice); received += GRPC_SLICE_LENGTH(recv_slice); grpc_slice_unref_internal(exec_ctx, recv_slice); @@ -643,8 +643,8 @@ static void BM_TransportStreamRecv(benchmark::State &state) { op.cancel_stream = true; op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED; s.Op(f.exec_ctx(), &op); - s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx *exec_ctx, - grpc_error *error) {})); + s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx* exec_ctx, + grpc_error* error) {})); f.FlushExecCtx(); track_counters.Finish(state); grpc_metadata_batch_destroy(f.exec_ctx(), &b); diff --git a/test/cpp/microbenchmarks/bm_closure.cc b/test/cpp/microbenchmarks/bm_closure.cc index 41649b8a73..458a2ceb13 100644 --- a/test/cpp/microbenchmarks/bm_closure.cc +++ b/test/cpp/microbenchmarks/bm_closure.cc @@ -22,12 +22,10 @@ #include <grpc/grpc.h> #include <sstream> -extern "C" { #include "src/core/lib/iomgr/closure.h" #include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/support/spinlock.h" -} #include "test/cpp/microbenchmarks/helpers.h" @@ -100,9 +98,10 @@ static void BM_ClosureCreateAndRun(benchmark::State& state) { TrackCounters track_counters; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; while (state.KeepRunning()) { - GRPC_CLOSURE_RUN(&exec_ctx, GRPC_CLOSURE_CREATE(DoNothing, NULL, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); + GRPC_CLOSURE_RUN( + &exec_ctx, + GRPC_CLOSURE_CREATE(DoNothing, NULL, grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); } grpc_exec_ctx_finish(&exec_ctx); track_counters.Finish(state); @@ -114,9 +113,10 @@ static void BM_ClosureInitAndRun(benchmark::State& state) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_closure c; while (state.KeepRunning()) { - GRPC_CLOSURE_RUN(&exec_ctx, GRPC_CLOSURE_INIT(&c, DoNothing, NULL, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); + GRPC_CLOSURE_RUN( + &exec_ctx, + GRPC_CLOSURE_INIT(&c, DoNothing, NULL, grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); } grpc_exec_ctx_finish(&exec_ctx); track_counters.Finish(state); diff --git a/test/cpp/microbenchmarks/bm_cq.cc b/test/cpp/microbenchmarks/bm_cq.cc index 020ec0461c..dac702b08b 100644 --- a/test/cpp/microbenchmarks/bm_cq.cc +++ b/test/cpp/microbenchmarks/bm_cq.cc @@ -26,9 +26,7 @@ #include <grpc/support/log.h> #include "test/cpp/microbenchmarks/helpers.h" -extern "C" { #include "src/core/lib/surface/completion_queue.h" -} namespace grpc { namespace testing { diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc index 57a69acf01..8d4349a297 100644 --- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc +++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc @@ -25,11 +25,9 @@ #include <grpc/support/log.h> #include "test/cpp/microbenchmarks/helpers.h" -extern "C" { #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/port.h" #include "src/core/lib/surface/completion_queue.h" -} struct grpc_pollset { gpr_mu mu; diff --git a/test/cpp/microbenchmarks/bm_error.cc b/test/cpp/microbenchmarks/bm_error.cc index 56b80dfcf6..aa7822653f 100644 --- a/test/cpp/microbenchmarks/bm_error.cc +++ 
b/test/cpp/microbenchmarks/bm_error.cc @@ -21,10 +21,8 @@ #include <benchmark/benchmark.h> #include <memory> -extern "C" { #include "src/core/lib/iomgr/error.h" #include "src/core/lib/transport/error_utils.h" -} #include "test/cpp/microbenchmarks/helpers.h" diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc index 25d243a104..f75c3e4436 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc @@ -135,8 +135,9 @@ class TrickledCHTTP2 : public EndpointPairFixture { ? static_cast<grpc_chttp2_stream*>(server->stream_map.values[0]) : nullptr; write_csv( - log_.get(), static_cast<double>(now.tv_sec) + - 1e-9 * static_cast<double>(now.tv_nsec), + log_.get(), + static_cast<double>(now.tv_sec) + + 1e-9 * static_cast<double>(now.tv_nsec), iteration, grpc_trickle_get_backlog(endpoint_pair_.client), grpc_trickle_get_backlog(endpoint_pair_.server), client->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != nullptr, @@ -441,8 +442,8 @@ static void UnaryTrickleArgs(benchmark::internal::Benchmark* b) { } } BENCHMARK(BM_PumpUnbalancedUnary_Trickle)->Apply(UnaryTrickleArgs); -} -} +} // namespace testing +} // namespace grpc extern "C" gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type); diff --git a/test/cpp/microbenchmarks/bm_metadata.cc b/test/cpp/microbenchmarks/bm_metadata.cc index 360bbabe13..1ed05f7466 100644 --- a/test/cpp/microbenchmarks/bm_metadata.cc +++ b/test/cpp/microbenchmarks/bm_metadata.cc @@ -21,10 +21,8 @@ #include <benchmark/benchmark.h> #include <grpc/grpc.h> -extern "C" { #include "src/core/lib/transport/metadata.h" #include "src/core/lib/transport/static_metadata.h" -} #include "test/cpp/microbenchmarks/helpers.h" diff --git a/test/cpp/microbenchmarks/bm_pollset.cc b/test/cpp/microbenchmarks/bm_pollset.cc index eab1e89480..92d76f307e 100644 --- a/test/cpp/microbenchmarks/bm_pollset.cc +++ b/test/cpp/microbenchmarks/bm_pollset.cc @@ -23,12 +23,10 @@ #include <grpc/support/log.h> #include <grpc/support/useful.h> -extern "C" { #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" -} #include "test/cpp/microbenchmarks/helpers.h" #include "third_party/benchmark/include/benchmark/benchmark.h" diff --git a/test/cpp/microbenchmarks/fullstack_fixtures.h b/test/cpp/microbenchmarks/fullstack_fixtures.h index 9d345a909b..71bbb393db 100644 --- a/test/cpp/microbenchmarks/fullstack_fixtures.h +++ b/test/cpp/microbenchmarks/fullstack_fixtures.h @@ -28,7 +28,6 @@ #include <grpc/support/atm.h> #include <grpc/support/log.h> -extern "C" { #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/iomgr/endpoint.h" @@ -40,7 +39,6 @@ extern "C" { #include "src/core/lib/surface/server.h" #include "test/core/util/passthru_endpoint.h" #include "test/core/util/port.h" -} #include "src/cpp/client/create_channel_internal.h" #include "test/cpp/microbenchmarks/helpers.h" diff --git a/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h b/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h index 06d18b890d..0763d07855 100644 --- a/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h +++ b/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h @@ -278,7 +278,7 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) { void* t; bool ok; - int expect_tags; + int 
expect_tags = 0; // Send 'max_ping_pongs' number of ping pong messages int ping_pong_cnt = 0; diff --git a/test/cpp/microbenchmarks/helpers.cc b/test/cpp/microbenchmarks/helpers.cc index 782f12e99a..a4c0a3a0ce 100644 --- a/test/cpp/microbenchmarks/helpers.cc +++ b/test/cpp/microbenchmarks/helpers.cc @@ -20,9 +20,9 @@ #include "test/cpp/microbenchmarks/helpers.h" -void TrackCounters::Finish(benchmark::State &state) { +void TrackCounters::Finish(benchmark::State& state) { std::ostringstream out; - for (const auto &l : labels_) { + for (const auto& l : labels_) { out << l << ' '; } AddToLabel(out, state); @@ -33,11 +33,11 @@ void TrackCounters::Finish(benchmark::State &state) { state.SetLabel(label.c_str()); } -void TrackCounters::AddLabel(const grpc::string &label) { +void TrackCounters::AddLabel(const grpc::string& label) { labels_.push_back(label); } -void TrackCounters::AddToLabel(std::ostream &out, benchmark::State &state) { +void TrackCounters::AddToLabel(std::ostream& out, benchmark::State& state) { grpc_stats_data stats_end; grpc_stats_collect(&stats_end); grpc_stats_data stats; @@ -58,9 +58,10 @@ void TrackCounters::AddToLabel(std::ostream &out, benchmark::State &state) { } #ifdef GPR_LOW_LEVEL_COUNTERS grpc_memory_counters counters_at_end = grpc_memory_counters_snapshot(); - out << " locks/iter:" << ((double)(gpr_atm_no_barrier_load(&gpr_mu_locks) - - mu_locks_at_start_) / - (double)state.iterations()) + out << " locks/iter:" + << ((double)(gpr_atm_no_barrier_load(&gpr_mu_locks) - + mu_locks_at_start_) / + (double)state.iterations()) << " atm_cas/iter:" << ((double)(gpr_atm_no_barrier_load(&gpr_counter_atm_cas) - atm_cas_at_start_) / diff --git a/test/cpp/microbenchmarks/helpers.h b/test/cpp/microbenchmarks/helpers.h index b6cea7c317..07be589df6 100644 --- a/test/cpp/microbenchmarks/helpers.h +++ b/test/cpp/microbenchmarks/helpers.h @@ -22,11 +22,9 @@ #include <sstream> #include <vector> -extern "C" { #include <grpc/support/port_platform.h> #include "src/core/lib/debug/stats.h" #include "test/core/util/memory_counters.h" -} #include <benchmark/benchmark.h> #include <grpc++/impl/grpc_library.h> |