path: root/src/core/lib
author     Mark D. Roth <roth@google.com>  2017-09-25 12:36:30 -0700
committer  Mark D. Roth <roth@google.com>  2017-09-25 12:36:30 -0700
commit     b4c01f9e2982d61723014f76c42fc059c554b084 (patch)
tree       7ac46ab54a86744ae26317a2365e0f9fe8647812 /src/core/lib
parent     ad9208c07e74d90dd5110324fe9cb830e6e6f68b (diff)
parent     008a173a7e2ba1d5c0933aa7a77395945ba2024d (diff)
Merge remote-tracking branch 'upstream/master' into plugin_credentials_api_fix
Diffstat (limited to 'src/core/lib')
-rw-r--r--  src/core/lib/channel/channel_args.c | 34
-rw-r--r--  src/core/lib/channel/channel_stack.h | 2
-rw-r--r--  src/core/lib/channel/channel_stack_builder.c | 22
-rw-r--r--  src/core/lib/channel/connected_channel.c | 25
-rw-r--r--  src/core/lib/channel/handshaker.c | 14
-rw-r--r--  src/core/lib/channel/handshaker_registry.c | 2
-rw-r--r--  src/core/lib/compression/compression.c | 4
-rw-r--r--  src/core/lib/compression/stream_compression.c | 178
-rw-r--r--  src/core/lib/compression/stream_compression.h | 32
-rw-r--r--  src/core/lib/compression/stream_compression_gzip.c | 228
-rw-r--r--  src/core/lib/compression/stream_compression_gzip.h | 26
-rw-r--r--  src/core/lib/compression/stream_compression_identity.c | 94
-rw-r--r--  src/core/lib/compression/stream_compression_identity.h | 27
-rw-r--r--  src/core/lib/debug/stats.c | 2
-rw-r--r--  src/core/lib/debug/stats_data.c | 514
-rw-r--r--  src/core/lib/debug/stats_data.h | 346
-rw-r--r--  src/core/lib/debug/stats_data.yaml | 184
-rw-r--r--  src/core/lib/debug/stats_data_bq_schema.sql | 90
-rw-r--r--  src/core/lib/debug/trace.h | 2
-rw-r--r--  src/core/lib/http/format_request.c | 2
-rw-r--r--  src/core/lib/http/httpcli.c | 15
-rw-r--r--  src/core/lib/http/httpcli_security_connector.c | 3
-rw-r--r--  src/core/lib/http/parser.c | 7
-rw-r--r--  src/core/lib/iomgr/closure.c | 22
-rw-r--r--  src/core/lib/iomgr/combiner.c | 6
-rw-r--r--  src/core/lib/iomgr/error.c | 50
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.c | 295
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.c | 251
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.c | 64
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c | 120
-rw-r--r--  src/core/lib/iomgr/ev_posix.c | 4
-rw-r--r--  src/core/lib/iomgr/ev_posix.h | 2
-rw-r--r--  src/core/lib/iomgr/executor.c | 222
-rw-r--r--  src/core/lib/iomgr/executor.h | 7
-rw-r--r--  src/core/lib/iomgr/iomgr.c | 2
-rw-r--r--  src/core/lib/iomgr/is_epollexclusive_available.c | 12
-rw-r--r--  src/core/lib/iomgr/polling_entity.c | 18
-rw-r--r--  src/core/lib/iomgr/polling_entity.h | 8
-rw-r--r--  src/core/lib/iomgr/pollset.h | 2
-rw-r--r--  src/core/lib/iomgr/pollset_uv.c | 2
-rw-r--r--  src/core/lib/iomgr/pollset_windows.c | 6
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.c | 4
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.c | 2
-rw-r--r--  src/core/lib/iomgr/resource_quota.c | 7
-rw-r--r--  src/core/lib/iomgr/socket_factory_posix.c | 8
-rw-r--r--  src/core/lib/iomgr/socket_mutator.c | 8
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c | 177
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.c | 4
-rw-r--r--  src/core/lib/iomgr/timer_generic.c | 5
-rw-r--r--  src/core/lib/iomgr/timer_manager.c | 2
-rw-r--r--  src/core/lib/iomgr/udp_server.c | 2
-rw-r--r--  src/core/lib/iomgr/wakeup_fd_cv.c | 14
-rw-r--r--  src/core/lib/iomgr/wakeup_fd_cv.h | 4
-rw-r--r--  src/core/lib/security/credentials/google_default/google_default_credentials.c | 3
-rw-r--r--  src/core/lib/security/transport/security_connector.c | 22
-rw-r--r--  src/core/lib/security/transport/security_handshaker.c | 38
-rw-r--r--  src/core/lib/slice/slice.c | 4
-rw-r--r--  src/core/lib/support/log_linux.c | 2
-rw-r--r--  src/core/lib/support/string.c | 2
-rw-r--r--  src/core/lib/surface/call.c | 108
-rw-r--r--  src/core/lib/surface/call.h | 2
-rw-r--r--  src/core/lib/surface/channel.c | 8
-rw-r--r--  src/core/lib/surface/completion_queue.c | 86
-rw-r--r--  src/core/lib/surface/init.c | 9
-rw-r--r--  src/core/lib/surface/server.c | 48
-rw-r--r--  src/core/lib/transport/metadata_batch.c | 32
-rw-r--r--  src/core/lib/transport/static_metadata.c | 816
-rw-r--r--  src/core/lib/transport/status_conversion.c | 2
-rw-r--r--  src/core/lib/transport/transport.c | 10
-rw-r--r--  src/core/lib/transport/transport_op_string.c | 2
70 files changed, 2910 insertions, 1467 deletions
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index 02db798b5c..30248b3c60 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -86,13 +86,14 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
}
}
// Create result.
- grpc_channel_args *dst = gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args *dst =
+ (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
dst->num_args = num_args_to_copy + num_to_add;
if (dst->num_args == 0) {
dst->args = NULL;
return dst;
}
- dst->args = gpr_malloc(sizeof(grpc_arg) * dst->num_args);
+ dst->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * dst->num_args);
// Copy args from src that are not being removed.
size_t dst_idx = 0;
if (src != NULL) {
@@ -117,7 +118,7 @@ grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src) {
grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a,
const grpc_channel_args *b) {
const size_t max_out = (a->num_args + b->num_args);
- grpc_arg *uniques = gpr_malloc(sizeof(*uniques) * max_out);
+ grpc_arg *uniques = (grpc_arg *)gpr_malloc(sizeof(*uniques) * max_out);
for (size_t i = 0; i < a->num_args; ++i) uniques[i] = a->args[i];
size_t uniques_idx = a->num_args;
@@ -160,24 +161,25 @@ static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
/* stabilizing comparison function: since channel_args ordering matters for
* keys with the same name, we need to preserve that ordering */
static int cmp_key_stable(const void *ap, const void *bp) {
- const grpc_arg *const *a = ap;
- const grpc_arg *const *b = bp;
+ const grpc_arg *const *a = (const grpc_arg *const *)ap;
+ const grpc_arg *const *b = (const grpc_arg *const *)bp;
int c = strcmp((*a)->key, (*b)->key);
if (c == 0) c = GPR_ICMP(*a, *b);
return c;
}
grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
- grpc_arg **args = gpr_malloc(sizeof(grpc_arg *) * a->num_args);
+ grpc_arg **args = (grpc_arg **)gpr_malloc(sizeof(grpc_arg *) * a->num_args);
for (size_t i = 0; i < a->num_args; i++) {
args[i] = &a->args[i];
}
if (a->num_args > 1)
qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable);
- grpc_channel_args *b = gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args *b =
+ (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
b->num_args = a->num_args;
- b->args = gpr_malloc(sizeof(grpc_arg) * b->num_args);
+ b->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * b->num_args);
for (size_t i = 0; i < a->num_args; i++) {
b->args[i] = copy_arg(args[i]);
}
@@ -210,7 +212,7 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) {
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
const grpc_channel_args *a) {
size_t i;
- if (a == NULL) return 0;
+ if (a == NULL) return GRPC_COMPRESS_NONE;
for (i = 0; i < a->num_args; ++i) {
if (a->args[i].type == GRPC_ARG_INTEGER &&
!strcmp(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, a->args[i].key)) {
@@ -224,7 +226,7 @@ grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
grpc_stream_compression_algorithm
grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a) {
size_t i;
- if (a == NULL) return 0;
+ if (a == NULL) return GRPC_STREAM_COMPRESS_NONE;
for (i = 0; i < a->num_args; ++i) {
if (a->args[i].type == GRPC_ARG_INTEGER &&
!strcmp(GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
@@ -241,7 +243,7 @@ grpc_channel_args *grpc_channel_args_set_compression_algorithm(
GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+ tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
@@ -251,7 +253,7 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
GPR_ASSERT(algorithm < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+ tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
@@ -306,7 +308,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
if (grpc_channel_args_get_compression_algorithm(*a) == algorithm &&
state == 0) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algo_name) != 0);
gpr_log(GPR_ERROR,
"Tried to disable default compression algorithm '%s'. The "
@@ -322,7 +324,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+ tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
/* all enabled by default */
tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
@@ -347,7 +349,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
if (grpc_channel_args_get_stream_compression_algorithm(*a) == algorithm &&
state == 0) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
GPR_ASSERT(grpc_stream_compression_algorithm_name(algorithm, &algo_name) !=
0);
gpr_log(GPR_ERROR,
@@ -364,7 +366,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+ tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
/* all enabled by default */
tmp.value.integer = (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
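Note on the channel_args changes above: gpr_malloc returns void *, and the key macros are string literals, so the added casts make these allocations and assignments valid under C++'s stricter conversion rules as well as C's. As a minimal caller-side sketch (not part of this commit, and essentially what grpc_channel_args_set_compression_algorithm above already does), building such an argument looks like:

#include <grpc/compression.h>
#include "src/core/lib/channel/channel_args.h"

/* Hypothetical helper (not from this commit): make gzip the channel-default
   compression algorithm. */
static grpc_channel_args *with_gzip_default(const grpc_channel_args *a) {
  grpc_arg tmp;
  tmp.type = GRPC_ARG_INTEGER;
  /* The key macro is a string literal, hence the cast to grpc_arg's
     non-const char * field. */
  tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
  tmp.value.integer = GRPC_COMPRESS_GZIP;
  /* Allocates and returns a new grpc_channel_args; release it later with
     grpc_channel_args_destroy. */
  return grpc_channel_args_copy_and_add(a, &tmp, 1);
}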
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index ae1cac31f7..f0de80f0c0 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -281,7 +281,7 @@ grpc_channel_stack *grpc_channel_stack_from_top_element(
/* Given the top element of a call stack, get the call stack itself */
grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
-void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op);
diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c
index 2c991ea960..b663ebfb52 100644
--- a/src/core/lib/channel/channel_stack_builder.c
+++ b/src/core/lib/channel/channel_stack_builder.c
@@ -51,7 +51,8 @@ struct grpc_channel_stack_builder_iterator {
};
grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
- grpc_channel_stack_builder *b = gpr_zalloc(sizeof(*b));
+ grpc_channel_stack_builder *b =
+ (grpc_channel_stack_builder *)gpr_zalloc(sizeof(*b));
b->begin.filter = NULL;
b->end.filter = NULL;
@@ -76,7 +77,8 @@ const char *grpc_channel_stack_builder_get_target(
static grpc_channel_stack_builder_iterator *create_iterator_at_filter_node(
grpc_channel_stack_builder *builder, filter_node *node) {
- grpc_channel_stack_builder_iterator *it = gpr_malloc(sizeof(*it));
+ grpc_channel_stack_builder_iterator *it =
+ (grpc_channel_stack_builder_iterator *)gpr_malloc(sizeof(*it));
it->builder = builder;
it->node = node;
return it;
@@ -212,13 +214,13 @@ bool grpc_channel_stack_builder_prepend_filter(
static void add_after(filter_node *before, const grpc_channel_filter *filter,
grpc_post_filter_create_init_func post_init_func,
void *user_data) {
- filter_node *new = (filter_node *)gpr_malloc(sizeof(*new));
- new->next = before->next;
- new->prev = before;
- new->next->prev = new->prev->next = new;
- new->filter = filter;
- new->init = post_init_func;
- new->init_arg = user_data;
+ filter_node *new_node = (filter_node *)gpr_malloc(sizeof(*new_node));
+ new_node->next = before->next;
+ new_node->prev = before;
+ new_node->next->prev = new_node->prev->next = new_node;
+ new_node->filter = filter;
+ new_node->init = post_init_func;
+ new_node->init_arg = user_data;
}
bool grpc_channel_stack_builder_add_filter_before(
@@ -266,7 +268,7 @@ grpc_error *grpc_channel_stack_builder_finish(
// create an array of filters
const grpc_channel_filter **filters =
- gpr_malloc(sizeof(*filters) * num_filters);
+ (const grpc_channel_filter **)gpr_malloc(sizeof(*filters) * num_filters);
size_t i = 0;
for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
filters[i++] = p->filter;
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index 8285226fc4..4f37908958 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -100,8 +100,8 @@ static callback_state *get_state_for_batch(
static void con_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (batch->recv_initial_metadata) {
callback_state *state = &calld->recv_initial_metadata_ready;
intercept_callback(
@@ -136,7 +136,7 @@ static void con_start_transport_stream_op_batch(
static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_perform_op(exec_ctx, chand->transport, op);
}
@@ -144,8 +144,8 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
@@ -158,8 +158,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_set_pops(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
}
@@ -168,8 +168,8 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
then_schedule_closure);
@@ -218,7 +218,7 @@ static void bind_transport(grpc_channel_stack *channel_stack,
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_filter);
GPR_ASSERT(cd->transport == NULL);
- cd->transport = t;
+ cd->transport = (grpc_transport *)t;
/* HACK(ctiller): increase call stack size for the channel to make space
for channel data. We need a cleaner (but performant) way to do this,
@@ -226,7 +226,8 @@ static void bind_transport(grpc_channel_stack *channel_stack,
This is only "safe" because call stacks place no additional data after
the last call element, and the last call element MUST be the connected
channel. */
- channel_stack->call_stack_size += grpc_transport_stream_size(t);
+ channel_stack->call_stack_size +=
+ grpc_transport_stream_size((grpc_transport *)t);
}
bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
@@ -240,6 +241,6 @@ bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
}
grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 2cb83f4114..1753da5721 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -84,7 +84,8 @@ struct grpc_handshake_manager {
};
grpc_handshake_manager* grpc_handshake_manager_create() {
- grpc_handshake_manager* mgr = gpr_zalloc(sizeof(grpc_handshake_manager));
+ grpc_handshake_manager* mgr =
+ (grpc_handshake_manager*)gpr_zalloc(sizeof(grpc_handshake_manager));
gpr_mu_init(&mgr->mu);
gpr_ref_init(&mgr->refs, 1);
return mgr;
@@ -137,8 +138,8 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
realloc_count = mgr->count * 2;
}
if (realloc_count > 0) {
- mgr->handshakers =
- gpr_realloc(mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
+ mgr->handshakers = (grpc_handshaker**)gpr_realloc(
+ mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
}
mgr->handshakers[mgr->count++] = handshaker;
gpr_mu_unlock(&mgr->mu);
@@ -205,7 +206,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
// handshakers together.
static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- grpc_handshake_manager* mgr = arg;
+ grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
gpr_mu_lock(&mgr->mu);
bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error));
gpr_mu_unlock(&mgr->mu);
@@ -219,7 +220,7 @@ static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked when deadline is exceeded.
static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
- grpc_handshake_manager* mgr = arg;
+ grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled.
grpc_handshake_manager_shutdown(
exec_ctx, mgr,
@@ -241,7 +242,8 @@ void grpc_handshake_manager_do_handshake(
mgr->args.endpoint = endpoint;
mgr->args.args = grpc_channel_args_copy(channel_args);
mgr->args.user_data = user_data;
- mgr->args.read_buffer = gpr_malloc(sizeof(*mgr->args.read_buffer));
+ mgr->args.read_buffer =
+ (grpc_slice_buffer*)gpr_malloc(sizeof(*mgr->args.read_buffer));
grpc_slice_buffer_init(mgr->args.read_buffer);
// Initialize state needed for calling handshakers.
mgr->acceptor = acceptor;
diff --git a/src/core/lib/channel/handshaker_registry.c b/src/core/lib/channel/handshaker_registry.c
index 8c4bc3aa00..c6bc87d704 100644
--- a/src/core/lib/channel/handshaker_registry.c
+++ b/src/core/lib/channel/handshaker_registry.c
@@ -34,7 +34,7 @@ typedef struct {
static void grpc_handshaker_factory_list_register(
grpc_handshaker_factory_list* list, bool at_start,
grpc_handshaker_factory* factory) {
- list->list = gpr_realloc(
+ list->list = (grpc_handshaker_factory**)gpr_realloc(
list->list, (list->num_factories + 1) * sizeof(grpc_handshaker_factory*));
if (at_start) {
memmove(list->list + 1, list->list,
diff --git a/src/core/lib/compression/compression.c b/src/core/lib/compression/compression.c
index ec84c01811..1cfac23129 100644
--- a/src/core/lib/compression/compression.c
+++ b/src/core/lib/compression/compression.c
@@ -60,7 +60,7 @@ int grpc_stream_compression_algorithm_parse(
}
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
- char **name) {
+ const char **name) {
GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
((int)algorithm, name));
switch (algorithm) {
@@ -80,7 +80,7 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
}
int grpc_stream_compression_algorithm_name(
- grpc_stream_compression_algorithm algorithm, char **name) {
+ grpc_stream_compression_algorithm algorithm, const char **name) {
GRPC_API_TRACE(
"grpc_stream_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
((int)algorithm, name));
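A small caller-side sketch (assumed, not from this commit) of the const-corrected lookup: the returned name points at a static string, so callers now receive it through a const char *.

#include <grpc/compression.h>
#include <grpc/support/log.h>

static void log_algorithm_name(grpc_compression_algorithm alg) {
  const char *name = NULL; /* previously declared as `char *name` */
  if (grpc_compression_algorithm_name(alg, &name)) {
    gpr_log(GPR_INFO, "compression algorithm %d is '%s'", (int)alg, name);
  } else {
    gpr_log(GPR_ERROR, "unknown compression algorithm %d", (int)alg);
  }
}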
diff --git a/src/core/lib/compression/stream_compression.c b/src/core/lib/compression/stream_compression.c
index df13d53e06..411489f029 100644
--- a/src/core/lib/compression/stream_compression.c
+++ b/src/core/lib/compression/stream_compression.c
@@ -16,176 +16,62 @@
*
*/
-#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression.h"
-#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/compression/stream_compression_gzip.h"
-#define OUTPUT_BLOCK_SIZE (1024)
-
-static bool gzip_flate(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size, int flush,
- bool *end_of_context) {
- GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH);
- /* Full flush is not allowed when inflating. */
- GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
-
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- int r;
- bool eoc = false;
- size_t original_max_output_size = max_output_size;
- while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) {
- size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size
- : OUTPUT_BLOCK_SIZE;
- grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size);
- ctx->zs.avail_out = (uInt)slice_size;
- ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out);
- while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) {
- grpc_slice slice = grpc_slice_buffer_take_first(in);
- ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice);
- ctx->zs.next_in = GRPC_SLICE_START_PTR(slice);
- r = ctx->flate(&ctx->zs, Z_NO_FLUSH);
- if (r < 0 && r != Z_BUF_ERROR) {
- gpr_log(GPR_ERROR, "zlib error (%d)", r);
- grpc_slice_unref_internal(&exec_ctx, slice_out);
- grpc_exec_ctx_finish(&exec_ctx);
- return false;
- } else if (r == Z_STREAM_END && ctx->flate == inflate) {
- eoc = true;
- }
- if (ctx->zs.avail_in > 0) {
- grpc_slice_buffer_undo_take_first(
- in,
- grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in,
- GRPC_SLICE_LENGTH(slice)));
- }
- grpc_slice_unref_internal(&exec_ctx, slice);
- }
- if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) {
- GPR_ASSERT(in->length == 0);
- r = ctx->flate(&ctx->zs, flush);
- if (flush == Z_SYNC_FLUSH) {
- switch (r) {
- case Z_OK:
- /* Maybe flush is not complete; just made some partial progress. */
- if (ctx->zs.avail_out > 0) {
- flush = 0;
- }
- break;
- case Z_BUF_ERROR:
- case Z_STREAM_END:
- flush = 0;
- break;
- default:
- gpr_log(GPR_ERROR, "zlib error (%d)", r);
- grpc_slice_unref_internal(&exec_ctx, slice_out);
- grpc_exec_ctx_finish(&exec_ctx);
- return false;
- }
- } else if (flush == Z_FINISH) {
- switch (r) {
- case Z_OK:
- case Z_BUF_ERROR:
- /* Wait for the next loop to assign additional output space. */
- GPR_ASSERT(ctx->zs.avail_out == 0);
- break;
- case Z_STREAM_END:
- flush = 0;
- break;
- default:
- gpr_log(GPR_ERROR, "zlib error (%d)", r);
- grpc_slice_unref_internal(&exec_ctx, slice_out);
- grpc_exec_ctx_finish(&exec_ctx);
- return false;
- }
- }
- }
-
- if (ctx->zs.avail_out == 0) {
- grpc_slice_buffer_add(out, slice_out);
- } else if (ctx->zs.avail_out < slice_size) {
- slice_out.data.refcounted.length -= ctx->zs.avail_out;
- grpc_slice_buffer_add(out, slice_out);
- } else {
- grpc_slice_unref_internal(&exec_ctx, slice_out);
- }
- max_output_size -= (slice_size - ctx->zs.avail_out);
- }
- grpc_exec_ctx_finish(&exec_ctx);
- if (end_of_context) {
- *end_of_context = eoc;
- }
- if (output_size) {
- *output_size = original_max_output_size - max_output_size;
- }
- return true;
-}
+extern const grpc_stream_compression_vtable
+ grpc_stream_compression_identity_vtable;
bool grpc_stream_compress(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
grpc_stream_compression_flush flush) {
- GPR_ASSERT(ctx->flate == deflate);
- int gzip_flush;
- switch (flush) {
- case GRPC_STREAM_COMPRESSION_FLUSH_NONE:
- gzip_flush = 0;
- break;
- case GRPC_STREAM_COMPRESSION_FLUSH_SYNC:
- gzip_flush = Z_SYNC_FLUSH;
- break;
- case GRPC_STREAM_COMPRESSION_FLUSH_FINISH:
- gzip_flush = Z_FINISH;
- break;
- default:
- gzip_flush = 0;
- }
- return gzip_flate(ctx, in, out, output_size, max_output_size, gzip_flush,
- NULL);
+ return ctx->vtable->compress(ctx, in, out, output_size, max_output_size,
+ flush);
}
bool grpc_stream_decompress(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
bool *end_of_context) {
- GPR_ASSERT(ctx->flate == inflate);
- return gzip_flate(ctx, in, out, output_size, max_output_size, Z_SYNC_FLUSH,
- end_of_context);
+ return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size,
+ end_of_context);
}
grpc_stream_compression_context *grpc_stream_compression_context_create(
grpc_stream_compression_method method) {
- grpc_stream_compression_context *ctx =
- gpr_zalloc(sizeof(grpc_stream_compression_context));
- int r;
- if (ctx == NULL) {
- return NULL;
- }
- if (method == GRPC_STREAM_COMPRESSION_DECOMPRESS) {
- r = inflateInit2(&ctx->zs, 0x1F);
- ctx->flate = inflate;
- } else {
- r = deflateInit2(&ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
- Z_DEFAULT_STRATEGY);
- ctx->flate = deflate;
- }
- if (r != Z_OK) {
- gpr_free(ctx);
- return NULL;
+ switch (method) {
+ case GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS:
+ case GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS:
+ return grpc_stream_compression_identity_vtable.context_create(method);
+ case GRPC_STREAM_COMPRESSION_GZIP_COMPRESS:
+ case GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS:
+ return grpc_stream_compression_gzip_vtable.context_create(method);
+ default:
+ gpr_log(GPR_ERROR, "Unknown stream compression method: %d", method);
+ return NULL;
}
-
- return ctx;
}
void grpc_stream_compression_context_destroy(
grpc_stream_compression_context *ctx) {
- if (ctx->flate == inflate) {
- inflateEnd(&ctx->zs);
+ ctx->vtable->context_destroy(ctx);
+}
+
+int grpc_stream_compression_method_parse(
+ grpc_slice value, bool is_compress,
+ grpc_stream_compression_method *method) {
+ if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) {
+ *method = is_compress ? GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS
+ : GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
+ return 1;
+ } else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) {
+ *method = is_compress ? GRPC_STREAM_COMPRESSION_GZIP_COMPRESS
+ : GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS;
+ return 1;
} else {
- deflateEnd(&ctx->zs);
+ return 0;
}
- gpr_free(ctx);
}
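As a usage sketch of the new parse helper (caller code assumed, not part of this commit): map an encoding name such as "gzip" or "identity" to a method, then let grpc_stream_compression_context_create pick the matching vtable.

#include <stdbool.h>
#include <grpc/slice.h>
#include "src/core/lib/compression/stream_compression.h"

/* Hypothetical helper: choose a decompression context for an incoming
   content-encoding value, or return NULL if the encoding is unknown. */
static grpc_stream_compression_context *context_for_encoding(const char *enc) {
  grpc_stream_compression_method method;
  if (!grpc_stream_compression_method_parse(grpc_slice_from_static_string(enc),
                                            false /* is_compress */, &method)) {
    return NULL;
  }
  /* "identity" yields the shared no-op context; "gzip" allocates a zlib one. */
  return grpc_stream_compression_context_create(method);
}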
diff --git a/src/core/lib/compression/stream_compression.h b/src/core/lib/compression/stream_compression.h
index 844dff81a3..6d073280fa 100644
--- a/src/core/lib/compression/stream_compression.h
+++ b/src/core/lib/compression/stream_compression.h
@@ -24,15 +24,20 @@
#include <grpc/slice_buffer.h>
#include <zlib.h>
+#include "src/core/lib/transport/static_metadata.h"
+
+typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable;
+
/* Stream compression/decompression context */
typedef struct grpc_stream_compression_context {
- z_stream zs;
- int (*flate)(z_stream *zs, int flush);
+ const grpc_stream_compression_vtable *vtable;
} grpc_stream_compression_context;
typedef enum grpc_stream_compression_method {
- GRPC_STREAM_COMPRESSION_COMPRESS = 0,
- GRPC_STREAM_COMPRESSION_DECOMPRESS,
+ GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS = 0,
+ GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS,
+ GRPC_STREAM_COMPRESSION_GZIP_COMPRESS,
+ GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS,
GRPC_STREAM_COMPRESSION_METHOD_COUNT
} grpc_stream_compression_method;
@@ -43,6 +48,19 @@ typedef enum grpc_stream_compression_flush {
GRPC_STREAM_COMPRESSION_FLUSH_COUNT
} grpc_stream_compression_flush;
+struct grpc_stream_compression_vtable {
+ bool (*compress)(grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
+ grpc_slice_buffer *out, size_t *output_size,
+ size_t max_output_size, grpc_stream_compression_flush flush);
+ bool (*decompress)(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in, grpc_slice_buffer *out,
+ size_t *output_size, size_t max_output_size,
+ bool *end_of_context);
+ grpc_stream_compression_context *(*context_create)(
+ grpc_stream_compression_method method);
+ void (*context_destroy)(grpc_stream_compression_context *ctx);
+};
+
/**
* Compress bytes provided in \a in with a given context, with an optional flush
* at the end of compression. Emits at most \a max_output_size compressed bytes
@@ -87,4 +105,10 @@ grpc_stream_compression_context *grpc_stream_compression_context_create(
void grpc_stream_compression_context_destroy(
grpc_stream_compression_context *ctx);
+/**
+ * Parse stream compression method based on algorithm name
+ */
+int grpc_stream_compression_method_parse(
+ grpc_slice value, bool is_compress, grpc_stream_compression_method *method);
+
#endif
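A minimal end-to-end sketch of the API declared here, under the assumption that the caller owns its slice buffers (none of this code is in the commit): create a gzip compression context, compress one buffer with a sync flush, and tear everything down.

#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression.h"

static void compress_once(void) {
  grpc_stream_compression_context *ctx = grpc_stream_compression_context_create(
      GRPC_STREAM_COMPRESSION_GZIP_COMPRESS);
  grpc_slice_buffer in, out;
  grpc_slice_buffer_init(&in);
  grpc_slice_buffer_init(&out);
  grpc_slice_buffer_add(&in, grpc_slice_from_copied_string("hello stream"));
  size_t output_size = 0;
  /* Dispatches to the gzip implementation through ctx->vtable->compress. */
  GPR_ASSERT(grpc_stream_compress(ctx, &in, &out, &output_size,
                                  ~(size_t)0 /* no output cap */,
                                  GRPC_STREAM_COMPRESSION_FLUSH_SYNC));
  /* `out` now holds output_size bytes of gzip-framed data. */
  grpc_slice_buffer_destroy(&in);
  grpc_slice_buffer_destroy(&out);
  grpc_stream_compression_context_destroy(ctx);
}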
diff --git a/src/core/lib/compression/stream_compression_gzip.c b/src/core/lib/compression/stream_compression_gzip.c
new file mode 100644
index 0000000000..abcbdb3a91
--- /dev/null
+++ b/src/core/lib/compression/stream_compression_gzip.c
@@ -0,0 +1,228 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/compression/stream_compression_gzip.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/slice/slice_internal.h"
+
+#define OUTPUT_BLOCK_SIZE (1024)
+
+typedef struct grpc_stream_compression_context_gzip {
+ grpc_stream_compression_context base;
+
+ z_stream zs;
+ int (*flate)(z_stream *zs, int flush);
+} grpc_stream_compression_context_gzip;
+
+static bool gzip_flate(grpc_stream_compression_context_gzip *ctx,
+ grpc_slice_buffer *in, grpc_slice_buffer *out,
+ size_t *output_size, size_t max_output_size, int flush,
+ bool *end_of_context) {
+ GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH);
+ /* Full flush is not allowed when inflating. */
+ GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
+
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ int r;
+ bool eoc = false;
+ size_t original_max_output_size = max_output_size;
+ while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) {
+ size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size
+ : OUTPUT_BLOCK_SIZE;
+ grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size);
+ ctx->zs.avail_out = (uInt)slice_size;
+ ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out);
+ while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) {
+ grpc_slice slice = grpc_slice_buffer_take_first(in);
+ ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice);
+ ctx->zs.next_in = GRPC_SLICE_START_PTR(slice);
+ r = ctx->flate(&ctx->zs, Z_NO_FLUSH);
+ if (r < 0 && r != Z_BUF_ERROR) {
+ gpr_log(GPR_ERROR, "zlib error (%d)", r);
+ grpc_slice_unref_internal(&exec_ctx, slice_out);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return false;
+ } else if (r == Z_STREAM_END && ctx->flate == inflate) {
+ eoc = true;
+ }
+ if (ctx->zs.avail_in > 0) {
+ grpc_slice_buffer_undo_take_first(
+ in,
+ grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in,
+ GRPC_SLICE_LENGTH(slice)));
+ }
+ grpc_slice_unref_internal(&exec_ctx, slice);
+ }
+ if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) {
+ GPR_ASSERT(in->length == 0);
+ r = ctx->flate(&ctx->zs, flush);
+ if (flush == Z_SYNC_FLUSH) {
+ switch (r) {
+ case Z_OK:
+ /* Maybe flush is not complete; just made some partial progress. */
+ if (ctx->zs.avail_out > 0) {
+ flush = 0;
+ }
+ break;
+ case Z_BUF_ERROR:
+ case Z_STREAM_END:
+ flush = 0;
+ break;
+ default:
+ gpr_log(GPR_ERROR, "zlib error (%d)", r);
+ grpc_slice_unref_internal(&exec_ctx, slice_out);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return false;
+ }
+ } else if (flush == Z_FINISH) {
+ switch (r) {
+ case Z_OK:
+ case Z_BUF_ERROR:
+ /* Wait for the next loop to assign additional output space. */
+ GPR_ASSERT(ctx->zs.avail_out == 0);
+ break;
+ case Z_STREAM_END:
+ flush = 0;
+ break;
+ default:
+ gpr_log(GPR_ERROR, "zlib error (%d)", r);
+ grpc_slice_unref_internal(&exec_ctx, slice_out);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return false;
+ }
+ }
+ }
+
+ if (ctx->zs.avail_out == 0) {
+ grpc_slice_buffer_add(out, slice_out);
+ } else if (ctx->zs.avail_out < slice_size) {
+ slice_out.data.refcounted.length -= ctx->zs.avail_out;
+ grpc_slice_buffer_add(out, slice_out);
+ } else {
+ grpc_slice_unref_internal(&exec_ctx, slice_out);
+ }
+ max_output_size -= (slice_size - ctx->zs.avail_out);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ if (end_of_context) {
+ *end_of_context = eoc;
+ }
+ if (output_size) {
+ *output_size = original_max_output_size - max_output_size;
+ }
+ return true;
+}
+
+static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size,
+ grpc_stream_compression_flush flush) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)ctx;
+ GPR_ASSERT(gzip_ctx->flate == deflate);
+ int gzip_flush;
+ switch (flush) {
+ case GRPC_STREAM_COMPRESSION_FLUSH_NONE:
+ gzip_flush = 0;
+ break;
+ case GRPC_STREAM_COMPRESSION_FLUSH_SYNC:
+ gzip_flush = Z_SYNC_FLUSH;
+ break;
+ case GRPC_STREAM_COMPRESSION_FLUSH_FINISH:
+ gzip_flush = Z_FINISH;
+ break;
+ default:
+ gzip_flush = 0;
+ }
+ return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, gzip_flush,
+ NULL);
+}
+
+static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size,
+ bool *end_of_context) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)ctx;
+ GPR_ASSERT(gzip_ctx->flate == inflate);
+ return gzip_flate(gzip_ctx, in, out, output_size, max_output_size,
+ Z_SYNC_FLUSH, end_of_context);
+}
+
+static grpc_stream_compression_context *
+grpc_stream_compression_context_create_gzip(
+ grpc_stream_compression_method method) {
+ GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS ||
+ method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)gpr_zalloc(
+ sizeof(grpc_stream_compression_context_gzip));
+ int r;
+ if (gzip_ctx == NULL) {
+ return NULL;
+ }
+ if (method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS) {
+ r = inflateInit2(&gzip_ctx->zs, 0x1F);
+ gzip_ctx->flate = inflate;
+ } else {
+ r = deflateInit2(&gzip_ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
+ Z_DEFAULT_STRATEGY);
+ gzip_ctx->flate = deflate;
+ }
+ if (r != Z_OK) {
+ gpr_free(gzip_ctx);
+ return NULL;
+ }
+
+ gzip_ctx->base.vtable = &grpc_stream_compression_gzip_vtable;
+ return (grpc_stream_compression_context *)gzip_ctx;
+}
+
+static void grpc_stream_compression_context_destroy_gzip(
+ grpc_stream_compression_context *ctx) {
+ if (ctx == NULL) {
+ return;
+ }
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)ctx;
+ if (gzip_ctx->flate == inflate) {
+ inflateEnd(&gzip_ctx->zs);
+ } else {
+ deflateEnd(&gzip_ctx->zs);
+ }
+ gpr_free(ctx);
+}
+
+const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable = {
+ .compress = grpc_stream_compress_gzip,
+ .decompress = grpc_stream_decompress_gzip,
+ .context_create = grpc_stream_compression_context_create_gzip,
+ .context_destroy = grpc_stream_compression_context_destroy_gzip};
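The mirror-image read-side sketch (again assumed caller code, not part of the commit): drain decompressed bytes and use end_of_context to detect that the gzip stream has finished.

#include <stdbool.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression_gzip.h"

/* Hypothetical helper: decompress up to max_out bytes from `in` into `out`;
   returns true once the underlying gzip stream reports end-of-stream. */
static bool drain_gzip(grpc_stream_compression_context *ctx,
                       grpc_slice_buffer *in, grpc_slice_buffer *out,
                       size_t max_out) {
  size_t produced = 0;
  bool end_of_context = false;
  GPR_ASSERT(grpc_stream_decompress(ctx, in, out, &produced, max_out,
                                    &end_of_context));
  gpr_log(GPR_DEBUG, "decompressed %zu bytes", produced);
  return end_of_context;
}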
diff --git a/src/core/lib/compression/stream_compression_gzip.h b/src/core/lib/compression/stream_compression_gzip.h
new file mode 100644
index 0000000000..7cf49a0de9
--- /dev/null
+++ b/src/core/lib/compression/stream_compression_gzip.h
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
+#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
+
+#include "src/core/lib/compression/stream_compression.h"
+
+extern const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable;
+
+#endif
diff --git a/src/core/lib/compression/stream_compression_identity.c b/src/core/lib/compression/stream_compression_identity.c
new file mode 100644
index 0000000000..3dfcf53b85
--- /dev/null
+++ b/src/core/lib/compression/stream_compression_identity.c
@@ -0,0 +1,94 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/compression/stream_compression_identity.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/slice/slice_internal.h"
+
+#define OUTPUT_BLOCK_SIZE (1024)
+
+/* Singleton context used for all identity streams. */
+static grpc_stream_compression_context identity_ctx = {
+ .vtable = &grpc_stream_compression_identity_vtable};
+
+static void grpc_stream_compression_pass_through(grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size) {
+ if (max_output_size >= in->length) {
+ if (output_size) {
+ *output_size = in->length;
+ }
+ grpc_slice_buffer_move_into(in, out);
+ } else {
+ if (output_size) {
+ *output_size = max_output_size;
+ }
+ grpc_slice_buffer_move_first(in, max_output_size, out);
+ }
+}
+
+static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size,
+ grpc_stream_compression_flush flush) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
+ return true;
+}
+
+static bool grpc_stream_decompress_identity(
+ grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
+ grpc_slice_buffer *out, size_t *output_size, size_t max_output_size,
+ bool *end_of_context) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
+ if (end_of_context) {
+ *end_of_context = false;
+ }
+ return true;
+}
+
+static grpc_stream_compression_context *
+grpc_stream_compression_context_create_identity(
+ grpc_stream_compression_method method) {
+ GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS ||
+ method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS);
+ /* No context needed in this case. Use fake context instead. */
+ return (grpc_stream_compression_context *)&identity_ctx;
+}
+
+static void grpc_stream_compression_context_destroy_identity(
+ grpc_stream_compression_context *ctx) {
+ return;
+}
+
+const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable = {
+ .compress = grpc_stream_compress_identity,
+ .decompress = grpc_stream_decompress_identity,
+ .context_create = grpc_stream_compression_context_create_identity,
+ .context_destroy = grpc_stream_compression_context_destroy_identity};
diff --git a/src/core/lib/compression/stream_compression_identity.h b/src/core/lib/compression/stream_compression_identity.h
new file mode 100644
index 0000000000..41926e949e
--- /dev/null
+++ b/src/core/lib/compression/stream_compression_identity.h
@@ -0,0 +1,27 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
+#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
+
+#include "src/core/lib/compression/stream_compression.h"
+
+extern const grpc_stream_compression_vtable
+ grpc_stream_compression_identity_vtable;
+
+#endif
diff --git a/src/core/lib/debug/stats.c b/src/core/lib/debug/stats.c
index 91ca0aa76e..4096384dd9 100644
--- a/src/core/lib/debug/stats.c
+++ b/src/core/lib/debug/stats.c
@@ -33,7 +33,7 @@ static size_t g_num_cores;
void grpc_stats_init(void) {
g_num_cores = GPR_MAX(1, gpr_cpu_num_cores());
grpc_stats_per_cpu_storage =
- gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
+ (grpc_stats_data *)gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
}
void grpc_stats_shutdown(void) { gpr_free(grpc_stats_per_cpu_storage); }
diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c
index 15ccaf21c4..fb6055f795 100644
--- a/src/core/lib/debug/stats_data.c
+++ b/src/core/lib/debug/stats_data.c
@@ -25,11 +25,23 @@
const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"client_calls_created",
"server_calls_created",
+ "cqs_created",
+ "client_channels_created",
+ "client_subchannels_created",
+ "server_channels_created",
"syscall_poll",
"syscall_wait",
+ "pollset_kick",
+ "pollset_kicked_without_poller",
+ "pollset_kicked_again",
+ "pollset_kick_wakeup_fd",
+ "pollset_kick_wakeup_cv",
+ "pollset_kick_own_thread",
"histogram_slow_lookups",
"syscall_write",
"syscall_read",
+ "tcp_backup_pollers_created",
+ "tcp_backup_poller_polls",
"http2_op_batches",
"http2_op_cancel",
"http2_op_send_initial_metadata",
@@ -38,27 +50,96 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"http2_op_recv_initial_metadata",
"http2_op_recv_message",
"http2_op_recv_trailing_metadata",
+ "http2_settings_writes",
"http2_pings_sent",
"http2_writes_begun",
+ "http2_writes_offloaded",
+ "http2_writes_continued",
+ "http2_partial_writes",
+ "http2_initiate_write_due_to_initial_write",
+ "http2_initiate_write_due_to_start_new_stream",
+ "http2_initiate_write_due_to_send_message",
+ "http2_initiate_write_due_to_send_initial_metadata",
+ "http2_initiate_write_due_to_send_trailing_metadata",
+ "http2_initiate_write_due_to_retry_send_ping",
+ "http2_initiate_write_due_to_continue_pings",
+ "http2_initiate_write_due_to_goaway_sent",
+ "http2_initiate_write_due_to_rst_stream",
+ "http2_initiate_write_due_to_close_from_api",
+ "http2_initiate_write_due_to_stream_flow_control",
+ "http2_initiate_write_due_to_transport_flow_control",
+ "http2_initiate_write_due_to_send_settings",
+ "http2_initiate_write_due_to_bdp_estimator_ping",
+ "http2_initiate_write_due_to_flow_control_unstalled_by_setting",
+ "http2_initiate_write_due_to_flow_control_unstalled_by_update",
+ "http2_initiate_write_due_to_application_ping",
+ "http2_initiate_write_due_to_keepalive_ping",
+ "http2_initiate_write_due_to_transport_flow_control_unstalled",
+ "http2_initiate_write_due_to_ping_response",
+ "http2_initiate_write_due_to_force_rst_stream",
+ "hpack_recv_indexed",
+ "hpack_recv_lithdr_incidx",
+ "hpack_recv_lithdr_incidx_v",
+ "hpack_recv_lithdr_notidx",
+ "hpack_recv_lithdr_notidx_v",
+ "hpack_recv_lithdr_nvridx",
+ "hpack_recv_lithdr_nvridx_v",
+ "hpack_recv_uncompressed",
+ "hpack_recv_huffman",
+ "hpack_recv_binary",
+ "hpack_recv_binary_base64",
+ "hpack_send_indexed",
+ "hpack_send_lithdr_incidx",
+ "hpack_send_lithdr_incidx_v",
+ "hpack_send_lithdr_notidx",
+ "hpack_send_lithdr_notidx_v",
+ "hpack_send_lithdr_nvridx",
+ "hpack_send_lithdr_nvridx_v",
+ "hpack_send_uncompressed",
+ "hpack_send_huffman",
+ "hpack_send_binary",
+ "hpack_send_binary_base64",
"combiner_locks_initiated",
"combiner_locks_scheduled_items",
"combiner_locks_scheduled_final_items",
"combiner_locks_offloaded",
- "executor_scheduled_items",
+ "executor_scheduled_short_items",
+ "executor_scheduled_long_items",
"executor_scheduled_to_self",
"executor_wakeup_initiated",
"executor_queue_drained",
+ "executor_push_retries",
+ "executor_threads_created",
+ "executor_threads_used",
+ "server_requested_calls",
+ "server_slowpath_requests_queued",
};
const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client side calls created by this process",
"Number of server side calls created by this process",
+ "Number of completion queues created", "Number of client channels created",
+ "Number of client subchannels created", "Number of server channels created",
"Number of polling syscalls (epoll_wait, poll, etc) made by this process",
"Number of sleeping syscalls made by this process",
+ "How many polling wakeups were performed by the process (only valid for "
+ "epoll1 right now)",
+ "How many times was a polling wakeup requested without an active poller "
+ "(only valid for epoll1 right now)",
+ "How many times was the same polling worker awoken repeatedly before "
+ "waking up (only valid for epoll1 right now)",
+ "How many times was an eventfd used as the wakeup vector for a polling "
+ "wakeup (only valid for epoll1 right now)",
+ "How many times was a condition variable used as the wakeup vector for a "
+ "polling wakeup (only valid for epoll1 right now)",
+ "How many times could a polling wakeup be satisfied by keeping the waking "
+ "thread awake? (only valid for epoll1 right now)",
"Number of times histogram increments went through the slow (binary "
"search) path",
"Number of write syscalls (or equivalent - eg sendmsg) made by this "
"process",
"Number of read syscalls (or equivalent - eg recvmsg) made by this process",
+ "Number of times a backup poller has been created (this can be expensive)",
+ "Number of polls performed on the backup poller",
"Number of batches received by HTTP2 transport",
"Number of cancelations received by HTTP2 transport",
"Number of batches containing send initial metadata",
@@ -67,30 +148,153 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of batches containing receive initial metadata",
"Number of batches containing receive message",
"Number of batches containing receive trailing metadata",
- "Number of HTTP2 pings sent by process", "Number of HTTP2 writes initiated",
+ "Number of settings frames sent", "Number of HTTP2 pings sent by process",
+ "Number of HTTP2 writes initiated",
+ "Number of HTTP2 writes offloaded to the executor from application threads",
+ "Number of HTTP2 writes that finished seeing more data needed to be "
+ "written",
+ "Number of HTTP2 writes that were made knowing there was still more data "
+ "to be written (we cap maximum write size to syscall_write)",
+ "Number of HTTP2 writes initiated due to 'initial_write'",
+ "Number of HTTP2 writes initiated due to 'start_new_stream'",
+ "Number of HTTP2 writes initiated due to 'send_message'",
+ "Number of HTTP2 writes initiated due to 'send_initial_metadata'",
+ "Number of HTTP2 writes initiated due to 'send_trailing_metadata'",
+ "Number of HTTP2 writes initiated due to 'retry_send_ping'",
+ "Number of HTTP2 writes initiated due to 'continue_pings'",
+ "Number of HTTP2 writes initiated due to 'goaway_sent'",
+ "Number of HTTP2 writes initiated due to 'rst_stream'",
+ "Number of HTTP2 writes initiated due to 'close_from_api'",
+ "Number of HTTP2 writes initiated due to 'stream_flow_control'",
+ "Number of HTTP2 writes initiated due to 'transport_flow_control'",
+ "Number of HTTP2 writes initiated due to 'send_settings'",
+ "Number of HTTP2 writes initiated due to 'bdp_estimator_ping'",
+ "Number of HTTP2 writes initiated due to "
+ "'flow_control_unstalled_by_setting'",
+ "Number of HTTP2 writes initiated due to "
+ "'flow_control_unstalled_by_update'",
+ "Number of HTTP2 writes initiated due to 'application_ping'",
+ "Number of HTTP2 writes initiated due to 'keepalive_ping'",
+ "Number of HTTP2 writes initiated due to "
+ "'transport_flow_control_unstalled'",
+ "Number of HTTP2 writes initiated due to 'ping_response'",
+ "Number of HTTP2 writes initiated due to 'force_rst_stream'",
+ "Number of HPACK indexed fields received",
+ "Number of HPACK literal headers received with incremental indexing",
+ "Number of HPACK literal headers received with incremental indexing and "
+ "literal keys",
+ "Number of HPACK literal headers received with no indexing",
+ "Number of HPACK literal headers received with no indexing and literal "
+ "keys",
+ "Number of HPACK literal headers received with never-indexing",
+ "Number of HPACK literal headers received with never-indexing and literal "
+ "keys",
+ "Number of uncompressed strings received in metadata",
+ "Number of huffman encoded strings received in metadata",
+ "Number of binary strings received in metadata",
+ "Number of binary strings received encoded in base64 in metadata",
+ "Number of HPACK indexed fields sent",
+ "Number of HPACK literal headers sent with incremental indexing",
+ "Number of HPACK literal headers sent with incremental indexing and "
+ "literal keys",
+ "Number of HPACK literal headers sent with no indexing",
+ "Number of HPACK literal headers sent with no indexing and literal keys",
+ "Number of HPACK literal headers sent with never-indexing",
+ "Number of HPACK literal headers sent with never-indexing and literal keys",
+ "Number of uncompressed strings sent in metadata",
+ "Number of huffman encoded strings sent in metadata",
+ "Number of binary strings received in metadata",
+ "Number of binary strings received encoded in base64 in metadata",
"Number of combiner lock entries by process (first items queued to a "
"combiner)",
"Number of items scheduled against combiner locks",
"Number of final items scheduled against combiner locks",
"Number of combiner locks offloaded to different threads",
- "Number of closures scheduled against the executor (gRPC thread pool)",
+ "Number of finite runtime closures scheduled against the executor (gRPC "
+ "thread pool)",
+ "Number of potentially infinite runtime closures scheduled against the "
+ "executor (gRPC thread pool)",
"Number of closures scheduled by the executor to the executor",
"Number of thread wakeups initiated within the executor",
"Number of times an executor queue was drained",
+ "Number of times we raced and were forced to retry pushing a closure to "
+ "the executor",
+ "Size of the backing thread pool for overflow gRPC Core work",
+ "How many executor threads actually got used",
+ "How many calls were requested (not necessarily received) by the server",
+ "How many times was the server slow path taken (indicates too few "
+ "outstanding requests)",
};
const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
- "tcp_write_size", "tcp_write_iov_size", "tcp_read_size",
- "tcp_read_offer", "tcp_read_offer_iov_size", "http2_send_message_size",
+ "call_initial_size",
+ "poll_events_returned",
+ "tcp_write_size",
+ "tcp_write_iov_size",
+ "tcp_read_size",
+ "tcp_read_offer",
+ "tcp_read_offer_iov_size",
+ "http2_send_message_size",
+ "http2_send_initial_metadata_per_write",
+ "http2_send_message_per_write",
+ "http2_send_trailing_metadata_per_write",
+ "http2_send_flowctl_per_write",
+ "executor_closures_per_wakeup",
+ "server_cqs_checked",
};
const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
+ "Initial size of the grpc_call arena created at call start",
+ "How many events are called for each syscall_poll",
"Number of bytes offered to each syscall_write",
"Number of byte segments offered to each syscall_write",
"Number of bytes received by each syscall_read",
"Number of bytes offered to each syscall_read",
"Number of byte segments offered to each syscall_read",
"Size of messages received by HTTP2 transport",
+ "Number of streams initiated written per TCP write",
+ "Number of streams whose payload was written per TCP write",
+ "Number of streams terminated per TCP write",
+ "Number of flow control updates written per TCP write",
+ "Number of closures executed each time an executor wakes up",
+ "How many completion queues were checked looking for a CQ that had "
+ "requested the incoming call",
};
const int grpc_stats_table_0[65] = {
+ 0, 1, 2, 3, 4, 5, 7, 9, 11, 14,
+ 17, 21, 26, 32, 39, 47, 57, 68, 82, 98,
+ 117, 140, 167, 199, 238, 284, 339, 404, 482, 575,
+ 685, 816, 972, 1158, 1380, 1644, 1959, 2334, 2780, 3312,
+ 3945, 4699, 5597, 6667, 7941, 9459, 11267, 13420, 15984, 19038,
+ 22676, 27009, 32169, 38315, 45635, 54353, 64737, 77104, 91834, 109378,
+ 130273, 155159, 184799, 220100, 262144};
+const uint8_t grpc_stats_table_1[124] = {
+ 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6,
+ 7, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
+ 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 22, 23, 24,
+ 24, 25, 25, 26, 26, 26, 27, 27, 28, 29, 29, 30, 30, 30, 31, 31, 32, 33,
+ 33, 34, 34, 34, 35, 35, 36, 37, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+ 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50,
+ 51, 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58};
+const int grpc_stats_table_2[129] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 30,
+ 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60,
+ 63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 94, 98, 102, 106, 110,
+ 114, 118, 122, 126, 131, 136, 141, 146, 151, 156, 162, 168, 174, 180, 186,
+ 192, 199, 206, 213, 220, 228, 236, 244, 252, 260, 269, 278, 287, 297, 307,
+ 317, 327, 338, 349, 360, 372, 384, 396, 409, 422, 436, 450, 464, 479, 494,
+ 510, 526, 543, 560, 578, 596, 615, 634, 654, 674, 695, 717, 739, 762, 785,
+ 809, 834, 859, 885, 912, 939, 967, 996, 1024};
+const uint8_t grpc_stats_table_3[166] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16,
+ 17, 17, 18, 19, 19, 20, 21, 21, 22, 23, 23, 24, 25, 25, 26, 26, 27, 27, 28,
+ 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 39,
+ 40, 40, 41, 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51,
+ 51, 52, 52, 53, 53, 54, 54, 55, 56, 57, 58, 59, 59, 60, 61, 62, 63, 63, 64,
+ 65, 65, 66, 67, 67, 68, 69, 69, 70, 71, 71, 72, 72, 73, 73, 74, 75, 75, 76,
+ 76, 77, 78, 79, 79, 80, 81, 82, 83, 84, 85, 85, 86, 87, 88, 88, 89, 90, 90,
+ 91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97, 98, 98, 99};
+const int grpc_stats_table_4[65] = {
0, 1, 2, 3, 4, 6, 8, 11,
15, 20, 26, 34, 44, 57, 73, 94,
121, 155, 199, 255, 327, 419, 537, 688,
@@ -100,26 +304,82 @@ const int grpc_stats_table_0[65] = {
326126, 417200, 533707, 682750, 873414, 1117323, 1429345, 1828502,
2339127, 2992348, 3827987, 4896985, 6264509, 8013925, 10251880, 13114801,
16777216};
-const uint8_t grpc_stats_table_1[87] = {
+const uint8_t grpc_stats_table_5[87] = {
0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11,
11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23,
24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36,
36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 45, 46, 47, 48, 48,
49, 50, 51, 51, 52, 53, 53, 54, 55, 56, 56, 57, 58, 58, 59};
-const int grpc_stats_table_2[65] = {
+const int grpc_stats_table_6[65] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 43, 47,
51, 56, 61, 66, 72, 78, 85, 92, 100, 109, 118, 128, 139,
151, 164, 178, 193, 209, 226, 244, 264, 285, 308, 333, 359, 387,
418, 451, 486, 524, 565, 609, 656, 707, 762, 821, 884, 952, 1024};
-const uint8_t grpc_stats_table_3[102] = {
+const uint8_t grpc_stats_table_7[102] = {
0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14,
14, 15, 15, 16, 16, 17, 17, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23,
23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
+const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
+const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
+void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 262144);
+ if (value < 6) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4651092515166879744ull) {
+ int bucket =
+ grpc_stats_table_1[((_val.uint - 4618441417868443648ull) >> 49)] + 6;
+ _bkt.dbl = grpc_stats_table_0[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_0, 64));
+}
+void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 29) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4642789003353915392ull) {
+ int bucket =
+ grpc_stats_table_3[((_val.uint - 4628855992006737920ull) >> 47)] + 29;
+ _bkt.dbl = grpc_stats_table_2[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_2, 128));
+}
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
@@ -133,8 +393,8 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
- grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
- _bkt.dbl = grpc_stats_table_0[bucket];
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
bucket);
@@ -142,9 +402,10 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
grpc_stats_histo_find_bucket_slow(
- (exec_ctx), value, grpc_stats_table_0, 64));
+ (exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -158,8 +419,8 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
- grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
- _bkt.dbl = grpc_stats_table_2[bucket];
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
@@ -167,9 +428,10 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
grpc_stats_histo_find_bucket_slow(
- (exec_ctx), value, grpc_stats_table_2, 64));
+ (exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
@@ -183,8 +445,8 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
- grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
- _bkt.dbl = grpc_stats_table_0[bucket];
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
bucket);
@@ -192,9 +454,10 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
grpc_stats_histo_find_bucket_slow(
- (exec_ctx), value, grpc_stats_table_0, 64));
+ (exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
@@ -208,8 +471,8 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
- grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
- _bkt.dbl = grpc_stats_table_0[bucket];
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
bucket);
@@ -217,10 +480,11 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
grpc_stats_histo_find_bucket_slow(
- (exec_ctx), value, grpc_stats_table_0, 64));
+ (exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@@ -234,8 +498,8 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
- grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
- _bkt.dbl = grpc_stats_table_2[bucket];
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, bucket);
@@ -244,10 +508,11 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
grpc_stats_histo_find_bucket_slow(
- (exec_ctx), value, grpc_stats_table_2, 64));
+ (exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM(
@@ -261,8 +526,8 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
- grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
- _bkt.dbl = grpc_stats_table_0[bucket];
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket);
@@ -271,17 +536,200 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
grpc_stats_histo_find_bucket_slow(
- (exec_ctx), value, grpc_stats_table_0, 64));
+ (exec_ctx), value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_http2_send_initial_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
+ 64));
+}
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+ int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
+ 64));
+}
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
+ int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
+ int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 64);
+ if (value < 3) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4625196817309499392ull) {
+ int bucket =
+ grpc_stats_table_9[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
+ _bkt.dbl = grpc_stats_table_8[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_8, 8));
}
-const int grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64};
-const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320};
-const int *const grpc_stats_histo_bucket_boundaries[6] = {
- grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
- grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0};
-void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx, int x) = {
+const int grpc_stats_histo_buckets[14] = {64, 128, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 8};
+const int grpc_stats_histo_start[14] = {0, 64, 192, 256, 320, 384, 448,
+ 512, 576, 640, 704, 768, 832, 896};
+const int *const grpc_stats_histo_bucket_boundaries[14] = {
+ grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
+ grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
+ grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
+ grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
+ grpc_stats_table_6, grpc_stats_table_8};
+void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
+ grpc_stats_inc_call_initial_size,
+ grpc_stats_inc_poll_events_returned,
grpc_stats_inc_tcp_write_size,
grpc_stats_inc_tcp_write_iov_size,
grpc_stats_inc_tcp_read_size,
grpc_stats_inc_tcp_read_offer,
grpc_stats_inc_tcp_read_offer_iov_size,
- grpc_stats_inc_http2_send_message_size};
+ grpc_stats_inc_http2_send_message_size,
+ grpc_stats_inc_http2_send_initial_metadata_per_write,
+ grpc_stats_inc_http2_send_message_per_write,
+ grpc_stats_inc_http2_send_trailing_metadata_per_write,
+ grpc_stats_inc_http2_send_flowctl_per_write,
+ grpc_stats_inc_executor_closures_per_wakeup,
+ grpc_stats_inc_server_cqs_checked};
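The generated fast paths above all share one trick: after clamping, the value is stored into a double, the double's bit pattern is reinterpreted as a uint64_t, and its high bits index a small companion table (grpc_stats_table_1, _3, _5, _7, _9) to get a candidate bucket, with grpc_stats_histo_find_bucket_slow as the fallback for large values. In every case the answer is the index of the last bucket boundary that does not exceed the value. A minimal reference sketch of that lookup follows; find_bucket is a hypothetical name used for illustration and is not part of this patch.

/* Hypothetical reference lookup: returns the index of the last bucket
 * boundary that is <= value. The generated fast paths above compute the
 * same answer without searching, by using the bit pattern of (double)value
 * as an index into a precomputed table. Assumes boundaries[] is sorted
 * ascending, boundaries[0] == 0, and value >= 0 (callers clamp first). */
static int find_bucket(const int *boundaries, int nbuckets, int value) {
  int lo = 0, hi = nbuckets - 1;
  while (lo < hi) {
    int mid = (lo + hi + 1) / 2;
    if (boundaries[mid] <= value) {
      lo = mid; /* this boundary is still <= value: move right */
    } else {
      hi = mid - 1; /* this boundary exceeds value: move left */
    }
  }
  return lo;
}

For example, with grpc_stats_table_8 = {0, 1, 2, 4, 7, 13, 23, 39, 64}, a value of 10 lands in bucket 4 (boundary 7), which is the same bucket the bit-pattern path in grpc_stats_inc_server_cqs_checked selects.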
diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h
index 3151e5ab5c..6c0ad30543 100644
--- a/src/core/lib/debug/stats_data.h
+++ b/src/core/lib/debug/stats_data.h
@@ -27,11 +27,23 @@
typedef enum {
GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED,
GRPC_STATS_COUNTER_SERVER_CALLS_CREATED,
+ GRPC_STATS_COUNTER_CQS_CREATED,
+ GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED,
+ GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED,
+ GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED,
GRPC_STATS_COUNTER_SYSCALL_POLL,
GRPC_STATS_COUNTER_SYSCALL_WAIT,
+ GRPC_STATS_COUNTER_POLLSET_KICK,
+ GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER,
+ GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN,
+ GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD,
+ GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV,
+ GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD,
GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
GRPC_STATS_COUNTER_SYSCALL_WRITE,
GRPC_STATS_COUNTER_SYSCALL_READ,
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED,
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS,
GRPC_STATS_COUNTER_HTTP2_OP_BATCHES,
GRPC_STATS_COUNTER_HTTP2_OP_CANCEL,
GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA,
@@ -40,60 +52,164 @@ typedef enum {
GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES,
GRPC_STATS_COUNTER_HTTP2_PINGS_SENT,
GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED,
+ GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM,
+ GRPC_STATS_COUNTER_HPACK_RECV_INDEXED,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V,
+ GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED,
+ GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN,
+ GRPC_STATS_COUNTER_HPACK_RECV_BINARY,
+ GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64,
+ GRPC_STATS_COUNTER_HPACK_SEND_INDEXED,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V,
+ GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED,
+ GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN,
+ GRPC_STATS_COUNTER_HPACK_SEND_BINARY,
+ GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64,
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED,
- GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF,
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
+ GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
+ GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED,
+ GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED,
+ GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
+ GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
GRPC_STATS_COUNTER_COUNT
} grpc_stats_counters;
extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
extern const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT];
typedef enum {
+ GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
extern const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT];
typedef enum {
- GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 0,
+ GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_FIRST_SLOT = 0,
+ GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_FIRST_SLOT = 64,
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_BUCKETS = 128,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 192,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS = 64,
- GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 64,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 256,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64,
- GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 128,
+ GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 320,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64,
- GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 192,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 384,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_BUCKETS = 64,
- GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 256,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 448,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 64,
- GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 320,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 512,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
- GRPC_STATS_HISTOGRAM_BUCKETS = 384
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 576,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 640,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 704,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 832,
+ GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 896,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
+ GRPC_STATS_HISTOGRAM_BUCKETS = 904
} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED)
+#define GRPC_STATS_INC_CQS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CQS_CREATED)
+#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED)
+#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED)
+#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED)
#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL)
#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT)
+#define GRPC_STATS_INC_POLLSET_KICK(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK)
+#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER)
+#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN)
+#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD)
+#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV)
+#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD)
#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \
@@ -114,10 +230,165 @@ typedef enum {
#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
+#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
+#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
+#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
+#define GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_INDEXED)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED)
+#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN)
+#define GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_BINARY)
+#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64)
+#define GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_INDEXED)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED)
+#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN)
+#define GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_BINARY)
+#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64)
#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
@@ -130,9 +401,12 @@ typedef enum {
#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
-#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx) \
- GRPC_STATS_INC_COUNTER((exec_ctx), \
- GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
@@ -141,6 +415,24 @@ typedef enum {
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
+#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
+#define GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED)
+#define GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED)
+#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
+#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
+#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \
+ grpc_stats_inc_call_initial_size((exec_ctx), (int)(value))
+void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, value) \
+ grpc_stats_inc_poll_events_returned((exec_ctx), (int)(value))
+void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
@@ -159,10 +451,34 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \
grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x);
-extern const int grpc_stats_histo_buckets[6];
-extern const int grpc_stats_histo_start[6];
-extern const int *const grpc_stats_histo_bucket_boundaries[6];
-extern void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx,
+#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_initial_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+ int x);
+#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx), \
+ (int)(value))
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
+ int x);
+#define GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, value) \
+ grpc_stats_inc_executor_closures_per_wakeup((exec_ctx), (int)(value))
+void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
int x);
+#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
+ grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
+extern const int grpc_stats_histo_buckets[14];
+extern const int grpc_stats_histo_start[14];
+extern const int *const grpc_stats_histo_bucket_boundaries[14];
+extern void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx,
+ int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
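For context, the macros declared in this header are what core code paths are expected to invoke; the histogram macros cast their argument to int and the generated functions clamp it to the histogram's configured max. A minimal usage sketch follows, assuming the caller already holds a grpc_exec_ctx; record_tcp_write_stats and its parameters are illustrative only and do not appear in this patch.

#include "src/core/lib/debug/stats_data.h"

/* Illustrative only: bump the syscall counter and the TCP write histograms
 * after a successful writev(). The byte count is clamped by the generated
 * grpc_stats_inc_* function, so a raw size can be passed directly. */
static void record_tcp_write_stats(grpc_exec_ctx *exec_ctx,
                                   size_t bytes_written, size_t iov_count) {
  GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx);
  GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, bytes_written);
  GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_count);
}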
diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml
index 53f6ff0074..de575f01c7 100644
--- a/src/core/lib/debug/stats_data.yaml
+++ b/src/core/lib/debug/stats_data.yaml
@@ -20,11 +20,49 @@
doc: Number of client side calls created by this process
- counter: server_calls_created
doc: Number of server side calls created by this process
+- histogram: call_initial_size
+ max: 262144
+ buckets: 64
+ doc: Initial size of the grpc_call arena created at call start
+- counter: cqs_created
+ doc: Number of completion queues created
+- counter: client_channels_created
+ doc: Number of client channels created
+- counter: client_subchannels_created
+ doc: Number of client subchannels created
+- counter: server_channels_created
+ doc: Number of server channels created
# polling
- counter: syscall_poll
doc: Number of polling syscalls (epoll_wait, poll, etc) made by this process
- counter: syscall_wait
doc: Number of sleeping syscalls made by this process
+- histogram: poll_events_returned
+ max: 1024
+ buckets: 128
+ doc: How many events are returned by each syscall_poll
+- counter: pollset_kick
+ doc: How many polling wakeups were performed by the process
+ (only valid for epoll1 right now)
+- counter: pollset_kicked_without_poller
+ doc: How many times was a polling wakeup requested without an active poller
+ (only valid for epoll1 right now)
+- counter: pollset_kicked_again
+ doc: How many times was the same polling worker awoken again before it had
+ woken up
+ (only valid for epoll1 right now)
+- counter: pollset_kick_wakeup_fd
+ doc: How many times was an eventfd used as the wakeup vector for a polling
+ wakeup
+ (only valid for epoll1 right now)
+- counter: pollset_kick_wakeup_cv
+ doc: How many times was a condition variable used as the wakeup vector for a
+ polling wakeup
+ (only valid for epoll1 right now)
+- counter: pollset_kick_own_thread
+ doc: How many times could a polling wakeup be satisfied by keeping the waking
+ thread awake?
+ (only valid for epoll1 right now)
# stats system
- counter: histogram_slow_lookups
doc: Number of times histogram increments went through the slow
@@ -54,6 +92,10 @@
max: 1024
buckets: 64
doc: Number of byte segments offered to each syscall_read
+- counter: tcp_backup_pollers_created
+ doc: Number of times a backup poller has been created (this can be expensive)
+- counter: tcp_backup_poller_polls
+ doc: Number of polls performed on the backup poller
# chttp2
- counter: http2_op_batches
doc: Number of batches received by HTTP2 transport
@@ -75,10 +117,122 @@
max: 16777216
buckets: 64
doc: Size of messages received by HTTP2 transport
+- histogram: http2_send_initial_metadata_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams whose initial metadata was written per TCP write
+- histogram: http2_send_message_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams whose payload was written per TCP write
+- histogram: http2_send_trailing_metadata_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams terminated per TCP write
+- histogram: http2_send_flowctl_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of flow control updates written per TCP write
+- counter: http2_settings_writes
+ doc: Number of settings frames sent
- counter: http2_pings_sent
doc: Number of HTTP2 pings sent by process
- counter: http2_writes_begun
doc: Number of HTTP2 writes initiated
+- counter: http2_writes_offloaded
+ doc: Number of HTTP2 writes offloaded to the executor from application threads
+- counter: http2_writes_continued
+ doc: Number of HTTP2 writes that, on finishing, found more data still needing
+ to be written
+- counter: http2_partial_writes
+ doc: Number of HTTP2 writes that were made knowing there was still more data
+ to be written (we cap the maximum size passed to each syscall_write)
+- counter: http2_initiate_write_due_to_initial_write
+ doc: Number of HTTP2 writes initiated due to 'initial_write'
+- counter: http2_initiate_write_due_to_start_new_stream
+ doc: Number of HTTP2 writes initiated due to 'start_new_stream'
+- counter: http2_initiate_write_due_to_send_message
+ doc: Number of HTTP2 writes initiated due to 'send_message'
+- counter: http2_initiate_write_due_to_send_initial_metadata
+ doc: Number of HTTP2 writes initiated due to 'send_initial_metadata'
+- counter: http2_initiate_write_due_to_send_trailing_metadata
+ doc: Number of HTTP2 writes initiated due to 'send_trailing_metadata'
+- counter: http2_initiate_write_due_to_retry_send_ping
+ doc: Number of HTTP2 writes initiated due to 'retry_send_ping'
+- counter: http2_initiate_write_due_to_continue_pings
+ doc: Number of HTTP2 writes initiated due to 'continue_pings'
+- counter: http2_initiate_write_due_to_goaway_sent
+ doc: Number of HTTP2 writes initiated due to 'goaway_sent'
+- counter: http2_initiate_write_due_to_rst_stream
+ doc: Number of HTTP2 writes initiated due to 'rst_stream'
+- counter: http2_initiate_write_due_to_close_from_api
+ doc: Number of HTTP2 writes initiated due to 'close_from_api'
+- counter: http2_initiate_write_due_to_stream_flow_control
+ doc: Number of HTTP2 writes initiated due to 'stream_flow_control'
+- counter: http2_initiate_write_due_to_transport_flow_control
+ doc: Number of HTTP2 writes initiated due to 'transport_flow_control'
+- counter: http2_initiate_write_due_to_send_settings
+ doc: Number of HTTP2 writes initiated due to 'send_settings'
+- counter: http2_initiate_write_due_to_bdp_estimator_ping
+ doc: Number of HTTP2 writes initiated due to 'bdp_estimator_ping'
+- counter: http2_initiate_write_due_to_flow_control_unstalled_by_setting
+ doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_setting'
+- counter: http2_initiate_write_due_to_flow_control_unstalled_by_update
+ doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_update'
+- counter: http2_initiate_write_due_to_application_ping
+ doc: Number of HTTP2 writes initiated due to 'application_ping'
+- counter: http2_initiate_write_due_to_keepalive_ping
+ doc: Number of HTTP2 writes initiated due to 'keepalive_ping'
+- counter: http2_initiate_write_due_to_transport_flow_control_unstalled
+ doc: Number of HTTP2 writes initiated due to 'transport_flow_control_unstalled'
+- counter: http2_initiate_write_due_to_ping_response
+ doc: Number of HTTP2 writes initiated due to 'ping_response'
+- counter: http2_initiate_write_due_to_force_rst_stream
+ doc: Number of HTTP2 writes initiated due to 'force_rst_stream'
+- counter: hpack_recv_indexed
+ doc: Number of HPACK indexed fields received
+- counter: hpack_recv_lithdr_incidx
+ doc: Number of HPACK literal headers received with incremental indexing
+- counter: hpack_recv_lithdr_incidx_v
+ doc: Number of HPACK literal headers received with incremental indexing and literal keys
+- counter: hpack_recv_lithdr_notidx
+ doc: Number of HPACK literal headers received with no indexing
+- counter: hpack_recv_lithdr_notidx_v
+ doc: Number of HPACK literal headers received with no indexing and literal keys
+- counter: hpack_recv_lithdr_nvridx
+ doc: Number of HPACK literal headers received with never-indexing
+- counter: hpack_recv_lithdr_nvridx_v
+ doc: Number of HPACK literal headers received with never-indexing and literal keys
+- counter: hpack_recv_uncompressed
+ doc: Number of uncompressed strings received in metadata
+- counter: hpack_recv_huffman
+ doc: Number of huffman encoded strings received in metadata
+- counter: hpack_recv_binary
+ doc: Number of binary strings received in metadata
+- counter: hpack_recv_binary_base64
+ doc: Number of binary strings received encoded in base64 in metadata
+- counter: hpack_send_indexed
+ doc: Number of HPACK indexed fields sent
+- counter: hpack_send_lithdr_incidx
+ doc: Number of HPACK literal headers sent with incremental indexing
+- counter: hpack_send_lithdr_incidx_v
+ doc: Number of HPACK literal headers sent with incremental indexing and literal keys
+- counter: hpack_send_lithdr_notidx
+ doc: Number of HPACK literal headers sent with no indexing
+- counter: hpack_send_lithdr_notidx_v
+ doc: Number of HPACK literal headers sent with no indexing and literal keys
+- counter: hpack_send_lithdr_nvridx
+ doc: Number of HPACK literal headers sent with never-indexing
+- counter: hpack_send_lithdr_nvridx_v
+ doc: Number of HPACK literal headers sent with never-indexing and literal keys
+- counter: hpack_send_uncompressed
+ doc: Number of uncompressed strings sent in metadata
+- counter: hpack_send_huffman
+ doc: Number of huffman encoded strings sent in metadata
+- counter: hpack_send_binary
+ doc: Number of binary strings sent in metadata
+- counter: hpack_send_binary_base64
+ doc: Number of binary strings sent encoded in base64 in metadata
# combiner locks
- counter: combiner_locks_initiated
doc: Number of combiner lock entries by process
@@ -90,11 +244,37 @@
- counter: combiner_locks_offloaded
doc: Number of combiner locks offloaded to different threads
# executor
-- counter: executor_scheduled_items
- doc: Number of closures scheduled against the executor (gRPC thread pool)
+- counter: executor_scheduled_short_items
+ doc: Number of finite runtime closures scheduled against the executor
+ (gRPC thread pool)
+- counter: executor_scheduled_long_items
+ doc: Number of potentially infinite runtime closures scheduled against the
+ executor (gRPC thread pool)
- counter: executor_scheduled_to_self
doc: Number of closures scheduled by the executor to the executor
- counter: executor_wakeup_initiated
doc: Number of thread wakeups initiated within the executor
- counter: executor_queue_drained
doc: Number of times an executor queue was drained
+- counter: executor_push_retries
+ doc: Number of times we raced and were forced to retry pushing a closure to
+ the executor
+- counter: executor_threads_created
+ doc: Size of the backing thread pool for overflow gRPC Core work
+- counter: executor_threads_used
+ doc: How many executor threads actually got used
+- histogram: executor_closures_per_wakeup
+ max: 1024
+ buckets: 64
+ doc: Number of closures executed each time an executor wakes up
+# server
+- counter: server_requested_calls
+ doc: How many calls were requested (not necessarily received) by the server
+- histogram: server_cqs_checked
+ buckets: 8
+ max: 64
+ doc: How many completion queues were checked looking for a CQ that had
+ requested the incoming call
+- counter: server_slowpath_requests_queued
+ doc: How many times was the server slow path taken (indicates too few
+ outstanding requests)
diff --git a/src/core/lib/debug/stats_data_bq_schema.sql b/src/core/lib/debug/stats_data_bq_schema.sql
new file mode 100644
index 0000000000..0611ccaff0
--- /dev/null
+++ b/src/core/lib/debug/stats_data_bq_schema.sql
@@ -0,0 +1,90 @@
+client_calls_created_per_iteration:FLOAT,
+server_calls_created_per_iteration:FLOAT,
+cqs_created_per_iteration:FLOAT,
+client_channels_created_per_iteration:FLOAT,
+client_subchannels_created_per_iteration:FLOAT,
+server_channels_created_per_iteration:FLOAT,
+syscall_poll_per_iteration:FLOAT,
+syscall_wait_per_iteration:FLOAT,
+pollset_kick_per_iteration:FLOAT,
+pollset_kicked_without_poller_per_iteration:FLOAT,
+pollset_kicked_again_per_iteration:FLOAT,
+pollset_kick_wakeup_fd_per_iteration:FLOAT,
+pollset_kick_wakeup_cv_per_iteration:FLOAT,
+pollset_kick_own_thread_per_iteration:FLOAT,
+histogram_slow_lookups_per_iteration:FLOAT,
+syscall_write_per_iteration:FLOAT,
+syscall_read_per_iteration:FLOAT,
+tcp_backup_pollers_created_per_iteration:FLOAT,
+tcp_backup_poller_polls_per_iteration:FLOAT,
+http2_op_batches_per_iteration:FLOAT,
+http2_op_cancel_per_iteration:FLOAT,
+http2_op_send_initial_metadata_per_iteration:FLOAT,
+http2_op_send_message_per_iteration:FLOAT,
+http2_op_send_trailing_metadata_per_iteration:FLOAT,
+http2_op_recv_initial_metadata_per_iteration:FLOAT,
+http2_op_recv_message_per_iteration:FLOAT,
+http2_op_recv_trailing_metadata_per_iteration:FLOAT,
+http2_settings_writes_per_iteration:FLOAT,
+http2_pings_sent_per_iteration:FLOAT,
+http2_writes_begun_per_iteration:FLOAT,
+http2_writes_offloaded_per_iteration:FLOAT,
+http2_writes_continued_per_iteration:FLOAT,
+http2_partial_writes_per_iteration:FLOAT,
+http2_initiate_write_due_to_initial_write_per_iteration:FLOAT,
+http2_initiate_write_due_to_start_new_stream_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_message_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_initial_metadata_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_trailing_metadata_per_iteration:FLOAT,
+http2_initiate_write_due_to_retry_send_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_continue_pings_per_iteration:FLOAT,
+http2_initiate_write_due_to_goaway_sent_per_iteration:FLOAT,
+http2_initiate_write_due_to_rst_stream_per_iteration:FLOAT,
+http2_initiate_write_due_to_close_from_api_per_iteration:FLOAT,
+http2_initiate_write_due_to_stream_flow_control_per_iteration:FLOAT,
+http2_initiate_write_due_to_transport_flow_control_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_settings_per_iteration:FLOAT,
+http2_initiate_write_due_to_bdp_estimator_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_flow_control_unstalled_by_setting_per_iteration:FLOAT,
+http2_initiate_write_due_to_flow_control_unstalled_by_update_per_iteration:FLOAT,
+http2_initiate_write_due_to_application_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_keepalive_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_transport_flow_control_unstalled_per_iteration:FLOAT,
+http2_initiate_write_due_to_ping_response_per_iteration:FLOAT,
+http2_initiate_write_due_to_force_rst_stream_per_iteration:FLOAT,
+hpack_recv_indexed_per_iteration:FLOAT,
+hpack_recv_lithdr_incidx_per_iteration:FLOAT,
+hpack_recv_lithdr_incidx_v_per_iteration:FLOAT,
+hpack_recv_lithdr_notidx_per_iteration:FLOAT,
+hpack_recv_lithdr_notidx_v_per_iteration:FLOAT,
+hpack_recv_lithdr_nvridx_per_iteration:FLOAT,
+hpack_recv_lithdr_nvridx_v_per_iteration:FLOAT,
+hpack_recv_uncompressed_per_iteration:FLOAT,
+hpack_recv_huffman_per_iteration:FLOAT,
+hpack_recv_binary_per_iteration:FLOAT,
+hpack_recv_binary_base64_per_iteration:FLOAT,
+hpack_send_indexed_per_iteration:FLOAT,
+hpack_send_lithdr_incidx_per_iteration:FLOAT,
+hpack_send_lithdr_incidx_v_per_iteration:FLOAT,
+hpack_send_lithdr_notidx_per_iteration:FLOAT,
+hpack_send_lithdr_notidx_v_per_iteration:FLOAT,
+hpack_send_lithdr_nvridx_per_iteration:FLOAT,
+hpack_send_lithdr_nvridx_v_per_iteration:FLOAT,
+hpack_send_uncompressed_per_iteration:FLOAT,
+hpack_send_huffman_per_iteration:FLOAT,
+hpack_send_binary_per_iteration:FLOAT,
+hpack_send_binary_base64_per_iteration:FLOAT,
+combiner_locks_initiated_per_iteration:FLOAT,
+combiner_locks_scheduled_items_per_iteration:FLOAT,
+combiner_locks_scheduled_final_items_per_iteration:FLOAT,
+combiner_locks_offloaded_per_iteration:FLOAT,
+executor_scheduled_short_items_per_iteration:FLOAT,
+executor_scheduled_long_items_per_iteration:FLOAT,
+executor_scheduled_to_self_per_iteration:FLOAT,
+executor_wakeup_initiated_per_iteration:FLOAT,
+executor_queue_drained_per_iteration:FLOAT,
+executor_push_retries_per_iteration:FLOAT,
+executor_threads_created_per_iteration:FLOAT,
+executor_threads_used_per_iteration:FLOAT,
+server_requested_calls_per_iteration:FLOAT,
+server_slowpath_requests_queued_per_iteration:FLOAT
diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h
index dd9e6a30fe..64f2e3fc33 100644
--- a/src/core/lib/debug/trace.h
+++ b/src/core/lib/debug/trace.h
@@ -35,7 +35,7 @@ typedef struct {
#else
bool value;
#endif
- char *name;
+ const char *name;
} grpc_tracer_flag;
#ifdef GRPC_THREADSAFE_TRACER
diff --git a/src/core/lib/http/format_request.c b/src/core/lib/http/format_request.c
index f887726eea..88fb0ab0b6 100644
--- a/src/core/lib/http/format_request.c
+++ b/src/core/lib/http/format_request.c
@@ -98,7 +98,7 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
gpr_strvec_destroy(&out);
if (body_bytes) {
- tmp = gpr_realloc(tmp, out_len + body_size);
+ tmp = (char *)gpr_realloc(tmp, out_len + body_size);
memcpy(tmp + out_len, body_bytes, body_size);
out_len += body_size;
}
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 77af7b7c08..db995943a9 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -130,7 +130,7 @@ static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
- internal_request *req = user_data;
+ internal_request *req = (internal_request *)user_data;
size_t i;
for (i = 0; i < req->incoming.count; i++) {
@@ -159,7 +159,7 @@ static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
}
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (error == GRPC_ERROR_NONE) {
on_written(exec_ctx, req);
} else {
@@ -175,7 +175,7 @@ static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *ep) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (!ep) {
next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -189,7 +189,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (!req->ep) {
next_address(exec_ctx, req, GRPC_ERROR_REF(error));
@@ -217,7 +217,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
grpc_schedule_on_exec_ctx);
grpc_arg arg = grpc_channel_arg_pointer_create(
- GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
+ (char *)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
grpc_resource_quota_arg_vtable());
grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
@@ -226,7 +226,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (error != GRPC_ERROR_NONE) {
finish(exec_ctx, req, GRPC_ERROR_REF(error));
return;
@@ -243,7 +243,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
const char *name, grpc_slice request_text) {
- internal_request *req = gpr_malloc(sizeof(internal_request));
+ internal_request *req =
+ (internal_request *)gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
req->request_text = request_text;
grpc_http_parser_init(&req->parser, GRPC_HTTP_RESPONSE, response);
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 97c2886525..c553fa3981 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -43,7 +43,8 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx,
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
if (c->handshaker_factory != NULL) {
- tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
+ tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory);
+ c->handshaker_factory = NULL;
}
if (c->secure_peer_name != NULL) gpr_free(c->secure_peer_name);
gpr_free(sc);
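The destroy-to-unref switch above reflects a move to reference counting for the handshaker factory: the connector now drops its reference and clears the pointer instead of freeing the object outright. A generic sketch of that ownership shape (this is not the tsi API, just an illustration of the pattern):

#include <stdlib.h>

typedef struct {
  int refcount; /* not thread safe; a real implementation uses atomics */
} factory;

static factory *factory_ref(factory *f) {
  if (f != NULL) f->refcount++;
  return f;
}

static void factory_unref(factory *f) {
  if (f != NULL && --f->refcount == 0) free(f);
}

typedef struct {
  factory *handshaker_factory;
} connector;

/* Mirrors the destroy path above: release our reference and null the
   field so a later destroy cannot touch freed memory. */
static void connector_destroy(connector *c) {
  if (c->handshaker_factory != NULL) {
    factory_unref(c->handshaker_factory);
    c->handshaker_factory = NULL;
  }
  free(c);
}

int main(void) {
  factory *f = (factory *)calloc(1, sizeof(*f));
  connector *c = (connector *)calloc(1, sizeof(*c));
  c->handshaker_factory = factory_ref(f);
  connector_destroy(c);
  return 0;
}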
diff --git a/src/core/lib/http/parser.c b/src/core/lib/http/parser.c
index 9c5e93f4e5..0950bd655e 100644
--- a/src/core/lib/http/parser.c
+++ b/src/core/lib/http/parser.c
@@ -28,7 +28,7 @@
grpc_tracer_flag grpc_http1_trace = GRPC_TRACER_INITIALIZER(false, "http1");
static char *buf2str(void *buffer, size_t length) {
- char *out = gpr_malloc(length + 1);
+ char *out = (char *)gpr_malloc(length + 1);
memcpy(out, buffer, length);
out[length] = 0;
return out;
@@ -197,7 +197,8 @@ static grpc_error *add_header(grpc_http_parser *parser) {
if (*hdr_count == parser->hdr_capacity) {
parser->hdr_capacity =
GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2);
- *hdrs = gpr_realloc(*hdrs, parser->hdr_capacity * sizeof(**hdrs));
+ *hdrs = (grpc_http_header *)gpr_realloc(
+ *hdrs, parser->hdr_capacity * sizeof(**hdrs));
}
(*hdrs)[(*hdr_count)++] = hdr;
@@ -255,7 +256,7 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
if (*body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
- *body = gpr_realloc((void *)*body, parser->body_capacity);
+ *body = (char *)gpr_realloc((void *)*body, parser->body_capacity);
}
(*body)[*body_length] = (char)byte;
(*body_length)++;
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index 26f9cbe0fa..00edefc6ae 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -109,7 +109,7 @@ typedef struct {
static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- wrapped_closure *wc = arg;
+ wrapped_closure *wc = (wrapped_closure *)arg;
grpc_iomgr_cb_func cb = wc->cb;
void *cb_arg = wc->cb_arg;
gpr_free(wc);
@@ -124,7 +124,7 @@ grpc_closure *grpc_closure_create(const char *file, int line,
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler) {
#endif
- wrapped_closure *wc = gpr_malloc(sizeof(*wc));
+ wrapped_closure *wc = (wrapped_closure *)gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
#ifndef NDEBUG
@@ -167,7 +167,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != NULL) {
#ifndef NDEBUG
- GPR_ASSERT(!c->scheduled);
+ if (c->scheduled) {
+ gpr_log(GPR_ERROR,
+ "Closure already scheduled. (closure: %p, created: [%s:%d], "
+ "previously scheduled at: [%s: %d] run?: %s",
+ c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated, c->run ? "true" : "false");
+ abort();
+ }
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
@@ -191,7 +198,14 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
#ifndef NDEBUG
- GPR_ASSERT(!c->scheduled);
+ if (c->scheduled) {
+ gpr_log(GPR_ERROR,
+ "Closure already scheduled. (closure: %p, created: [%s:%d], "
+ "previously scheduled at: [%s: %d] run?: %s",
+ c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated, c->run ? "true" : "false");
+ abort();
+ }
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
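The two hunks above replace a bare assertion with a log-then-abort path, so a double-scheduled closure reports where it was created and where it was last scheduled before crashing. A reduced sketch of that debug-only bookkeeping (field and function names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  bool scheduled;
  const char *file_initiated;
  int line_initiated;
} debug_closure;

/* Debug-build guard: record the scheduling site and refuse to schedule
   a closure that is already in flight, printing both sites first. */
static void debug_sched(debug_closure *c, const char *file, int line) {
  if (c->scheduled) {
    fprintf(stderr,
            "closure %p already scheduled at [%s:%d], rescheduled at [%s:%d]\n",
            (void *)c, c->file_initiated, c->line_initiated, file, line);
    abort();
  }
  c->scheduled = true;
  c->file_initiated = file;
  c->line_initiated = line;
}

int main(void) {
  debug_closure c = {false, NULL, 0};
  debug_sched(&c, __FILE__, __LINE__); /* first schedule: ok */
  c.scheduled = false;                 /* pretend the closure ran */
  debug_sched(&c, __FILE__, __LINE__); /* ok again after it ran */
  return 0;
}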
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 3d42f6d920..f899b25f10 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -81,7 +81,8 @@ grpc_combiner *grpc_combiner_create(void) {
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
- GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler);
+ GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -355,7 +356,8 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error) {
- combiner_finally_exec(exec_ctx, closure, GRPC_ERROR_REF(error));
+ combiner_finally_exec(exec_ctx, (grpc_closure *)closure,
+ GRPC_ERROR_REF(error));
}
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 3759dda992..aa05501537 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -211,7 +211,7 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
#ifndef NDEBUG
grpc_error *orig = *err;
#endif
- *err = gpr_realloc(
+ *err = (grpc_error *)gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
@@ -278,13 +278,13 @@ static void internal_set_time(grpc_error **err, grpc_error_times which,
memcpy((*err)->arena + slot, &value, sizeof(value));
}
-static void internal_add_error(grpc_error **err, grpc_error *new) {
- grpc_linked_error new_last = {new, UINT8_MAX};
+static void internal_add_error(grpc_error **err, grpc_error *new_err) {
+ grpc_linked_error new_last = {new_err, UINT8_MAX};
uint8_t slot = get_placement(err, sizeof(grpc_linked_error));
if (slot == UINT8_MAX) {
- gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err, new,
- grpc_error_string(new));
- GRPC_ERROR_UNREF(new);
+ gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err,
+ new_err, grpc_error_string(new_err));
+ GRPC_ERROR_UNREF(new_err);
return;
}
if ((*err)->first_err == UINT8_MAX) {
@@ -321,8 +321,8 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
uint8_t initial_arena_capacity = (uint8_t)(
DEFAULT_ERROR_CAPACITY +
(uint8_t)(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY);
- grpc_error *err =
- gpr_malloc(sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
+ grpc_error *err = (grpc_error *)gpr_malloc(
+ sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL
return GRPC_ERROR_OOM;
}
@@ -406,7 +406,8 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) {
new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
}
- out = gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t));
+ out = (grpc_error *)gpr_malloc(sizeof(*in) +
+ new_arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
@@ -431,10 +432,10 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
intptr_t value) {
GPR_TIMER_BEGIN("grpc_error_set_int", 0);
- grpc_error *new = copy_error_and_unref(src);
- internal_set_int(&new, which, value);
+ grpc_error *new_err = copy_error_and_unref(src);
+ internal_set_int(&new_err, which, value);
GPR_TIMER_END("grpc_error_set_int", 0);
- return new;
+ return new_err;
}
typedef struct {
@@ -476,10 +477,10 @@ bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
grpc_slice str) {
GPR_TIMER_BEGIN("grpc_error_set_str", 0);
- grpc_error *new = copy_error_and_unref(src);
- internal_set_str(&new, which, str);
+ grpc_error *new_err = copy_error_and_unref(src);
+ internal_set_str(&new_err, which, str);
GPR_TIMER_END("grpc_error_set_str", 0);
- return new;
+ return new_err;
}
bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
@@ -506,10 +507,10 @@ bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) {
GPR_TIMER_BEGIN("grpc_error_add_child", 0);
- grpc_error *new = copy_error_and_unref(src);
- internal_add_error(&new, child);
+ grpc_error *new_err = copy_error_and_unref(src);
+ internal_add_error(&new_err, child);
GPR_TIMER_END("grpc_error_add_child", 0);
- return new;
+ return new_err;
}
static const char *no_error_string = "\"No Error\"";
@@ -530,7 +531,7 @@ typedef struct {
static void append_chr(char c, char **s, size_t *sz, size_t *cap) {
if (*sz == *cap) {
*cap = GPR_MAX(8, 3 * *cap / 2);
- *s = gpr_realloc(*s, *cap);
+ *s = (char *)gpr_realloc(*s, *cap);
}
(*s)[(*sz)++] = c;
}
@@ -582,7 +583,8 @@ static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
static void append_kv(kv_pairs *kvs, char *key, char *value) {
if (kvs->num_kvs == kvs->cap_kvs) {
kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4);
- kvs->kvs = gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
+ kvs->kvs =
+ (kv_pair *)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
}
kvs->kvs[kvs->num_kvs].key = key;
kvs->kvs[kvs->num_kvs].value = value;
@@ -639,7 +641,7 @@ static char *key_time(grpc_error_times which) {
static char *fmt_time(gpr_timespec tm) {
char *out;
- char *pfx = "!!";
+ const char *pfx = "!!";
switch (tm.clock_type) {
case GPR_CLOCK_MONOTONIC:
pfx = "@monotonic:";
@@ -695,8 +697,8 @@ static char *errs_string(grpc_error *err) {
}
static int cmp_kvs(const void *a, const void *b) {
- const kv_pair *ka = a;
- const kv_pair *kb = b;
+ const kv_pair *ka = (const kv_pair *)a;
+ const kv_pair *kb = (const kv_pair *)b;
return strcmp(ka->key, kb->key);
}
@@ -731,7 +733,7 @@ const char *grpc_error_string(grpc_error *err) {
void *p = (void *)gpr_atm_acq_load(&err->atomics.error_string);
if (p != NULL) {
GPR_TIMER_END("grpc_error_string", 0);
- return p;
+ return (const char *)p;
}
kv_pairs kvs;
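Renaming the local `new` to `new_err` throughout error.c serves the same dual-compilation goal as the casts: `new` is a keyword in C++, so a C file that uses it as an identifier cannot be built with a C++ compiler. A trivial example of the rename, with hypothetical names:

#include <stdlib.h>
#include <string.h>

typedef struct {
  char msg[64];
} err_rec;

/* 'new_err' instead of 'new': legal in both C and C++. */
static err_rec *err_copy(const err_rec *src) {
  err_rec *new_err = (err_rec *)malloc(sizeof(*new_err));
  if (new_err != NULL) memcpy(new_err, src, sizeof(*src));
  return new_err;
}

int main(void) {
  err_rec e;
  strcpy(e.msg, "example");
  err_rec *copy = err_copy(&e);
  free(copy);
  return 0;
}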
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c
index b76eb9e1c9..3ac12ab56f 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.c
+++ b/src/core/lib/iomgr/ev_epoll1_linux.c
@@ -145,7 +145,7 @@ static const char *kick_state_string(kick_state st) {
}
struct grpc_pollset_worker {
- kick_state kick_state;
+ kick_state state;
int kick_state_mutator; // which line of code last changed kick state
bool initialized_cv;
grpc_pollset_worker *next;
@@ -154,24 +154,24 @@ struct grpc_pollset_worker {
grpc_closure_list schedule_on_end_work;
};
-#define SET_KICK_STATE(worker, state) \
+#define SET_KICK_STATE(worker, kick_state) \
do { \
- (worker)->kick_state = (state); \
+ (worker)->state = (kick_state); \
(worker)->kick_state_mutator = __LINE__; \
} while (false)
-#define MAX_NEIGHBOURHOODS 1024
+#define MAX_NEIGHBORHOODS 1024
-typedef struct pollset_neighbourhood {
+typedef struct pollset_neighborhood {
gpr_mu mu;
grpc_pollset *active_root;
char pad[GPR_CACHELINE_SIZE];
-} pollset_neighbourhood;
+} pollset_neighborhood;
struct grpc_pollset {
gpr_mu mu;
- pollset_neighbourhood *neighbourhood;
- bool reassigning_neighbourhood;
+ pollset_neighborhood *neighborhood;
+ bool reassigning_neighborhood;
grpc_pollset_worker *root_worker;
bool kicked_without_poller;
@@ -260,7 +260,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
new_fd->fd = fd;
@@ -280,8 +280,9 @@ static grpc_fd *fd_create(int fd, const char *name) {
#endif
gpr_free(fd_name);
- struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
- .data.ptr = new_fd};
+ struct epoll_event ev;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
+ ev.data.ptr = new_fd;
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
}
@@ -384,8 +385,8 @@ GPR_TLS_DECL(g_current_thread_worker);
/* The designated poller */
static gpr_atm g_active_poller;
-static pollset_neighbourhood *g_neighbourhoods;
-static size_t g_num_neighbourhoods;
+static pollset_neighborhood *g_neighborhoods;
+static size_t g_num_neighborhoods;
/* Return true if first in list */
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
@@ -424,8 +425,8 @@ static worker_remove_result worker_remove(grpc_pollset *pollset,
}
}
-static size_t choose_neighbourhood(void) {
- return (size_t)gpr_cpu_current_cpu() % g_num_neighbourhoods;
+static size_t choose_neighborhood(void) {
+ return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
}
static grpc_error *pollset_global_init(void) {
@@ -435,17 +436,18 @@ static grpc_error *pollset_global_init(void) {
global_wakeup_fd.read_fd = -1;
grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
if (err != GRPC_ERROR_NONE) return err;
- struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
- .data.ptr = &global_wakeup_fd};
+ struct epoll_event ev;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.data.ptr = &global_wakeup_fd;
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
&ev) != 0) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
- g_num_neighbourhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBOURHOODS);
- g_neighbourhoods =
- gpr_zalloc(sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
- for (size_t i = 0; i < g_num_neighbourhoods; i++) {
- gpr_mu_init(&g_neighbourhoods[i].mu);
+ g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
+ g_neighborhoods = (pollset_neighborhood *)gpr_zalloc(
+ sizeof(*g_neighborhoods) * g_num_neighborhoods);
+ for (size_t i = 0; i < g_num_neighborhoods; i++) {
+ gpr_mu_init(&g_neighborhoods[i].mu);
}
return GRPC_ERROR_NONE;
}
@@ -454,17 +456,17 @@ static void pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_pollset);
gpr_tls_destroy(&g_current_thread_worker);
if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
- for (size_t i = 0; i < g_num_neighbourhoods; i++) {
- gpr_mu_destroy(&g_neighbourhoods[i].mu);
+ for (size_t i = 0; i < g_num_neighborhoods; i++) {
+ gpr_mu_destroy(&g_neighborhoods[i].mu);
}
- gpr_free(g_neighbourhoods);
+ gpr_free(g_neighborhoods);
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
gpr_mu_init(&pollset->mu);
*mu = &pollset->mu;
- pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
- pollset->reassigning_neighbourhood = false;
+ pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
+ pollset->reassigning_neighborhood = false;
pollset->root_worker = NULL;
pollset->kicked_without_poller = false;
pollset->seen_inactive = true;
@@ -477,47 +479,52 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
- pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
+ pollset_neighborhood *neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
- retry_lock_neighbourhood:
- gpr_mu_lock(&neighbourhood->mu);
+ retry_lock_neighborhood:
+ gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
- if (pollset->neighbourhood != neighbourhood) {
- gpr_mu_unlock(&neighbourhood->mu);
- neighbourhood = pollset->neighbourhood;
+ if (pollset->neighborhood != neighborhood) {
+ gpr_mu_unlock(&neighborhood->mu);
+ neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
- goto retry_lock_neighbourhood;
+ goto retry_lock_neighborhood;
}
pollset->prev->next = pollset->next;
pollset->next->prev = pollset->prev;
- if (pollset == pollset->neighbourhood->active_root) {
- pollset->neighbourhood->active_root =
+ if (pollset == pollset->neighborhood->active_root) {
+ pollset->neighborhood->active_root =
pollset->next == pollset ? NULL : pollset->next;
}
}
- gpr_mu_unlock(&pollset->neighbourhood->mu);
+ gpr_mu_unlock(&pollset->neighborhood->mu);
}
gpr_mu_unlock(&pollset->mu);
gpr_mu_destroy(&pollset->mu);
}
-static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
+static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset) {
GPR_TIMER_BEGIN("pollset_kick_all", 0);
grpc_error *error = GRPC_ERROR_NONE;
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
- switch (worker->kick_state) {
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ switch (worker->state) {
case KICKED:
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
break;
case UNKICKED:
SET_KICK_STATE(worker, KICKED);
if (worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->cv);
}
break;
case DESIGNATED_POLLER:
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
SET_KICK_STATE(worker, KICKED);
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
"pollset_kick_all");
@@ -550,7 +557,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutdown_closure = closure;
pollset->shutting_down = true;
- GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
+ GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
pollset_maybe_finish_shutdown(exec_ctx, pollset);
GPR_TIMER_END("pollset_shutdown", 0);
}
@@ -567,7 +574,10 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
}
static const gpr_timespec round_up = {
- .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
+ 0, /* tv_sec */
+ GPR_NS_PER_MS - 1, /* tv_nsec */
+ GPR_TIMESPAN /* clock_type */
+ };
timeout = gpr_time_sub(deadline, now);
int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
return millis >= 1 ? millis : 1;
@@ -646,6 +656,8 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
+ GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
+
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
}
@@ -675,77 +687,77 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
// pollset has been observed to be inactive, we need to move back to the
// active list
bool is_reassigning = false;
- if (!pollset->reassigning_neighbourhood) {
+ if (!pollset->reassigning_neighborhood) {
is_reassigning = true;
- pollset->reassigning_neighbourhood = true;
- pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
+ pollset->reassigning_neighborhood = true;
+ pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
}
- pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
+ pollset_neighborhood *neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
// pollset unlocked: state may change (even worker->kick_state)
- retry_lock_neighbourhood:
- gpr_mu_lock(&neighbourhood->mu);
+ retry_lock_neighborhood:
+ gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
- pollset, worker, kick_state_string(worker->kick_state),
+ pollset, worker, kick_state_string(worker->state),
is_reassigning);
}
if (pollset->seen_inactive) {
- if (neighbourhood != pollset->neighbourhood) {
- gpr_mu_unlock(&neighbourhood->mu);
- neighbourhood = pollset->neighbourhood;
+ if (neighborhood != pollset->neighborhood) {
+ gpr_mu_unlock(&neighborhood->mu);
+ neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
- goto retry_lock_neighbourhood;
+ goto retry_lock_neighborhood;
}
/* In the brief time we released the pollset locks above, the worker MAY
have been kicked. In this case, the worker should get out of this
pollset ASAP and hence this should neither add the pollset to
- neighbourhood nor mark the pollset as active.
+ neighborhood nor mark the pollset as active.
On a side note, the only way a worker's kick state could have changed
at this point is if it were "kicked specifically". Since the worker has
not added itself to the pollset yet (by calling worker_insert()), it is
not visible in the "kick any" path yet */
- if (worker->kick_state == UNKICKED) {
+ if (worker->state == UNKICKED) {
pollset->seen_inactive = false;
- if (neighbourhood->active_root == NULL) {
- neighbourhood->active_root = pollset->next = pollset->prev = pollset;
+ if (neighborhood->active_root == NULL) {
+ neighborhood->active_root = pollset->next = pollset->prev = pollset;
/* Make this the designated poller if there isn't one already */
- if (worker->kick_state == UNKICKED &&
+ if (worker->state == UNKICKED &&
gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
SET_KICK_STATE(worker, DESIGNATED_POLLER);
}
} else {
- pollset->next = neighbourhood->active_root;
+ pollset->next = neighborhood->active_root;
pollset->prev = pollset->next->prev;
pollset->next->prev = pollset->prev->next = pollset;
}
}
}
if (is_reassigning) {
- GPR_ASSERT(pollset->reassigning_neighbourhood);
- pollset->reassigning_neighbourhood = false;
+ GPR_ASSERT(pollset->reassigning_neighborhood);
+ pollset->reassigning_neighborhood = false;
}
- gpr_mu_unlock(&neighbourhood->mu);
+ gpr_mu_unlock(&neighborhood->mu);
}
worker_insert(pollset, worker);
pollset->begin_refs--;
- if (worker->kick_state == UNKICKED && !pollset->kicked_without_poller) {
+ if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
- while (worker->kick_state == UNKICKED && !pollset->shutting_down) {
+ while (worker->state == UNKICKED && !pollset->shutting_down) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
- pollset, worker, kick_state_string(worker->kick_state),
+ pollset, worker, kick_state_string(worker->state),
pollset->shutting_down);
}
if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
- worker->kick_state == UNKICKED) {
+ worker->state == UNKICKED) {
/* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
received a kick */
SET_KICK_STATE(worker, KICKED);
@@ -758,12 +770,12 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_log(GPR_ERROR,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
- pollset, worker, kick_state_string(worker->kick_state),
+ pollset, worker, kick_state_string(worker->state),
pollset->shutting_down, pollset->kicked_without_poller);
}
/* We release pollset lock in this function at a couple of places:
- * 1. Briefly when assigning pollset to a neighbourhood
+ * 1. Briefly when assigning pollset to a neighborhood
* 2. When doing gpr_cv_wait()
* It is possible that 'kicked_without_poller' was set to true during (1) and
* 'shutting_down' is set to true during (1) or (2). If either of them is
@@ -778,15 +790,15 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
}
GPR_TIMER_END("begin_worker", 0);
- return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down;
+ return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}
-static bool check_neighbourhood_for_available_poller(
- pollset_neighbourhood *neighbourhood) {
- GPR_TIMER_BEGIN("check_neighbourhood_for_available_poller", 0);
+static bool check_neighborhood_for_available_poller(
+ grpc_exec_ctx *exec_ctx, pollset_neighborhood *neighborhood) {
+ GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
bool found_worker = false;
do {
- grpc_pollset *inspect = neighbourhood->active_root;
+ grpc_pollset *inspect = neighborhood->active_root;
if (inspect == NULL) {
break;
}
@@ -795,7 +807,7 @@ static bool check_neighbourhood_for_available_poller(
grpc_pollset_worker *inspect_worker = inspect->root_worker;
if (inspect_worker != NULL) {
do {
- switch (inspect_worker->kick_state) {
+ switch (inspect_worker->state) {
case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
@@ -806,6 +818,7 @@ static bool check_neighbourhood_for_available_poller(
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
GPR_TIMER_MARK("signal worker", 0);
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&inspect_worker->cv);
}
} else {
@@ -831,8 +844,8 @@ static bool check_neighbourhood_for_available_poller(
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
}
inspect->seen_inactive = true;
- if (inspect == neighbourhood->active_root) {
- neighbourhood->active_root =
+ if (inspect == neighborhood->active_root) {
+ neighborhood->active_root =
inspect->next == inspect ? NULL : inspect->next;
}
inspect->next->prev = inspect->prev;
@@ -841,7 +854,7 @@ static bool check_neighbourhood_for_available_poller(
}
gpr_mu_unlock(&inspect->mu);
} while (!found_worker);
- GPR_TIMER_END("check_neighbourhood_for_available_poller", 0);
+ GPR_TIMER_END("check_neighborhood_for_available_poller", 0);
return found_worker;
}
@@ -858,13 +871,14 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure_list_move(&worker->schedule_on_end_work,
&exec_ctx->closure_list);
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
- if (worker->next != worker && worker->next->kick_state == UNKICKED) {
+ if (worker->next != worker && worker->next->state == UNKICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
}
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->next->cv);
if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu);
@@ -873,32 +887,33 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
gpr_atm_no_barrier_store(&g_active_poller, 0);
- size_t poller_neighbourhood_idx =
- (size_t)(pollset->neighbourhood - g_neighbourhoods);
+ size_t poller_neighborhood_idx =
+ (size_t)(pollset->neighborhood - g_neighborhoods);
gpr_mu_unlock(&pollset->mu);
bool found_worker = false;
- bool scan_state[MAX_NEIGHBOURHOODS];
- for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
- pollset_neighbourhood *neighbourhood =
- &g_neighbourhoods[(poller_neighbourhood_idx + i) %
- g_num_neighbourhoods];
- if (gpr_mu_trylock(&neighbourhood->mu)) {
+ bool scan_state[MAX_NEIGHBORHOODS];
+ for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
+ pollset_neighborhood *neighborhood =
+ &g_neighborhoods[(poller_neighborhood_idx + i) %
+ g_num_neighborhoods];
+ if (gpr_mu_trylock(&neighborhood->mu)) {
found_worker =
- check_neighbourhood_for_available_poller(neighbourhood);
- gpr_mu_unlock(&neighbourhood->mu);
+ check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+ gpr_mu_unlock(&neighborhood->mu);
scan_state[i] = true;
} else {
scan_state[i] = false;
}
}
- for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
+ for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
if (scan_state[i]) continue;
- pollset_neighbourhood *neighbourhood =
- &g_neighbourhoods[(poller_neighbourhood_idx + i) %
- g_num_neighbourhoods];
- gpr_mu_lock(&neighbourhood->mu);
- found_worker = check_neighbourhood_for_available_poller(neighbourhood);
- gpr_mu_unlock(&neighbourhood->mu);
+ pollset_neighborhood *neighborhood =
+ &g_neighborhoods[(poller_neighborhood_idx + i) %
+ g_num_neighborhoods];
+ gpr_mu_lock(&neighborhood->mu);
+ found_worker =
+ check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+ gpr_mu_unlock(&neighborhood->mu);
}
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
@@ -979,9 +994,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
return error;
}
-static grpc_error *pollset_kick(grpc_pollset *pollset,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
grpc_error *ret_err = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_strvec log;
@@ -994,14 +1010,14 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_strvec_add(&log, tmp);
if (pollset->root_worker != NULL) {
gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
- kick_state_string(pollset->root_worker->kick_state),
+ kick_state_string(pollset->root_worker->state),
pollset->root_worker->next,
- kick_state_string(pollset->root_worker->next->kick_state));
+ kick_state_string(pollset->root_worker->next->state));
gpr_strvec_add(&log, tmp);
}
if (specific_worker != NULL) {
gpr_asprintf(&tmp, " worker_kick_state=%s",
- kick_state_string(specific_worker->kick_state));
+ kick_state_string(specific_worker->state));
gpr_strvec_add(&log, tmp);
}
tmp = gpr_strvec_flatten(&log, NULL);
@@ -1014,6 +1030,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
grpc_pollset_worker *root_worker = pollset->root_worker;
if (root_worker == NULL) {
+ GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
pollset->kicked_without_poller = true;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked_without_poller");
@@ -1021,13 +1038,15 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
goto done;
}
grpc_pollset_worker *next_worker = root_worker->next;
- if (root_worker->kick_state == KICKED) {
+ if (root_worker->state == KICKED) {
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
goto done;
- } else if (next_worker->kick_state == KICKED) {
+ } else if (next_worker->state == KICKED) {
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
}
@@ -1038,13 +1057,15 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
// there is no next worker
root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
&g_active_poller)) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
- } else if (next_worker->kick_state == UNKICKED) {
+ } else if (next_worker->state == UNKICKED) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
}
@@ -1052,8 +1073,8 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
SET_KICK_STATE(next_worker, KICKED);
gpr_cv_signal(&next_worker->cv);
goto done;
- } else if (next_worker->kick_state == DESIGNATED_POLLER) {
- if (root_worker->kick_state != DESIGNATED_POLLER) {
+ } else if (next_worker->state == DESIGNATED_POLLER) {
+ if (root_worker->state != DESIGNATED_POLLER) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(
GPR_ERROR,
@@ -1062,10 +1083,12 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
}
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&root_worker->cv);
}
goto done;
} else {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
@@ -1075,11 +1098,13 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
goto done;
}
} else {
- GPR_ASSERT(next_worker->kick_state == KICKED);
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+ GPR_ASSERT(next_worker->state == KICKED);
SET_KICK_STATE(next_worker, KICKED);
goto done;
}
} else {
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked while waking up");
}
@@ -1089,13 +1114,14 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
GPR_UNREACHABLE_CODE(goto done);
}
- if (specific_worker->kick_state == KICKED) {
+ if (specific_worker->state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. specific worker already kicked");
}
goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
}
@@ -1103,6 +1129,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
goto done;
} else if (specific_worker ==
(grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick active poller");
}
@@ -1110,6 +1137,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (specific_worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick waiting worker");
}
@@ -1117,6 +1145,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_cv_signal(&specific_worker->cv);
goto done;
} else {
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick non-waiting worker");
}
@@ -1173,34 +1202,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
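Several hunks in this file trade C99 designated initializers (the `.events = ..., .data.ptr = ...` epoll_event setup and the `.pollset_size = ...` vtable) for member-order assignment or positional initialization with trailing comments, since designated initializers are not accepted by C++ compilers before C++20 or by older MSVC. A compact illustration of the two forms, using hypothetical types:

#include <stddef.h>

typedef struct {
  size_t size;
  int (*open_fn)(const char *);
  void (*close_fn)(int);
} example_vtable;

static int open_stub(const char *name) { (void)name; return -1; }
static void close_stub(int fd) { (void)fd; }

/* C99 designated initializers: order-independent, but rejected by C++
   compilers that predate C++20. */
#if defined(EXAMPLE_USE_DESIGNATED) && !defined(__cplusplus)
static const example_vtable vtable = {
    .size = sizeof(example_vtable),
    .open_fn = open_stub,
    .close_fn = close_stub,
};
#else
/* Positional initialization: valid in both C and C++, but every value
   must appear in declaration order, hence the trailing comments in the
   diff above. */
static const example_vtable vtable = {
    sizeof(example_vtable), /* size */
    open_stub,              /* open_fn */
    close_stub,             /* close_fn */
};
#endif

int main(void) { return vtable.open_fn("x") == -1 ? 0 : 1; }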
diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c
index 277347ac70..8eb4de44d9 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.c
+++ b/src/core/lib/iomgr/ev_epollex_linux.c
@@ -142,7 +142,7 @@ static grpc_error *pollable_materialize(pollable *p);
*/
struct grpc_fd {
- pollable pollable;
+ pollable pollable_obj;
int fd;
/* refst format:
bit 0 : 1=Active / 0=Orphaned
@@ -193,15 +193,15 @@ struct grpc_pollset_worker {
pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
gpr_cv cv;
grpc_pollset *pollset;
- pollable *pollable;
+ pollable *pollable_obj;
};
#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5
struct grpc_pollset {
- pollable pollable;
- pollable *current_pollable;
+ pollable pollable_obj;
+ pollable *current_pollable_obj;
int kick_alls_pending;
bool kicked_without_poller;
grpc_closure *shutdown_closure;
@@ -282,7 +282,7 @@ static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_fd *fd = (grpc_fd *)arg;
/* Add the fd to the freelist */
grpc_iomgr_unregister_object(&fd->iomgr_object);
- pollable_destroy(&fd->pollable);
+ pollable_destroy(&fd->pollable_obj);
gpr_mu_destroy(&fd->orphaned_mu);
gpr_mu_lock(&fd_freelist_mu);
fd->freelist_next = fd_freelist;
@@ -343,7 +343,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
- pollable_init(&new_fd->pollable, PO_FD);
+ pollable_init(&new_fd->pollable_obj, PO_FD);
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
@@ -385,7 +385,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
bool is_fd_closed = already_closed;
grpc_error *error = GRPC_ERROR_NONE;
- gpr_mu_lock(&fd->pollable.po.mu);
+ gpr_mu_lock(&fd->pollable_obj.po.mu);
gpr_mu_lock(&fd->orphaned_mu);
fd->on_done_closure = on_done;
@@ -411,7 +411,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->orphaned_mu);
- gpr_mu_unlock(&fd->pollable.po.mu);
+ gpr_mu_unlock(&fd->pollable_obj.po.mu);
UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
GRPC_ERROR_UNREF(error);
@@ -477,8 +477,9 @@ static grpc_error *pollable_materialize(pollable *p) {
close(new_epfd);
return err;
}
- struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
- .data.ptr = (void *)(1 | (intptr_t)&p->wakeup)};
+ struct epoll_event ev;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.data.ptr = (void *)(1 | (intptr_t)&p->wakeup);
if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) {
err = GRPC_OS_ERROR(errno, "epoll_ctl");
close(new_epfd);
@@ -507,9 +508,9 @@ static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
gpr_mu_unlock(&fd->orphaned_mu);
return GRPC_ERROR_NONE;
}
- struct epoll_event ev_fd = {
- .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE),
- .data.ptr = fd};
+ struct epoll_event ev_fd;
+ ev_fd.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
+ ev_fd.data.ptr = fd;
if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
switch (errno) {
case EEXIST:
@@ -557,30 +558,34 @@ static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_unused) {
grpc_error *error = GRPC_ERROR_NONE;
grpc_pollset *pollset = (grpc_pollset *)arg;
- gpr_mu_lock(&pollset->pollable.po.mu);
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_lock(&worker->pollable->po.mu);
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ if (worker->pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_lock(&worker->pollable_obj->po.mu);
}
if (worker->initialized_cv && worker != pollset->root_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_cv %p (pollable %p vs %p)",
- pollset, worker, &pollset->pollable, worker->pollable);
+ pollset, worker, &pollset->pollable_obj,
+ worker->pollable_obj);
}
worker->kicked = true;
gpr_cv_signal(&worker->cv);
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_wakeup %p (pollable %p vs %p)",
- pollset, worker, &pollset->pollable, worker->pollable);
+ pollset, worker, &pollset->pollable_obj,
+ worker->pollable_obj);
}
- append_error(&error, grpc_wakeup_fd_wakeup(&worker->pollable->wakeup),
+ append_error(&error,
+ grpc_wakeup_fd_wakeup(&worker->pollable_obj->wakeup),
"pollset_shutdown");
}
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker->pollable->po.mu);
+ if (worker->pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&worker->pollable_obj->po.mu);
}
worker = worker->links[PWL_POLLSET].next;
@@ -588,7 +593,7 @@ static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
}
pollset->kick_alls_pending--;
pollset_maybe_finish_shutdown(exec_ctx, pollset);
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
GRPC_LOG_IF_ERROR("kick_all", error);
}
@@ -662,26 +667,27 @@ static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p,
}
/* p->po.mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *pollset,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
- pollable *p = pollset->current_pollable;
- if (p != &pollset->pollable) {
+ pollable *p = pollset->current_pollable_obj;
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ if (p != &pollset->pollable_obj) {
gpr_mu_lock(&p->po.mu);
}
grpc_error *error = pollset_kick_inner(pollset, p, specific_worker);
- if (p != &pollset->pollable) {
+ if (p != &pollset->pollable_obj) {
gpr_mu_unlock(&p->po.mu);
}
return error;
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- pollable_init(&pollset->pollable, PO_POLLSET);
- pollset->current_pollable = &g_empty_pollable;
+ pollable_init(&pollset->pollable_obj, PO_POLLSET);
+ pollset->current_pollable_obj = &g_empty_pollable;
pollset->kicked_without_poller = false;
pollset->shutdown_closure = NULL;
pollset->root_worker = NULL;
- *mu = &pollset->pollable.po.mu;
+ *mu = &pollset->pollable_obj.po.mu;
}
/* Convert a timespec to milliseconds:
@@ -703,7 +709,10 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
}
static const gpr_timespec round_up = {
- .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
+ 0, /* tv_sec */
+ GPR_NS_PER_MS - 1, /* tv_nsec */
+ GPR_TIMESPAN /* clock_type */
+ };
timeout = gpr_time_sub(deadline, now);
int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
return millis >= 1 ? millis : 1;
@@ -729,8 +738,8 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "fd_become_pollable";
- if (append_error(&error, pollable_materialize(&fd->pollable), err_desc)) {
- append_error(&error, pollable_add_fd(&fd->pollable, fd), err_desc);
+ if (append_error(&error, pollable_materialize(&fd->pollable_obj), err_desc)) {
+ append_error(&error, pollable_add_fd(&fd->pollable_obj, fd), err_desc);
}
return error;
}
@@ -745,7 +754,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) {
- return p != &g_empty_pollable && p != &pollset->pollable;
+ return p != &g_empty_pollable && p != &pollset->pollable_obj;
}
static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
@@ -762,8 +771,9 @@ static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
- append_error(&error, grpc_wakeup_fd_consume_wakeup(
- (void *)((~(intptr_t)1) & (intptr_t)data_ptr)),
+ append_error(&error,
+ grpc_wakeup_fd_consume_wakeup(
+ (grpc_wakeup_fd *)((~(intptr_t)1) & (intptr_t)data_ptr)),
err_desc);
} else {
grpc_fd *fd = (grpc_fd *)data_ptr;
@@ -790,9 +800,9 @@ static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- pollable_destroy(&pollset->pollable);
- if (pollset_is_pollable_fd(pollset, pollset->current_pollable)) {
- UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable, 2,
+ pollable_destroy(&pollset->pollable_obj);
+ if (pollset_is_pollable_fd(pollset, pollset->current_pollable_obj)) {
+ UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable_obj, 2,
"pollset_pollable");
}
GRPC_LOG_IF_ERROR("pollset_process_events",
@@ -882,68 +892,69 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
worker->initialized_cv = false;
worker->kicked = false;
worker->pollset = pollset;
- worker->pollable = pollset->current_pollable;
+ worker->pollable_obj = pollset->current_pollable_obj;
- if (pollset_is_pollable_fd(pollset, worker->pollable)) {
- REF_BY((grpc_fd *)worker->pollable, 2, "one_poll");
+ if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) {
+ REF_BY((grpc_fd *)worker->pollable_obj, 2, "one_poll");
}
worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
- if (!worker_insert(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
+ if (!worker_insert(&worker->pollable_obj->root_worker, PWL_POLLABLE,
+ worker)) {
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ if (worker->pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
}
if (GRPC_TRACER_ON(grpc_polling_trace) &&
- worker->pollable->root_worker != worker) {
+ worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
- worker->pollable, worker,
+ worker->pollable_obj, worker,
poll_deadline_to_millis_timeout(deadline, *now));
}
- while (do_poll && worker->pollable->root_worker != worker) {
- if (gpr_cv_wait(&worker->cv, &worker->pollable->po.mu, deadline)) {
+ while (do_poll && worker->pollable_obj->root_worker != worker) {
+ if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu, deadline)) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
- worker->pollable, worker);
+ worker->pollable_obj, worker);
}
do_poll = false;
} else if (worker->kicked) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, worker->pollable,
- worker);
+ gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset,
+ worker->pollable_obj, worker);
}
do_poll = false;
} else if (GRPC_TRACER_ON(grpc_polling_trace) &&
- worker->pollable->root_worker != worker) {
+ worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
- worker->pollable, worker);
+ worker->pollable_obj, worker);
}
}
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker->pollable->po.mu);
- gpr_mu_lock(&pollset->pollable.po.mu);
- gpr_mu_lock(&worker->pollable->po.mu);
+ if (worker->pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&worker->pollable_obj->po.mu);
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
+ gpr_mu_lock(&worker->pollable_obj->po.mu);
}
*now = gpr_now(now->clock_type);
}
return do_poll && pollset->shutdown_closure == NULL &&
- pollset->current_pollable == worker->pollable;
+ pollset->current_pollable_obj == worker->pollable_obj;
}
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl) {
if (NEW_ROOT ==
- worker_remove(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
- gpr_cv_signal(&worker->pollable->root_worker->cv);
+ worker_remove(&worker->pollable_obj->root_worker, PWL_POLLABLE, worker)) {
+ gpr_cv_signal(&worker->pollable_obj->root_worker->cv);
}
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
- if (pollset_is_pollable_fd(pollset, worker->pollable)) {
- UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable, 2, "one_poll");
+ if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) {
+ UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable_obj, 2, "one_poll");
}
if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
pollset_maybe_finish_shutdown(exec_ctx, pollset);
@@ -971,41 +982,41 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->kicked_without_poller = false;
return GRPC_ERROR_NONE;
}
- if (pollset->current_pollable != &pollset->pollable) {
- gpr_mu_lock(&pollset->current_pollable->po.mu);
+ if (pollset->current_pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_lock(&pollset->current_pollable_obj->po.mu);
}
if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!pollset->shutdown_closure);
- append_error(&error, pollable_materialize(worker.pollable), err_desc);
- if (worker.pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker.pollable->po.mu);
+ append_error(&error, pollable_materialize(worker.pollable_obj), err_desc);
+ if (worker.pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&worker.pollable_obj->po.mu);
}
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
if (pollset->event_cursor == pollset->event_count) {
- append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable,
+ append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable_obj,
now, deadline),
err_desc);
}
append_error(&error, pollset_process_events(exec_ctx, pollset, false),
err_desc);
- gpr_mu_lock(&pollset->pollable.po.mu);
- if (worker.pollable != &pollset->pollable) {
- gpr_mu_lock(&worker.pollable->po.mu);
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
+ if (worker.pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_lock(&worker.pollable_obj->po.mu);
}
gpr_tls_set(&g_current_thread_pollset, 0);
gpr_tls_set(&g_current_thread_worker, 0);
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
end_worker(exec_ctx, pollset, &worker, worker_hdl);
- if (worker.pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker.pollable->po.mu);
+ if (worker.pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&worker.pollable_obj->po.mu);
}
if (grpc_exec_ctx_has_work(exec_ctx)) {
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->pollable.po.mu);
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
}
return error;
}
@@ -1022,7 +1033,7 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
bool fd_locked) {
static const char *err_desc = "pollset_add_fd";
grpc_error *error = GRPC_ERROR_NONE;
- if (pollset->current_pollable == &g_empty_pollable) {
+ if (pollset->current_pollable_obj == &g_empty_pollable) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from empty to fd", pollset,
@@ -1030,19 +1041,19 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
}
/* empty pollable --> single fd pollable */
pollset_kick_all(exec_ctx, pollset);
- pollset->current_pollable = &fd->pollable;
- if (!fd_locked) gpr_mu_lock(&fd->pollable.po.mu);
+ pollset->current_pollable_obj = &fd->pollable_obj;
+ if (!fd_locked) gpr_mu_lock(&fd->pollable_obj.po.mu);
append_error(&error, fd_become_pollable_locked(fd), err_desc);
- if (!fd_locked) gpr_mu_unlock(&fd->pollable.po.mu);
+ if (!fd_locked) gpr_mu_unlock(&fd->pollable_obj.po.mu);
REF_BY(fd, 2, "pollset_pollable");
- } else if (pollset->current_pollable == &pollset->pollable) {
+ } else if (pollset->current_pollable_obj == &pollset->pollable_obj) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p add fd %p; already multipolling", pollset, fd);
}
- append_error(&error, pollable_add_fd(pollset->current_pollable, fd),
+ append_error(&error, pollable_add_fd(pollset->current_pollable_obj, fd),
err_desc);
- } else if (pollset->current_pollable != &fd->pollable) {
- grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable;
+ } else if (pollset->current_pollable_obj != &fd->pollable_obj) {
+ grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable_obj;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from fd %p to multipoller",
@@ -1054,11 +1065,11 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read");
grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write");
pollset_kick_all(exec_ctx, pollset);
- pollset->current_pollable = &pollset->pollable;
- if (append_error(&error, pollable_materialize(&pollset->pollable),
+ pollset->current_pollable_obj = &pollset->pollable_obj;
+ if (append_error(&error, pollable_materialize(&pollset->pollable_obj),
err_desc)) {
- pollable_add_fd(&pollset->pollable, had_fd);
- pollable_add_fd(&pollset->pollable, fd);
+ pollable_add_fd(&pollset->pollable_obj, had_fd);
+ pollable_add_fd(&pollset->pollable_obj, fd);
}
GRPC_CLOSURE_SCHED(exec_ctx,
GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd,
@@ -1070,9 +1081,9 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
- gpr_mu_lock(&pollset->pollable.po.mu);
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd, false);
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}
@@ -1094,7 +1105,7 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
grpc_fd *fd) {
- po_join(exec_ctx, &pss->po, &fd->pollable.po);
+ po_join(exec_ctx, &pss->po, &fd->pollable_obj.po);
}
static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
@@ -1102,7 +1113,7 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pss, grpc_pollset *ps) {
- po_join(exec_ctx, &pss->po, &ps->pollable.po);
+ po_join(exec_ctx, &pss->po, &ps->pollable_obj.po);
}
static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
@@ -1385,34 +1396,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_epollex_linux(
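Note on the vtable change above: the grpc_event_engine_vtable initializer drops C99 designated initializers in favor of positional aggregate initialization, presumably so these files also build cleanly as C++ (which, before C++20, rejects designated initializers). The positional form must follow the field declaration order of the struct, which is why fd_is_shutdown now appears after fd_notify_on_write. A hypothetical sketch of the same transformation, not taken from this codebase:

    #include <stddef.h>
    /* hypothetical two-field vtable, for illustration only */
    typedef struct { size_t size; void (*init)(void); } demo_vtable;
    static void demo_init(void) {}
    /* designated initializers: valid C99, rejected by pre-C++20 C++ */
    static const demo_vtable a = {.size = sizeof(demo_vtable), .init = demo_init};
    /* positional initialization: valid in both C and C++, order must match the struct */
    static const demo_vtable b = {sizeof(demo_vtable), demo_init};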
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.c
index b88c3ba111..4d8bdf1401 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.c
+++ b/src/core/lib/iomgr/ev_epollsig_linux.c
@@ -1021,10 +1021,11 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
}
/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
grpc_error *error = GRPC_ERROR_NONE;
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
const char *err_desc = "Kick Failure";
grpc_pollset_worker *worker = specific_worker;
if (worker != NULL) {
@@ -1132,7 +1133,8 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
- grpc_pollset *ps, char *reason) {
+ grpc_pollset *ps,
+ const char *reason) {
if (ps->po.pi != NULL) {
PI_UNREF(exec_ctx, ps->po.pi, reason);
}
@@ -1158,7 +1160,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = true;
pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
/* If the pollset has any workers, we cannot call finish_shutdown_locked()
because it would release the underlying polling island. In such a case, we
@@ -1670,34 +1672,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 6a083f8ade..e170702dca 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -209,7 +209,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
/* As per pollset_kick, with an extended set of flags (defined above)
-- mostly for fd_posix's use. */
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
+static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) GRPC_MUST_USE_RESULT;
@@ -365,36 +365,39 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
return notifier;
}
-static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) {
+static grpc_error *pollset_kick_locked(grpc_exec_ctx *exec_ctx,
+ grpc_fd_watcher *watcher) {
gpr_mu_lock(&watcher->pollset->mu);
GPR_ASSERT(watcher->worker);
- grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker,
- GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
+ grpc_error *err =
+ pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker,
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(&watcher->pollset->mu);
return err;
}
-static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
+static void maybe_wake_one_watcher_locked(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
- pollset_kick_locked(fd->inactive_watcher_root.next);
+ pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next);
} else if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
+ pollset_kick_locked(exec_ctx, fd->read_watcher);
} else if (fd->write_watcher) {
- pollset_kick_locked(fd->write_watcher);
+ pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
-static void wake_all_watchers_locked(grpc_fd *fd) {
+static void wake_all_watchers_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_fd_watcher *watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
- pollset_kick_locked(watcher);
+ pollset_kick_locked(exec_ctx, watcher);
}
if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
+ pollset_kick_locked(exec_ctx, fd->read_watcher);
}
if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
- pollset_kick_locked(fd->write_watcher);
+ pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
@@ -435,7 +438,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
if (!has_watchers(fd)) {
close_fd_locked(exec_ctx, fd);
} else {
- wake_all_watchers_locked(fd);
+ wake_all_watchers_locked(exec_ctx, fd);
}
gpr_mu_unlock(&fd->mu);
UNREF_BY(fd, 2, reason); /* drop the reference */
@@ -479,7 +482,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd));
- maybe_wake_one_watcher_locked(fd);
+ maybe_wake_one_watcher_locked(exec_ctx, fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
gpr_log(GPR_ERROR,
@@ -648,7 +651,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
}
}
if (kick) {
- maybe_wake_one_watcher_locked(fd);
+ maybe_wake_one_watcher_locked(exec_ctx, fd);
}
if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
close_fd_locked(exec_ctx, fd);
@@ -712,11 +715,12 @@ static void kick_append_error(grpc_error **composite, grpc_error *error) {
*composite = grpc_error_add_child(*composite, error);
}
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
+static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) {
GPR_TIMER_BEGIN("pollset_kick_ext", 0);
grpc_error *error = GRPC_ERROR_NONE;
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
/* pollset->mu already held */
if (specific_worker != NULL) {
@@ -782,9 +786,9 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p,
return error;
}
-static grpc_error *pollset_kick(grpc_pollset *p,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
- return pollset_kick_ext(p, specific_worker, 0);
+ return pollset_kick_ext(exec_ctx, p, specific_worker, 0);
}
/* global state management */
@@ -847,7 +851,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
pollset->fds[pollset->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
- pollset_kick(pollset, NULL);
+ pollset_kick(exec_ctx, pollset, NULL);
exit:
gpr_mu_unlock(&pollset->mu);
}
@@ -989,6 +993,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
+ }
+
if (r < 0) {
if (errno != EINTR) {
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
@@ -1009,6 +1017,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset);
+ }
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}
@@ -1016,6 +1027,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
+ pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
+ (pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
+ }
fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
@@ -1071,7 +1087,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* check shutdown conditions */
if (pollset->shutting_down) {
if (pollset_has_workers(pollset)) {
- pollset_kick(pollset, NULL);
+ pollset_kick(exec_ctx, pollset, NULL);
} else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
@@ -1100,7 +1116,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset_has_workers(pollset)) {
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
}
@@ -1527,7 +1543,7 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
- idx = FD_TO_IDX(fds[i].fd);
+ idx = GRPC_FD_TO_IDX(fds[i].fd);
fd_cvs[i].cv = &pollcv_cv;
fd_cvs[i].prev = NULL;
fd_cvs[i].next = g_cvfds.cvfds[idx].cvs;
@@ -1590,8 +1606,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
- remove_cvn(&g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs, &(fd_cvs[i]));
- if (g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].is_set) {
+ remove_cvn(&g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].cvs, &(fd_cvs[i]));
+ if (g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].is_set) {
fds[i].revents = POLLIN;
if (res >= 0) res++;
}
@@ -1676,34 +1692,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request) {
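Besides the same vtable conversion, the poll-based engine above gains grpc_polling_trace logging around the poll() call: the raw return value, wakeup-fd consumption, and the per-fd readable/writable bits are logged when the tracer is on. Assuming the tracer is registered under the name "polling" (it is declared in ev_posix.c, outside this diff), it can be enabled at runtime through the usual GRPC_TRACE environment variable together with GRPC_VERBOSITY=DEBUG.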
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index d881e2d4dd..4d3ae2228e 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -210,9 +210,9 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return g_event_engine->pollset_work(exec_ctx, pollset, worker, now, deadline);
}
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
- return g_event_engine->pollset_kick(pollset, specific_worker);
+ return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker);
}
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 1108e46ef8..1ff2ff1413 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -54,7 +54,7 @@ typedef struct grpc_event_engine_vtable {
grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker, gpr_timespec now,
gpr_timespec deadline);
- grpc_error *(*pollset_kick)(grpc_pollset *pollset,
+ grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd);
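The hunks above thread a grpc_exec_ctx through every pollset_kick implementation, the grpc_pollset_kick wrapper, and the vtable entry, so each kick can be counted with GRPC_STATS_INC_POLLSET_KICK against the current exec_ctx. Callers throughout the commit are updated mechanically; a representative before/after taken from the diff:

    /* before */
    pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
    /* after */
    pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);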
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index eb8d55678a..2439f15a8a 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -32,15 +32,14 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/spinlock.h"
-#define MAX_DEPTH 2
-
typedef struct {
gpr_mu mu;
gpr_cv cv;
grpc_closure_list elems;
- size_t depth;
bool shutdown;
+ bool queued_long_job;
gpr_thd_id id;
+ grpc_closure_list local_elems;
} thread_state;
static thread_state *g_thread_state;
@@ -50,25 +49,40 @@ static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
GPR_TLS_DECL(g_this_thread_state);
+static grpc_tracer_flag executor_trace =
+ GRPC_TRACER_INITIALIZER(false, "executor");
+
static void executor_thread(void *arg);
-static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
- size_t n = 0;
+static void run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+ int n = 0; // number of closures executed
- grpc_closure *c = list.head;
- while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error_data.error;
+ while (!grpc_closure_list_empty(*list)) {
+ grpc_closure *c = list->head;
+ grpc_closure_list_init(list);
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
+ c->file_created, c->line_created);
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
+#endif
+ }
#ifndef NDEBUG
- c->scheduled = false;
+ c->scheduled = false;
#endif
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- c = next;
- n++;
+ n++;
+ c->cb(exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
+ c = next;
+ grpc_exec_ctx_flush(exec_ctx);
+ }
}
- return n;
+ GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, n);
}
bool grpc_executor_is_threaded() {
@@ -113,7 +127,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_destroy(&g_thread_state[i].mu);
gpr_cv_destroy(&g_thread_state[i].cv);
- run_closures(exec_ctx, g_thread_state[i].elems);
+ run_closures(exec_ctx, &g_thread_state[i].elems);
}
gpr_free(g_thread_state);
gpr_tls_destroy(&g_this_thread_state);
@@ -121,6 +135,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
}
void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
+ grpc_register_tracer(&executor_trace);
gpr_atm_no_barrier_store(&g_cur_threads, 0);
grpc_executor_set_threading(exec_ctx, true);
}
@@ -136,67 +151,160 @@ static void executor_thread(void *arg) {
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
- size_t subtract_depth = 0;
+ GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(&exec_ctx);
+
+ bool used = false;
for (;;) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step", (int)(ts - g_thread_state));
+ }
gpr_mu_lock(&ts->mu);
- ts->depth -= subtract_depth;
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
+ ts->queued_long_job = false;
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
if (ts->shutdown) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
+ (int)(ts - g_thread_state));
+ }
gpr_mu_unlock(&ts->mu);
break;
}
+ if (!used) {
+ GRPC_STATS_INC_EXECUTOR_THREADS_USED(&exec_ctx);
+ used = true;
+ }
GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
- grpc_closure_list exec = ts->elems;
+ GPR_ASSERT(grpc_closure_list_empty(ts->local_elems));
+ ts->local_elems = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
+ }
- subtract_depth = run_closures(&exec_ctx, exec);
- grpc_exec_ctx_flush(&exec_ctx);
+ run_closures(&exec_ctx, &ts->local_elems);
}
grpc_exec_ctx_finish(&exec_ctx);
}
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
- GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx);
- if (cur_thread_count == 0) {
- grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
- return;
- }
- thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
- if (ts == NULL) {
- ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
+ grpc_error *error, bool is_short) {
+ bool retry_push;
+ if (is_short) {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
} else {
- GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
}
- gpr_mu_lock(&ts->mu);
- if (grpc_closure_list_empty(ts->elems)) {
- GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
- gpr_cv_signal(&ts->cv);
- }
- grpc_closure_list_append(&ts->elems, closure, error);
- ts->depth++;
- bool try_new_thread = ts->depth > MAX_DEPTH &&
- cur_thread_count < g_max_threads && !ts->shutdown;
- gpr_mu_unlock(&ts->mu);
- if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
- cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
- if (cur_thread_count < g_max_threads) {
- gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
-
- gpr_thd_options opt = gpr_thd_options_default();
- gpr_thd_options_set_joinable(&opt);
- gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
- &g_thread_state[cur_thread_count], &opt);
+ do {
+ retry_push = false;
+ size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count == 0) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
+ closure, closure->file_created, closure->line_created);
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
+#endif
+ }
+ grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+ return;
}
- gpr_spinlock_unlock(&g_adding_thread_lock);
- }
+ thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
+ if (ts == NULL) {
+ ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
+ } else {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
+ if (is_short) {
+ grpc_closure_list_append(&ts->local_elems, closure, error);
+ return;
+ }
+ }
+ thread_state *orig_ts = ts;
+
+ bool try_new_thread;
+ for (;;) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(
+ GPR_DEBUG,
+ "EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
+ closure, is_short ? "short" : "long", closure->file_created,
+ closure->line_created, (int)(ts - g_thread_state));
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
+ closure, is_short ? "short" : "long",
+ (int)(ts - g_thread_state));
+#endif
+ }
+ gpr_mu_lock(&ts->mu);
+ if (ts->queued_long_job) {
+ // if there's a long job queued, we never queue anything else to this
+ // queue (since long jobs can take 'infinite' time and we need to
+ // guarantee no starvation)
+ // ... spin through queues and try again
+ gpr_mu_unlock(&ts->mu);
+ size_t idx = (size_t)(ts - g_thread_state);
+ ts = &g_thread_state[(idx + 1) % cur_thread_count];
+ if (ts == orig_ts) {
+ retry_push = true;
+ try_new_thread = true;
+ break;
+ }
+ continue;
+ }
+ if (grpc_closure_list_empty(ts->elems)) {
+ GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
+ gpr_cv_signal(&ts->cv);
+ }
+ grpc_closure_list_append(&ts->elems, closure, error);
+ try_new_thread = ts->elems.head != closure &&
+ cur_thread_count < g_max_threads && !ts->shutdown;
+ if (!is_short) ts->queued_long_job = true;
+ gpr_mu_unlock(&ts->mu);
+ break;
+ }
+ if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
+ cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count < g_max_threads) {
+ gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
+
+ gpr_thd_options opt = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&opt);
+ gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
+ &g_thread_state[cur_thread_count], &opt);
+ }
+ gpr_spinlock_unlock(&g_adding_thread_lock);
+ }
+ if (retry_push) {
+ GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
+ }
+ } while (retry_push);
+}
+
+static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ executor_push(exec_ctx, closure, error, true);
+}
+
+static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ executor_push(exec_ctx, closure, error, false);
}
-static const grpc_closure_scheduler_vtable executor_vtable = {
- executor_push, executor_push, "executor"};
-static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
-grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
+static const grpc_closure_scheduler_vtable executor_vtable_short = {
+ executor_push_short, executor_push_short, "executor"};
+static grpc_closure_scheduler executor_scheduler_short = {
+ &executor_vtable_short};
+
+static const grpc_closure_scheduler_vtable executor_vtable_long = {
+ executor_push_long, executor_push_long, "executor"};
+static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};
+
+grpc_closure_scheduler *grpc_executor_scheduler(
+ grpc_executor_job_length length) {
+ return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
+ : &executor_scheduler_long;
+}
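The executor rewrite above replaces the fixed MAX_DEPTH heuristic with a short/long job distinction. A long closure marks its thread_state with queued_long_job (cleared when the thread next goes idle), and executor_push refuses to queue anything behind a long job: it spins across the other thread queues, possibly spawning a new thread up to g_max_threads, and then retries the whole push, which is the no-starvation guarantee described in the inline comment. Short closures scheduled from an executor thread are appended to that thread's local_elems and run within the same wakeup, with grpc_exec_ctx_flush between closures. New stats counters (threads created and used, short and long scheduled items, push retries, closures per wakeup) and an "executor" tracer are added alongside.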
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index c3382a0a12..0412c02790 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -21,6 +21,11 @@
#include "src/core/lib/iomgr/closure.h"
+typedef enum {
+ GRPC_EXECUTOR_SHORT,
+ GRPC_EXECUTOR_LONG
+} grpc_executor_job_length;
+
/** Initialize the global executor.
*
* This mechanism is meant to outsource work (grpc_closure instances) to a
@@ -28,7 +33,7 @@
* non-blocking solution available. */
void grpc_executor_init(grpc_exec_ctx *exec_ctx);
-extern grpc_closure_scheduler *grpc_executor_scheduler;
+grpc_closure_scheduler *grpc_executor_scheduler(grpc_executor_job_length);
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx);
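With the header change above, grpc_executor_scheduler is now a function taking a grpc_executor_job_length instead of a global scheduler pointer, and call sites pick the variant explicitly. Two examples lifted from later in this diff, shown here in isolation:

    /* short-lived work: a DNS request closure in the resolvers */
    GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
                      grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));

    /* long-running work: the TCP backup poller added in tcp_posix.c */
    GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
                      grpc_executor_scheduler(GRPC_EXECUTOR_LONG));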
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c
index 1feea6d628..f63f190155 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.c
@@ -50,7 +50,7 @@ void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
grpc_executor_init(exec_ctx);
grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
g_root_object.next = g_root_object.prev = &g_root_object;
- g_root_object.name = "root";
+ g_root_object.name = (char *)"root";
grpc_network_status_init();
grpc_iomgr_platform_init();
}
diff --git a/src/core/lib/iomgr/is_epollexclusive_available.c b/src/core/lib/iomgr/is_epollexclusive_available.c
index e8a7d4d52c..d08844c0df 100644
--- a/src/core/lib/iomgr/is_epollexclusive_available.c
+++ b/src/core/lib/iomgr/is_epollexclusive_available.c
@@ -57,12 +57,12 @@ bool grpc_is_epollexclusive_available(void) {
close(fd);
return false;
}
- struct epoll_event ev = {
- /* choose events that should cause an error on
- EPOLLEXCLUSIVE enabled kernels - specifically the combination of
- EPOLLONESHOT and EPOLLEXCLUSIVE */
- .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT),
- .data.ptr = NULL};
+ struct epoll_event ev;
+ /* choose events that should cause an error on
+ EPOLLEXCLUSIVE enabled kernels - specifically the combination of
+ EPOLLONESHOT and EPOLLEXCLUSIVE */
+ ev.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT);
+ ev.data.ptr = NULL;
if (epoll_ctl(fd, EPOLL_CTL_ADD, evfd, &ev) != 0) {
if (errno != EINVAL) {
if (!logged_why_not) {
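The change above swaps a designated initializer of struct epoll_event for plain field assignments; the probe itself is unchanged and still relies on the EPOLLEXCLUSIVE plus EPOLLONESHOT combination being rejected with EINVAL on kernels that understand EPOLLEXCLUSIVE. It is the same designated-initializer removal applied to the event engine vtables earlier in the diff, presumably for C++ compatibility.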
diff --git a/src/core/lib/iomgr/polling_entity.c b/src/core/lib/iomgr/polling_entity.c
index 74d8794af5..8591a5518e 100644
--- a/src/core/lib/iomgr/polling_entity.c
+++ b/src/core/lib/iomgr/polling_entity.c
@@ -25,7 +25,7 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set *pollset_set) {
grpc_polling_entity pollent;
pollent.pollent.pollset_set = pollset_set;
- pollent.tag = POPS_POLLSET_SET;
+ pollent.tag = GRPC_POLLS_POLLSET_SET;
return pollent;
}
@@ -33,12 +33,12 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset(
grpc_pollset *pollset) {
grpc_polling_entity pollent;
pollent.pollent.pollset = pollset;
- pollent.tag = POPS_POLLSET;
+ pollent.tag = GRPC_POLLS_POLLSET;
return pollent;
}
grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
- if (pollent->tag == POPS_POLLSET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET) {
return pollent->pollent.pollset;
}
return NULL;
@@ -46,23 +46,23 @@ grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
grpc_pollset_set *grpc_polling_entity_pollset_set(
grpc_polling_entity *pollent) {
- if (pollent->tag == POPS_POLLSET_SET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
return pollent->pollent.pollset_set;
}
return NULL;
}
bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent) {
- return pollent->tag == POPS_NONE;
+ return pollent->tag == GRPC_POLLS_NONE;
}
void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_pollset_set *pss_dst) {
- if (pollent->tag == POPS_POLLSET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
- } else if (pollent->tag == POPS_POLLSET_SET) {
+ } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != NULL);
grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst,
pollent->pollent.pollset_set);
@@ -75,10 +75,10 @@ void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_pollset_set *pss_dst) {
- if (pollent->tag == POPS_POLLSET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
- } else if (pollent->tag == POPS_POLLSET_SET) {
+ } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != NULL);
grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst,
pollent->pollent.pollset_set);
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
index 971fd88b42..a161e1fea6 100644
--- a/src/core/lib/iomgr/polling_entity.h
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -22,6 +22,12 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
+typedef enum grpc_pollset_tag {
+ GRPC_POLLS_NONE,
+ GRPC_POLLS_POLLSET,
+ GRPC_POLLS_POLLSET_SET
+} grpc_pollset_tag;
+
/* A grpc_polling_entity is a pollset-or-pollset_set container. It allows
* functions that accept a pollset XOR a pollset_set to do so through an
* abstract interface. No ownership is taken. */
@@ -31,7 +37,7 @@ typedef struct grpc_polling_entity {
grpc_pollset *pollset;
grpc_pollset_set *pollset_set;
} pollent;
- enum pops_tag { POPS_NONE, POPS_POLLSET, POPS_POLLSET_SET } tag;
+ grpc_pollset_tag tag;
} grpc_polling_entity;
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
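Above, the tag enum of grpc_polling_entity is hoisted out of the struct into a named, prefixed typedef (grpc_pollset_tag), and the enumerators move from POPS_* to GRPC_POLLS_*. In C, enumerators declared inside a struct still land in the ordinary global namespace, so short unprefixed names in a widely included header risk collisions; the rename follows the same prefixing pattern as the GRPC_FD_TO_IDX / GRPC_IDX_TO_FD macros elsewhere in this diff.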
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index a609a3877a..a0f6b3a9d3 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -76,7 +76,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* Break one polling thread out of polling work for this pollset.
If specific_worker is non-NULL, then kick that worker. */
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker)
GRPC_MUST_USE_RESULT;
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
index a79fe89d3e..2651325e25 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -145,7 +145,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
GRPC_UV_ASSERT_SAME_THREAD();
uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index ea017a6054..eb295d3eeb 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -98,7 +98,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
pollset->shutting_down = 1;
- grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
@@ -181,7 +181,7 @@ done:
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_pollset_kick(grpc_pollset *p,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
@@ -209,7 +209,7 @@ grpc_error *grpc_pollset_kick(grpc_pollset *p,
specific_worker =
pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
if (specific_worker != NULL) {
- grpc_pollset_kick(p, specific_worker);
+ grpc_pollset_kick(exec_ctx, p, specific_worker);
} else if (p->is_iocp_worker) {
grpc_iocp_kick();
} else {
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index b515b8f1e6..60cfeebd47 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -85,7 +85,7 @@ static grpc_error *blocking_resolve_address_impl(
if (s != 0) {
/* Retry if well-known service name is recognized */
- char *svc[][2] = {{"http", "80"}, {"https", "443"}};
+ const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
if (strcmp(port, svc[i][0]) == 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
@@ -177,7 +177,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_resolved_addresses **addrs) {
request *r = (request *)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler);
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index 45cfd7248d..0cb0029f4e 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -159,7 +159,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_resolved_addresses **addresses) {
request *r = gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler);
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index 6c58986b53..4d69986fbc 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -22,6 +22,7 @@
#include <stdint.h>
#include <string.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
@@ -656,7 +657,7 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
return grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
} else {
gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
}
@@ -666,12 +667,12 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
}
static void *rq_copy(void *rq) {
- grpc_resource_quota_ref(rq);
+ grpc_resource_quota_ref((grpc_resource_quota *)rq);
return rq;
}
static void rq_destroy(grpc_exec_ctx *exec_ctx, void *rq) {
- grpc_resource_quota_unref_internal(exec_ctx, rq);
+ grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota *)rq);
}
static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
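The casts added above, and repeated throughout the rest of this commit, make conversions from void * explicit (channel-arg pointer payloads, vtable copy/destroy callbacks, thread arguments) and cast string-literal channel-arg keys to char * where the API takes a non-const char *. C accepts these conversions implicitly, but C++ does not, so this reads as preparation for building the tree as C++; treat that motivation as an inference, since the commit itself does not state it.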
diff --git a/src/core/lib/iomgr/socket_factory_posix.c b/src/core/lib/iomgr/socket_factory_posix.c
index 0f82dea570..8e907703ae 100644
--- a/src/core/lib/iomgr/socket_factory_posix.c
+++ b/src/core/lib/iomgr/socket_factory_posix.c
@@ -69,11 +69,11 @@ void grpc_socket_factory_unref(grpc_socket_factory *factory) {
}
static void *socket_factory_arg_copy(void *p) {
- return grpc_socket_factory_ref(p);
+ return grpc_socket_factory_ref((grpc_socket_factory *)p);
}
static void socket_factory_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_socket_factory_unref(p);
+ grpc_socket_factory_unref((grpc_socket_factory *)p);
}
static int socket_factory_cmp(void *a, void *b) {
@@ -85,8 +85,8 @@ static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp};
grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_FACTORY, factory,
- &socket_factory_arg_vtable);
+ return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_FACTORY,
+ factory, &socket_factory_arg_vtable);
}
#endif
diff --git a/src/core/lib/iomgr/socket_mutator.c b/src/core/lib/iomgr/socket_mutator.c
index 5d6c2c400e..b0435d5a07 100644
--- a/src/core/lib/iomgr/socket_mutator.c
+++ b/src/core/lib/iomgr/socket_mutator.c
@@ -60,11 +60,11 @@ void grpc_socket_mutator_unref(grpc_socket_mutator *mutator) {
}
static void *socket_mutator_arg_copy(void *p) {
- return grpc_socket_mutator_ref(p);
+ return grpc_socket_mutator_ref((grpc_socket_mutator *)p);
}
static void socket_mutator_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_socket_mutator_unref(p);
+ grpc_socket_mutator_unref((grpc_socket_mutator *)p);
}
static int socket_mutator_cmp(void *a, void *b) {
@@ -76,6 +76,6 @@ static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_MUTATOR, mutator,
- &socket_mutator_arg_vtable);
+ return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_MUTATOR,
+ mutator, &socket_mutator_arg_vtable);
}
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index ba0be9116f..7e271294fd 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -43,6 +43,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@@ -90,8 +91,8 @@ typedef struct {
grpc_closure *release_fd_cb;
int *release_fd;
- grpc_closure read_closure;
- grpc_closure write_closure;
+ grpc_closure read_done_closure;
+ grpc_closure write_done_closure;
char *peer_string;
@@ -99,6 +100,148 @@ typedef struct {
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
+typedef struct backup_poller {
+ gpr_mu *pollset_mu;
+ grpc_closure run_poller;
+} backup_poller;
+
+#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset *)((b) + 1))
+
+static gpr_atm g_uncovered_notifications_pending;
+static gpr_atm g_backup_poller; /* backup_poller* */
+
+static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
+ void *arg /* grpc_tcp */,
+ grpc_error *error);
+
+static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
+ grpc_error *error_ignored) {
+ backup_poller *p = (backup_poller *)bp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
+ }
+ grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p));
+ gpr_free(p);
+}
+
+static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
+ grpc_error *error_ignored) {
+ backup_poller *p = (backup_poller *)bp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
+ }
+ gpr_mu_lock(p->pollset_mu);
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec deadline =
+ gpr_time_add(now, gpr_time_from_seconds(10, GPR_TIMESPAN));
+ GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
+ GRPC_LOG_IF_ERROR("backup_poller:pollset_work",
+ grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL,
+ now, deadline));
+ gpr_mu_unlock(p->pollset_mu);
+ /* last "uncovered" notification is the ref that keeps us polling, if we get
+ * there try a cas to release it */
+ if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
+ gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
+ gpr_mu_lock(p->pollset_mu);
+ bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
+ }
+ gpr_mu_unlock(p->pollset_mu);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
+ }
+ grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p),
+ GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
+ grpc_schedule_on_exec_ctx));
+ } else {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE);
+ }
+}
+
+static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller);
+ gpr_atm old_count =
+ gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, (int)old_count,
+ (int)old_count - 1);
+ }
+ GPR_ASSERT(old_count != 1);
+}
+
+static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ backup_poller *p;
+ gpr_atm old_count =
+ gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", (int)old_count,
+ 2 + (int)old_count);
+ }
+ if (old_count == 0) {
+ GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
+ p = (backup_poller *)gpr_malloc(sizeof(*p) + grpc_pollset_size());
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
+ }
+ grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
+ gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
+ grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
+ GRPC_ERROR_NONE);
+ } else {
+ while ((p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller)) == NULL) {
+ // spin waiting for backup poller
+ }
+ }
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
+ }
+ grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd);
+ if (old_count != 0) {
+ drop_uncovered(exec_ctx, tcp);
+ }
+}
+
+static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
+ }
+ GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
+ grpc_schedule_on_exec_ctx);
+ grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
+}
+
+static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
+ }
+ cover_self(exec_ctx, tcp);
+ GRPC_CLOSURE_INIT(&tcp->write_done_closure,
+ tcp_drop_uncovered_then_handle_write, tcp,
+ grpc_schedule_on_exec_ctx);
+ grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure);
+}
+
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
+ }
+ drop_uncovered(exec_ctx, (grpc_tcp *)arg);
+ tcp_handle_write(exec_ctx, arg, error);
+}
+
static void add_to_estimate(grpc_tcp *tcp, size_t bytes) {
tcp->bytes_read_this_round += (double)bytes;
}
@@ -214,6 +357,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
grpc_closure *cb = tcp->read_cb;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
@@ -271,7 +415,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (errno == EAGAIN) {
finish_estimate(tcp);
/* We've consumed the edge, request a new one */
- grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
+ notify_on_read(exec_ctx, tcp);
} else {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
tcp->incoming_buffer);
@@ -308,6 +452,10 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)tcpp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
+ grpc_error_string(error));
+ }
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
@@ -323,9 +471,15 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
+ }
grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
target_read_size, 1, tcp->incoming_buffer);
} else {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
+ }
tcp_do_read(exec_ctx, tcp);
}
}
@@ -334,6 +488,9 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
+ }
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
@@ -357,9 +514,9 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = false;
- grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
+ notify_on_read(exec_ctx, tcp);
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE);
}
}
@@ -472,7 +629,7 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ notify_on_write(exec_ctx, tcp);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
@@ -525,7 +682,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ notify_on_write(exec_ctx, tcp);
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
@@ -602,7 +759,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
}
}
}
@@ -631,10 +788,6 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
- GRPC_CLOSURE_INIT(&tcp->read_closure, tcp_handle_read, tcp,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&tcp->write_closure, tcp_handle_write, tcp,
- grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(
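The tcp_posix.c changes above introduce a process-wide "backup poller" for write readiness. notify_on_write first calls cover_self, which bumps a global uncovered-notifications counter and, on the first cover, allocates a pollset plus a run_poller closure scheduled on the executor as a long job; run_poller calls grpc_pollset_work with a ten-second deadline and reschedules itself. Each covered endpoint's fd is added to that pollset, so write readiness is still observed even when no application thread is polling. When the write edge fires, tcp_drop_uncovered_then_handle_write drops the cover before running the normal write path, and once the counter falls back to the single reference that keeps the poller alive, run_poller CASes it to zero, clears the global poller slot, shuts the pollset down, and frees everything in done_poller. The read path keeps its old shape but now goes through notify_on_read, which re-initializes read_done_closure on each use instead of relying on a closure initialized once in grpc_tcp_create, and the file gains extensive grpc_tcp_trace logging.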
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index c3ec3e447a..06612d639c 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -198,12 +198,12 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
-
+ grpc_pollset *read_notifier_pollset;
if (err != GRPC_ERROR_NONE) {
goto error;
}
- grpc_pollset *read_notifier_pollset =
+ read_notifier_pollset =
sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
&sp->server->next_pollset_to_assign, 1) %
sp->server->pollset_count];
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index c08bb525b7..e9a7236c8c 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -95,9 +95,7 @@ struct shared_mutables {
gpr_mu mu;
} GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);
-static struct shared_mutables g_shared_mutables = {
- .checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, .initialized = false,
-};
+static struct shared_mutables g_shared_mutables;
static gpr_clock_type g_clock_type;
static gpr_timespec g_start_time;
@@ -155,6 +153,7 @@ void grpc_timer_list_init(gpr_timespec now) {
uint32_t i;
g_shared_mutables.initialized = true;
+ g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mu_init(&g_shared_mutables.mu);
g_clock_type = now.clock_type;
g_start_time = now;
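The timer change above is the same initializer cleanup: g_shared_mutables is left zero-initialized at file scope (so initialized starts out false), and checker_mu is now assigned GPR_SPINLOCK_INITIALIZER explicitly in grpc_timer_list_init instead of via a static designated initializer.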
diff --git a/src/core/lib/iomgr/timer_manager.c b/src/core/lib/iomgr/timer_manager.c
index ae2c0bf0ae..04ca44563d 100644
--- a/src/core/lib/iomgr/timer_manager.c
+++ b/src/core/lib/iomgr/timer_manager.c
@@ -276,7 +276,7 @@ static void timer_thread(void *completed_thread_ptr) {
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
timer_main_loop(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
- timer_thread_cleanup(completed_thread_ptr);
+ timer_thread_cleanup((completed_thread *)completed_thread_ptr);
}
static void start_threads(void) {
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 9a02c1d1bb..00b2e68bb5 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -118,7 +118,7 @@ static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) {
const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY);
if (arg) {
GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
- return arg->value.pointer.p;
+ return (grpc_socket_factory *)arg->value.pointer.p;
}
}
return NULL;
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.c b/src/core/lib/iomgr/wakeup_fd_cv.c
index 5e0b1d1704..268e0175dd 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.c
+++ b/src/core/lib/iomgr/wakeup_fd_cv.c
@@ -57,7 +57,7 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
g_cvfds.free_fds = g_cvfds.free_fds->next_free;
g_cvfds.cvfds[idx].cvs = NULL;
g_cvfds.cvfds[idx].is_set = 0;
- fd_info->read_fd = IDX_TO_FD(idx);
+ fd_info->read_fd = GRPC_IDX_TO_FD(idx);
fd_info->write_fd = -1;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
@@ -66,8 +66,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
cv_node* cvn;
gpr_mu_lock(&g_cvfds.mu);
- g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 1;
- cvn = g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs;
+ g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 1;
+ cvn = g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs;
while (cvn) {
gpr_cv_signal(cvn->cv);
cvn = cvn->next;
@@ -78,7 +78,7 @@ static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
static grpc_error* cv_fd_consume(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
- g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 0;
+ g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 0;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
@@ -89,9 +89,9 @@ static void cv_fd_destroy(grpc_wakeup_fd* fd_info) {
}
gpr_mu_lock(&g_cvfds.mu);
// Assert that there are no active pollers
- GPR_ASSERT(!g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs);
- g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
- g_cvfds.free_fds = &g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)];
+ GPR_ASSERT(!g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs);
+ g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
+ g_cvfds.free_fds = &g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)];
gpr_mu_unlock(&g_cvfds.mu);
}
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.h b/src/core/lib/iomgr/wakeup_fd_cv.h
index 46e84f5843..dc170ad5b4 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.h
+++ b/src/core/lib/iomgr/wakeup_fd_cv.h
@@ -37,8 +37,8 @@
#include "src/core/lib/iomgr/ev_posix.h"
-#define FD_TO_IDX(fd) (-(fd)-1)
-#define IDX_TO_FD(idx) (-(idx)-1)
+#define GRPC_FD_TO_IDX(fd) (-(fd)-1)
+#define GRPC_IDX_TO_FD(idx) (-(idx)-1)
typedef struct cv_node {
gpr_cv* cv;
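GRPC_FD_TO_IDX and GRPC_IDX_TO_FD above are straight renames of FD_TO_IDX / IDX_TO_FD; because they live in a header, the GRPC_ prefix avoids clashing with the FD_* macro namespace that system headers such as <sys/select.h> also populate. The two macros remain inverses: for idx >= 0, GRPC_IDX_TO_FD(idx) yields the negative fake descriptor -(idx)-1, and GRPC_FD_TO_IDX maps it back, since -(-(idx)-1)-1 == idx. The call sites in wakeup_fd_cv.c and in cvfd_poll earlier in the diff are updated to match.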
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index a2a8e289ee..691d66df69 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -79,7 +79,8 @@ static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx,
detector->is_done = 1;
GRPC_LOG_IF_ERROR(
"Pollset kick",
- grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent), NULL));
+ grpc_pollset_kick(exec_ctx,
+ grpc_polling_entity_pollset(&detector->pollent), NULL));
gpr_mu_unlock(g_polling_mu);
}
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index a7568b995f..2a9e939d40 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -455,14 +455,14 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create(
typedef struct {
grpc_channel_security_connector base;
- tsi_ssl_client_handshaker_factory *handshaker_factory;
+ tsi_ssl_client_handshaker_factory *client_handshaker_factory;
char *target_name;
char *overridden_target_name;
} grpc_ssl_channel_security_connector;
typedef struct {
grpc_server_security_connector base;
- tsi_ssl_server_handshaker_factory *handshaker_factory;
+ tsi_ssl_server_handshaker_factory *server_handshaker_factory;
} grpc_ssl_server_security_connector;
static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
@@ -470,9 +470,8 @@ static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
- if (c->handshaker_factory != NULL) {
- tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
- }
+ tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory);
+ c->client_handshaker_factory = NULL;
if (c->target_name != NULL) gpr_free(c->target_name);
if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name);
gpr_free(sc);
@@ -482,9 +481,8 @@ static void ssl_server_destroy(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
- if (c->handshaker_factory != NULL) {
- tsi_ssl_server_handshaker_factory_destroy(c->handshaker_factory);
- }
+ tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory);
+ c->server_handshaker_factory = NULL;
gpr_free(sc);
}
@@ -496,7 +494,7 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
- c->handshaker_factory,
+ c->client_handshaker_factory,
c->overridden_target_name != NULL ? c->overridden_target_name
: c->target_name,
&tsi_hs);
@@ -521,7 +519,7 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
- c->handshaker_factory, &tsi_hs);
+ c->server_handshaker_factory, &tsi_hs);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
@@ -852,7 +850,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
result = tsi_create_ssl_client_handshaker_factory(
has_key_cert_pair ? &config->pem_key_cert_pair : NULL, pem_root_certs,
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
- &c->handshaker_factory);
+ &c->client_handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
@@ -897,7 +895,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
config->pem_root_certs, get_tsi_client_certificate_request_type(
config->client_certificate_request),
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
- &c->handshaker_factory);
+ &c->server_handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
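The security_connector.c hunks above drop the NULL checks and call tsi_ssl_*_handshaker_factory_unref unconditionally, which suggests the factories are reference counted and the unref helper tolerates NULL. A minimal sketch of that convention, using a hypothetical ref-counted type rather than the real TSI factory API:
#include <stdlib.h>
typedef struct {
  int refs; /* simple non-atomic count; the real factory may differ */
} ssl_factory;
static ssl_factory *factory_create(void) {
  ssl_factory *f = (ssl_factory *)malloc(sizeof(*f));
  f->refs = 1;
  return f;
}
static void factory_unref(ssl_factory *f) {
  if (f == NULL) return; /* NULL-safe, so callers can drop the NULL check */
  if (--f->refs == 0) free(f);
}
int main(void) {
  ssl_factory *f = factory_create();
  factory_unref(f);    /* last reference: frees */
  factory_unref(NULL); /* no-op */
  return 0;
}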
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index ea9608f444..3d19605617 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -128,25 +128,23 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
}
-static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- security_handshaker *h = arg;
- gpr_mu_lock(&h->mu);
+static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
+ security_handshaker *h, grpc_error *error) {
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
- goto done;
+ return;
}
// Create zero-copy frame protector, if implemented.
tsi_zero_copy_grpc_protector *zero_copy_protector = NULL;
tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector(
- h->handshaker_result, NULL, &zero_copy_protector);
+ exec_ctx, h->handshaker_result, NULL, &zero_copy_protector);
if (result != TSI_OK && result != TSI_UNIMPLEMENTED) {
error = grpc_set_tsi_error_result(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Zero-copy frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
- goto done;
+ return;
}
// Create frame protector if zero-copy frame protector is NULL.
tsi_frame_protector *protector = NULL;
@@ -158,7 +156,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
"Frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
- goto done;
+ return;
}
}
// Get unused bytes.
@@ -192,7 +190,13 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
h->shutdown = true;
-done:
+}
+
+static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ security_handshaker *h = (security_handshaker *)arg;
+ gpr_mu_lock(&h->mu);
+ on_peer_checked_inner(exec_ctx, h, error);
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
}
@@ -254,7 +258,7 @@ static grpc_error *on_handshake_next_done_locked(
static void on_handshake_next_done_grpc_wrapper(
tsi_result result, void *user_data, const unsigned char *bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) {
- security_handshaker *h = user_data;
+ security_handshaker *h = (security_handshaker *)user_data;
// This callback will be invoked by TSI in a non-grpc thread, so it's
// safe to create our own exec_ctx here.
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -296,7 +300,7 @@ static grpc_error *do_handshaker_next_locked(
static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- security_handshaker *h = arg;
+ security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@@ -313,7 +317,8 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
bytes_received_size += GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]);
}
if (bytes_received_size > h->handshake_buffer_size) {
- h->handshake_buffer = gpr_realloc(h->handshake_buffer, bytes_received_size);
+ h->handshake_buffer =
+ (uint8_t *)gpr_realloc(h->handshake_buffer, bytes_received_size);
h->handshake_buffer_size = bytes_received_size;
}
size_t offset = 0;
@@ -338,7 +343,7 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- security_handshaker *h = arg;
+ security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@@ -415,14 +420,15 @@ static const grpc_handshaker_vtable security_handshaker_vtable = {
static grpc_handshaker *security_handshaker_create(
grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
grpc_security_connector *connector) {
- security_handshaker *h = gpr_zalloc(sizeof(security_handshaker));
+ security_handshaker *h =
+ (security_handshaker *)gpr_zalloc(sizeof(security_handshaker));
grpc_handshaker_init(&security_handshaker_vtable, &h->base);
h->handshaker = handshaker;
h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
gpr_mu_init(&h->mu);
gpr_ref_init(&h->refs, 1);
h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
- h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
+ h->handshake_buffer = (uint8_t *)gpr_malloc(h->handshake_buffer_size);
GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
on_handshake_data_sent_to_peer, h,
grpc_schedule_on_exec_ctx);
@@ -465,7 +471,7 @@ static const grpc_handshaker_vtable fail_handshaker_vtable = {
fail_handshaker_do_handshake};
static grpc_handshaker *fail_handshaker_create() {
- grpc_handshaker *h = gpr_malloc(sizeof(*h));
+ grpc_handshaker *h = (grpc_handshaker *)gpr_malloc(sizeof(*h));
grpc_handshaker_init(&fail_handshaker_vtable, h);
return h;
}
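The security_handshaker.c change above splits on_peer_checked into a thin locking wrapper plus an _inner body, so the body can use plain early returns instead of 'goto done' while the wrapper still owns the single unlock and unref point. A minimal sketch of that shape, with a hypothetical handshaker stand-in rather than the real type:
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
typedef struct {
  pthread_mutex_t mu;
  bool shutdown;
} handshaker;
/* Body runs with h->mu held; early returns are safe because the caller
   owns the unlock. */
static void on_checked_inner(handshaker *h, bool ok) {
  if (!ok || h->shutdown) return; /* early exit, no goto needed */
  printf("handshake result accepted\n");
}
static void on_checked(handshaker *h, bool ok) {
  pthread_mutex_lock(&h->mu);
  on_checked_inner(h, ok);
  pthread_mutex_unlock(&h->mu); /* single unlock point, as in the wrapper above */
}
int main(void) {
  handshaker h = {PTHREAD_MUTEX_INITIALIZER, false};
  on_checked(&h, true);
  on_checked(&h, false);
  return 0;
}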
diff --git a/src/core/lib/slice/slice.c b/src/core/lib/slice/slice.c
index 321a21a10b..0764eda052 100644
--- a/src/core/lib/slice/slice.c
+++ b/src/core/lib/slice/slice.c
@@ -174,8 +174,8 @@ static const grpc_slice_refcount_vtable new_with_len_vtable = {
grpc_slice grpc_slice_new_with_len(void *p, size_t len,
void (*destroy)(void *, size_t)) {
grpc_slice slice;
- new_with_len_slice_refcount *rc =
- gpr_malloc(sizeof(new_with_len_slice_refcount));
+ new_with_len_slice_refcount *rc = (new_with_len_slice_refcount *)gpr_malloc(
+ sizeof(new_with_len_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.vtable = &new_with_len_vtable;
rc->rc.sub_refcount = &rc->rc;
diff --git a/src/core/lib/support/log_linux.c b/src/core/lib/support/log_linux.c
index 61d2346427..7755018693 100644
--- a/src/core/lib/support/log_linux.c
+++ b/src/core/lib/support/log_linux.c
@@ -57,7 +57,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
}
void gpr_default_log(gpr_log_func_args *args) {
- char *final_slash;
+ const char *final_slash;
char *prefix;
const char *display_file;
char time_buffer[64];
diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c
index 523e43445b..6b172df82f 100644
--- a/src/core/lib/support/string.c
+++ b/src/core/lib/support/string.c
@@ -276,7 +276,7 @@ static void add_string_to_split(const char *beg, const char *end, char ***strs,
void gpr_string_split(const char *input, const char *sep, char ***strs,
size_t *nstrs) {
- char *next;
+ const char *next;
*strs = NULL;
*nstrs = 0;
size_t capstrs = 0;
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 93c512df69..03f47553a1 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -158,7 +158,7 @@ struct grpc_call {
grpc_channel *channel;
gpr_timespec start_time;
/* parent_call* */ gpr_atm parent_call_atm;
- child_call *child_call;
+ child_call *child;
/* client or server call */
bool is_client;
@@ -293,11 +293,11 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl);
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
grpc_error *error, bool has_cancelled);
-static void add_init_error(grpc_error **composite, grpc_error *new) {
- if (new == GRPC_ERROR_NONE) return;
+static void add_init_error(grpc_error **composite, grpc_error *new_err) {
+ if (new_err == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE)
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Call creation failed");
- *composite = grpc_error_add_child(*composite, new);
+ *composite = grpc_error_add_child(*composite, new_err);
}
void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
@@ -330,8 +330,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
grpc_channel_get_channel_stack(args->channel);
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
- gpr_arena *arena =
- gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel));
+ size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
+ GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size);
+ gpr_arena *arena = gpr_arena_create(initial_size);
call = (grpc_call *)gpr_arena_alloc(
arena, sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
@@ -377,24 +378,24 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
bool immediately_cancel = false;
- if (args->parent_call != NULL) {
- child_call *cc = call->child_call =
- gpr_arena_alloc(arena, sizeof(child_call));
- call->child_call->parent = args->parent_call;
+ if (args->parent != NULL) {
+ child_call *cc = call->child =
+ (child_call *)gpr_arena_alloc(arena, sizeof(child_call));
+ call->child->parent = args->parent;
- GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
+ GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
- GPR_ASSERT(!args->parent_call->is_client);
+ GPR_ASSERT(!args->parent->is_client);
- parent_call *pc = get_or_create_parent_call(args->parent_call);
+ parent_call *pc = get_or_create_parent_call(args->parent);
gpr_mu_lock(&pc->child_list_mu);
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
gpr_convert_clock_type(send_deadline,
- args->parent_call->send_deadline.clock_type),
- args->parent_call->send_deadline);
+ args->parent->send_deadline.clock_type),
+ args->parent->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
@@ -406,9 +407,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
"Census tracing propagation requested "
"without Census context propagation"));
}
- grpc_call_context_set(
- call, GRPC_CONTEXT_TRACING,
- args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
+ grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
+ args->parent->context[GRPC_CONTEXT_TRACING].value,
+ NULL);
} else if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT) {
add_init_error(&error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Census context propagation requested "
@@ -416,7 +417,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
}
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
- if (gpr_atm_acq_load(&args->parent_call->received_final_op_atm)) {
+ if (gpr_atm_acq_load(&args->parent->received_final_op_atm)) {
immediately_cancel = true;
}
}
@@ -426,9 +427,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
cc->sibling_next = cc->sibling_prev = call;
} else {
cc->sibling_next = pc->first_child;
- cc->sibling_prev = pc->first_child->child_call->sibling_prev;
- cc->sibling_next->child_call->sibling_prev =
- cc->sibling_prev->child_call->sibling_next = call;
+ cc->sibling_prev = pc->first_child->child->sibling_prev;
+ cc->sibling_next->child->sibling_prev =
+ cc->sibling_prev->child->sibling_next = call;
}
gpr_mu_unlock(&pc->child_list_mu);
@@ -549,7 +550,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind");
}
- get_final_status(call, set_status_value_directly, &c->final_info.final_status,
+ get_final_status(c, set_status_value_directly, &c->final_info.final_status,
NULL);
c->final_info.stats.latency =
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
@@ -570,7 +571,7 @@ void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); }
void grpc_call_unref(grpc_call *c) {
if (!gpr_unref(&c->ext_ref)) return;
- child_call *cc = c->child_call;
+ child_call *cc = c->child;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_unref", 0);
@@ -585,8 +586,8 @@ void grpc_call_unref(grpc_call *c) {
pc->first_child = NULL;
}
}
- cc->sibling_prev->child_call->sibling_next = cc->sibling_next;
- cc->sibling_next->child_call->sibling_prev = cc->sibling_prev;
+ cc->sibling_prev->child->sibling_next = cc->sibling_next;
+ cc->sibling_next->child->sibling_prev = cc->sibling_prev;
gpr_mu_unlock(&pc->child_list_mu);
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, cc->parent, "child");
}
@@ -1316,7 +1317,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
child = pc->first_child;
if (child != NULL) {
do {
- next_child_call = child->child_call->sibling_next;
+ next_child_call = child->child->sibling_next;
if (child->cancellation_is_inherited) {
GRPC_CALL_INTERNAL_REF(child, "propagate_cancel");
cancel_with_error(exec_ctx, child, STATUS_FROM_API_OVERRIDE,
@@ -1345,7 +1346,8 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */
bctl->call = NULL;
- GRPC_CLOSURE_RUN(exec_ctx, bctl->completion_data.notify_tag.tag, error);
+ GRPC_CLOSURE_RUN(
+ exec_ctx, (grpc_closure *)bctl->completion_data.notify_tag.tag, error);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@@ -1474,7 +1476,7 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
* acq_load is in receiving_initial_metadata_ready() */
if (error != GRPC_ERROR_NONE || call->receiving_stream == NULL ||
!gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) {
- process_data_after_md(exec_ctx, bctlp);
+ process_data_after_md(exec_ctx, bctl);
}
}
@@ -1510,7 +1512,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_stream_compression_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_stream_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.",
algo_name);
@@ -1524,7 +1526,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->stream_encodings_accepted_by_peer,
call->incoming_stream_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_stream_compression_algorithm_name(
call->incoming_stream_compression_algorithm, &algo_name);
gpr_log(
@@ -1551,7 +1553,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
algo_name);
@@ -1567,7 +1569,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->encodings_accepted_by_peer,
call->incoming_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_compression_algorithm_name(call->incoming_compression_algorithm,
&algo_name);
gpr_log(GPR_ERROR,
@@ -1672,6 +1674,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
batch_control *bctl;
int num_completion_callbacks_needed = 1;
grpc_call_error error = GRPC_CALL_OK;
+ grpc_transport_stream_op_batch *stream_op;
+ grpc_transport_stream_op_batch_payload *stream_op_payload;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
@@ -1679,11 +1683,12 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (nops == 0) {
if (!is_notify_tag_closure) {
GPR_ASSERT(grpc_cq_begin_op(call->cq, notify_tag));
- grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
- free_no_op_completion, NULL,
- gpr_malloc(sizeof(grpc_cq_completion)));
+ grpc_cq_end_op(
+ exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
+ free_no_op_completion, NULL,
+ (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, notify_tag, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)notify_tag, GRPC_ERROR_NONE);
}
error = GRPC_CALL_OK;
goto done;
@@ -1697,9 +1702,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
bctl->completion_data.notify_tag.is_closure =
(uint8_t)(is_notify_tag_closure != 0);
- grpc_transport_stream_op_batch *stream_op = &bctl->op;
- grpc_transport_stream_op_batch_payload *stream_op_payload =
- &call->stream_op_payload;
+ stream_op = &bctl->op;
+ stream_op_payload = &call->stream_op_payload;
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
@@ -1709,7 +1713,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error;
}
switch (op->op) {
- case GRPC_OP_SEND_INITIAL_METADATA:
+ case GRPC_OP_SEND_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (!are_initial_metadata_flags_valid(op->flags, call->is_client)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1803,7 +1807,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->peer_string;
}
break;
- case GRPC_OP_SEND_MESSAGE:
+ }
+ case GRPC_OP_SEND_MESSAGE: {
if (!are_write_flags_valid(op->flags)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
goto done_with_error;
@@ -1832,7 +1837,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_message.send_message =
&call->sending_stream.base;
break;
- case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
+ }
+ case GRPC_OP_SEND_CLOSE_FROM_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1851,7 +1857,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
- case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ }
+ case GRPC_OP_SEND_STATUS_FROM_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1913,7 +1920,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
- case GRPC_OP_RECV_INITIAL_METADATA:
+ }
+ case GRPC_OP_RECV_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1940,7 +1948,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
}
num_completion_callbacks_needed++;
break;
- case GRPC_OP_RECV_MESSAGE:
+ }
+ case GRPC_OP_RECV_MESSAGE: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1961,7 +1970,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->receiving_stream_ready;
num_completion_callbacks_needed++;
break;
- case GRPC_OP_RECV_STATUS_ON_CLIENT:
+ }
+ case GRPC_OP_RECV_STATUS_ON_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1988,7 +1998,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
- case GRPC_OP_RECV_CLOSE_ON_SERVER:
+ }
+ case GRPC_OP_RECV_CLOSE_ON_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -2012,6 +2023,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
+ }
}
}
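The child/sibling edits in call.c above appear to keep each parent's children on a circular doubly linked list threaded through sibling_prev/sibling_next (a lone child points at itself). A minimal, self-contained sketch of that list shape with hypothetical node names, not the real child_call structures:
#include <stdio.h>
typedef struct child {
  const char *name;
  struct child *sibling_prev;
  struct child *sibling_next;
} child;
/* Insert 'c' into the circular list whose head is *first (may be NULL). */
static void child_insert(child **first, child *c) {
  if (*first == NULL) {
    *first = c;
    c->sibling_next = c->sibling_prev = c;
  } else {
    c->sibling_next = *first;
    c->sibling_prev = (*first)->sibling_prev;
    c->sibling_next->sibling_prev = c;
    c->sibling_prev->sibling_next = c;
  }
}
/* Unlink 'c'; if it was the only element the list becomes empty. */
static void child_remove(child **first, child *c) {
  if (c->sibling_next == c) {
    *first = NULL;
  } else {
    if (*first == c) *first = c->sibling_next;
    c->sibling_prev->sibling_next = c->sibling_next;
    c->sibling_next->sibling_prev = c->sibling_prev;
  }
}
int main(void) {
  child a = {"a", 0, 0}, b = {"b", 0, 0};
  child *first = NULL;
  child_insert(&first, &a);
  child_insert(&first, &b);
  child_remove(&first, &a);
  printf("remaining: %s\n", first ? first->name : "(none)");
  return 0;
}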
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index d537637cbb..c680139cf6 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -37,7 +37,7 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
typedef struct grpc_call_create_args {
grpc_channel *channel;
- grpc_call *parent_call;
+ grpc_call *parent;
uint32_t propagation_mask;
grpc_completion_queue *cq;
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 34548dac26..48962e5e45 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -27,6 +27,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
@@ -77,6 +78,11 @@ grpc_channel *grpc_channel_create_with_builder(
grpc_channel_args *args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
grpc_channel *channel;
+ if (channel_stack_type == GRPC_SERVER_CHANNEL) {
+ GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx);
+ } else {
+ GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx);
+ }
grpc_error *error = grpc_channel_stack_builder_finish(
exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL,
(void **)&channel);
@@ -276,7 +282,7 @@ static grpc_call *grpc_channel_create_call_internal(
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = channel;
- args.parent_call = parent_call;
+ args.parent = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
args.pollset_set_alternative = pollset_set_alternative;
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 4726503994..fed66e3a20 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -26,6 +26,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
@@ -54,7 +55,7 @@ typedef struct {
bool can_listen;
size_t (*size)(void);
void (*init)(grpc_pollset *pollset, gpr_mu **mu);
- grpc_error *(*kick)(grpc_pollset *pollset,
+ grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker, gpr_timespec now,
@@ -130,7 +131,8 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
}
static grpc_error *non_polling_poller_kick(
- grpc_pollset *pollset, grpc_pollset_worker *specific_worker) {
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *specific_worker) {
non_polling_poller *p = (non_polling_poller *)pollset;
if (specific_worker == NULL) specific_worker = (grpc_pollset_worker *)p->root;
if (specific_worker != NULL) {
@@ -327,25 +329,12 @@ static void cq_destroy_pluck(void *data);
/* Completion queue vtables based on the completion-type */
static const cq_vtable g_cq_vtable[] = {
/* GRPC_CQ_NEXT */
- {.data_size = sizeof(cq_next_data),
- .cq_completion_type = GRPC_CQ_NEXT,
- .init = cq_init_next,
- .shutdown = cq_shutdown_next,
- .destroy = cq_destroy_next,
- .begin_op = cq_begin_op_for_next,
- .end_op = cq_end_op_for_next,
- .next = cq_next,
- .pluck = NULL},
+ {GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next,
+ cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next, NULL},
/* GRPC_CQ_PLUCK */
- {.data_size = sizeof(cq_pluck_data),
- .cq_completion_type = GRPC_CQ_PLUCK,
- .init = cq_init_pluck,
- .shutdown = cq_shutdown_pluck,
- .destroy = cq_destroy_pluck,
- .begin_op = cq_begin_op_for_pluck,
- .end_op = cq_end_op_for_pluck,
- .next = NULL,
- .pluck = cq_pluck},
+ {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck,
+ cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, NULL,
+ cq_pluck},
};
#define DATA_FROM_CQ(cq) ((void *)(cq + 1))
@@ -420,6 +409,10 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
const cq_poller_vtable *poller_vtable =
&g_poller_vtable_by_poller_type[polling_type];
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_STATS_INC_CQS_CREATED(&exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
+
cq = (grpc_completion_queue *)gpr_zalloc(sizeof(grpc_completion_queue) +
vtable->data_size +
poller_vtable->size());
@@ -560,13 +553,13 @@ static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {}
* true if the increment was successful; false if the counter is zero */
static bool atm_inc_if_nonzero(gpr_atm *counter) {
while (true) {
- gpr_atm count = gpr_atm_no_barrier_load(counter);
+ gpr_atm count = gpr_atm_acq_load(counter);
/* If zero, we are done. If not, we must do a CAS (instead of an atomic
* increment) to maintain the contract: do not increment the counter if it
* is zero. */
if (count == 0) {
return false;
- } else if (gpr_atm_no_barrier_cas(counter, count, count + 1)) {
+ } else if (gpr_atm_full_cas(counter, count, count + 1)) {
break;
}
}
@@ -575,12 +568,12 @@ static bool atm_inc_if_nonzero(gpr_atm *counter) {
}
static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
@@ -625,7 +618,7 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
}
}
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
storage->tag = tag;
@@ -638,15 +631,19 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
/* Add the completion to the queue */
bool is_first = cq_event_queue_push(&cqd->queue, storage);
gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
- bool will_definitely_shutdown =
- gpr_atm_no_barrier_load(&cqd->pending_events) == 1;
+
+ /* Since we do not hold the cq lock here, it is important to do an 'acquire'
+ load here (instead of a 'no_barrier' load) to match with the release store
+ (done via gpr_atm_full_fetch_add(pending_events, -1)) in cq_shutdown_next
+ */
+ bool will_definitely_shutdown = gpr_atm_acq_load(&cqd->pending_events) == 1;
if (!will_definitely_shutdown) {
/* Only kick if this is the first item queued */
if (is_first) {
gpr_mu_lock(cq->mu);
grpc_error *kick_error =
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
+ cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
if (kick_error != GRPC_ERROR_NONE) {
@@ -686,7 +683,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
void *done_arg,
grpc_cq_completion *storage),
void *done_arg, grpc_cq_completion *storage) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
GPR_TIMER_BEGIN("cq_end_op_for_pluck", 0);
@@ -732,7 +729,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
}
grpc_error *kick_error =
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker);
+ cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), pluck_worker);
gpr_mu_unlock(cq->mu);
@@ -769,7 +766,7 @@ typedef struct {
static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -820,7 +817,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
void *reserved) {
grpc_event ret;
gpr_timespec now;
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -883,7 +880,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
}
}
- if (gpr_atm_no_barrier_load(&cqd->pending_events) == 0) {
+ if (gpr_atm_acq_load(&cqd->pending_events) == 0) {
/* Before returning, check if the queue has any items left over (since
gpr_mpscq_pop() can sometimes return NULL even if the queue is not
empty). If so, keep retrying but do not return GRPC_QUEUE_SHUTDOWN */
@@ -929,9 +926,9 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
}
if (cq_event_queue_num_items(&cqd->queue) > 0 &&
- gpr_atm_no_barrier_load(&cqd->pending_events) > 0) {
+ gpr_atm_acq_load(&cqd->pending_events) > 0) {
gpr_mu_lock(cq->mu);
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
+ cq->poller_vtable->kick(&exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
}
@@ -953,7 +950,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
this function */
static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0);
@@ -964,7 +961,7 @@ static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
* We call cq_finish_shutdown_next() below, which would call pollset shutdown.
@@ -980,6 +977,9 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
return;
}
cqd->shutdown_called = true;
+  /* Doing a full_fetch_add (i.e. acq/release) here to match with
+   * cq_begin_op_for_next and cq_end_op_for_next functions which read/write
+ * on this counter without necessarily holding a lock on cq */
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
cq_finish_shutdown_next(exec_ctx, cq);
}
@@ -994,7 +994,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
static int add_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
return 0;
}
@@ -1006,7 +1006,7 @@ static int add_plucker(grpc_completion_queue *cq, void *tag,
static void del_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
for (int i = 0; i < cqd->num_pluckers; i++) {
if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) {
cqd->num_pluckers--;
@@ -1020,7 +1020,7 @@ static void del_plucker(grpc_completion_queue *cq, void *tag,
static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -1057,7 +1057,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -1181,7 +1181,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown));
@@ -1195,7 +1195,7 @@ static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
* merging them is a bit tricky and probably not worth it */
static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
* We call cq_finish_shutdown_pluck() below, which would call pollset shutdown.
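The new comments in completion_queue.c spell out the ordering contract: begin-op increments pending_events only while it is still nonzero, using an acquire load plus a full CAS, and that pairs with the releasing full_fetch_add(-1) done in cq_shutdown_next/cq_shutdown_pluck. A minimal sketch of the same contract in C11 atomics, with stand-in names rather than the gpr_atm API:
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
/* Increment only if the counter is still nonzero (i.e. not shut down). */
static bool inc_if_nonzero(atomic_long *counter) {
  while (true) {
    long count = atomic_load_explicit(counter, memory_order_acquire);
    if (count == 0) return false; /* already shut down; never resurrect */
    /* Full-strength CAS (seq_cst) keeps the "no increment from zero" rule. */
    if (atomic_compare_exchange_strong(counter, &count, count + 1)) return true;
  }
}
/* Shutdown drops the initial count; acq_rel pairs with the acquire above. */
static bool drop_shutdown_ref(atomic_long *counter) {
  return atomic_fetch_sub_explicit(counter, 1, memory_order_acq_rel) == 1;
}
int main(void) {
  atomic_long pending_events;
  atomic_init(&pending_events, 1); /* one count held until shutdown */
  bool began = inc_if_nonzero(&pending_events);
  bool finished_now = drop_shutdown_ref(&pending_events); /* false: op pending */
  printf("began=%d finished_at_shutdown=%d\n", began, finished_now);
  return 0;
}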
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index 280315036f..b089da2c54 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -36,6 +36,7 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/resource_quota.h"
+#include "src/core/lib/iomgr/timer_manager.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/alarm_internal.h"
@@ -179,14 +180,16 @@ void grpc_shutdown(void) {
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
- grpc_iomgr_shutdown(&exec_ctx);
- gpr_timers_global_destroy();
- grpc_tracer_shutdown();
+ grpc_executor_shutdown(&exec_ctx);
+ grpc_timer_manager_set_threading(false); // shutdown timer_manager thread
for (i = g_number_of_plugins; i >= 0; i--) {
if (g_all_of_the_plugins[i].destroy != NULL) {
g_all_of_the_plugins[i].destroy();
}
}
+ grpc_iomgr_shutdown(&exec_ctx);
+ gpr_timers_global_destroy();
+ grpc_tracer_shutdown();
grpc_mdctx_global_shutdown(&exec_ctx);
grpc_handshaker_factory_registry_shutdown(&exec_ctx);
grpc_slice_intern_shutdown();
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index a8eb1b0425..1d0fd472d0 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -29,6 +29,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -75,7 +76,7 @@ typedef struct requested_call {
grpc_call_details *details;
} batch;
struct {
- registered_method *registered_method;
+ registered_method *method;
gpr_timespec *deadline;
grpc_byte_buffer **optional_payload;
} registered;
@@ -144,7 +145,7 @@ struct call_data {
uint32_t recv_initial_metadata_flags;
grpc_metadata_array initial_metadata;
- request_matcher *request_matcher;
+ request_matcher *matcher;
grpc_byte_buffer *payload;
grpc_closure got_initial_metadata;
@@ -170,7 +171,7 @@ struct registered_method {
grpc_server_register_method_payload_handling payload_handling;
uint32_t flags;
/* one request matcher per method */
- request_matcher request_matcher;
+ request_matcher matcher;
registered_method *next;
};
@@ -333,7 +334,7 @@ static void request_matcher_destroy(request_matcher *rm) {
static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem,
grpc_error *error) {
- grpc_call_unref(grpc_call_from_top_element(elem));
+ grpc_call_unref(grpc_call_from_top_element((grpc_call_element *)elem));
}
static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
@@ -386,7 +387,7 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
if (server->started) {
- request_matcher_destroy(&rm->request_matcher);
+ request_matcher_destroy(&rm->matcher);
}
gpr_free(rm->method);
gpr_free(rm->host);
@@ -518,7 +519,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element *call_elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)call_elem->call_data;
channel_data *chand = (channel_data *)call_elem->channel_data;
- request_matcher *rm = calld->request_matcher;
+ request_matcher *rm = calld->matcher;
grpc_server *server = rm->server;
if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
@@ -540,6 +541,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
if (request_id == -1) {
continue;
} else {
+ GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
gpr_mu_lock(&calld->mu_state);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
@@ -550,6 +552,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
}
/* no cq to take the request found: queue it on the slow list */
+ GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
gpr_mu_lock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
calld->state = PENDING;
@@ -580,7 +583,7 @@ static void finish_start_new_rpc(
return;
}
- calld->request_matcher = rm;
+ calld->matcher = rm;
switch (payload_handling) {
case GRPC_SRM_PAYLOAD_NONE:
@@ -626,7 +629,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
- &rm->server_registered_method->request_matcher,
+ &rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@@ -644,7 +647,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
- &rm->server_registered_method->request_matcher,
+ &rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@@ -665,7 +668,7 @@ static int num_listeners(grpc_server *server) {
static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server,
grpc_cq_completion *completion) {
- server_unref(exec_ctx, server);
+ server_unref(exec_ctx, (grpc_server *)server);
}
static int num_channels(grpc_server *server) {
@@ -688,9 +691,9 @@ static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &server->unregistered_request_matcher);
for (registered_method *rm = server->registered_methods; rm;
rm = rm->next) {
- request_matcher_kill_requests(exec_ctx, server, &rm->request_matcher,
+ request_matcher_kill_requests(exec_ctx, server, &rm->matcher,
GRPC_ERROR_REF(error));
- request_matcher_zombify_all_pending_calls(exec_ctx, &rm->request_matcher);
+ request_matcher_zombify_all_pending_calls(exec_ctx, &rm->matcher);
}
}
GRPC_ERROR_UNREF(error);
@@ -1111,15 +1114,17 @@ void grpc_server_start(grpc_server *server) {
request_matcher_init(&server->unregistered_request_matcher,
(size_t)server->max_requested_calls_per_cq, server);
for (registered_method *rm = server->registered_methods; rm; rm = rm->next) {
- request_matcher_init(&rm->request_matcher,
+ request_matcher_init(&rm->matcher,
(size_t)server->max_requested_calls_per_cq, server);
}
server_ref(server);
server->starting = true;
- GRPC_CLOSURE_SCHED(&exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server,
- grpc_executor_scheduler),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ &exec_ctx,
+ GRPC_CLOSURE_CREATE(start_listeners, server,
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)),
+ GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -1262,8 +1267,9 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* stay locked, and gather up some stuff to do */
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
if (server->shutdown_published) {
- grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
- NULL, gpr_malloc(sizeof(grpc_cq_completion)));
+ grpc_cq_end_op(
+ &exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL,
+ (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
gpr_mu_unlock(&server->mu_global);
goto done;
}
@@ -1385,7 +1391,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
rm = &server->unregistered_request_matcher;
break;
case REGISTERED_CALL:
- rm = &rc->data.registered.registered_method->request_matcher;
+ rm = &rc->data.registered.method->matcher;
break;
}
server->requested_calls_per_cq[cq_idx][request_id] = *rc;
@@ -1430,6 +1436,7 @@ grpc_call_error grpc_server_request_call(
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
+ GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
@@ -1476,6 +1483,7 @@ grpc_call_error grpc_server_request_registered_call(
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
registered_method *rm = (registered_method *)rmp;
+ GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_registered_call("
"server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
@@ -1512,7 +1520,7 @@ grpc_call_error grpc_server_request_registered_call(
rc->tag = tag;
rc->cq_bound_to_call = cq_bound_to_call;
rc->call = call;
- rc->data.registered.registered_method = rm;
+ rc->data.registered.method = rm;
rc->data.registered.deadline = deadline;
rc->initial_metadata = initial_metadata;
rc->data.registered.optional_payload = optional_payload;
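In server.c above, grpc_server_start now obtains its scheduler via grpc_executor_scheduler(GRPC_EXECUTOR_SHORT) instead of a single global executor scheduler, i.e. the closure is tagged with its expected job length. A rough sketch of binding a closure to a scheduler chosen that way, using hypothetical stand-in types rather than the real GRPC_CLOSURE_* machinery:
#include <stdio.h>
typedef enum { EXECUTOR_SHORT, EXECUTOR_LONG } executor_job_length;
typedef struct closure closure;
typedef struct scheduler {
  const char *name;
  void (*run)(const struct scheduler *sched, closure *c);
} scheduler;
struct closure {
  void (*cb)(void *arg);
  void *arg;
  const scheduler *sched;
};
/* In this sketch both schedulers simply run the closure inline. */
static void run_inline(const scheduler *sched, closure *c) {
  printf("[%s] ", sched->name);
  c->cb(c->arg);
}
static const scheduler g_schedulers[2] = {{"short-job executor", run_inline},
                                          {"long-job executor", run_inline}};
static const scheduler *executor_scheduler(executor_job_length len) {
  return &g_schedulers[len];
}
static void closure_sched(closure *c) { c->sched->run(c->sched, c); }
static void start_listeners(void *arg) {
  printf("starting listeners for %s\n", (const char *)arg);
}
int main(void) {
  closure start = {start_listeners, (void *)"server",
                   executor_scheduler(EXECUTOR_SHORT)};
  closure_sched(&start);
  return 0;
}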
diff --git a/src/core/lib/transport/metadata_batch.c b/src/core/lib/transport/metadata_batch.c
index a077052561..54388bdcda 100644
--- a/src/core/lib/transport/metadata_batch.c
+++ b/src/core/lib/transport/metadata_batch.c
@@ -233,32 +233,32 @@ void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx,
void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
grpc_linked_mdelem *storage,
grpc_slice value) {
- grpc_mdelem old = storage->md;
- grpc_mdelem new = grpc_mdelem_from_slices(
- exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old)), value);
- storage->md = new;
- GRPC_MDELEM_UNREF(exec_ctx, old);
+ grpc_mdelem old_mdelem = storage->md;
+ grpc_mdelem new_mdelem = grpc_mdelem_from_slices(
+ exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value);
+ storage->md = new_mdelem;
+ GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
}
grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx,
grpc_metadata_batch *batch,
grpc_linked_mdelem *storage,
- grpc_mdelem new) {
+ grpc_mdelem new_mdelem) {
assert_valid_callouts(exec_ctx, batch);
grpc_error *error = GRPC_ERROR_NONE;
- grpc_mdelem old = storage->md;
- if (!grpc_slice_eq(GRPC_MDKEY(new), GRPC_MDKEY(old))) {
+ grpc_mdelem old_mdelem = storage->md;
+ if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) {
maybe_unlink_callout(batch, storage);
- storage->md = new;
+ storage->md = new_mdelem;
error = maybe_link_callout(batch, storage);
if (error != GRPC_ERROR_NONE) {
unlink_storage(&batch->list, storage);
GRPC_MDELEM_UNREF(exec_ctx, storage->md);
}
} else {
- storage->md = new;
+ storage->md = new_mdelem;
}
- GRPC_MDELEM_UNREF(exec_ctx, old);
+ GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
assert_valid_callouts(exec_ctx, batch);
return error;
}
@@ -302,12 +302,12 @@ grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx,
grpc_error *error = GRPC_ERROR_NONE;
while (l) {
grpc_linked_mdelem *next = l->next;
- grpc_filtered_mdelem new = func(exec_ctx, user_data, l->md);
- add_error(&error, new.error, composite_error_string);
- if (GRPC_MDISNULL(new.md)) {
+ grpc_filtered_mdelem new_mdelem = func(exec_ctx, user_data, l->md);
+ add_error(&error, new_mdelem.error, composite_error_string);
+ if (GRPC_MDISNULL(new_mdelem.md)) {
grpc_metadata_batch_remove(exec_ctx, batch, l);
- } else if (new.md.payload != l->md.payload) {
- grpc_metadata_batch_substitute(exec_ctx, batch, l, new.md);
+ } else if (new_mdelem.md.payload != l->md.payload) {
+ grpc_metadata_batch_substitute(exec_ctx, batch, l, new_mdelem.md);
}
l = next;
}
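The renames above (new/old become new_mdelem/old_mdelem) and the explicit casts on gpr_malloc/gpr_zalloc results throughout this diff fit one pattern: keeping these C sources legal if compiled as C++, where 'new' is a keyword and void* does not convert implicitly. That motivation is not stated in the diff itself, so treat it as an inference; a tiny sketch with stand-in names that builds under both languages:
#include <stdlib.h>
#include <string.h>
typedef struct { char payload[16]; } mdelem;
/* 'new' as an identifier would be rejected by a C++ compiler, so the name
   new_mdelem is used; the cast on malloc's result is required by C++ and
   harmless in C. */
static mdelem *mdelem_copy(const mdelem *old_mdelem) {
  mdelem *new_mdelem = (mdelem *)malloc(sizeof(*new_mdelem));
  memcpy(new_mdelem, old_mdelem, sizeof(*new_mdelem));
  return new_mdelem;
}
int main(void) {
  mdelem a;
  memset(&a, 0, sizeof(a));
  mdelem *b = mdelem_copy(&a);
  free(b);
  return 0;
}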
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index b20d94aeac..472cf888ea 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -216,206 +216,106 @@ grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
};
const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
- {.refcount = &grpc_static_metadata_refcounts[0],
- .data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[3],
- .data.refcounted = {g_bytes + 19, 10}},
- {.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[5],
- .data.refcounted = {g_bytes + 36, 2}},
- {.refcount = &grpc_static_metadata_refcounts[6],
- .data.refcounted = {g_bytes + 38, 12}},
- {.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[8],
- .data.refcounted = {g_bytes + 61, 16}},
- {.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[11],
- .data.refcounted = {g_bytes + 110, 21}},
- {.refcount = &grpc_static_metadata_refcounts[12],
- .data.refcounted = {g_bytes + 131, 13}},
- {.refcount = &grpc_static_metadata_refcounts[13],
- .data.refcounted = {g_bytes + 144, 14}},
- {.refcount = &grpc_static_metadata_refcounts[14],
- .data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 16}},
- {.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[17],
- .data.refcounted = {g_bytes + 201, 30}},
- {.refcount = &grpc_static_metadata_refcounts[18],
- .data.refcounted = {g_bytes + 231, 37}},
- {.refcount = &grpc_static_metadata_refcounts[19],
- .data.refcounted = {g_bytes + 268, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 278, 4}},
- {.refcount = &grpc_static_metadata_refcounts[21],
- .data.refcounted = {g_bytes + 282, 8}},
- {.refcount = &grpc_static_metadata_refcounts[22],
- .data.refcounted = {g_bytes + 290, 12}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}},
- {.refcount = &grpc_static_metadata_refcounts[24],
- .data.refcounted = {g_bytes + 302, 19}},
- {.refcount = &grpc_static_metadata_refcounts[25],
- .data.refcounted = {g_bytes + 321, 12}},
- {.refcount = &grpc_static_metadata_refcounts[26],
- .data.refcounted = {g_bytes + 333, 30}},
- {.refcount = &grpc_static_metadata_refcounts[27],
- .data.refcounted = {g_bytes + 363, 31}},
- {.refcount = &grpc_static_metadata_refcounts[28],
- .data.refcounted = {g_bytes + 394, 36}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 430, 1}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 431, 1}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 432, 1}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 445, 7}},
- {.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 452, 8}},
- {.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 460, 16}},
- {.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 476, 4}},
- {.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 480, 3}},
- {.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 483, 3}},
- {.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 486, 4}},
- {.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 490, 5}},
- {.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 495, 4}},
- {.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 499, 3}},
- {.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 502, 3}},
- {.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 505, 1}},
- {.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 506, 11}},
- {.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 517, 3}},
- {.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 520, 3}},
- {.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 523, 3}},
- {.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 526, 3}},
- {.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 529, 3}},
- {.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 532, 14}},
- {.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 546, 13}},
- {.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 559, 15}},
- {.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 574, 13}},
- {.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 587, 6}},
- {.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 593, 27}},
- {.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 620, 3}},
- {.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 623, 5}},
- {.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 628, 13}},
- {.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 641, 13}},
- {.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 654, 19}},
- {.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 673, 16}},
- {.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 689, 14}},
- {.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 703, 16}},
- {.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 719, 13}},
- {.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 732, 6}},
- {.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 738, 4}},
- {.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 742, 4}},
- {.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 746, 6}},
- {.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 752, 7}},
- {.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 759, 4}},
- {.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 763, 8}},
- {.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 771, 17}},
- {.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 788, 13}},
- {.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 801, 8}},
- {.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 809, 19}},
- {.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 828, 13}},
- {.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 841, 11}},
- {.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 852, 4}},
- {.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 856, 8}},
- {.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 864, 12}},
- {.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 876, 18}},
- {.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 894, 19}},
- {.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 913, 5}},
- {.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 918, 7}},
- {.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 925, 7}},
- {.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 932, 11}},
- {.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 943, 6}},
- {.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 949, 10}},
- {.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 959, 25}},
- {.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 984, 17}},
- {.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 1001, 4}},
- {.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 1005, 3}},
- {.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 1008, 16}},
- {.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 1024, 16}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1040, 13}},
- {.refcount = &grpc_static_metadata_refcounts[98],
- .data.refcounted = {g_bytes + 1053, 12}},
- {.refcount = &grpc_static_metadata_refcounts[99],
- .data.refcounted = {g_bytes + 1065, 21}},
+ {&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
+ {&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
+ {&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
+ {&grpc_static_metadata_refcounts[6], {{g_bytes + 38, 12}}},
+ {&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[8], {{g_bytes + 61, 16}}},
+ {&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[11], {{g_bytes + 110, 21}}},
+ {&grpc_static_metadata_refcounts[12], {{g_bytes + 131, 13}}},
+ {&grpc_static_metadata_refcounts[13], {{g_bytes + 144, 14}}},
+ {&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
+ {&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[17], {{g_bytes + 201, 30}}},
+ {&grpc_static_metadata_refcounts[18], {{g_bytes + 231, 37}}},
+ {&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
+ {&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
+ {&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
+ {&grpc_static_metadata_refcounts[22], {{g_bytes + 290, 12}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}},
+ {&grpc_static_metadata_refcounts[24], {{g_bytes + 302, 19}}},
+ {&grpc_static_metadata_refcounts[25], {{g_bytes + 321, 12}}},
+ {&grpc_static_metadata_refcounts[26], {{g_bytes + 333, 30}}},
+ {&grpc_static_metadata_refcounts[27], {{g_bytes + 363, 31}}},
+ {&grpc_static_metadata_refcounts[28], {{g_bytes + 394, 36}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}},
+ {&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}},
+ {&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}},
+ {&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 495, 4}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}},
+ {&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}},
+ {&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}},
+ {&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}},
+ {&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}},
+ {&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}},
+ {&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}},
+ {&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}},
+ {&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}},
+ {&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}},
+ {&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}},
+ {&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}},
+ {&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}},
+ {&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}},
+ {&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}},
+ {&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}},
+ {&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}},
+ {&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}},
+ {&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}},
+ {&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}},
+ {&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}},
+ {&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}},
+ {&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}},
+ {&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}},
+ {&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}},
+ {&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}},
+ {&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}},
+ {&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}},
+ {&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}},
+ {&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}},
+ {&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}},
+ {&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}},
+ {&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}},
+ {&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}},
+ {&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}},
+ {&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}},
+ {&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}},
+ {&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}},
+ {&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}},
+ {&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}},
+ {&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}},
+ {&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}},
+ {&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}},
+ {&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1053, 12}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}},
};
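The bulk of the rewrite above is mechanical: static slices that were built with designated initializers naming the data.refcounted union member are now built with positional braces, where the doubled inner braces select the first union member without naming it. A minimal stand-alone sketch of the two forms, using hypothetical stand-in types (demo_static_slice, g_demo_bytes) rather than the real grpc_slice definitions:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins mirroring the shape of the static-slice table;
   these are not the real grpc_slice types. */
typedef struct { int count; } demo_refcount;
typedef struct {
  demo_refcount *refcount;
  union {
    struct {
      const uint8_t *bytes;
      size_t length;
    } refcounted;
  } data;
} demo_static_slice;

static demo_refcount g_demo_refcounts[2];
static const uint8_t g_demo_bytes[] = "te-trailersgzip";

/* Old form: designated initializers that name the union member. */
static demo_static_slice s_old = {
    .refcount = &g_demo_refcounts[0],
    .data.refcounted = {g_demo_bytes + 0, 2}};

/* New form: positional braces; the doubled inner braces pick the first
   (and only) union member without naming it. */
static demo_static_slice s_new = {&g_demo_refcounts[1],
                                  {{g_demo_bytes + 2, 8}}};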
uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
@@ -478,350 +378,178 @@ grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
}
grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
- {{.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 430, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 431, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 432, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 445, 7}}},
- {{.refcount = &grpc_static_metadata_refcounts[5],
- .data.refcounted = {g_bytes + 36, 2}},
- {.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 452, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[14],
- .data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 460, 16}}},
- {{.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 476, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 480, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 483, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 486, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 490, 5}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 495, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[3],
- .data.refcounted = {g_bytes + 19, 10}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 499, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 502, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[0],
- .data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 505, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[0],
- .data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 506, 11}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 517, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 520, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 523, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 526, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 529, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 532, 14}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 546, 13}}},
- {{.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 559, 15}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 574, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 587, 6}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 593, 27}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 620, 3}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 623, 5}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 628, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 641, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 654, 19}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 16}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 16}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 16}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 673, 16}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 689, 14}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 703, 16}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 719, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[14],
- .data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 732, 6}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 738, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 742, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 746, 6}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 752, 7}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 759, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 278, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 763, 8}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 771, 17}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 788, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 801, 8}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 809, 19}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 828, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[21],
- .data.refcounted = {g_bytes + 282, 8}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 841, 11}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 852, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 856, 8}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 864, 12}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 876, 18}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 894, 19}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 913, 5}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 918, 7}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 925, 7}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 932, 11}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 943, 6}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 949, 10}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 959, 25}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 984, 17}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[19],
- .data.refcounted = {g_bytes + 268, 10}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 1001, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 1005, 3}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 1008, 16}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 445, 7}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 1024, 16}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1040, 13}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[98],
- .data.refcounted = {g_bytes + 1053, 12}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[99],
- .data.refcounted = {g_bytes + 1065, 21}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1040, 13}}},
+ {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}}},
+ {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}}},
+ {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}}},
+ {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}},
+ {{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
+ {&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}}},
+ {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}}},
+ {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}}},
+ {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}}},
+ {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}}},
+ {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 495, 4}}}},
+ {{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}}},
+ {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}}},
+ {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}}},
+ {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}}},
+ {{&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}}},
+ {{&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1053, 12}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}},
};
bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {
true, // :path
diff --git a/src/core/lib/transport/status_conversion.c b/src/core/lib/transport/status_conversion.c
index 9a76977e4b..a40d333284 100644
--- a/src/core/lib/transport/status_conversion.c
+++ b/src/core/lib/transport/status_conversion.c
@@ -18,7 +18,7 @@
#include "src/core/lib/transport/status_conversion.h"
-int grpc_status_to_http2_error(grpc_status_code status) {
+grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) {
switch (status) {
case GRPC_STATUS_OK:
return GRPC_HTTP2_NO_ERROR;
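With the return type tightened from int to grpc_http2_error_code, call sites can hold the result in the enum type and let the compiler flag accidental mixing with plain integers. A minimal caller sketch under that assumption; cancel_code is a hypothetical helper, not part of this change:

#include <grpc/status.h>

#include "src/core/lib/transport/status_conversion.h"

/* Hypothetical call site: the typed signature carries the result as an
   HTTP/2 error code rather than a bare int. */
static grpc_http2_error_code cancel_code(void) {
  return grpc_status_to_http2_error(GRPC_STATUS_CANCELLED);
}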
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 5f6302ad00..682a820b48 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -72,7 +72,8 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
cope with.
Throw this over to the executor (on a core-owned thread) and process it
there. */
- refcount->destroy.scheduler = grpc_executor_scheduler;
+ refcount->destroy.scheduler =
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
}
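The hunk above tracks the executor API change elsewhere in this merge: grpc_executor_scheduler is now called with an argument indicating the expected job size (GRPC_EXECUTOR_SHORT here) and returns the scheduler to install on the closure. A minimal sketch of binding a closure to that scheduler, assuming the GRPC_CLOSURE_INIT macro from closure.h; bind_to_executor, destroy_cb, and arg are illustrative names only:

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/executor.h"

/* Bind a destruction callback to the short-job executor so it runs on a
   core-owned thread rather than whichever thread drops the last ref. */
static void bind_to_executor(grpc_closure *c, grpc_iomgr_cb_func destroy_cb,
                             void *arg) {
  GRPC_CLOSURE_INIT(c, destroy_cb, arg,
                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
}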
@@ -101,8 +102,11 @@ static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) {
grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount *refcount,
void *buffer, size_t length) {
slice_stream_ref(&refcount->slice_refcount);
- return (grpc_slice){.refcount = &refcount->slice_refcount,
- .data.refcounted = {.bytes = buffer, .length = length}};
+ grpc_slice res;
+ res.refcount = &refcount->slice_refcount,
+ res.data.refcounted.bytes = (uint8_t *)buffer;
+ res.data.refcounted.length = length;
+ return res;
}
static const grpc_slice_refcount_vtable stream_ref_slice_vtable = {
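The rewrite of grpc_slice_from_stream_owned_buffer just above is the runtime counterpart of the positional-initialization change sketched earlier: a compound literal with designated initializers becomes explicit field-by-field assignment. The pattern in isolation, with hypothetical stand-in names (demo_slice, demo_make) rather than the real grpc_slice:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical slice-shaped struct; demo_slice and demo_make exist only to
   show the construction style and are not part of this change. */
typedef struct {
  void *refcount;
  struct {
    uint8_t *bytes;
    size_t length;
  } refcounted;
} demo_slice;

/* Field-by-field construction: no compound literal and no designated
   initializers, so the same code is also accepted when built as C++. */
static demo_slice demo_make(void *rc, void *buffer, size_t length) {
  demo_slice res;
  res.refcount = rc;
  res.refcounted.bytes = (uint8_t *)buffer;
  res.refcounted.length = length;
  return res;
}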
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index 409a6c4103..858664715c 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -197,7 +197,7 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
return out;
}
-void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
char *str = grpc_transport_stream_op_batch_string(op);