Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/census/base_resources.c  28
-rw-r--r--  src/core/ext/census/context.c  12
-rw-r--r--  src/core/ext/census/grpc_filter.c  26
-rw-r--r--  src/core/ext/census/mlog.c  3
-rw-r--r--  src/core/ext/census/resource.c  22
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.c  23
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.c  857
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_factory.c  9
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_plugin.c  4
-rw-r--r--  src/core/ext/filters/client_channel/http_connect_handshaker.c  12
-rw-r--r--  src/core/ext/filters/client_channel/http_proxy.c  10
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.c  2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c  14
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c  226
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c  12
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c  39
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c  32
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c  61
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_factory.c  16
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper_registry.c  2
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c  13
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c  90
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c  52
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c  39
-rw-r--r--  src/core/ext/filters/client_channel/retry_throttle.c  15
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.c  60
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h  5
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.c  46
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.h  7
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.c  7
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.c  117
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.h  8
-rw-r--r--  src/core/ext/filters/http/client/http_client_filter.c  28
-rw-r--r--  src/core/ext/filters/http/http_filters_plugin.c  2
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.c  415
-rw-r--r--  src/core/ext/filters/http/server/http_server_filter.c  84
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_filter.c (renamed from src/core/ext/filters/load_reporting/load_reporting_filter.c)  23
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_filter.h (renamed from src/core/ext/filters/load_reporting/load_reporting_filter.h)  11
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_plugin.c (renamed from src/core/ext/filters/load_reporting/load_reporting.c)  32
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_plugin.h (renamed from src/core/ext/filters/load_reporting/load_reporting.h)  7
-rw-r--r--  src/core/ext/filters/max_age/max_age_filter.c  5
-rw-r--r--  src/core/ext/filters/message_size/message_size_filter.c  6
-rw-r--r--  src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c  1
-rw-r--r--  src/core/ext/transport/chttp2/client/chttp2_connector.c  10
-rw-r--r--  src/core/ext/transport/chttp2/client/insecure/channel_create.c  2
-rw-r--r--  src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c  2
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.c  38
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_plugin.c  1
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c  703
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.h  1
-rw-r--r--  src/core/ext/transport/chttp2/transport/flow_control.c  22
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_data.c  4
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_goaway.c  4
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_ping.c  9
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_rst_stream.c  2
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_settings.c  19
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_window_update.c  14
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_encoder.c  103
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.c  60
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_table.c  4
-rw-r--r--  src/core/ext/transport/chttp2/transport/incoming_metadata.c  5
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h  90
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c  21
-rw-r--r--  src/core/ext/transport/chttp2/transport/stream_lists.c  52
-rw-r--r--  src/core/ext/transport/chttp2/transport/stream_map.c  10
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.c  256
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.c  80
-rw-r--r--  src/core/ext/transport/inproc/inproc_transport.c  126
-rw-r--r--  src/core/lib/channel/channel_args.c  124
-rw-r--r--  src/core/lib/channel/channel_args.h  31
-rw-r--r--  src/core/lib/channel/channel_stack.c  16
-rw-r--r--  src/core/lib/channel/channel_stack.h  13
-rw-r--r--  src/core/lib/channel/channel_stack_builder.c  51
-rw-r--r--  src/core/lib/channel/channel_stack_builder.h  10
-rw-r--r--  src/core/lib/channel/connected_channel.c  117
-rw-r--r--  src/core/lib/channel/handshaker.c  14
-rw-r--r--  src/core/lib/channel/handshaker_registry.c  2
-rw-r--r--  src/core/lib/compression/algorithm_metadata.h  14
-rw-r--r--  src/core/lib/compression/compression.c  102
-rw-r--r--  src/core/lib/compression/stream_compression.c  178
-rw-r--r--  src/core/lib/compression/stream_compression.h  32
-rw-r--r--  src/core/lib/compression/stream_compression_gzip.c  228
-rw-r--r--  src/core/lib/compression/stream_compression_gzip.h (renamed from src/core/lib/support/thd_internal.h)  12
-rw-r--r--  src/core/lib/compression/stream_compression_identity.c  94
-rw-r--r--  src/core/lib/compression/stream_compression_identity.h (renamed from src/core/lib/iomgr/ev_epoll_thread_pool_linux.h)  13
-rw-r--r--  src/core/lib/debug/stats.c  174
-rw-r--r--  src/core/lib/debug/stats.h  61
-rw-r--r--  src/core/lib/debug/stats_data.c  735
-rw-r--r--  src/core/lib/debug/stats_data.h  484
-rw-r--r--  src/core/lib/debug/stats_data.yaml  280
-rw-r--r--  src/core/lib/debug/stats_data_bq_schema.sql  90
-rw-r--r--  src/core/lib/debug/trace.c  6
-rw-r--r--  src/core/lib/debug/trace.h  2
-rw-r--r--  src/core/lib/http/format_request.c  2
-rw-r--r--  src/core/lib/http/httpcli.c  15
-rw-r--r--  src/core/lib/http/httpcli_security_connector.c  3
-rw-r--r--  src/core/lib/http/parser.c  7
-rw-r--r--  src/core/lib/iomgr/call_combiner.c  202
-rw-r--r--  src/core/lib/iomgr/call_combiner.h  121
-rw-r--r--  src/core/lib/iomgr/closure.c  22
-rw-r--r--  src/core/lib/iomgr/combiner.c  15
-rw-r--r--  src/core/lib/iomgr/error.c  50
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.c  600
-rw-r--r--  src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c  1957
-rw-r--r--  src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h  28
-rw-r--r--  src/core/lib/iomgr/ev_epoll_thread_pool_linux.c  1182
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.c  269
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.c  85
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c  174
-rw-r--r--  src/core/lib/iomgr/ev_posix.c  14
-rw-r--r--  src/core/lib/iomgr/ev_posix.h  2
-rw-r--r--  src/core/lib/iomgr/exec_ctx.h  7
-rw-r--r--  src/core/lib/iomgr/executor.c  227
-rw-r--r--  src/core/lib/iomgr/executor.h  7
-rw-r--r--  src/core/lib/iomgr/iocp_windows.c  2
-rw-r--r--  src/core/lib/iomgr/iomgr.c  10
-rw-r--r--  src/core/lib/iomgr/is_epollexclusive_available.c  12
-rw-r--r--  src/core/lib/iomgr/load_file.c  3
-rw-r--r--  src/core/lib/iomgr/polling_entity.c  18
-rw-r--r--  src/core/lib/iomgr/polling_entity.h  8
-rw-r--r--  src/core/lib/iomgr/pollset.h  2
-rw-r--r--  src/core/lib/iomgr/pollset_uv.c  2
-rw-r--r--  src/core/lib/iomgr/pollset_windows.c  6
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.c  15
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.c  2
-rw-r--r--  src/core/lib/iomgr/resource_quota.c  43
-rw-r--r--  src/core/lib/iomgr/socket_factory_posix.c  8
-rw-r--r--  src/core/lib/iomgr/socket_mutator.c  8
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.c  9
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c  201
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.c  15
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix_common.c  2
-rw-r--r--  src/core/lib/iomgr/timer.h  4
-rw-r--r--  src/core/lib/iomgr/timer_generic.c  7
-rw-r--r--  src/core/lib/iomgr/timer_heap.c  8
-rw-r--r--  src/core/lib/iomgr/timer_manager.c  4
-rw-r--r--  src/core/lib/iomgr/timer_uv.c  2
-rw-r--r--  src/core/lib/iomgr/udp_server.c  18
-rw-r--r--  src/core/lib/iomgr/unix_sockets_posix.c  6
-rw-r--r--  src/core/lib/iomgr/wakeup_fd_cv.c  17
-rw-r--r--  src/core/lib/iomgr/wakeup_fd_cv.h  4
-rw-r--r--  src/core/lib/json/json.c  2
-rw-r--r--  src/core/lib/json/json_string.c  26
-rw-r--r--  src/core/lib/profiling/timers.h  26
-rw-r--r--  src/core/lib/security/credentials/google_default/google_default_credentials.c  3
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.c  203
-rw-r--r--  src/core/lib/security/transport/secure_endpoint.c  200
-rw-r--r--  src/core/lib/security/transport/secure_endpoint.h  11
-rw-r--r--  src/core/lib/security/transport/security_connector.c  22
-rw-r--r--  src/core/lib/security/transport/security_handshaker.c  69
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.c  91
-rw-r--r--  src/core/lib/slice/b64.c  4
-rw-r--r--  src/core/lib/slice/slice.c  30
-rw-r--r--  src/core/lib/slice/slice_buffer.c  9
-rw-r--r--  src/core/lib/slice/slice_hash_table.c  5
-rw-r--r--  src/core/lib/slice/slice_intern.c  13
-rw-r--r--  src/core/lib/support/block_annotate.h  22
-rw-r--r--  src/core/lib/support/log_linux.c  2
-rw-r--r--  src/core/lib/support/string.c  16
-rw-r--r--  src/core/lib/support/string.h  3
-rw-r--r--  src/core/lib/surface/alarm.c  37
-rw-r--r--  src/core/lib/surface/byte_buffer.c  6
-rw-r--r--  src/core/lib/surface/call.c  583
-rw-r--r--  src/core/lib/surface/call.h  12
-rw-r--r--  src/core/lib/surface/call_log_batch.c  2
-rw-r--r--  src/core/lib/surface/call_test_only.h  12
-rw-r--r--  src/core/lib/surface/channel.c  43
-rw-r--r--  src/core/lib/surface/channel_init.c  12
-rw-r--r--  src/core/lib/surface/channel_ping.c  4
-rw-r--r--  src/core/lib/surface/completion_queue.c  113
-rw-r--r--  src/core/lib/surface/init.c  14
-rw-r--r--  src/core/lib/surface/lame_client.cc  19
-rw-r--r--  src/core/lib/surface/server.c  153
-rw-r--r--  src/core/lib/surface/version.c  2
-rw-r--r--  src/core/lib/transport/byte_stream.c  1
-rw-r--r--  src/core/lib/transport/byte_stream.h  4
-rw-r--r--  src/core/lib/transport/connectivity_state.c  3
-rw-r--r--  src/core/lib/transport/metadata.c  11
-rw-r--r--  src/core/lib/transport/metadata_batch.c  34
-rw-r--r--  src/core/lib/transport/metadata_batch.h  1
-rw-r--r--  src/core/lib/transport/service_config.c  8
-rw-r--r--  src/core/lib/transport/static_metadata.c  1009
-rw-r--r--  src/core/lib/transport/static_metadata.h  298
-rw-r--r--  src/core/lib/transport/status_conversion.c  2
-rw-r--r--  src/core/lib/transport/transport.c  40
-rw-r--r--  src/core/lib/transport/transport.h  13
-rw-r--r--  src/core/lib/transport/transport_impl.h  3
-rw-r--r--  src/core/lib/transport/transport_op_string.c  9
-rw-r--r--  src/core/plugin_registry/grpc_cronet_plugin_registry.c  8
-rw-r--r--  src/core/plugin_registry/grpc_plugin_registry.c  8
-rw-r--r--  src/core/plugin_registry/grpc_unsecure_plugin_registry.c  8
-rw-r--r--  src/core/tsi/fake_transport_security.c  135
-rw-r--r--  src/core/tsi/fake_transport_security.h  5
-rw-r--r--  src/core/tsi/ssl_transport_security.c  113
-rw-r--r--  src/core/tsi/ssl_transport_security.h  37
-rw-r--r--  src/core/tsi/test_creds/BUILD  14
-rw-r--r--  src/core/tsi/transport_security.h  10
-rw-r--r--  src/core/tsi/transport_security_grpc.c  33
-rw-r--r--  src/core/tsi/transport_security_grpc.h  22
199 files changed, 8695 insertions, 7332 deletions
diff --git a/src/core/ext/census/base_resources.c b/src/core/ext/census/base_resources.c
index 2114bf04cd..1f2bb39fe0 100644
--- a/src/core/ext/census/base_resources.c
+++ b/src/core/ext/census/base_resources.c
@@ -37,20 +37,20 @@
void define_base_resources() {
google_census_Resource_BasicUnit numerator =
google_census_Resource_BasicUnit_SECS;
- resource r = {"client_rpc_latency", // name
- "Client RPC latency in seconds", // description
- 0, // prefix
- 1, // n_numerators
- &numerator, // numerators
- 0, // n_denominators
- NULL}; // denominators
+ resource r = {(char *)"client_rpc_latency", // name
+ (char *)"Client RPC latency in seconds", // description
+ 0, // prefix
+ 1, // n_numerators
+ &numerator, // numerators
+ 0, // n_denominators
+ NULL}; // denominators
define_resource(&r);
- r = (resource){"server_rpc_latency", // name
- "Server RPC latency in seconds", // description
- 0, // prefix
- 1, // n_numerators
- &numerator, // numerators
- 0, // n_denominators
- NULL}; // denominators
+ r = (resource){(char *)"server_rpc_latency", // name
+ (char *)"Server RPC latency in seconds", // description
+ 0, // prefix
+ 1, // n_numerators
+ &numerator, // numerators
+ 0, // n_denominators
+ NULL}; // denominators
define_resource(&r);
}
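[note] The casts above look cosmetic but matter for C++ compatibility: a C++ compiler types a string literal as const char[], so initializing the resource's non-const char * fields needs an explicit (char *) cast, while C accepts either form. A minimal stand-alone sketch, assuming a hypothetical resource_like type standing in for the census resource struct:

    /* Compiles cleanly as both C and C++ only with the explicit cast. */
    typedef struct {
      char *name; /* non-const, as in the census resource struct */
    } resource_like;

    int main(void) {
      resource_like r = {(char *)"client_rpc_latency"};
      (void)r;
      return 0;
    }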
diff --git a/src/core/ext/census/context.c b/src/core/ext/census/context.c
index 1019b287d7..9b25a32e36 100644
--- a/src/core/ext/census/context.c
+++ b/src/core/ext/census/context.c
@@ -141,7 +141,7 @@ static char *decode_tag(struct raw_tag *tag, char *header, int offset) {
// Make a copy (in 'to') of an existing tag_set.
static void tag_set_copy(struct tag_set *to, const struct tag_set *from) {
memcpy(to, from, sizeof(struct tag_set));
- to->kvm = gpr_malloc(to->kvm_size);
+ to->kvm = (char *)gpr_malloc(to->kvm_size);
memcpy(to->kvm, from->kvm, from->kvm_used);
}
@@ -184,7 +184,7 @@ static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
if (tags->kvm_used + tag_size > tags->kvm_size) {
// allocate new memory if needed
tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE;
- char *new_kvm = gpr_malloc(tags->kvm_size);
+ char *new_kvm = (char *)gpr_malloc(tags->kvm_size);
if (tags->kvm_used > 0) memcpy(new_kvm, tags->kvm, tags->kvm_used);
gpr_free(tags->kvm);
tags->kvm = new_kvm;
@@ -274,7 +274,8 @@ static void tag_set_flatten(struct tag_set *tags) {
census_context *census_context_create(const census_context *base,
const census_tag *tags, int ntags,
census_context_status const **status) {
- census_context *context = gpr_malloc(sizeof(census_context));
+ census_context *context =
+ (census_context *)gpr_malloc(sizeof(census_context));
// If we are given a base, copy it into our new tag set. Otherwise set it
// to zero/NULL everything.
if (base == NULL) {
@@ -459,7 +460,7 @@ static void tag_set_decode(struct tag_set *tags, const char *buffer,
}
tags->kvm_used = size - header_size;
tags->kvm_size = tags->kvm_used + CENSUS_MAX_TAG_KV_LEN;
- tags->kvm = gpr_malloc(tags->kvm_size);
+ tags->kvm = (char *)gpr_malloc(tags->kvm_size);
if (tag_header_size != TAG_HEADER_SIZE) {
// something new in the tag information. I don't understand it, so
// don't copy it over.
@@ -481,7 +482,8 @@ static void tag_set_decode(struct tag_set *tags, const char *buffer,
}
census_context *census_context_decode(const char *buffer, size_t size) {
- census_context *context = gpr_malloc(sizeof(census_context));
+ census_context *context =
+ (census_context *)gpr_malloc(sizeof(census_context));
memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set));
if (buffer == NULL) {
memset(&context->tags[PROPAGATED_TAGS], 0, sizeof(struct tag_set));
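[note] Same theme as base_resources.c: the results of gpr_malloc() now get explicit casts because C++, unlike C, does not implicitly convert void * to other pointer types. A self-contained sketch with plain malloc standing in for gpr_malloc:

    #include <stdlib.h>

    int main(void) {
      /* In C the cast is a no-op; compiled as C++ it is required,
         since void * does not implicitly convert to char *. */
      char *kvm = (char *)malloc(64);
      free(kvm);
      return 0;
    }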
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 13fe2e6b1c..b37ab90389 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -60,8 +60,8 @@ static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
static void client_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (op->send_initial_metadata) {
extract_and_annotate_method_tag(
op->payload->send_initial_metadata.send_initial_metadata, calld, chand);
@@ -78,9 +78,9 @@ static void client_start_transport_op(grpc_exec_ctx *exec_ctx,
static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
GPR_TIMER_BEGIN("census-server:server_on_done_recv", 0);
- grpc_call_element *elem = ptr;
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ grpc_call_element *elem = (grpc_call_element *)ptr;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (error == GRPC_ERROR_NONE) {
extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
}
@@ -90,7 +90,7 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the op callback */
calld->recv_initial_metadata =
@@ -117,7 +117,7 @@ static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *d = elem->call_data;
+ call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = args->start_time;
@@ -128,7 +128,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *d = elem->call_data;
+ call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
}
@@ -136,7 +136,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *d = elem->call_data;
+ call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = args->start_time;
@@ -150,7 +150,7 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *d = elem->call_data;
+ call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
}
@@ -158,14 +158,14 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(chand != NULL);
return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(chand != NULL);
}
@@ -179,7 +179,6 @@ const grpc_channel_filter grpc_client_census_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"census-client"};
@@ -193,6 +192,5 @@ const grpc_channel_filter grpc_server_census_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"census-server"};
diff --git a/src/core/ext/census/mlog.c b/src/core/ext/census/mlog.c
index 937ceb101b..4b8c8466b3 100644
--- a/src/core/ext/census/mlog.c
+++ b/src/core/ext/census/mlog.c
@@ -467,7 +467,8 @@ void census_log_initialize(size_t size_in_mb, int discard_old_records) {
g_log.blocks = (cl_block*)gpr_malloc_aligned(
g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
- g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
+ g_log.buffer =
+ (char*)gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
cl_block_list_initialize(&g_log.free_block_list);
cl_block_list_initialize(&g_log.dirty_block_list);
diff --git a/src/core/ext/census/resource.c b/src/core/ext/census/resource.c
index 1a676f0e1e..44a887231c 100644
--- a/src/core/ext/census/resource.c
+++ b/src/core/ext/census/resource.c
@@ -87,7 +87,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field,
gpr_log(GPR_INFO, "Zero-length Resource name.");
return false;
}
- vresource->name = gpr_malloc(stream->bytes_left + 1);
+ vresource->name = (char *)gpr_malloc(stream->bytes_left + 1);
vresource->name[stream->bytes_left] = '\0';
if (!pb_read(stream, (uint8_t *)vresource->name, stream->bytes_left)) {
return false;
@@ -106,7 +106,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field,
if (stream->bytes_left == 0) {
return true;
}
- vresource->description = gpr_malloc(stream->bytes_left + 1);
+ vresource->description = (char *)gpr_malloc(stream->bytes_left + 1);
vresource->description[stream->bytes_left] = '\0';
if (!pb_read(stream, (uint8_t *)vresource->description,
stream->bytes_left)) {
@@ -134,7 +134,8 @@ static bool validate_units_helper(pb_istream_t *stream, int *count,
// Have to allocate a new array of values. Normal case is 0 or 1, so
// this should normally not be an issue.
google_census_Resource_BasicUnit *new_bup =
- gpr_malloc((size_t)*count * sizeof(google_census_Resource_BasicUnit));
+ (google_census_Resource_BasicUnit *)gpr_malloc(
+ (size_t)*count * sizeof(google_census_Resource_BasicUnit));
if (*count != 1) {
memcpy(new_bup, *bup,
(size_t)(*count - 1) * sizeof(google_census_Resource_BasicUnit));
@@ -207,7 +208,8 @@ size_t allocate_resource(void) {
// Expand resources if needed.
if (n_resources == n_defined_resources) {
size_t new_n_resources = n_resources ? n_resources * 2 : 2;
- resource **new_resources = gpr_malloc(new_n_resources * sizeof(resource *));
+ resource **new_resources =
+ (resource **)gpr_malloc(new_n_resources * sizeof(resource *));
if (n_resources != 0) {
memcpy(new_resources, resources, n_resources * sizeof(resource *));
}
@@ -226,7 +228,7 @@ size_t allocate_resource(void) {
}
}
GPR_ASSERT(id < n_resources && resources[id] == NULL);
- resources[id] = gpr_malloc(sizeof(resource));
+ resources[id] = (resource *)gpr_malloc(sizeof(resource));
memset(resources[id], 0, sizeof(resource));
n_defined_resources++;
next_id = (id + 1) % n_resources;
@@ -276,22 +278,24 @@ int32_t define_resource(const resource *base) {
gpr_mu_lock(&resource_lock);
size_t id = allocate_resource();
size_t len = strlen(base->name) + 1;
- resources[id]->name = gpr_malloc(len);
+ resources[id]->name = (char *)gpr_malloc(len);
memcpy(resources[id]->name, base->name, len);
if (base->description) {
len = strlen(base->description) + 1;
- resources[id]->description = gpr_malloc(len);
+ resources[id]->description = (char *)gpr_malloc(len);
memcpy(resources[id]->description, base->description, len);
}
resources[id]->prefix = base->prefix;
resources[id]->n_numerators = base->n_numerators;
len = (size_t)base->n_numerators * sizeof(*base->numerators);
- resources[id]->numerators = gpr_malloc(len);
+ resources[id]->numerators =
+ (google_census_Resource_BasicUnit *)gpr_malloc(len);
memcpy(resources[id]->numerators, base->numerators, len);
resources[id]->n_denominators = base->n_denominators;
if (base->n_denominators != 0) {
len = (size_t)base->n_denominators * sizeof(*base->denominators);
- resources[id]->denominators = gpr_malloc(len);
+ resources[id]->denominators =
+ (google_census_Resource_BasicUnit *)gpr_malloc(len);
memcpy(resources[id]->denominators, base->denominators, len);
}
gpr_mu_unlock(&resource_lock);
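[note] allocate_resource() grows its table with the usual doubling scheme (capacity 0 -> 2 -> 4 -> ...), copying the live entries and freeing the old array. A stand-alone sketch of that pattern, with plain malloc/free standing in for gpr_malloc/gpr_free and allocation-failure handling elided:

    #include <stdlib.h>
    #include <string.h>

    /* Double the capacity of arr, preserving 'used' entries. */
    static void **grow(void **arr, size_t *cap, size_t used) {
      size_t new_cap = *cap ? *cap * 2 : 2;
      void **bigger = (void **)malloc(new_cap * sizeof(*bigger));
      if (used != 0) memcpy(bigger, arr, used * sizeof(*bigger));
      free(arr);
      *cap = new_cap;
      return bigger;
    }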
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.c b/src/core/ext/filters/client_channel/channel_connectivity.c
index b83c95275f..3844b98021 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.c
+++ b/src/core/ext/filters/client_channel/channel_connectivity.c
@@ -86,20 +86,20 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
grpc_cq_completion *ignored) {
- int delete = 0;
- state_watcher *w = pw;
+ bool should_delete = false;
+ state_watcher *w = (state_watcher *)pw;
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
case READY_TO_CALL_BACK:
GPR_UNREACHABLE_CODE(return );
case CALLING_BACK_AND_FINISHED:
- delete = 1;
+ should_delete = true;
break;
}
gpr_mu_unlock(&w->mu);
- if (delete) {
+ if (should_delete) {
delete_state_watcher(exec_ctx, w);
}
}
@@ -161,12 +161,12 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw,
grpc_error *error) {
- partly_done(exec_ctx, pw, true, GRPC_ERROR_REF(error));
+ partly_done(exec_ctx, (state_watcher *)pw, true, GRPC_ERROR_REF(error));
}
static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw,
grpc_error *error) {
- partly_done(exec_ctx, pw, false, GRPC_ERROR_REF(error));
+ partly_done(exec_ctx, (state_watcher *)pw, false, GRPC_ERROR_REF(error));
}
int grpc_channel_num_external_connectivity_watchers(grpc_channel *channel) {
@@ -191,13 +191,19 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(wa);
}
+int grpc_channel_support_connectivity_watcher(grpc_channel *channel) {
+ grpc_channel_element *client_channel_elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+ return client_channel_elem->filter != &grpc_client_channel_filter ? 0 : 1;
+}
+
void grpc_channel_watch_connectivity_state(
grpc_channel *channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
grpc_channel_element *client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- state_watcher *w = gpr_malloc(sizeof(*w));
+ state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w));
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
@@ -222,7 +228,8 @@ void grpc_channel_watch_connectivity_state(
w->channel = channel;
w->error = NULL;
- watcher_timer_init_arg *wa = gpr_malloc(sizeof(watcher_timer_init_arg));
+ watcher_timer_init_arg *wa =
+ (watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg));
wa->w = w;
wa->deadline = deadline;
GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
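[note] The new grpc_channel_support_connectivity_watcher() predicate reports whether the channel's top filter is the client-channel filter, i.e. whether watching connectivity is meaningful at all. A hedged usage sketch (channel, cq, deadline, and tag are assumed to be set up by the caller):

    if (grpc_channel_support_connectivity_watcher(channel)) {
      grpc_connectivity_state state =
          grpc_channel_check_connectivity_state(channel, 0 /* no connect */);
      grpc_channel_watch_connectivity_state(channel, state, deadline, cq, tag);
    }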
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index 58e31d7b45..016199b1f4 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -85,7 +85,7 @@ static void method_parameters_unref(method_parameters *method_params) {
}
static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
- method_parameters_unref(value);
+ method_parameters_unref((method_parameters *)value);
}
static bool parse_wait_for_ready(grpc_json *field,
@@ -148,7 +148,8 @@ static void *method_parameters_create_from_json(const grpc_json *json) {
if (!parse_timeout(field, &timeout)) return NULL;
}
}
- method_parameters *value = gpr_malloc(sizeof(method_parameters));
+ method_parameters *value =
+ (method_parameters *)gpr_malloc(sizeof(method_parameters));
gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
@@ -254,7 +255,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- lb_policy_connectivity_watcher *w = arg;
+ lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
@@ -281,7 +282,8 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
- lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
+ lb_policy_connectivity_watcher *w =
+ (lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -310,7 +312,8 @@ typedef struct {
} service_config_parsing_state;
static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
- service_config_parsing_state *parsing_state = arg;
+ service_config_parsing_state *parsing_state =
+ (service_config_parsing_state *)arg;
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
@@ -365,14 +368,14 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- channel_data *chand = arg;
+ channel_data *chand = (channel_data *)arg;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
}
// Extract the following fields from the resolver result, if non-NULL.
bool lb_policy_updated = false;
- char *lb_policy_name = NULL;
+ char *lb_policy_name_dup = NULL;
bool lb_policy_name_changed = false;
grpc_lb_policy *new_lb_policy = NULL;
char *service_config_json = NULL;
@@ -380,6 +383,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_hash_table *method_params_table = NULL;
if (chand->resolver_result != NULL) {
// Find LB policy name.
+ const char *lb_policy_name = NULL;
const grpc_arg *channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL) {
@@ -391,7 +395,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
- grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
+ grpc_lb_addresses *addresses =
+ (grpc_lb_addresses *)channel_arg->value.pointer.p;
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
@@ -469,7 +474,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
// Before we clean up, save a copy of lb_policy_name, since it might
// be pointing to data inside chand->resolver_result.
// The copy will be saved in chand->lb_policy_name below.
- lb_policy_name = gpr_strdup(lb_policy_name);
+ lb_policy_name_dup = gpr_strdup(lb_policy_name);
grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
chand->resolver_result = NULL;
}
@@ -477,8 +482,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG,
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
"service_config=\"%s\"",
- chand, lb_policy_name, lb_policy_name_changed ? " (changed)" : "",
- service_config_json);
+ chand, lb_policy_name_dup,
+ lb_policy_name_changed ? " (changed)" : "", service_config_json);
}
// Now swap out fields in chand. Note that the new values may still
// be NULL if (e.g.) the resolver failed to return results or the
@@ -486,9 +491,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
//
// First, swap out the data used by cc_get_channel_info().
gpr_mu_lock(&chand->info_mu);
- if (lb_policy_name != NULL) {
+ if (lb_policy_name_dup != NULL) {
gpr_free(chand->info_lb_policy_name);
- chand->info_lb_policy_name = lb_policy_name;
+ chand->info_lb_policy_name = lb_policy_name_dup;
}
if (service_config_json != NULL) {
gpr_free(chand->info_service_config_json);
@@ -586,9 +591,10 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
- grpc_transport_op *op = arg;
- grpc_channel_element *elem = op->handler_private.extra_arg;
- channel_data *chand = elem->channel_data;
+ grpc_transport_op *op = (grpc_transport_op *)arg;
+ grpc_channel_element *elem =
+ (grpc_channel_element *)op->handler_private.extra_arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
@@ -642,7 +648,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != NULL) {
@@ -662,7 +668,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *info) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != NULL) {
*info->lb_policy_name = chand->info_lb_policy_name == NULL
@@ -682,7 +688,7 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@@ -712,8 +718,10 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"client channel factory arg must be a pointer");
}
- grpc_client_channel_factory_ref(arg->value.pointer.p);
- chand->client_channel_factory = arg->value.pointer.p;
+ grpc_client_channel_factory_ref(
+ (grpc_client_channel_factory *)arg->value.pointer.p);
+ chand->client_channel_factory =
+ (grpc_client_channel_factory *)arg->value.pointer.p;
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == NULL) {
@@ -745,7 +753,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_resolver *resolver = arg;
+ grpc_resolver *resolver = (grpc_resolver *)arg;
grpc_resolver_shutdown_locked(exec_ctx, resolver);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
}
@@ -753,7 +761,7 @@ static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
@@ -796,7 +804,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
// send_message
// recv_trailing_metadata
// send_trailing_metadata
-#define MAX_WAITING_BATCHES 6
+// We also add room for a single cancel_stream batch.
+#define MAX_WAITING_BATCHES 7
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
@@ -807,24 +816,27 @@ typedef struct client_channel_call_data {
// State for handling deadlines.
// The code in deadline_filter.c requires this to be the first field.
// TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
- // and this struct both independently store a pointer to the call
- // stack and each has its own mutex. If/when we have time, find a way
- // to avoid this without breaking the grpc_deadline_state abstraction.
+ // and this struct both independently store pointers to the call stack
+ // and call combiner. If/when we have time, find a way to avoid this
+ // without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
grpc_slice path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
+ gpr_arena *arena;
+ grpc_call_stack *owning_call;
+ grpc_call_combiner *call_combiner;
+
grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params;
- /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest
- bit is 0), or a pointer to an error (if the lowest bit is 1) */
- gpr_atm subchannel_call_or_error;
- gpr_arena *arena;
+ grpc_subchannel_call *subchannel_call;
+ grpc_error *error;
grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending.
grpc_closure lb_pick_closure;
+ grpc_closure lb_pick_cancel_closure;
grpc_connected_subchannel *connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
@@ -832,10 +844,9 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
size_t waiting_for_pick_batches_count;
+ grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
- grpc_transport_stream_op_batch_payload *initial_metadata_payload;
-
- grpc_call_stack *owning_call;
+ grpc_transport_stream_op_batch *initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
@@ -843,56 +854,43 @@ typedef struct client_channel_call_data {
grpc_closure *original_on_complete;
} call_data;
-typedef struct {
- grpc_subchannel_call *subchannel_call;
- grpc_error *error;
-} call_or_error;
-
-static call_or_error get_call_or_error(call_data *p) {
- gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error);
- if (c == 0)
- return (call_or_error){NULL, NULL};
- else if (c & 1)
- return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)};
- else
- return (call_or_error){(grpc_subchannel_call *)c, NULL};
+grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
+ grpc_call_element *elem) {
+ call_data *calld = (call_data *)elem->call_data;
+ return calld->subchannel_call;
}
-static bool set_call_or_error(call_data *p, call_or_error coe) {
- // this should always be under a lock
- call_or_error existing = get_call_or_error(p);
- if (existing.error != GRPC_ERROR_NONE) {
- GRPC_ERROR_UNREF(coe.error);
- return false;
- }
- GPR_ASSERT(existing.subchannel_call == NULL);
- if (coe.error != GRPC_ERROR_NONE) {
- GPR_ASSERT(coe.subchannel_call == NULL);
- gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_add(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ if (batch->send_initial_metadata) {
+ GPR_ASSERT(calld->initial_metadata_batch == NULL);
+ calld->initial_metadata_batch = batch;
} else {
- GPR_ASSERT(coe.subchannel_call != NULL);
- gpr_atm_rel_store(&p->subchannel_call_or_error,
- (gpr_atm)coe.subchannel_call);
+ GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
+ batch;
}
- return true;
}
-grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
- grpc_call_element *call_elem) {
- return get_call_or_error(call_elem->call_data).subchannel_call;
-}
-
-static void waiting_for_pick_batches_add_locked(
- call_data *calld, grpc_transport_stream_op_batch *batch) {
- GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
- batch;
+// This is called via the call combiner, so access to calld is synchronized.
+static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ call_data *calld = (call_data *)arg;
+ if (calld->waiting_for_pick_batches_count > 0) {
+ --calld->waiting_for_pick_batches_count;
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx,
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
+ GRPC_ERROR_REF(error), calld->call_combiner);
+ }
}
-static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- call_data *calld = elem->call_data;
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_error *error) {
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s",
@@ -900,42 +898,68 @@ static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
grpc_error_string(error));
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
+ GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+ fail_pending_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ &calld->handle_pending_batch_in_call_combiner[i],
+ GRPC_ERROR_REF(error),
+ "waiting_for_pick_batches_fail");
+ }
+ if (calld->initial_metadata_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
+ exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
+ calld->call_combiner);
+ } else {
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "waiting_for_pick_batches_fail");
}
- calld->waiting_for_pick_batches_count = 0;
GRPC_ERROR_UNREF(error);
}
-static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- if (calld->waiting_for_pick_batches_count == 0) return;
- call_or_error coe = get_call_or_error(calld);
- if (coe.error != GRPC_ERROR_NONE) {
- waiting_for_pick_batches_fail_locked(exec_ctx, elem,
- GRPC_ERROR_REF(coe.error));
- return;
+// This is called via the call combiner, so access to calld is synchronized.
+static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *ignored) {
+ call_data *calld = (call_data *)arg;
+ if (calld->waiting_for_pick_batches_count > 0) {
+ --calld->waiting_for_pick_batches_count;
+ grpc_subchannel_call_process_op(
+ exec_ctx, calld->subchannel_call,
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
}
+}
+
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
" pending batches to subchannel_call=%p",
- elem->channel_data, calld, calld->waiting_for_pick_batches_count,
- coe.subchannel_call);
+ chand, calld, calld->waiting_for_pick_batches_count,
+ calld->subchannel_call);
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
- calld->waiting_for_pick_batches[i]);
- }
- calld->waiting_for_pick_batches_count = 0;
+ GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+ run_pending_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ &calld->handle_pending_batch_in_call_combiner[i],
+ GRPC_ERROR_NONE,
+ "waiting_for_pick_batches_resume");
+ }
+ GPR_ASSERT(calld->initial_metadata_batch != NULL);
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
+ calld->initial_metadata_batch);
}
// Applies service config to the call. Must be invoked once we know
// that the resolver has returned results to the channel.
static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
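[note] The shape of the new code above is the call-combiner discipline that this diff migrates client_channel to: each pending batch gets its own closure, GRPC_CALL_COMBINER_START hands that closure to the combiner so callbacks touch calld one at a time without a lock, and the combiner hold is released either by forwarding a batch down the stack or by an explicit GRPC_CALL_COMBINER_STOP (the else-branch above, when there is no initial-metadata batch to forward). Schematically, under those assumptions:

    /* Enqueue work that needs exclusive access to calld. */
    GRPC_CLOSURE_INIT(&closure, do_work, calld, grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, &closure,
                             GRPC_ERROR_NONE, "reason");
    /* ... inside do_work(), once calld is no longer needed, either
       forward a batch (which carries the hold onward) or release it: */
    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, "reason");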
@@ -945,7 +969,7 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
if (chand->method_params_table != NULL) {
- calld->method_params = grpc_method_config_table_get(
+ calld->method_params = (method_parameters *)grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
@@ -968,194 +992,97 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
- call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call = NULL;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
.pollent = calld->pollent,
.path = calld->path,
.start_time = calld->call_start_time,
.deadline = calld->deadline,
.arena = calld->arena,
- .context = calld->subchannel_call_context};
+ .context = calld->subchannel_call_context,
+ .call_combiner = calld->call_combiner};
grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
+ exec_ctx, calld->connected_subchannel, &call_args,
+ &calld->subchannel_call);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
- elem->channel_data, calld, subchannel_call,
- grpc_error_string(new_error));
+ chand, calld, calld->subchannel_call, grpc_error_string(new_error));
}
- GPR_ASSERT(set_call_or_error(
- calld, (call_or_error){.subchannel_call = subchannel_call}));
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error);
+ waiting_for_pick_batches_fail(exec_ctx, elem, new_error);
} else {
- waiting_for_pick_batches_resume_locked(exec_ctx, elem);
+ waiting_for_pick_batches_resume(exec_ctx, elem);
}
GRPC_ERROR_UNREF(error);
}
-static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
- chand->interested_parties);
- call_or_error coe = get_call_or_error(calld);
+// Invoked when a pick is completed, on both success or failure.
+static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_error *error) {
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (calld->connected_subchannel == NULL) {
// Failed to create subchannel.
- grpc_error *failure =
- error == GRPC_ERROR_NONE
- ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy")
- : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Failed to create subchannel", &error, 1);
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = error == GRPC_ERROR_NONE
+ ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy")
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to create subchannel", &error, 1);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failed to create subchannel: error=%s", chand,
- calld, grpc_error_string(failure));
- }
- set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure);
- } else if (coe.error != GRPC_ERROR_NONE) {
- /* already cancelled before subchannel became ready */
- grpc_error *child_errors[] = {error, coe.error};
- grpc_error *cancellation_error =
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Cancelled before creating subchannel", child_errors,
- GPR_ARRAY_SIZE(child_errors));
- /* if due to deadline, attach the deadline exceeded status to the error */
- if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
- cancellation_error =
- grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_DEADLINE_EXCEEDED);
+ calld, grpc_error_string(calld->error));
}
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: cancelled before subchannel became ready: %s",
- chand, calld, grpc_error_string(cancellation_error));
- }
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error);
+ waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error));
} else {
/* Create call on subchannel. */
create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
GRPC_ERROR_UNREF(error);
}
-static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call =
- get_call_or_error(calld).subchannel_call;
- if (subchannel_call == NULL) {
- return NULL;
- } else {
- return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
- }
+// A wrapper around pick_done_locked() that is used in cases where
+// either (a) the pick was deferred pending a resolver result or (b) the
+// pick was done asynchronously. Removes the call's polling entity from
+// chand->interested_parties before invoking pick_done_locked().
+static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem, grpc_error *error) {
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
+ pick_done_locked(exec_ctx, elem, error);
}
-/** Return true if subchannel is available immediately (in which case
- subchannel_ready_locked() should not be called), or false otherwise (in
- which case subchannel_ready_locked() should be called when the subchannel
- is available). */
-static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem);
-
-typedef struct {
- grpc_call_element *elem;
- bool cancelled;
- grpc_closure closure;
-} pick_after_resolver_result_args;
-
-static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- pick_after_resolver_result_args *args = arg;
- if (args->cancelled) {
- /* cancelled, do nothing */
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ if (calld->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "call cancelled before resolver result");
- }
- } else {
- channel_data *chand = args->elem->channel_data;
- call_data *calld = args->elem->call_data;
- if (error != GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
- chand, calld);
- }
- subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
- } else {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
- chand, calld);
- }
- if (pick_subchannel_locked(exec_ctx, args->elem)) {
- subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
- }
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
+ chand, calld, calld->lb_policy);
}
+ grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
+ &calld->connected_subchannel,
+ GRPC_ERROR_REF(error));
}
- gpr_free(args);
-}
-
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: deferring pick pending resolver result", chand,
- calld);
- }
- pick_after_resolver_result_args *args =
- (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
- args->elem = elem;
- GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
- args, grpc_combiner_scheduler(chand->combiner));
- grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
- &args->closure, GRPC_ERROR_NONE);
-}
-
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- // If we don't yet have a resolver result, then a closure for
- // pick_after_resolver_result_done_locked() will have been added to
- // chand->waiting_for_resolver_result_closures, and it may not be invoked
- // until after this call has been destroyed. We mark the operation as
- // cancelled, so that when pick_after_resolver_result_done_locked()
- // is called, it will be a no-op. We also immediately invoke
- // subchannel_ready_locked() to propagate the error back to the caller.
- for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
- closure != NULL; closure = closure->next_data.next) {
- pick_after_resolver_result_args *args = closure->cb_arg;
- if (!args->cancelled && args->elem == elem) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: "
- "cancelling pick waiting for resolver result",
- chand, calld);
- }
- args->cancelled = true;
- subchannel_ready_locked(exec_ctx, elem,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick cancelled", &error, 1));
- }
- }
- GRPC_ERROR_UNREF(error);
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
}
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
-// Unrefs the LB policy after invoking subchannel_ready_locked().
+// Unrefs the LB policy and invokes async_pick_done_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *elem = arg;
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
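[note] Cancellation under the call combiner works by registration rather than by polling an atomic: when a pick stays pending, the code in the next hunk takes a ref on the call stack and registers lb_pick_cancel_closure via grpc_call_combiner_set_notify_on_cancel(), so a later cancel_stream can unwind the LB pick. An annotated copy of that hookup:

    /* Keep the call stack alive until the cancel closure runs. */
    GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
    /* If the call is cancelled while the pick is pending, the call
       combiner schedules this closure, which invokes
       grpc_lb_policy_cancel_pick_locked() under the channel combiner. */
    grpc_call_combiner_set_notify_on_cancel(
        exec_ctx, calld->call_combiner,
        GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
                          pick_callback_cancel_locked, elem,
                          grpc_combiner_scheduler(chand->combiner)));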
@@ -1163,28 +1090,51 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(calld->lb_policy != NULL);
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
- subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+ async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
// If the pick was completed synchronously, unrefs the LB policy and
// returns true.
static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_lb_policy_pick_args *inputs) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem) {
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
}
+ apply_service_config_to_call_locked(exec_ctx, elem);
+ // If the application explicitly set wait_for_ready, use that.
+ // Otherwise, if the service config specified a value for this
+ // method, use that.
+ uint32_t initial_metadata_flags =
+ calld->initial_metadata_batch->payload->send_initial_metadata
+ .send_initial_metadata_flags;
+ const bool wait_for_ready_set_from_api =
+ initial_metadata_flags &
+ GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
+ const bool wait_for_ready_set_from_service_config =
+ calld->method_params != NULL &&
+ calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
+ if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
+ if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
+ initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ } else {
+ initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ }
+ }
+ const grpc_lb_policy_pick_args inputs = {
+ calld->initial_metadata_batch->payload->send_initial_metadata
+ .send_initial_metadata,
+ initial_metadata_flags, &calld->lb_token_mdelem};
// Keep a ref to the LB policy in calld while the pick is pending.
GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
calld->lb_policy = chand->lb_policy;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
const bool pick_done = grpc_lb_policy_pick_locked(
- exec_ctx, chand->lb_policy, inputs, &calld->connected_subchannel,
+ exec_ctx, chand->lb_policy, &inputs, &calld->connected_subchannel,
calld->subchannel_call_context, NULL, &calld->lb_pick_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
@@ -1194,160 +1144,160 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
}
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
+ } else {
+ GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
+ pick_callback_cancel_locked, elem,
+ grpc_combiner_scheduler(chand->combiner)));
}
return pick_done;
}
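
/* A minimal standalone sketch of the wait_for_ready precedence implemented
   above: an explicit per-call setting always wins over the service config.
   The flag constants and helper below are illustrative stand-ins, not the
   real gRPC definitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WAIT_FOR_READY (1u << 0)
#define WAIT_FOR_READY_EXPLICITLY_SET (1u << 1)

typedef enum { WFR_UNSET, WFR_FALSE, WFR_TRUE } wfr_config;

static uint32_t resolve_wait_for_ready(uint32_t flags, wfr_config svc) {
  const bool set_by_api = (flags & WAIT_FOR_READY_EXPLICITLY_SET) != 0;
  if (!set_by_api && svc != WFR_UNSET) {
    if (svc == WFR_TRUE) {
      flags |= WAIT_FOR_READY;  /* service config turns it on */
    } else {
      flags &= ~WAIT_FOR_READY; /* service config turns it off */
    }
  }
  return flags;
}

int main(void) {
  /* Explicitly cleared by the API: the service config's "true" is ignored. */
  uint32_t f = resolve_wait_for_ready(WAIT_FOR_READY_EXPLICITLY_SET, WFR_TRUE);
  printf("explicit setting kept: %d\n", (f & WAIT_FOR_READY) == 0);
  /* Nothing explicit: the service config's "true" is applied. */
  f = resolve_wait_for_ready(0, WFR_TRUE);
  printf("service config applied: %d\n", (f & WAIT_FOR_READY) != 0);
  return 0;
}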
-static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- GPR_ASSERT(calld->lb_policy != NULL);
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
- chand, calld, calld->lb_policy);
- }
- grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
- &calld->connected_subchannel, error);
-}
+typedef struct {
+ grpc_call_element *elem;
+ bool finished;
+ grpc_closure closure;
+ grpc_closure cancel_closure;
+} pick_after_resolver_result_args;
-static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- GPR_TIMER_BEGIN("pick_subchannel", 0);
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- bool pick_done = false;
- if (chand->lb_policy != NULL) {
- apply_service_config_to_call_locked(exec_ctx, elem);
- // If the application explicitly set wait_for_ready, use that.
- // Otherwise, if the service config specified a value for this
- // method, use that.
- uint32_t initial_metadata_flags =
- calld->initial_metadata_payload->send_initial_metadata
- .send_initial_metadata_flags;
- const bool wait_for_ready_set_from_api =
- initial_metadata_flags &
- GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
- const bool wait_for_ready_set_from_service_config =
- calld->method_params != NULL &&
- calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
- if (!wait_for_ready_set_from_api &&
- wait_for_ready_set_from_service_config) {
- if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
- initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
- } else {
- initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
- }
- }
- const grpc_lb_policy_pick_args inputs = {
- calld->initial_metadata_payload->send_initial_metadata
- .send_initial_metadata,
- initial_metadata_flags, &calld->lb_token_mdelem};
- pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
- } else if (chand->resolver != NULL) {
- if (!chand->started_resolving) {
- start_resolving_locked(exec_ctx, chand);
- }
- pick_after_resolver_result_start_locked(exec_ctx, elem);
- } else {
- subchannel_ready_locked(
- exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error) {
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)arg;
+ if (args->finished) {
+ gpr_free(args);
+ return;
}
- GPR_TIMER_END("pick_subchannel", 0);
- return pick_done;
+ // If we don't yet have a resolver result, then a closure for
+ // pick_after_resolver_result_done_locked() will have been added to
+ // chand->waiting_for_resolver_result_closures, and it may not be invoked
+ // until after this call has been destroyed. We mark the operation as
+ // finished, so that when pick_after_resolver_result_done_locked()
+ // is called, it will be a no-op. We also immediately invoke
+ // async_pick_done_locked() to propagate the error back to the caller.
+ args->finished = true;
+ grpc_call_element *elem = args->elem;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: cancelling pick waiting for resolver result",
+ chand, calld);
+ }
+  // Note: Although we are not in the call combiner here, we are
+  // effectively stealing the call combiner from the pending pick, so
+  // it is safe to call async_pick_done_locked() here in place of
+  // pick_after_resolver_result_done_locked().
+ async_pick_done_locked(exec_ctx, elem,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
}
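
/* Sketch of the "finished" handshake between the done and cancel closures
   above. Both run serialized under the channel combiner, so a plain bool
   suffices: the first callback to run does the work, and whichever runs
   second only frees the shared args. Names below are illustrative. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  bool finished;
  const char *tag; /* illustrative payload */
} pick_args;

static void on_done(pick_args *args) {
  if (args->finished) { free(args); return; } /* cancelled earlier: no-op */
  args->finished = true;
  printf("pick %s: completing normally\n", args->tag);
}

static void on_cancel(pick_args *args) {
  if (args->finished) { free(args); return; } /* already completed: no-op */
  args->finished = true;
  printf("pick %s: cancelled\n", args->tag);
}

int main(void) {
  pick_args *a = calloc(1, sizeof(*a));
  a->tag = "A";
  on_cancel(a); /* cancel wins the race...        */
  on_done(a);   /* ...so the done callback frees. */
  return 0;
}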
-static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
+static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
- grpc_error *error_ignored) {
- GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
- grpc_transport_stream_op_batch *batch = arg;
- grpc_call_element *elem = batch->handler_private.extra_arg;
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- /* need to recheck that another thread hasn't set the call */
- call_or_error coe = get_call_or_error(calld);
- if (coe.error != GRPC_ERROR_NONE) {
+ grpc_error *error) {
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)arg;
+ if (args->finished) {
+ /* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(coe.error));
+ gpr_log(GPR_DEBUG, "call cancelled before resolver result");
}
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch, GRPC_ERROR_REF(coe.error));
- goto done;
+ gpr_free(args);
+ return;
}
- if (coe.subchannel_call != NULL) {
+ args->finished = true;
+ grpc_call_element *elem = args->elem;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
- calld, coe.subchannel_call);
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
+ chand, calld);
}
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
- goto done;
- }
- // Add to waiting-for-pick list. If we succeed in getting a
- // subchannel call below, we'll handle this batch (along with any
- // other waiting batches) in waiting_for_pick_batches_resume_locked().
- waiting_for_pick_batches_add_locked(calld, batch);
- // If this is a cancellation, cancel the pending pick (if any) and
- // fail any pending batches.
- if (batch->cancel_stream) {
- grpc_error *error = batch->payload->cancel_stream.cancel_error;
+ async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+ } else {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
- calld, grpc_error_string(error));
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
+ chand, calld);
}
- /* Stash a copy of cancel_error in our call data, so that we can use
- it for subsequent operations. This ensures that if the call is
- cancelled before any batches are passed down (e.g., if the deadline
- is in the past when the call starts), we can return the right
- error to the caller when the first batch does get passed down. */
- set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
- if (calld->lb_policy != NULL) {
- pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- } else {
- pick_after_resolver_result_cancel_locked(exec_ctx, elem,
- GRPC_ERROR_REF(error));
+ if (pick_callback_start_locked(exec_ctx, elem)) {
+ // Even if the LB policy returns a result synchronously, we have
+ // already added our polling entity to chand->interested_parties
+ // in order to wait for the resolver result, so we need to
+ // remove it here. Therefore, we call async_pick_done_locked()
+ // instead of pick_done_locked().
+ async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- goto done;
}
- /* if we don't have a subchannel, try to get one */
- if (batch->send_initial_metadata) {
- GPR_ASSERT(calld->connected_subchannel == NULL);
- calld->initial_metadata_payload = batch->payload;
- GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
- /* If a subchannel is not available immediately, the polling entity from
- call_data should be provided to channel_data's interested_parties, so
- that IO of the lb_policy and resolver could be done under it. */
- if (pick_subchannel_locked(exec_ctx, elem)) {
- // Pick was returned synchronously.
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
- if (calld->connected_subchannel == NULL) {
- grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy");
- set_call_or_error(calld,
- (call_or_error){.error = GRPC_ERROR_REF(error)});
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, error);
- } else {
- // Create subchannel call.
- create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
- }
- } else {
- grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
- chand->interested_parties);
+}
+
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: deferring pick pending resolver result", chand,
+ calld);
+ }
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
+ args->elem = elem;
+ GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
+ args, grpc_combiner_scheduler(chand->combiner));
+ grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
+ &args->closure, GRPC_ERROR_NONE);
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&args->cancel_closure,
+ pick_after_resolver_result_cancel_locked, args,
+ grpc_combiner_scheduler(chand->combiner)));
+}
+
+static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *ignored) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ GPR_ASSERT(calld->connected_subchannel == NULL);
+ if (chand->lb_policy != NULL) {
+ // We already have an LB policy, so ask it for a pick.
+ if (pick_callback_start_locked(exec_ctx, elem)) {
+ // Pick completed synchronously.
+ pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
+ return;
+ }
+ } else {
+ // We do not yet have an LB policy, so wait for a resolver result.
+ if (chand->resolver == NULL) {
+ pick_done_locked(exec_ctx, elem,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+ return;
+ }
+ if (!chand->started_resolving) {
+ start_resolving_locked(exec_ctx, chand);
}
+ pick_after_resolver_result_start_locked(exec_ctx, elem);
}
-done:
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
- "start_transport_stream_op_batch");
- GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
+ // We need to wait for either a resolver result or for an async result
+ // from the LB policy. Add the polling entity from call_data to the
+ // channel_data's interested_parties, so that the I/O of the LB policy
+ // and resolver can be done under it. The polling entity will be
+ // removed in async_pick_done_locked().
+ grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_call_element *elem = arg;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)elem->call_data;
if (calld->retry_throttle_data != NULL) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
@@ -1365,27 +1315,49 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_ERROR_REF(error));
}
-/* The logic here is fairly complicated, due to (a) the fact that we
- need to handle the case where we receive the send op before the
- initial metadata op, and (b) the need for efficiency, especially in
- the streaming case.
-
- We use double-checked locking to initially see if initialization has been
- performed. If it has not, we acquire the combiner and perform initialization.
- If it has, we proceed on the fast path. */
static void cc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace) ||
- GRPC_TRACER_ON(grpc_trace_channel)) {
- grpc_call_log_op(GPR_INFO, elem, batch);
- }
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch);
}
+ GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
+ // If we've previously been cancelled, immediately fail any new batches.
+ if (calld->error != GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
+ chand, calld, grpc_error_string(calld->error));
+ }
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
+ goto done;
+ }
+ if (batch->cancel_stream) {
+ // Stash a copy of cancel_error in our call data, so that we can use
+ // it for subsequent operations. This ensures that if the call is
+ // cancelled before any batches are passed down (e.g., if the deadline
+ // is in the past when the call starts), we can return the right
+ // error to the caller when the first batch does get passed down.
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
+ calld, grpc_error_string(calld->error));
+ }
+ // If we have a subchannel call, send the cancellation batch down.
+ // Otherwise, fail all pending batches.
+ if (calld->subchannel_call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+ } else {
+ waiting_for_pick_batches_add(calld, batch);
+ waiting_for_pick_batches_fail(exec_ctx, elem,
+ GRPC_ERROR_REF(calld->error));
+ }
+ goto done;
+ }
// Intercept on_complete for recv_trailing_metadata so that we can
// check retry throttle status.
if (batch->recv_trailing_metadata) {
@@ -1395,38 +1367,44 @@ static void cc_start_transport_stream_op_batch(
grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete;
}
- /* try to (atomically) get the call */
- call_or_error coe = get_call_or_error(calld);
- GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
- if (coe.error != GRPC_ERROR_NONE) {
+ // Check if we've already gotten a subchannel call.
+ // Note that once we have completed the pick, we do not need to enter
+ // the channel combiner, which is more efficient (especially for
+ // streaming calls).
+ if (calld->subchannel_call != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(coe.error));
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
+ calld, calld->subchannel_call);
}
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch, GRPC_ERROR_REF(coe.error));
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
goto done;
}
- if (coe.subchannel_call != NULL) {
+ // We do not yet have a subchannel call.
+ // Add the batch to the waiting-for-pick list.
+ waiting_for_pick_batches_add(calld, batch);
+ // For batches containing a send_initial_metadata op, enter the channel
+ // combiner to start a pick.
+ if (batch->send_initial_metadata) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
+ chand, calld);
+ }
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
+ elem, grpc_combiner_scheduler(chand->combiner)),
+ GRPC_ERROR_NONE);
+ } else {
+ // For all other batches, release the call combiner.
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
- calld, coe.subchannel_call);
+            "chand=%p calld=%p: saved batch, yielding call combiner", chand,
+ calld);
}
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
- goto done;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "batch does not include send_initial_metadata");
}
- /* we failed; lock and figure out what to do */
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
- }
- GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
- batch->handler_private.extra_arg = elem;
- GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
- start_transport_stream_op_batch_locked, batch,
- grpc_combiner_scheduler(chand->combiner)),
- GRPC_ERROR_NONE);
done:
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
}
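
/* Sketch of the routing above: only batches that still need a pick pay the
   cost of entering the channel combiner; once a subchannel call exists,
   batches are dispatched directly. The tiny FIFO "combiner" below is an
   illustrative stand-in for grpc's lock-free combiner. */
#include <stdio.h>

typedef void (*thunk_fn)(void *);
typedef struct { thunk_fn fn; void *arg; } thunk;

static thunk queue[8];
static int qlen;

static void combiner_sched(thunk_fn fn, void *arg) {
  queue[qlen].fn = fn;
  queue[qlen].arg = arg;
  ++qlen;
}
static void combiner_drain(void) {
  for (int i = 0; i < qlen; ++i) queue[i].fn(queue[i].arg);
  qlen = 0;
}

typedef struct { void *subchannel_call; } call;

static void start_pick(void *arg) {
  call *c = arg;
  c->subchannel_call = c; /* pretend the pick produced a subchannel call */
  printf("slow path: pick done under combiner\n");
}

static void start_batch(call *c) {
  if (c->subchannel_call != NULL) {
    printf("fast path: sent directly to subchannel call\n");
  } else {
    combiner_sched(start_pick, c);
  }
}

int main(void) {
  call c = {0};
  start_batch(&c);  /* first batch: scheduled on the combiner */
  combiner_drain(); /* combiner runs the pick */
  start_batch(&c);  /* later batches: fast path, no combiner hop */
  return 0;
}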
@@ -1435,16 +1413,18 @@ done:
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
- calld->owning_call = args->call_stack;
calld->arena = args->arena;
+ calld->owning_call = args->call_stack;
+ calld->call_combiner = args->call_combiner;
if (chand->deadline_checking_enabled) {
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack, calld->deadline);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+ args->call_combiner, calld->deadline);
}
return GRPC_ERROR_NONE;
}
@@ -1454,8 +1434,8 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_destroy(exec_ctx, elem);
}
@@ -1463,13 +1443,12 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
if (calld->method_params != NULL) {
method_parameters_unref(calld->method_params);
}
- call_or_error coe = get_call_or_error(calld);
- GRPC_ERROR_UNREF(coe.error);
- if (coe.subchannel_call != NULL) {
- grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call,
+ GRPC_ERROR_UNREF(calld->error);
+ if (calld->subchannel_call != NULL) {
+ grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
then_schedule_closure);
then_schedule_closure = NULL;
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call,
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
"client_channel_destroy_call");
}
GPR_ASSERT(calld->lb_policy == NULL);
@@ -1490,7 +1469,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
calld->pollent = pollent;
}
@@ -1508,14 +1487,13 @@ const grpc_channel_filter grpc_client_channel_filter = {
sizeof(channel_data),
cc_init_channel_elem,
cc_destroy_channel_elem,
- cc_get_peer,
cc_get_channel_info,
"client-channel",
};
static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
- channel_data *chand = arg;
+ channel_data *chand = (channel_data *)arg;
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
@@ -1529,7 +1507,7 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
@@ -1600,7 +1578,7 @@ static void external_connectivity_watcher_list_remove(
int grpc_client_channel_num_external_connectivity_watchers(
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
int count = 0;
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1617,7 +1595,7 @@ int grpc_client_channel_num_external_connectivity_watchers(
static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- external_connectivity_watcher *w = arg;
+ external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
grpc_closure *follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
w->chand->interested_parties);
@@ -1630,7 +1608,7 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
- external_connectivity_watcher *w = arg;
+ external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
external_connectivity_watcher *found = NULL;
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
@@ -1659,8 +1637,9 @@ void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_polling_entity pollent, grpc_connectivity_state *state,
grpc_closure *closure, grpc_closure *watcher_timer_init) {
- channel_data *chand = elem->channel_data;
- external_connectivity_watcher *w = gpr_zalloc(sizeof(*w));
+ channel_data *chand = (channel_data *)elem->channel_data;
+ external_connectivity_watcher *w =
+ (external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
w->chand = chand;
w->pollent = pollent;
w->on_complete = closure;
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.c b/src/core/ext/filters/client_channel/client_channel_factory.c
index 7220a8639e..57eac8f875 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.c
+++ b/src/core/ext/filters/client_channel/client_channel_factory.c
@@ -43,14 +43,13 @@ grpc_channel* grpc_client_channel_factory_create_channel(
}
static void* factory_arg_copy(void* factory) {
- grpc_client_channel_factory_ref(factory);
+ grpc_client_channel_factory_ref((grpc_client_channel_factory*)factory);
return factory;
}
static void factory_arg_destroy(grpc_exec_ctx* exec_ctx, void* factory) {
- // TODO(roth): Remove local exec_ctx when
- // https://github.com/grpc/grpc/pull/8705 is merged.
- grpc_client_channel_factory_unref(exec_ctx, factory);
+ grpc_client_channel_factory_unref(exec_ctx,
+ (grpc_client_channel_factory*)factory);
}
static int factory_arg_cmp(void* factory1, void* factory2) {
@@ -64,6 +63,6 @@ static const grpc_arg_pointer_vtable factory_arg_vtable = {
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory* factory) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_CLIENT_CHANNEL_FACTORY,
+ return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CLIENT_CHANNEL_FACTORY,
factory, &factory_arg_vtable);
}
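
/* Generic sketch of the copy/destroy/cmp vtable pattern that
   factory_arg_vtable above uses to carry a refcounted pointer inside
   channel args: copy takes a ref, destroy drops one, cmp orders by
   identity. All names below are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int refs; } factory;

static void *arg_copy(void *p) {
  ((factory *)p)->refs++; /* each channel-args copy holds its own ref */
  return p;
}
static void arg_destroy(void *p) {
  factory *f = p;
  if (--f->refs == 0) free(f);
}
static int arg_cmp(void *a, void *b) {
  uintptr_t x = (uintptr_t)a, y = (uintptr_t)b;
  return (x > y) - (x < y); /* compare by identity */
}

typedef struct {
  void *(*copy)(void *);
  void (*destroy)(void *);
  int (*cmp)(void *, void *);
} pointer_vtable;

static const pointer_vtable vt = {arg_copy, arg_destroy, arg_cmp};

int main(void) {
  factory *f = calloc(1, sizeof(*f));
  f->refs = 1;
  void *copy = vt.copy(f); /* args copy takes a ref */
  printf("refs after copy: %d\n", f->refs);
  vt.destroy(copy); /* each holder drops its own ref */
  vt.destroy(f);
  return 0;
}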
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.c b/src/core/ext/filters/client_channel/client_channel_plugin.c
index c32e83d012..1f71c5a7f9 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.c
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.c
@@ -54,8 +54,8 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
char *default_authority = grpc_get_default_authority(
exec_ctx, grpc_channel_stack_builder_get_target(builder));
if (default_authority != NULL) {
- grpc_arg arg = grpc_channel_arg_string_create(GRPC_ARG_DEFAULT_AUTHORITY,
- default_authority);
+ grpc_arg arg = grpc_channel_arg_string_create(
+ (char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
new_args);
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.c b/src/core/ext/filters/client_channel/http_connect_handshaker.c
index 0952dc6d4e..418bb41ef6 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.c
@@ -124,7 +124,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
// Callback invoked when finished writing HTTP CONNECT request.
static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- http_connect_handshaker* handshaker = arg;
+ http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the write failed or we're shutting down, clean up and invoke the
@@ -145,7 +145,7 @@ static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked for reading HTTP CONNECT response.
static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- http_connect_handshaker* handshaker = arg;
+ http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the read failed or we're shutting down, clean up and invoke the
@@ -281,7 +281,8 @@ static void http_connect_handshaker_do_handshake(
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
gpr_string_split(arg->value.string, "\n", &header_strings,
&num_header_strings);
- headers = gpr_malloc(sizeof(grpc_http_header) * num_header_strings);
+ headers = (grpc_http_header*)gpr_malloc(sizeof(grpc_http_header) *
+ num_header_strings);
for (size_t i = 0; i < num_header_strings; ++i) {
char* sep = strchr(header_strings[i], ':');
if (sep == NULL) {
@@ -308,7 +309,7 @@ static void http_connect_handshaker_do_handshake(
grpc_httpcli_request request;
memset(&request, 0, sizeof(request));
request.host = server_name;
- request.http.method = "CONNECT";
+ request.http.method = (char*)"CONNECT";
request.http.path = server_name;
request.http.hdrs = headers;
request.http.hdr_count = num_headers;
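
/* Sketch of the header parsing above: each configured "key:value" string
   is split on the first ':', and entries without a separator are treated
   as errors. Standalone; the real code operates on gpr_string_split
   output. */
#include <stdio.h>
#include <string.h>

static int parse_header(char *line, char **key, char **value) {
  char *sep = strchr(line, ':');
  if (sep == NULL) return 0; /* no separator: reject the entry */
  *sep = '\0';
  *key = line;
  *value = sep + 1;
  return 1;
}

int main(void) {
  char h1[] = "Proxy-Authorization:Basic abc123";
  char h2[] = "malformed header";
  char *k, *v;
  if (parse_header(h1, &k, &v)) printf("key=%s value=%s\n", k, v);
  if (!parse_header(h2, &k, &v)) printf("rejected malformed entry\n");
  return 0;
}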
@@ -333,7 +334,8 @@ static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
http_connect_handshaker_do_handshake};
static grpc_handshaker* grpc_http_connect_handshaker_create() {
- http_connect_handshaker* handshaker = gpr_malloc(sizeof(*handshaker));
+ http_connect_handshaker* handshaker =
+ (http_connect_handshaker*)gpr_malloc(sizeof(*handshaker));
memset(handshaker, 0, sizeof(*handshaker));
grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
gpr_mu_init(&handshaker->mu);
diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c
index ef3512ed83..c507a2750e 100644
--- a/src/core/ext/filters/client_channel/http_proxy.c
+++ b/src/core/ext/filters/client_channel/http_proxy.c
@@ -44,6 +44,8 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
GPR_ASSERT(user_cred != NULL);
char* proxy_name = NULL;
char* uri_str = gpr_getenv("http_proxy");
+ char** authority_strs = NULL;
+ size_t authority_nstrs;
if (uri_str == NULL) return NULL;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */);
@@ -56,8 +58,6 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
goto done;
}
/* Split on '@' to separate user credentials from host */
- char** authority_strs = NULL;
- size_t authority_nstrs;
gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs);
GPR_ASSERT(authority_nstrs != 0); /* should have at least 1 string */
if (authority_nstrs == 1) {
@@ -157,7 +157,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
}
grpc_arg args_to_add[2];
args_to_add[0] = grpc_channel_arg_string_create(
- GRPC_ARG_HTTP_CONNECT_SERVER,
+ (char*)GRPC_ARG_HTTP_CONNECT_SERVER,
uri->path[0] == '/' ? uri->path + 1 : uri->path);
if (user_cred != NULL) {
/* Use base64 encoding for user credentials as stated in RFC 7617 */
@@ -166,8 +166,8 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
char* header;
gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
gpr_free(encoded_user_cred);
- args_to_add[1] =
- grpc_channel_arg_string_create(GRPC_ARG_HTTP_CONNECT_HEADERS, header);
+ args_to_add[1] = grpc_channel_arg_string_create(
+ (char*)GRPC_ARG_HTTP_CONNECT_HEADERS, header);
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2);
gpr_free(header);
} else {
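
/* Sketch of the authority handling above, assuming a single '@': the
   user-credentials prefix is split off before the proxy host. Base64
   encoding of the credentials (RFC 7617) is elided here; the real code
   encodes them before building the "Proxy-Authorization: Basic ..."
   header. */
#include <stdio.h>
#include <string.h>

static char *split_user_cred(char *authority, char **host) {
  char *at = strchr(authority, '@');
  if (at == NULL) { /* no credentials present */
    *host = authority;
    return NULL;
  }
  *at = '\0';
  *host = at + 1;
  return authority;
}

int main(void) {
  char a1[] = "alice:s3cret@proxy.example.com:3128";
  char a2[] = "proxy.example.com:3128";
  char *host;
  char *cred = split_user_cred(a1, &host);
  printf("cred=%s host=%s\n", cred ? cred : "(none)", host);
  cred = split_user_cred(a2, &host);
  printf("cred=%s host=%s\n", cred ? cred : "(none)", host);
  return 0;
}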
diff --git a/src/core/ext/filters/client_channel/lb_policy.c b/src/core/ext/filters/client_channel/lb_policy.c
index dd95a135cf..8e6673d737 100644
--- a/src/core/ext/filters/client_channel/lb_policy.c
+++ b/src/core/ext/filters/client_channel/lb_policy.c
@@ -67,7 +67,7 @@ void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_lb_policy *policy = arg;
+ grpc_lb_policy *policy = (grpc_lb_policy *)arg;
policy->vtable->shutdown_locked(exec_ctx, policy);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
index 568bb2ba8d..7ad322902b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
@@ -49,7 +49,7 @@ typedef struct {
static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- call_data *calld = arg;
+ call_data *calld = (call_data *)arg;
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
@@ -59,7 +59,7 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- call_data *calld = arg;
+ call_data *calld = (call_data *)arg;
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
@@ -70,12 +70,13 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
calld->client_stats = grpc_grpclb_client_stats_ref(
- args->context[GRPC_GRPCLB_CLIENT_STATS].value);
+ (grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
+ .value);
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
return GRPC_ERROR_NONE;
@@ -84,7 +85,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
// Record call finished, optionally setting client_failed_to_send and
// received.
grpc_grpclb_client_stats_add_call_finished(
@@ -98,7 +99,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {
@@ -132,6 +133,5 @@ const grpc_channel_filter grpc_client_load_reporting_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client_load_reporting"};
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
index bb9217d843..85ef7894ea 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -101,6 +101,7 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
+#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -137,7 +138,7 @@ static grpc_error *initial_metadata_add_lb_token(
}
static void destroy_client_stats(void *arg) {
- grpc_grpclb_client_stats_unref(arg);
+ grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
}
typedef struct wrapped_rr_closure_arg {
@@ -181,7 +182,7 @@ typedef struct wrapped_rr_closure_arg {
* order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- wrapped_rr_closure_arg *wc_arg = arg;
+ wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
@@ -245,7 +246,7 @@ static void add_pending_pick(pending_pick **root,
grpc_connected_subchannel **target,
grpc_call_context_element *context,
grpc_closure *on_complete) {
- pending_pick *pp = gpr_zalloc(sizeof(*pp));
+ pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
pp->next = *root;
pp->pick_args = *pick_args;
pp->target = target;
@@ -271,7 +272,7 @@ typedef struct pending_ping {
} pending_ping;
static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
- pending_ping *pping = gpr_zalloc(sizeof(*pping));
+ pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
@@ -285,7 +286,7 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
* glb_lb_policy
*/
typedef struct rr_connectivity_data rr_connectivity_data;
-static const grpc_lb_policy_vtable glb_lb_policy_vtable;
+
typedef struct glb_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
@@ -671,7 +672,7 @@ static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
grpc_lb_addresses *addresses =
process_serverlist_locked(exec_ctx, glb_policy->serverlist);
GPR_ASSERT(addresses != NULL);
- grpc_lb_policy_args *args = gpr_zalloc(sizeof(*args));
+ grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
// Replace the LB addresses in the channel args that we pass down to
@@ -727,7 +728,7 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* Allocate the data for the tracking of the new RR policy's connectivity.
* It'll be deallocated in glb_rr_connectivity_changed() */
rr_connectivity_data *rr_connectivity =
- gpr_zalloc(sizeof(rr_connectivity_data));
+ (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
glb_rr_connectivity_changed_locked, rr_connectivity,
grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -798,7 +799,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- rr_connectivity_data *rr_connectivity = arg;
+ rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
if (glb_policy->shutting_down) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
@@ -841,8 +842,8 @@ static grpc_slice_hash_table_entry targets_info_entry_create(
}
static int balancer_name_cmp_fn(void *a, void *b) {
- const char *a_str = a;
- const char *b_str = b;
+ const char *a_str = (const char *)a;
+ const char *b_str = (const char *)b;
return strcmp(a_str, b_str);
}
@@ -869,7 +870,8 @@ static grpc_channel_args *build_lb_channel_args(
grpc_lb_addresses *lb_addresses =
grpc_lb_addresses_create(num_grpclb_addrs, NULL);
grpc_slice_hash_table_entry *targets_info_entries =
- gpr_zalloc(sizeof(*targets_info_entries) * num_grpclb_addrs);
+ (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
+ num_grpclb_addrs);
size_t lb_addresses_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -911,92 +913,6 @@ static grpc_channel_args *build_lb_channel_args(
return result;
}
-static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error);
-static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args) {
- /* Count the number of gRPC-LB addresses. There must be at least one.
- * TODO(roth): For now, we ignore non-balancer addresses, but in the
- * future, we may change the behavior such that we fall back to using
- * the non-balancer addresses if we cannot reach any balancers. In the
- * fallback case, we should use the LB policy indicated by
- * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
- * unset, we should default to pick_first). */
- const grpc_arg *arg =
- grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
- return NULL;
- }
- grpc_lb_addresses *addresses = arg->value.pointer.p;
- size_t num_grpclb_addrs = 0;
- for (size_t i = 0; i < addresses->num_addresses; ++i) {
- if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
- }
- if (num_grpclb_addrs == 0) return NULL;
-
- glb_lb_policy *glb_policy = gpr_zalloc(sizeof(*glb_policy));
-
- /* Get server name. */
- arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
- GPR_ASSERT(arg != NULL);
- GPR_ASSERT(arg->type == GRPC_ARG_STRING);
- grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
- GPR_ASSERT(uri->path[0] != '\0');
- glb_policy->server_name =
- gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
- glb_policy->server_name);
- }
- grpc_uri_destroy(uri);
-
- glb_policy->cc_factory = args->client_channel_factory;
- GPR_ASSERT(glb_policy->cc_factory != NULL);
-
- arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
- glb_policy->lb_call_timeout_ms =
- grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
-
- // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
- // since we use this to trigger the client_load_reporting filter.
- grpc_arg new_arg =
- grpc_channel_arg_string_create(GRPC_ARG_LB_POLICY_NAME, "grpclb");
- static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
- glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
- args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
-
- /* Create a client channel over them to communicate with a LB service */
- glb_policy->response_generator =
- grpc_fake_resolver_response_generator_create();
- grpc_channel_args *lb_channel_args = build_lb_channel_args(
- exec_ctx, addresses, glb_policy->response_generator, args->args);
- char *uri_str;
- gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
- glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
- exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
-
- /* Propagate initial resolution */
- grpc_fake_resolver_response_generator_set_response(
- exec_ctx, glb_policy->response_generator, lb_channel_args);
- grpc_channel_args_destroy(exec_ctx, lb_channel_args);
- gpr_free(uri_str);
- if (glb_policy->lb_channel == NULL) {
- gpr_free((void *)glb_policy->server_name);
- grpc_channel_args_destroy(exec_ctx, glb_policy->args);
- gpr_free(glb_policy);
- return NULL;
- }
- GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
- glb_lb_channel_on_connectivity_changed_cb, glb_policy,
- grpc_combiner_scheduler(args->combiner));
- grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
- grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
- "grpclb");
- return &glb_policy->base;
-}
-
static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
GPR_ASSERT(glb_policy->pending_picks == NULL);
@@ -1011,6 +927,7 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
+ grpc_subchannel_index_unref();
if (glb_policy->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
@@ -1190,7 +1107,8 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- wrapped_rr_closure_arg *wc_arg = gpr_zalloc(sizeof(wrapped_rr_closure_arg));
+ wrapped_rr_closure_arg *wc_arg =
+ (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
@@ -1273,7 +1191,7 @@ static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
glb_policy->client_load_report_payload = NULL;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
@@ -1302,7 +1220,8 @@ static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
grpc_grpclb_dropped_call_counts *drop_entries =
- request->client_stats.calls_finished_with_drop.arg;
+ (grpc_grpclb_dropped_call_counts *)
+ request->client_stats.calls_finished_with_drop.arg;
return request->client_stats.num_calls_started == 0 &&
request->client_stats.num_calls_finished == 0 &&
request->client_stats.num_calls_finished_with_client_failed_to_send ==
@@ -1313,7 +1232,7 @@ static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
@@ -1520,7 +1439,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->initial_request_sent = true;
// If we attempted to send a client load report before the initial
// request was sent, send the load report now.
@@ -1533,7 +1452,7 @@ static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
@@ -1544,6 +1463,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_byte_buffer_reader bbr;
grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
+ grpc_byte_buffer_reader_destroy(&bbr);
grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
grpc_grpclb_initial_response *response = NULL;
@@ -1640,6 +1560,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
+ } else {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_response_received_locked_shutdown");
}
} else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received_locked" weak ref taken in
@@ -1651,7 +1574,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->retry_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
@@ -1666,7 +1589,7 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
GPR_ASSERT(glb_policy->lb_call != NULL);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
char *status_details =
@@ -1729,8 +1652,8 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
}
- glb_policy->pending_update_args =
- gpr_zalloc(sizeof(*glb_policy->pending_update_args));
+ glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
+ sizeof(*glb_policy->pending_update_args));
glb_policy->pending_update_args->client_channel_factory =
args->client_channel_factory;
glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
@@ -1758,7 +1681,8 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
(void *)glb_policy);
}
}
- const grpc_lb_addresses *addresses = arg->value.pointer.p;
+ const grpc_lb_addresses *addresses =
+ (const grpc_lb_addresses *)arg->value.pointer.p;
GPR_ASSERT(glb_policy->lb_channel != NULL);
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
@@ -1791,7 +1715,7 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
if (glb_policy->shutting_down) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
@@ -1862,6 +1786,90 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
glb_notify_on_state_change_locked,
glb_update_locked};
+static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy_factory *factory,
+ grpc_lb_policy_args *args) {
+ /* Count the number of gRPC-LB addresses. There must be at least one.
+ * TODO(roth): For now, we ignore non-balancer addresses, but in the
+ * future, we may change the behavior such that we fall back to using
+ * the non-balancer addresses if we cannot reach any balancers. In the
+ * fallback case, we should use the LB policy indicated by
+ * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
+ * unset, we should default to pick_first). */
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+ return NULL;
+ }
+ grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
+ size_t num_grpclb_addrs = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
+ }
+ if (num_grpclb_addrs == 0) return NULL;
+
+ glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
+
+ /* Get server name. */
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
+ GPR_ASSERT(arg != NULL);
+ GPR_ASSERT(arg->type == GRPC_ARG_STRING);
+ grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+ GPR_ASSERT(uri->path[0] != '\0');
+ glb_policy->server_name =
+ gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
+ glb_policy->server_name);
+ }
+ grpc_uri_destroy(uri);
+
+ glb_policy->cc_factory = args->client_channel_factory;
+ GPR_ASSERT(glb_policy->cc_factory != NULL);
+
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
+ glb_policy->lb_call_timeout_ms =
+ grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
+
+ // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
+ // since we use this to trigger the client_load_reporting filter.
+ grpc_arg new_arg = grpc_channel_arg_string_create(
+ (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
+ static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
+ glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
+ args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
+
+ /* Create a client channel over them to communicate with a LB service */
+ glb_policy->response_generator =
+ grpc_fake_resolver_response_generator_create();
+ grpc_channel_args *lb_channel_args = build_lb_channel_args(
+ exec_ctx, addresses, glb_policy->response_generator, args->args);
+ char *uri_str;
+ gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
+ glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
+ exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
+
+ /* Propagate initial resolution */
+ grpc_fake_resolver_response_generator_set_response(
+ exec_ctx, glb_policy->response_generator, lb_channel_args);
+ grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+ gpr_free(uri_str);
+ if (glb_policy->lb_channel == NULL) {
+ gpr_free((void *)glb_policy->server_name);
+ grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+ gpr_free(glb_policy);
+ return NULL;
+ }
+ grpc_subchannel_index_ref();
+ GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
+ glb_lb_channel_on_connectivity_changed_cb, glb_policy,
+ grpc_combiner_scheduler(args->combiner));
+ grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
+ grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
+ "grpclb");
+ return &glb_policy->base;
+}
+
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
index 5b62623145..903120ca7d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
@@ -42,7 +42,8 @@ struct grpc_grpclb_client_stats {
};
grpc_grpclb_client_stats* grpc_grpclb_client_stats_create() {
- grpc_grpclb_client_stats* client_stats = gpr_zalloc(sizeof(*client_stats));
+ grpc_grpclb_client_stats* client_stats =
+ (grpc_grpclb_client_stats*)gpr_zalloc(sizeof(*client_stats));
gpr_ref_init(&client_stats->refs, 1);
return client_stats;
}
@@ -88,7 +89,8 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
// Record the drop.
if (client_stats->drop_token_counts == NULL) {
client_stats->drop_token_counts =
- gpr_zalloc(sizeof(grpc_grpclb_dropped_call_counts));
+ (grpc_grpclb_dropped_call_counts*)gpr_zalloc(
+ sizeof(grpc_grpclb_dropped_call_counts));
}
grpc_grpclb_dropped_call_counts* drop_token_counts =
client_stats->drop_token_counts;
@@ -103,9 +105,9 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
while (new_num_entries < drop_token_counts->num_entries + 1) {
new_num_entries *= 2;
}
- drop_token_counts->token_counts =
- gpr_realloc(drop_token_counts->token_counts,
- new_num_entries * sizeof(grpc_grpclb_drop_token_count));
+ drop_token_counts->token_counts = (grpc_grpclb_drop_token_count*)gpr_realloc(
+ drop_token_counts->token_counts,
+ new_num_entries * sizeof(grpc_grpclb_drop_token_count));
grpc_grpclb_drop_token_count* new_entry =
&drop_token_counts->token_counts[drop_token_counts->num_entries++];
new_entry->token = gpr_strdup(token);
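
/* Sketch of the geometric growth used for the drop-token array above:
   capacity doubles until it covers one more entry, so a run of N adds
   costs O(N) reallocation work overall. The struct below is an
   illustrative stand-in for grpc_grpclb_dropped_call_counts. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  const char **tokens;
  size_t num_entries;
  size_t capacity;
} token_counts;

static void add_token(token_counts *tc, const char *token) {
  if (tc->num_entries == tc->capacity) {
    size_t new_cap = tc->capacity == 0 ? 1 : tc->capacity;
    while (new_cap < tc->num_entries + 1) new_cap *= 2; /* double as needed */
    tc->tokens = realloc(tc->tokens, new_cap * sizeof(*tc->tokens));
    tc->capacity = new_cap;
  }
  tc->tokens[tc->num_entries++] = token;
}

int main(void) {
  token_counts tc = {0};
  const char *toks[] = {"rate_limit", "load_shed", "rate_limit"};
  for (size_t i = 0; i < 3; ++i) add_token(&tc, toks[i]);
  printf("entries=%zu capacity=%zu\n", tc.num_entries, tc.capacity);
  free(tc.tokens);
  return 0;
}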
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
index 6fa29f326e..8ef6dfc6f4 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
@@ -25,7 +25,7 @@
/* invoked once for every Server in ServerList */
static bool count_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
- grpc_grpclb_serverlist *sl = *arg;
+ grpc_grpclb_serverlist *sl = (grpc_grpclb_serverlist *)*arg;
grpc_grpclb_server server;
if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -46,9 +46,10 @@ typedef struct decode_serverlist_arg {
/* invoked once for every Server in ServerList */
static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
- decode_serverlist_arg *dec_arg = *arg;
+ decode_serverlist_arg *dec_arg = (decode_serverlist_arg *)*arg;
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
- grpc_grpclb_server *server = gpr_zalloc(sizeof(grpc_grpclb_server));
+ grpc_grpclb_server *server =
+ (grpc_grpclb_server *)gpr_zalloc(sizeof(grpc_grpclb_server));
if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
gpr_free(server);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -59,7 +60,8 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
}
grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
- grpc_grpclb_request *req = gpr_malloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request *req =
+ (grpc_grpclb_request *)gpr_malloc(sizeof(grpc_grpclb_request));
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
@@ -78,14 +80,15 @@ static void populate_timestamp(gpr_timespec timestamp,
static bool encode_string(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
- char *str = *arg;
+ char *str = (char *)*arg;
if (!pb_encode_tag_for_field(stream, field)) return false;
return pb_encode_string(stream, (uint8_t *)str, strlen(str));
}
static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
- grpc_grpclb_dropped_call_counts *drop_entries = *arg;
+ grpc_grpclb_dropped_call_counts *drop_entries =
+ (grpc_grpclb_dropped_call_counts *)*arg;
if (drop_entries == NULL) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
@@ -104,7 +107,8 @@ static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats *client_stats) {
- grpc_grpclb_request *req = gpr_zalloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request *req =
+ (grpc_grpclb_request *)gpr_zalloc(sizeof(grpc_grpclb_request));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
@@ -144,7 +148,8 @@ grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
if (request->has_client_stats) {
grpc_grpclb_dropped_call_counts *drop_entries =
- request->client_stats.calls_finished_with_drop.arg;
+ (grpc_grpclb_dropped_call_counts *)
+ request->client_stats.calls_finished_with_drop.arg;
grpc_grpclb_dropped_call_counts_destroy(drop_entries);
}
gpr_free(request);
@@ -166,7 +171,8 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
if (!res.has_initial_response) return NULL;
grpc_grpclb_initial_response *initial_res =
- gpr_malloc(sizeof(grpc_grpclb_initial_response));
+ (grpc_grpclb_initial_response *)gpr_malloc(
+ sizeof(grpc_grpclb_initial_response));
memcpy(initial_res, &res.initial_response,
sizeof(grpc_grpclb_initial_response));
@@ -179,7 +185,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
- grpc_grpclb_serverlist *sl = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist *sl =
+ (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
// First pass: count number of servers.
@@ -193,7 +200,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
- sl->servers = gpr_zalloc(sizeof(grpc_grpclb_server *) * sl->num_servers);
+ sl->servers = (grpc_grpclb_server **)gpr_zalloc(
+ sizeof(grpc_grpclb_server *) * sl->num_servers);
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@@ -226,13 +234,16 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist *sl) {
- grpc_grpclb_serverlist *copy = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist *copy =
+ (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
- copy->servers = gpr_malloc(sizeof(grpc_grpclb_server *) * sl->num_servers);
+ copy->servers = (grpc_grpclb_server **)gpr_malloc(
+ sizeof(grpc_grpclb_server *) * sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
- copy->servers[i] = gpr_malloc(sizeof(grpc_grpclb_server));
+ copy->servers[i] =
+ (grpc_grpclb_server *)gpr_malloc(sizeof(grpc_grpclb_server));
memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server));
}
return copy;
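
grpc_grpclb_response_parse_serverlist above walks the same encoded response twice: a first pass (count_serverlist) only counts servers so that sl->servers can be allocated at exactly the right size, and a second pass (decode_serverlist) fills the preallocated slots. A toy illustration of that two-pass shape over a simple length-prefixed encoding, not the real protobuf wire format that nanopb parses:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy encoding: each record is a 1-byte length followed by that many bytes. */
static size_t count_records(const uint8_t *buf, size_t len) {
  size_t count = 0, pos = 0;
  while (pos < len && pos + 1 + buf[pos] <= len) {
    pos += 1 + buf[pos];
    ++count;
  }
  return count;
}

int main(void) {
  const uint8_t wire[] = {3, 'f', 'o', 'o', 2, 'h', 'i'};
  /* First pass: count, so the array can be sized exactly. */
  size_t n = count_records(wire, sizeof(wire));
  const uint8_t **records = (const uint8_t **)calloc(n, sizeof(*records));
  /* Second pass: record the start of each payload into its slot. */
  size_t pos = 0, i = 0;
  while (i < n) {
    records[i++] = &wire[pos + 1];
    pos += 1 + wire[pos];
  }
  printf("decoded %zu records, first byte of record 0: %c\n", n, *records[0]);
  free(records);
  return 0;
}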
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
index fd0fb41fb9..d20cbb8388 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
@@ -89,6 +89,7 @@ static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
"picked_first_destroy");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+ grpc_subchannel_index_unref();
if (p->pending_update_args != NULL) {
grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
gpr_free(p->pending_update_args);
@@ -217,7 +218,7 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pp = gpr_malloc(sizeof(*pp));
+ pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -296,8 +297,6 @@ static void stop_connectivity_watchers(grpc_exec_ctx *exec_ctx,
static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
- /* Find the number of backend addresses. We ignore balancer
- * addresses, since we don't know how to handle them. */
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -316,12 +315,9 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
return;
}
- const grpc_lb_addresses *addresses = arg->value.pointer.p;
- size_t num_addrs = 0;
- for (size_t i = 0; i < addresses->num_addresses; i++) {
- if (!addresses->addresses[i].is_balancer) ++num_addrs;
- }
- if (num_addrs == 0) {
+ const grpc_lb_addresses *addresses =
+ (const grpc_lb_addresses *)arg->value.pointer.p;
+ if (addresses->num_addresses == 0) {
// Empty update. Unsubscribe from all current subchannels and put the
// channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set(
@@ -333,9 +329,10 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
- (void *)p, (unsigned long)num_addrs);
+ (void *)p, (unsigned long)addresses->num_addresses);
}
- grpc_subchannel_args *sc_args = gpr_zalloc(sizeof(*sc_args) * num_addrs);
+ grpc_subchannel_args *sc_args = (grpc_subchannel_args *)gpr_zalloc(
+ sizeof(*sc_args) * addresses->num_addresses);
/* We remove the following keys in order for subchannel keys belonging to
* subchannels point to the same address to match. */
static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
@@ -344,7 +341,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
/* Create list of subchannel args for new addresses in \a args. */
for (size_t i = 0; i < addresses->num_addresses; i++) {
- if (addresses->addresses[i].is_balancer) continue;
+ // If there were any balancers, we would have chosen the grpclb policy instead.
+ GPR_ASSERT(!addresses->addresses[i].is_balancer);
if (addresses->addresses[i].user_data != NULL) {
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
@@ -396,7 +394,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
gpr_free(p->pending_update_args);
}
- p->pending_update_args = gpr_zalloc(sizeof(*p->pending_update_args));
+ p->pending_update_args =
+ (grpc_lb_policy_args *)gpr_zalloc(sizeof(*p->pending_update_args));
p->pending_update_args->client_channel_factory =
args->client_channel_factory;
p->pending_update_args->args = grpc_channel_args_copy(args->args);
@@ -405,7 +404,7 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
/* Create the subchannels for the new subchannel args/addresses. */
grpc_subchannel **new_subchannels =
- gpr_zalloc(sizeof(*new_subchannels) * sc_args_count);
+ (grpc_subchannel **)gpr_zalloc(sizeof(*new_subchannels) * sc_args_count);
size_t num_new_subchannels = 0;
for (size_t i = 0; i < sc_args_count; i++) {
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
@@ -460,7 +459,7 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- pick_first_lb_policy *p = arg;
+ pick_first_lb_policy *p = (pick_first_lb_policy *)arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
@@ -682,12 +681,13 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- pick_first_lb_policy *p = gpr_zalloc(sizeof(*p));
+ pick_first_lb_policy *p = (pick_first_lb_policy *)gpr_zalloc(sizeof(*p));
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
}
pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
+ grpc_subchannel_index_ref();
GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p,
grpc_combiner_scheduler(args->combiner));
return &p->base;
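
pf_pick_locked above queues picks that arrive before any subchannel is READY by pushing onto an intrusive singly linked list (pp->next = p->pending_picks). A minimal sketch of that push-then-drain pattern; pending_pick here is an illustrative stand-in for the real struct:

#include <stdio.h>
#include <stdlib.h>

typedef struct pending_pick {
  struct pending_pick *next;
  int id;
} pending_pick;

int main(void) {
  pending_pick *head = NULL;
  for (int i = 0; i < 3; i++) { /* picks arriving before READY */
    pending_pick *pp = (pending_pick *)malloc(sizeof(*pp));
    pp->next = head; /* LIFO push, as in pf_pick_locked */
    pp->id = i;
    head = pp;
  }
  while (head != NULL) { /* drained once a subchannel becomes READY */
    pending_pick *pp = head;
    head = pp->next;
    printf("completing pick %d\n", pp->id);
    free(pp);
  }
  return 0;
}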
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
index a7f7e9542c..a3a62e9f3c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
@@ -30,6 +30,7 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
+#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -74,9 +75,6 @@ typedef struct round_robin_lb_policy {
bool started_picking;
/** are we shutting down? */
bool shutdown;
- /** has the policy gotten into the GRPC_CHANNEL_SHUTDOWN? No picks can be
- * service after this point, the policy will never transition out. */
- bool in_connectivity_shutdown;
/** List of picks that are waiting on connectivity */
pending_pick *pending_picks;
@@ -147,10 +145,11 @@ struct rr_subchannel_list {
static rr_subchannel_list *rr_subchannel_list_create(round_robin_lb_policy *p,
size_t num_subchannels) {
- rr_subchannel_list *subchannel_list = gpr_zalloc(sizeof(*subchannel_list));
+ rr_subchannel_list *subchannel_list =
+ (rr_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
subchannel_list->policy = p;
subchannel_list->subchannels =
- gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
+ (subchannel_data *)gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
subchannel_list->num_subchannels = num_subchannels;
gpr_ref_init(&subchannel_list->refcount, 1);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
@@ -312,6 +311,7 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
(void *)pol, (void *)pol);
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+ grpc_subchannel_index_unref();
gpr_free(p);
}
@@ -424,7 +424,6 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
GPR_ASSERT(!p->shutdown);
- GPR_ASSERT(!p->in_connectivity_shutdown);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick", (void *)pol);
}
@@ -456,7 +455,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pending_pick *pp = gpr_malloc(sizeof(*pp));
+ pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
@@ -537,7 +536,7 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"rr_shutdown");
- p->in_connectivity_shutdown = true;
+ p->shutdown = true;
new_state = GRPC_CHANNEL_SHUTDOWN;
} else if (subchannel_list->num_transient_failures ==
p->subchannel_list->num_subchannels) { /* 4) TRANSIENT_FAILURE */
@@ -557,7 +556,7 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- subchannel_data *sd = arg;
+ subchannel_data *sd = (subchannel_data *)arg;
round_robin_lb_policy *p = sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
@@ -590,7 +589,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Dispose of outdated subchannel lists.
if (sd->subchannel_list != p->subchannel_list &&
sd->subchannel_list != p->latest_pending_subchannel_list) {
- char *reason = NULL;
+ const char *reason = NULL;
if (sd->subchannel_list->shutting_down) {
reason = "sl_outdated_straggler";
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, reason);
@@ -741,8 +740,6 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
round_robin_lb_policy *p = (round_robin_lb_policy *)policy;
- /* Find the number of backend addresses. We ignore balancer addresses, since
- * we don't know how to handle them. */
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -760,13 +757,10 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
return;
}
- grpc_lb_addresses *addresses = arg->value.pointer.p;
- size_t num_addrs = 0;
- for (size_t i = 0; i < addresses->num_addresses; i++) {
- if (!addresses->addresses[i].is_balancer) ++num_addrs;
- }
- rr_subchannel_list *subchannel_list = rr_subchannel_list_create(p, num_addrs);
- if (num_addrs == 0) {
+ grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
+ rr_subchannel_list *subchannel_list =
+ rr_subchannel_list_create(p, addresses->num_addresses);
+ if (addresses->num_addresses == 0) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
@@ -798,9 +792,8 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
GRPC_ARG_LB_ADDRESSES};
/* Create subchannels for addresses in the update. */
for (size_t i = 0; i < addresses->num_addresses; i++) {
- /* Skip balancer addresses, since we only know how to handle backends. */
- if (addresses->addresses[i].is_balancer) continue;
- GPR_ASSERT(i < num_addrs);
+ // If there were any balancers, we would have chosen the grpclb policy instead.
+ GPR_ASSERT(!addresses->addresses[i].is_balancer);
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
@@ -811,19 +804,30 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
+ grpc_channel_args_destroy(exec_ctx, new_args);
+ grpc_error *error;
+ // Get the connectivity state of the subchannel. Pre-existing subchannels
+ // may be in a state other than INIT.
+ const grpc_connectivity_state subchannel_connectivity_state =
+ grpc_subchannel_check_connectivity(subchannel, &error);
+ if (error != GRPC_ERROR_NONE) {
+ // The subchannel is in error (e.g. shutting down). Ignore it.
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannel, "new_sc_connectivity_error");
+ GRPC_ERROR_UNREF(error);
+ continue;
+ }
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(
GPR_DEBUG,
"[RR %p] index %lu: Created subchannel %p for address uri %s into "
- "subchannel_list %p",
+ "subchannel_list %p. Connectivity state %s",
(void *)p, (unsigned long)subchannel_index, (void *)subchannel,
- address_uri, (void *)subchannel_list);
+ address_uri, (void *)subchannel_list,
+ grpc_connectivity_state_name(subchannel_connectivity_state));
gpr_free(address_uri);
}
- grpc_channel_args_destroy(exec_ctx, new_args);
-
subchannel_data *sd = &subchannel_list->subchannels[subchannel_index++];
sd->subchannel_list = subchannel_list;
sd->subchannel = subchannel;
@@ -835,7 +839,7 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
* won't be referring to this value again and it'll be overwritten after
* the first call to rr_connectivity_changed_locked */
sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
- sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
+ sd->curr_connectivity_state = subchannel_connectivity_state;
sd->user_data_vtable = addresses->user_data_vtable;
if (sd->user_data_vtable != NULL) {
sd->user_data =
@@ -886,8 +890,9 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- round_robin_lb_policy *p = gpr_zalloc(sizeof(*p));
+ round_robin_lb_policy *p = (round_robin_lb_policy *)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
+ grpc_subchannel_index_ref();
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
rr_update_locked(exec_ctx, &p->base, args);
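
The rr_update_locked changes stop assuming a fresh subchannel starts in IDLE: the policy now queries each subchannel's current connectivity state (pre-existing ones may already be connected or failing) and skips any subchannel that reports an error. A schematic sketch of that filter loop, with check_state standing in for grpc_subchannel_check_connectivity; conn_state and the values returned are illustrative:

#include <stdio.h>

typedef enum { ST_IDLE, ST_READY, ST_ERROR } conn_state;

static conn_state check_state(int i) { return i == 1 ? ST_ERROR : ST_IDLE; }

int main(void) {
  int kept = 0;
  for (int i = 0; i < 3; i++) {
    conn_state st = check_state(i);
    if (st == ST_ERROR) continue; /* the filter unrefs and skips these */
    /* In the real code this state seeds sd->curr_connectivity_state. */
    printf("subchannel %d seeded with state %d\n", i, (int)st);
    ++kept;
  }
  printf("kept %d of 3 subchannels\n", kept);
  return 0;
}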
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.c b/src/core/ext/filters/client_channel/lb_policy_factory.c
index 538d8d65ed..4d1405454c 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.c
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.c
@@ -28,11 +28,12 @@
grpc_lb_addresses* grpc_lb_addresses_create(
size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable) {
- grpc_lb_addresses* addresses = gpr_zalloc(sizeof(grpc_lb_addresses));
+ grpc_lb_addresses* addresses =
+ (grpc_lb_addresses*)gpr_zalloc(sizeof(grpc_lb_addresses));
addresses->num_addresses = num_addresses;
addresses->user_data_vtable = user_data_vtable;
const size_t addresses_size = sizeof(grpc_lb_address) * num_addresses;
- addresses->addresses = gpr_zalloc(addresses_size);
+ addresses->addresses = (grpc_lb_address*)gpr_zalloc(addresses_size);
return addresses;
}
@@ -125,13 +126,14 @@ void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx,
}
static void* lb_addresses_copy(void* addresses) {
- return grpc_lb_addresses_copy(addresses);
+ return grpc_lb_addresses_copy((grpc_lb_addresses*)addresses);
}
static void lb_addresses_destroy(grpc_exec_ctx* exec_ctx, void* addresses) {
- grpc_lb_addresses_destroy(exec_ctx, addresses);
+ grpc_lb_addresses_destroy(exec_ctx, (grpc_lb_addresses*)addresses);
}
static int lb_addresses_cmp(void* addresses1, void* addresses2) {
- return grpc_lb_addresses_cmp(addresses1, addresses2);
+ return grpc_lb_addresses_cmp((grpc_lb_addresses*)addresses1,
+ (grpc_lb_addresses*)addresses2);
}
static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
lb_addresses_copy, lb_addresses_destroy, lb_addresses_cmp};
@@ -139,7 +141,7 @@ static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
grpc_arg grpc_lb_addresses_create_channel_arg(
const grpc_lb_addresses* addresses) {
return grpc_channel_arg_pointer_create(
- GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable);
+ (char*)GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable);
}
grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
@@ -148,7 +150,7 @@ grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
grpc_channel_args_find(channel_args, GRPC_ARG_LB_ADDRESSES);
if (lb_addresses_arg == NULL || lb_addresses_arg->type != GRPC_ARG_POINTER)
return NULL;
- return lb_addresses_arg->value.pointer.p;
+ return (grpc_lb_addresses*)lb_addresses_arg->value.pointer.p;
}
void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {
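
lb_addresses_arg_vtable above follows the standard shape for pointer-valued channel args: typed copy/destroy/cmp functions wrapped in void* adapters that cast back, gathered into a vtable of function pointers. A self-contained sketch of the same idiom for a string payload; pointer_vtable and the str_* helpers are illustrative, not gRPC types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_str(const char *s) {
  char *d = (char *)malloc(strlen(s) + 1);
  strcpy(d, s);
  return d;
}

typedef struct {
  void *(*copy)(void *p);
  void (*destroy)(void *p);
  int (*cmp)(void *a, void *b);
} pointer_vtable;

/* void* adapters cast back to the payload's real type. */
static void *str_copy(void *p) { return dup_str((const char *)p); }
static void str_destroy(void *p) { free(p); }
static int str_cmp(void *a, void *b) {
  return strcmp((const char *)a, (const char *)b);
}

static const pointer_vtable str_vtable = {str_copy, str_destroy, str_cmp};

int main(void) {
  void *orig = dup_str("lb_addresses");
  void *copy = str_vtable.copy(orig);
  printf("cmp(orig, copy) = %d\n", str_vtable.cmp(orig, copy));
  str_vtable.destroy(orig);
  str_vtable.destroy(copy);
  return 0;
}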
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.c b/src/core/ext/filters/client_channel/proxy_mapper_registry.c
index 5f43a0596a..09967eea3c 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.c
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.c
@@ -34,7 +34,7 @@ typedef struct {
static void grpc_proxy_mapper_list_register(grpc_proxy_mapper_list* list,
bool at_start,
grpc_proxy_mapper* mapper) {
- list->list = gpr_realloc(
+ list->list = (grpc_proxy_mapper**)gpr_realloc(
list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*));
if (at_start) {
memmove(list->list + 1, list->list,
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
index f1480bb1ae..9bb229ad95 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
@@ -144,7 +144,7 @@ static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- ares_dns_resolver *r = arg;
+ ares_dns_resolver *r = (ares_dns_resolver *)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
@@ -204,7 +204,7 @@ static char *choose_service_config(char *service_config_choice_json) {
int random_pct = rand() % 100;
int percentage;
if (sscanf(field->value, "%d", &percentage) != 1 ||
- random_pct > percentage) {
+ random_pct > percentage || percentage == 0) {
service_config_json = NULL;
break;
}
@@ -227,7 +227,7 @@ static char *choose_service_config(char *service_config_choice_json) {
static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- ares_dns_resolver *r = arg;
+ ares_dns_resolver *r = (ares_dns_resolver *)arg;
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
@@ -249,7 +249,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
service_config_string);
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- GRPC_ARG_SERVICE_CONFIG, service_config_string);
+ (char *)GRPC_ARG_SERVICE_CONFIG, service_config_string);
service_config = grpc_service_config_create(service_config_string);
if (service_config != NULL) {
const char *lb_policy_name =
@@ -257,7 +257,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (lb_policy_name != NULL) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
+ (char *)GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
}
}
}
@@ -363,7 +363,8 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
const char *path = args->uri->path;
if (path[0] == '/') ++path;
/* Create resolver. */
- ares_dns_resolver *r = gpr_zalloc(sizeof(ares_dns_resolver));
+ ares_dns_resolver *r =
+ (ares_dns_resolver *)gpr_zalloc(sizeof(ares_dns_resolver));
grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
if (0 != strcmp(args->uri->authority, "")) {
r->dns_server = gpr_strdup(args->uri->authority);
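
The small change in choose_service_config above adds percentage == 0 to the rejection test, so a service config choice published with a 0% rollout can never be selected (previously random_pct == 0 would still match it). A standalone sketch of the gate as fixed; choice_matches is an illustrative helper, not a gRPC function:

#include <stdio.h>
#include <stdlib.h>

static int choice_matches(int random_pct, int percentage) {
  /* Mirrors the fixed condition: reject when random_pct > percentage
     or when percentage == 0, so a 0% choice is never taken. */
  return percentage != 0 && random_pct <= percentage;
}

int main(void) {
  srand(42);
  int taken = 0;
  for (int i = 0; i < 1000; i++) {
    if (choice_matches(rand() % 100, 30)) ++taken; /* roughly 30% */
  }
  printf("30%% choice selected %d/1000 times\n", taken);
  printf("0%% choice ever selected? %s\n",
         choice_matches(rand() % 100, 0) ? "yes" : "no");
  return 0;
}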
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
index b696344eab..c30cc93b6f 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
@@ -20,6 +20,7 @@
#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET)
#include <ares.h>
+#include <sys/ioctl.h>
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
@@ -37,8 +38,6 @@
typedef struct fd_node {
/** the owner of this fd node */
grpc_ares_ev_driver *ev_driver;
- /** the grpc_fd owned by this fd node */
- grpc_fd *grpc_fd;
/** a closure wrapping on_readable_cb, which should be invoked when the
grpc_fd in this node becomes readable. */
grpc_closure read_closure;
@@ -50,10 +49,14 @@ typedef struct fd_node {
/** mutex guarding the rest of the state */
gpr_mu mu;
+ /** the grpc_fd owned by this fd node */
+ grpc_fd *fd;
/** if the readable closure has been registered */
bool readable_registered;
/** if the writable closure has been registered */
bool writable_registered;
+ /** if the fd is being shut down */
+ bool shutting_down;
} fd_node;
struct grpc_ares_ev_driver {
@@ -96,22 +99,34 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
}
static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
- gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->fd));
GPR_ASSERT(!fdn->readable_registered);
GPR_ASSERT(!fdn->writable_registered);
gpr_mu_destroy(&fdn->mu);
- grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->grpc_fd);
/* c-ares library has closed the fd inside grpc_fd. This fd may be picked up
immediately by another thread, and should not be closed by the following
grpc_fd_orphan. */
- grpc_fd_orphan(exec_ctx, fdn->grpc_fd, NULL, NULL, true /* already_closed */,
+ grpc_fd_orphan(exec_ctx, fdn->fd, NULL, NULL, true /* already_closed */,
"c-ares query finished");
gpr_free(fdn);
}
+static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
+ gpr_mu_lock(&fdn->mu);
+ fdn->shutting_down = true;
+ if (!fdn->readable_registered && !fdn->writable_registered) {
+ gpr_mu_unlock(&fdn->mu);
+ fd_node_destroy(exec_ctx, fdn);
+ } else {
+ grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "c-ares fd shutdown"));
+ gpr_mu_unlock(&fdn->mu);
+ }
+}
+
grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
grpc_pollset_set *pollset_set) {
- *ev_driver = gpr_malloc(sizeof(grpc_ares_ev_driver));
+ *ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver));
int status = ares_init(&(*ev_driver)->channel);
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
if (status != ARES_SUCCESS) {
@@ -150,9 +165,8 @@ void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
ev_driver->shutting_down = true;
fd_node *fn = ev_driver->fds;
while (fn != NULL) {
- grpc_fd_shutdown(
- exec_ctx, fn->grpc_fd,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_ares_ev_driver_shutdown"));
+ grpc_fd_shutdown(exec_ctx, fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "grpc_ares_ev_driver_shutdown"));
fn = fn->next;
}
gpr_mu_unlock(&ev_driver->mu);
@@ -165,7 +179,7 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
dummy_head.next = *head;
fd_node *node = &dummy_head;
while (node->next != NULL) {
- if (grpc_fd_wrapped_fd(node->next->grpc_fd) == fd) {
+ if (grpc_fd_wrapped_fd(node->next->fd) == fd) {
fd_node *ret = node->next;
node->next = node->next->next;
*head = dummy_head.next;
@@ -176,18 +190,33 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
return NULL;
}
+/* Check if \a fd is still readable */
+static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver,
+ int fd) {
+ size_t bytes_available = 0;
+ return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
+}
+
static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- fd_node *fdn = arg;
+ fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
+ const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->readable_registered = false;
+ if (fdn->shutting_down && !fdn->writable_registered) {
+ gpr_mu_unlock(&fdn->mu);
+ fd_node_destroy(exec_ctx, fdn);
+ grpc_ares_ev_driver_unref(ev_driver);
+ return;
+ }
gpr_mu_unlock(&fdn->mu);
- gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ gpr_log(GPR_DEBUG, "readable on %d", fd);
if (error == GRPC_ERROR_NONE) {
- ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->grpc_fd),
- ARES_SOCKET_BAD);
+ do {
+ ares_process_fd(ev_driver->channel, fd, ARES_SOCKET_BAD);
+ } while (grpc_ares_is_fd_still_readable(ev_driver, fd));
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
// timed out. The pending lookups made on this ev_driver will be cancelled
@@ -205,16 +234,22 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- fd_node *fdn = arg;
+ fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
+ const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->writable_registered = false;
+ if (fdn->shutting_down && !fdn->readable_registered) {
+ gpr_mu_unlock(&fdn->mu);
+ fd_node_destroy(exec_ctx, fdn);
+ grpc_ares_ev_driver_unref(ev_driver);
+ return;
+ }
gpr_mu_unlock(&fdn->mu);
- gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ gpr_log(GPR_DEBUG, "writable on %d", fd);
if (error == GRPC_ERROR_NONE) {
- ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD,
- grpc_fd_wrapped_fd(fdn->grpc_fd));
+ ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, fd);
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
// timed out. The pending lookups made on this ev_driver will be cancelled
@@ -251,19 +286,19 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (fdn == NULL) {
char *fd_name;
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
- fdn = gpr_malloc(sizeof(fd_node));
+ fdn = (fd_node *)gpr_malloc(sizeof(fd_node));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
- fdn->grpc_fd = grpc_fd_create(socks[i], fd_name);
+ fdn->fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;
fdn->readable_registered = false;
fdn->writable_registered = false;
+ fdn->shutting_down = false;
gpr_mu_init(&fdn->mu);
GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_cb, fdn,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_cb, fdn,
grpc_schedule_on_exec_ctx);
- grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set,
- fdn->grpc_fd);
+ grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set, fdn->fd);
gpr_free(fd_name);
}
fdn->next = new_list;
@@ -274,9 +309,8 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (ARES_GETSOCK_READABLE(socks_bitmask, i) &&
!fdn->readable_registered) {
grpc_ares_ev_driver_ref(ev_driver);
- gpr_log(GPR_DEBUG, "notify read on: %d",
- grpc_fd_wrapped_fd(fdn->grpc_fd));
- grpc_fd_notify_on_read(exec_ctx, fdn->grpc_fd, &fdn->read_closure);
+ gpr_log(GPR_DEBUG, "notify read on: %d", grpc_fd_wrapped_fd(fdn->fd));
+ grpc_fd_notify_on_read(exec_ctx, fdn->fd, &fdn->read_closure);
fdn->readable_registered = true;
}
// Register write_closure if the socket is writable and write_closure
@@ -284,9 +318,9 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (ARES_GETSOCK_WRITABLE(socks_bitmask, i) &&
!fdn->writable_registered) {
gpr_log(GPR_DEBUG, "notify write on: %d",
- grpc_fd_wrapped_fd(fdn->grpc_fd));
+ grpc_fd_wrapped_fd(fdn->fd));
grpc_ares_ev_driver_ref(ev_driver);
- grpc_fd_notify_on_write(exec_ctx, fdn->grpc_fd, &fdn->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, fdn->fd, &fdn->write_closure);
fdn->writable_registered = true;
}
gpr_mu_unlock(&fdn->mu);
@@ -299,7 +333,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
while (ev_driver->fds != NULL) {
fd_node *cur = ev_driver->fds;
ev_driver->fds = ev_driver->fds->next;
- fd_node_destroy(exec_ctx, cur);
+ fd_node_shutdown(exec_ctx, cur);
}
ev_driver->fds = new_list;
// If the ev driver has no working fd, all the tasks are done.
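
Two related changes land here: fd nodes are now shut down lazily (fd_node_shutdown defers fd_node_destroy until neither closure is still registered), and on_readable_cb keeps calling ares_process_fd while ioctl(FIONREAD) reports buffered bytes, so a single readable event drains every queued DNS response. A small POSIX demo of that FIONREAD drain loop, using a pipe in place of the c-ares socket:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Same test as grpc_ares_is_fd_still_readable above. */
static int fd_still_readable(int fd) {
  int bytes_available = 0;
  return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
}

int main(void) {
  int fds[2];
  char ch;
  if (pipe(fds) != 0) return 1;
  if (write(fds[1], "abc", 3) != 3) return 1;
  do {
    if (read(fds[0], &ch, 1) != 1) break; /* stand-in for ares_process_fd */
    printf("processed '%c'\n", ch);
  } while (fd_still_readable(fds[0])); /* keep draining buffered bytes */
  close(fds[0]);
  close(fds[1]);
  return 0;
}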
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
index e65723a63b..04379975e1 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
@@ -123,8 +123,8 @@ static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
static grpc_ares_hostbyname_request *create_hostbyname_request(
grpc_ares_request *parent_request, char *host, uint16_t port,
bool is_balancer) {
- grpc_ares_hostbyname_request *hr =
- gpr_zalloc(sizeof(grpc_ares_hostbyname_request));
+ grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)gpr_zalloc(
+ sizeof(grpc_ares_hostbyname_request));
hr->parent_request = parent_request;
hr->host = gpr_strdup(host);
hr->port = port;
@@ -158,9 +158,9 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
}
(*lb_addresses)->num_addresses += i;
- (*lb_addresses)->addresses =
- gpr_realloc((*lb_addresses)->addresses,
- sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
+ (*lb_addresses)->addresses = (grpc_lb_address *)gpr_realloc(
+ (*lb_addresses)->addresses,
+ sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
switch (hostent->h_addrtype) {
case AF_INET6: {
@@ -174,7 +174,7 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
hr->is_balancer /* is_balancer */,
- hr->is_balancer ? strdup(hr->host) : NULL /* balancer_name */,
+ hr->is_balancer ? hr->host : NULL /* balancer_name */,
NULL /* user_data */);
char output[INET6_ADDRSTRLEN];
ares_inet_ntop(AF_INET6, &addr.sin6_addr, output, INET6_ADDRSTRLEN);
@@ -195,7 +195,7 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
hr->is_balancer /* is_balancer */,
- hr->is_balancer ? strdup(hr->host) : NULL /* balancer_name */,
+ hr->is_balancer ? hr->host : NULL /* balancer_name */,
NULL /* user_data */);
char output[INET_ADDRSTRLEN];
ares_inet_ntop(AF_INET, &addr.sin_addr, output, INET_ADDRSTRLEN);
@@ -275,14 +275,15 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
gpr_log(GPR_DEBUG, "on_txt_done_cb");
char *error_msg;
grpc_ares_request *r = (grpc_ares_request *)arg;
+ const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
+ struct ares_txt_ext *result = NULL;
+ struct ares_txt_ext *reply = NULL;
+ grpc_error *error = GRPC_ERROR_NONE;
gpr_mu_lock(&r->mu);
if (status != ARES_SUCCESS) goto fail;
- struct ares_txt_ext *reply = NULL;
status = ares_parse_txt_reply_ext(buf, len, &reply);
if (status != ARES_SUCCESS) goto fail;
// Find service config in TXT record.
- const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
- struct ares_txt_ext *result;
for (result = reply; result != NULL; result = result->next) {
if (result->record_start &&
memcmp(result->txt, g_service_config_attribute_prefix, prefix_len) ==
@@ -293,12 +294,12 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
// Found a service config record.
if (result != NULL) {
size_t service_config_len = result->length - prefix_len;
- *r->service_config_json_out = gpr_malloc(service_config_len + 1);
+ *r->service_config_json_out = (char *)gpr_malloc(service_config_len + 1);
memcpy(*r->service_config_json_out, result->txt + prefix_len,
service_config_len);
for (result = result->next; result != NULL && !result->record_start;
result = result->next) {
- *r->service_config_json_out = gpr_realloc(
+ *r->service_config_json_out = (char *)gpr_realloc(
*r->service_config_json_out, service_config_len + result->length + 1);
memcpy(*r->service_config_json_out + service_config_len, result->txt,
result->length);
@@ -313,7 +314,7 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
fail:
gpr_asprintf(&error_msg, "C-ares TXT lookup status is not ARES_SUCCESS: %s",
ares_strerror(status));
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
@@ -331,6 +332,9 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
char **service_config_json) {
grpc_error *error = GRPC_ERROR_NONE;
+ grpc_ares_hostbyname_request *hr = NULL;
+ grpc_ares_request *r = NULL;
+ ares_channel *channel = NULL;
/* TODO(zyc): Enable tracing after #9603 is checked in */
/* if (grpc_dns_trace) {
gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
@@ -360,7 +364,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
- grpc_ares_request *r = gpr_zalloc(sizeof(grpc_ares_request));
+ r = (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@@ -368,7 +372,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
r->service_config_json_out = service_config_json;
r->success = false;
r->error = GRPC_ERROR_NONE;
- ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+ channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
// If dns_server is specified, use it.
if (dns_server != NULL) {
@@ -409,12 +413,12 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
}
gpr_ref_init(&r->pending_queries, 1);
if (grpc_ipv6_loopback_available()) {
- grpc_ares_hostbyname_request *hr = create_hostbyname_request(
- r, host, strhtons(port), false /* is_balancer */);
+ hr = create_hostbyname_request(r, host, strhtons(port),
+ false /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_cb, hr);
}
- grpc_ares_hostbyname_request *hr = create_hostbyname_request(
- r, host, strhtons(port), false /* is_balancer */);
+ hr = create_hostbyname_request(r, host, strhtons(port),
+ false /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb, hr);
if (check_grpclb) {
/* Query the SRV record */
@@ -502,10 +506,11 @@ static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
if (r->lb_addrs == NULL || r->lb_addrs->num_addresses == 0) {
*resolved_addresses = NULL;
} else {
- *resolved_addresses = gpr_zalloc(sizeof(grpc_resolved_addresses));
+ *resolved_addresses =
+ (grpc_resolved_addresses *)gpr_zalloc(sizeof(grpc_resolved_addresses));
(*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
- (*resolved_addresses)->addrs = gpr_zalloc(sizeof(grpc_resolved_address) *
- (*resolved_addresses)->naddrs);
+ (*resolved_addresses)->addrs = (grpc_resolved_address *)gpr_zalloc(
+ sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs);
for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
memcpy(&(*resolved_addresses)->addrs[i],
@@ -525,7 +530,8 @@ static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
grpc_resolve_address_ares_request *r =
- gpr_zalloc(sizeof(grpc_resolve_address_ares_request));
+ (grpc_resolve_address_ares_request *)gpr_zalloc(
+ sizeof(grpc_resolve_address_ares_request));
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,
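
The declaration shuffling in on_txt_done_cb and grpc_dns_lookup_ares_impl (hoisting reply, result, error, hr, r, and channel above the first goto) keeps the error paths well-formed if this file is compiled as C++, where jumping past an initialized declaration is an error; in C it merely leaves the variable indeterminate at the label. A minimal illustration of the resulting goto-cleanup shape:

#include <stdio.h>
#include <stdlib.h>

/* Everything the fail label touches is declared and initialized before
   the first goto, so no jump crosses an initialization. */
static int parse(const char *input) {
  char *buf = NULL;
  int rc = -1;
  if (input == NULL) goto fail;
  buf = (char *)malloc(16);
  if (buf == NULL) goto fail;
  rc = 0;
fail:
  free(buf); /* free(NULL) is a no-op */
  return rc;
}

int main(void) {
  printf("parse(NULL) = %d\n", parse(NULL));
  printf("parse(\"x\") = %d\n", parse("x"));
  return 0;
}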
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
index 56ed4371a9..69ea440ae6 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
@@ -32,6 +32,7 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
@@ -125,7 +126,6 @@ static const grpc_resolver_vtable fake_resolver_vtable = {
struct grpc_fake_resolver_response_generator {
fake_resolver* resolver; // Set by the fake_resolver constructor to itself.
- grpc_channel_args* next_response;
gpr_refcount refcount;
};
@@ -151,19 +151,26 @@ void grpc_fake_resolver_response_generator_unref(
}
}
-static void set_response_cb(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
- grpc_fake_resolver_response_generator* generator =
- (grpc_fake_resolver_response_generator*)arg;
+typedef struct set_response_closure_arg {
+ grpc_closure set_response_closure;
+ grpc_fake_resolver_response_generator* generator;
+ grpc_channel_args* next_response;
+} set_response_closure_arg;
+
+static void set_response_closure_fn(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ set_response_closure_arg* closure_arg = (set_response_closure_arg*)arg;
+ grpc_fake_resolver_response_generator* generator = closure_arg->generator;
fake_resolver* r = generator->resolver;
if (r->next_results != NULL) {
grpc_channel_args_destroy(exec_ctx, r->next_results);
}
- r->next_results = generator->next_response;
+ r->next_results = closure_arg->next_response;
if (r->results_upon_error != NULL) {
grpc_channel_args_destroy(exec_ctx, r->results_upon_error);
}
- r->results_upon_error = grpc_channel_args_copy(generator->next_response);
+ r->results_upon_error = grpc_channel_args_copy(closure_arg->next_response);
+ gpr_free(closure_arg);
fake_resolver_maybe_finish_next_locked(exec_ctx, r);
}
@@ -171,12 +178,16 @@ void grpc_fake_resolver_response_generator_set_response(
grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator,
grpc_channel_args* next_response) {
GPR_ASSERT(generator->resolver != NULL);
- generator->next_response = grpc_channel_args_copy(next_response);
- GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_CREATE(set_response_cb, generator,
- grpc_combiner_scheduler(
- generator->resolver->base.combiner)),
- GRPC_ERROR_NONE);
+ set_response_closure_arg* closure_arg =
+ (set_response_closure_arg*)gpr_zalloc(sizeof(*closure_arg));
+ closure_arg->generator = generator;
+ closure_arg->next_response = grpc_channel_args_copy(next_response);
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
+ set_response_closure_fn, closure_arg,
+ grpc_combiner_scheduler(
+ generator->resolver->base.combiner)),
+ GRPC_ERROR_NONE);
}
static void* response_generator_arg_copy(void* p) {
@@ -199,7 +210,7 @@ grpc_arg grpc_fake_resolver_response_generator_arg(
grpc_fake_resolver_response_generator* generator) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
+ arg.key = (char*)GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
arg.value.pointer.p = generator;
arg.value.pointer.vtable = &response_generator_arg_vtable;
return arg;
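
The fake resolver change replaces shared mutable state (generator->next_response) with a per-call set_response_closure_arg, so two rapid set_response calls can no longer clobber each other before the combiner runs their callbacks. A sketch of the idea, with the deferred combiner execution collapsed into direct calls; the names here are illustrative, not gRPC API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_str(const char *s) {
  char *d = (char *)malloc(strlen(s) + 1);
  strcpy(d, s);
  return d;
}

/* Each scheduled callback owns its payload; nothing lives on the shared
   generator, so a later call cannot overwrite an earlier one's response. */
typedef struct {
  char *next_response;
} closure_arg;

static void set_response_cb(void *arg) {
  closure_arg *a = (closure_arg *)arg;
  printf("applying response: %s\n", a->next_response);
  free(a->next_response);
  free(a);
}

int main(void) {
  closure_arg *first = (closure_arg *)malloc(sizeof(*first));
  first->next_response = dup_str("servers: A");
  closure_arg *second = (closure_arg *)malloc(sizeof(*second));
  second->next_response = dup_str("servers: B");
  /* Direct calls stand in for the deferred combiner execution. */
  set_response_cb(first);
  set_response_cb(second);
  return 0;
}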
diff --git a/src/core/ext/filters/client_channel/retry_throttle.c b/src/core/ext/filters/client_channel/retry_throttle.c
index 0c7a3ae651..09dcade089 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.c
+++ b/src/core/ext/filters/client_channel/retry_throttle.c
@@ -99,7 +99,7 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
int max_milli_tokens, int milli_token_ratio,
grpc_server_retry_throttle_data* old_throttle_data) {
grpc_server_retry_throttle_data* throttle_data =
- gpr_malloc(sizeof(*throttle_data));
+ (grpc_server_retry_throttle_data*)gpr_malloc(sizeof(*throttle_data));
memset(throttle_data, 0, sizeof(*throttle_data));
gpr_ref_init(&throttle_data->refs, 1);
throttle_data->max_milli_tokens = max_milli_tokens;
@@ -131,20 +131,22 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
//
static void* copy_server_name(void* key, void* unused) {
- return gpr_strdup(key);
+ return gpr_strdup((const char*)key);
}
static long compare_server_name(void* key1, void* key2, void* unused) {
- return strcmp(key1, key2);
+ return strcmp((const char*)key1, (const char*)key2);
}
static void destroy_server_retry_throttle_data(void* value, void* unused) {
- grpc_server_retry_throttle_data* throttle_data = value;
+ grpc_server_retry_throttle_data* throttle_data =
+ (grpc_server_retry_throttle_data*)value;
grpc_server_retry_throttle_data_unref(throttle_data);
}
static void* copy_server_retry_throttle_data(void* value, void* unused) {
- grpc_server_retry_throttle_data* throttle_data = value;
+ grpc_server_retry_throttle_data* throttle_data =
+ (grpc_server_retry_throttle_data*)value;
return grpc_server_retry_throttle_data_ref(throttle_data);
}
@@ -175,7 +177,8 @@ grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
const char* server_name, int max_milli_tokens, int milli_token_ratio) {
gpr_mu_lock(&g_mu);
grpc_server_retry_throttle_data* throttle_data =
- gpr_avl_get(g_avl, (char*)server_name, NULL);
+ (grpc_server_retry_throttle_data*)gpr_avl_get(g_avl, (char*)server_name,
+ NULL);
if (throttle_data == NULL) {
// Entry not found. Create a new one.
throttle_data = grpc_server_retry_throttle_data_create(
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c
index 5788819331..40a51c72d6 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.c
@@ -32,6 +32,7 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
@@ -157,7 +158,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_connected_subchannel *c = arg;
+ grpc_connected_subchannel *c = (grpc_connected_subchannel *)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
gpr_free(c);
}
@@ -181,7 +182,7 @@ void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_subchannel *c = arg;
+ grpc_subchannel *c = (grpc_subchannel *)arg;
gpr_free((void *)c->filters);
grpc_channel_args_destroy(exec_ctx, c->args);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
@@ -290,21 +291,24 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
return c;
}
- c = gpr_zalloc(sizeof(*c));
+ GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx);
+ c = (grpc_subchannel *)gpr_zalloc(sizeof(*c));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
if (c->num_filters > 0) {
- c->filters = gpr_malloc(sizeof(grpc_channel_filter *) * c->num_filters);
+ c->filters = (const grpc_channel_filter **)gpr_malloc(
+ sizeof(grpc_channel_filter *) * c->num_filters);
memcpy((void *)c->filters, args->filters,
sizeof(grpc_channel_filter *) * c->num_filters);
} else {
c->filters = NULL;
}
c->pollset_set = grpc_pollset_set_create();
- grpc_resolved_address *addr = gpr_malloc(sizeof(*addr));
+ grpc_resolved_address *addr =
+ (grpc_resolved_address *)gpr_malloc(sizeof(*addr));
grpc_get_subchannel_address_arg(exec_ctx, args->args, addr);
grpc_resolved_address *new_address = NULL;
grpc_channel_args *new_args = NULL;
@@ -400,7 +404,7 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- external_state_watcher *w = arg;
+ external_state_watcher *w = (external_state_watcher *)arg;
grpc_closure *follow_up = w->notify;
if (w->pollset_set != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set,
@@ -416,7 +420,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_subchannel *c = arg;
+ grpc_subchannel *c = (grpc_subchannel *)arg;
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
@@ -501,7 +505,7 @@ void grpc_subchannel_notify_on_state_change(
}
gpr_mu_unlock(&c->mu);
} else {
- w = gpr_malloc(sizeof(*w));
+ w = (external_state_watcher *)gpr_malloc(sizeof(*w));
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
@@ -533,7 +537,7 @@ void grpc_connected_subchannel_process_transport_op(
static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
grpc_error *error) {
- state_watcher *sw = p;
+ state_watcher *sw = (state_watcher *)p;
grpc_subchannel *c = sw->subchannel;
gpr_mu *mu = &c->mu;
@@ -623,7 +627,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
- sw_subchannel = gpr_malloc(sizeof(*sw_subchannel));
+ sw_subchannel = (state_watcher *)gpr_malloc(sizeof(*sw_subchannel));
sw_subchannel->subchannel = c;
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed,
@@ -660,7 +664,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_subchannel *c = arg;
+ grpc_subchannel *c = (grpc_subchannel *)arg;
grpc_channel_args *delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "connected");
@@ -696,7 +700,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
- grpc_subchannel_call *c = call;
+ grpc_subchannel_call *c = (grpc_subchannel_call *)call;
GPR_ASSERT(c->schedule_closure_after_destroy != NULL);
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_connected_subchannel *connection = c->connection;
@@ -724,20 +728,14 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
-char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *call) {
- grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
- grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
- return top_elem->filter->get_peer(exec_ctx, top_elem);
-}
-
void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call,
- grpc_transport_stream_op_batch *op) {
+ grpc_transport_stream_op_batch *batch) {
GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0);
grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
- top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, op);
+ GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
+ top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, batch);
GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
}
@@ -756,17 +754,19 @@ grpc_error *grpc_connected_subchannel_create_call(
const grpc_connected_subchannel_call_args *args,
grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
- *call = gpr_arena_alloc(
+ *call = (grpc_subchannel_call *)gpr_arena_alloc(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
- const grpc_call_element_args call_args = {.call_stack = callstk,
- .server_transport_data = NULL,
- .context = args->context,
- .path = args->path,
- .start_time = args->start_time,
- .deadline = args->deadline,
- .arena = args->arena};
+ const grpc_call_element_args call_args = {
+ .call_stack = callstk,
+ .server_transport_data = NULL,
+ .context = args->context,
+ .path = args->path,
+ .start_time = args->start_time,
+ .deadline = args->deadline,
+ .arena = args->arena,
+ .call_combiner = args->call_combiner};
grpc_error *error = grpc_call_stack_init(
exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
@@ -811,6 +811,6 @@ const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args) {
grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) {
return grpc_channel_arg_string_create(
- GRPC_ARG_SUBCHANNEL_ADDRESS,
+ (char *)GRPC_ARG_SUBCHANNEL_ADDRESS,
addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
}
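
The rewritten grpc_call_element_args initializer uses C99 designated initializers, which is why threading the new call_combiner through is a one-line addition: fields are named explicitly and anything left out is zero-initialized. A standalone sketch; call_args is an illustrative stand-in, not the real struct:

#include <stdio.h>

typedef struct {
  const char *path;
  int start_time;
  int deadline;
  void *call_combiner; /* newly added field defaults to NULL if unnamed */
} call_args;

int main(void) {
  call_args args = {
      .path = "/echo.Echo/UnaryCall",
      .deadline = 30,
  };
  printf("path=%s deadline=%d start=%d combiner=%p\n", args.path,
         args.deadline, args.start_time, args.call_combiner);
  return 0;
}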
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 6d2abb04df..51d712f6a7 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -106,6 +106,7 @@ typedef struct {
gpr_timespec deadline;
gpr_arena *arena;
grpc_call_context_element *context;
+ grpc_call_combiner *call_combiner;
} grpc_connected_subchannel_call_args;
grpc_error *grpc_connected_subchannel_create_call(
@@ -150,10 +151,6 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call,
grpc_transport_stream_op_batch *op);
-/** continue querying for peer */
-char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *subchannel_call);
-
/** Must be called once per call. Sets the 'then_schedule_closure' argument for
call stack destruction. */
void grpc_subchannel_call_set_cleanup_closure(
diff --git a/src/core/ext/filters/client_channel/subchannel_index.c b/src/core/ext/filters/client_channel/subchannel_index.c
index ababd05d84..d7a51f3899 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.c
+++ b/src/core/ext/filters/client_channel/subchannel_index.c
@@ -34,6 +34,8 @@ static gpr_avl g_subchannel_index;
static gpr_mu g_mu;
+static gpr_refcount g_refcount;
+
struct grpc_subchannel_key {
grpc_subchannel_args args;
};
@@ -43,11 +45,11 @@ static bool g_force_creation = false;
static grpc_subchannel_key *create_key(
const grpc_subchannel_args *args,
grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
- grpc_subchannel_key *k = gpr_malloc(sizeof(*k));
+ grpc_subchannel_key *k = (grpc_subchannel_key *)gpr_malloc(sizeof(*k));
k->args.filter_count = args->filter_count;
if (k->args.filter_count > 0) {
- k->args.filters =
- gpr_malloc(sizeof(*k->args.filters) * k->args.filter_count);
+ k->args.filters = (const grpc_channel_filter **)gpr_malloc(
+ sizeof(*k->args.filters) * k->args.filter_count);
memcpy((grpc_channel_filter *)k->args.filters, args->filters,
sizeof(*k->args.filters) * k->args.filter_count);
} else {
@@ -88,24 +90,26 @@ void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
static void sck_avl_destroy(void *p, void *user_data) {
grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
- grpc_subchannel_key_destroy(exec_ctx, p);
+ grpc_subchannel_key_destroy(exec_ctx, (grpc_subchannel_key *)p);
}
static void *sck_avl_copy(void *p, void *unused) {
- return subchannel_key_copy(p);
+ return subchannel_key_copy((grpc_subchannel_key *)p);
}
static long sck_avl_compare(void *a, void *b, void *unused) {
- return grpc_subchannel_key_compare(a, b);
+ return grpc_subchannel_key_compare((grpc_subchannel_key *)a,
+ (grpc_subchannel_key *)b);
}
static void scv_avl_destroy(void *p, void *user_data) {
grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
- GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, p, "subchannel_index");
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel *)p,
+ "subchannel_index");
}
static void *scv_avl_copy(void *p, void *unused) {
- GRPC_SUBCHANNEL_WEAK_REF(p, "subchannel_index");
+ GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel *)p, "subchannel_index");
return p;
}
@@ -119,15 +123,27 @@ static const gpr_avl_vtable subchannel_avl_vtable = {
void grpc_subchannel_index_init(void) {
g_subchannel_index = gpr_avl_create(&subchannel_avl_vtable);
gpr_mu_init(&g_mu);
+ gpr_ref_init(&g_refcount, 1);
}
void grpc_subchannel_index_shutdown(void) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- gpr_mu_destroy(&g_mu);
- gpr_avl_unref(g_subchannel_index, &exec_ctx);
- grpc_exec_ctx_finish(&exec_ctx);
+  // TODO(juanlishen): This refcounting mechanism may lead to memory leakage.
+  // To solve that, we should force polling to flush any pending callbacks,
+  // then shut down safely.
+ grpc_subchannel_index_unref();
+}
+
+void grpc_subchannel_index_unref(void) {
+ if (gpr_unref(&g_refcount)) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ gpr_mu_destroy(&g_mu);
+ gpr_avl_unref(g_subchannel_index, &exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
+ }
}
+void grpc_subchannel_index_ref(void) { gpr_ref_non_zero(&g_refcount); }
+
grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
grpc_subchannel_key *key) {
// Lock, and take a reference to the subchannel index.
@@ -137,7 +153,7 @@ grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
grpc_subchannel *c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
- gpr_avl_get(index, key, exec_ctx), "index_find");
+ (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx), "index_find");
gpr_avl_unref(index, exec_ctx);
return c;
@@ -159,7 +175,7 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
- c = gpr_avl_get(index, key, exec_ctx);
+ c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
if (c != NULL) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@@ -207,7 +223,7 @@ void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
// Check to see if this key still refers to the previously
// registered subchannel
- grpc_subchannel *c = gpr_avl_get(index, key, exec_ctx);
+ grpc_subchannel *c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
if (c != constructed) {
gpr_avl_unref(index, exec_ctx);
break;
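
The shutdown change above replaces unconditional teardown with a global refcount, so in-flight users of the index can outlive grpc_subchannel_index_shutdown(). As a rough standalone sketch of the same pattern, here is the init/ref/unref life cycle written with C11 atomics in place of gpr_refcount; every name below is illustrative, not a gRPC API:

#include <stdatomic.h>
#include <stdlib.h>

static atomic_long g_refs;
static void *g_index; /* stands in for the AVL tree and its mutex */

void index_init(void) {
  g_index = malloc(1);     /* placeholder for gpr_avl_create() etc. */
  atomic_init(&g_refs, 1); /* the reference owned by init/shutdown */
}

void index_ref(void) {
  /* Legal only while the count is non-zero (cf. gpr_ref_non_zero). */
  atomic_fetch_add(&g_refs, 1);
}

void index_unref(void) {
  if (atomic_fetch_sub(&g_refs, 1) == 1) {
    /* Last reference dropped: now safe to destroy the shared state. */
    free(g_index);
    g_index = NULL;
  }
}

void index_shutdown(void) {
  /* Drop the init-time reference; whoever unrefs last tears down. */
  index_unref();
}

The TODO in the diff applies to the sketch as well: if a late unref never arrives, the shared state leaks.
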
diff --git a/src/core/ext/filters/client_channel/subchannel_index.h b/src/core/ext/filters/client_channel/subchannel_index.h
index 98d882a453..92e36d5283 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.h
+++ b/src/core/ext/filters/client_channel/subchannel_index.h
@@ -59,6 +59,13 @@ void grpc_subchannel_index_init(void);
/** Shutdown the subchannel index (global) */
void grpc_subchannel_index_shutdown(void);
+/** Increment the non-zero refcount of the subchannel index (global). */
+void grpc_subchannel_index_ref(void);
+
+/** Decrement the refcount of the subchannel index (global). If the refcount
+    drops to zero, unref the subchannel index and destroy its mutex. */
+void grpc_subchannel_index_unref(void);
+
/** \em TEST ONLY.
* If \a force_creation is true, all key comparisons will be false, resulting in
* new subchannels always being created. Otherwise, the keys will be compared as
diff --git a/src/core/ext/filters/client_channel/uri_parser.c b/src/core/ext/filters/client_channel/uri_parser.c
index e841928760..fb4fb8e694 100644
--- a/src/core/ext/filters/client_channel/uri_parser.c
+++ b/src/core/ext/filters/client_channel/uri_parser.c
@@ -45,7 +45,7 @@ static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
gpr_free(line_prefix);
- line_prefix = gpr_malloc(pfx_len + 1);
+ line_prefix = (char *)gpr_malloc(pfx_len + 1);
memset(line_prefix, ' ', pfx_len);
line_prefix[pfx_len] = 0;
gpr_log(GPR_ERROR, "%s^ here", line_prefix);
@@ -156,7 +156,8 @@ static void parse_query_parts(grpc_uri *uri) {
gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
&uri->num_query_parts);
- uri->query_parts_values = gpr_malloc(uri->num_query_parts * sizeof(char **));
+ uri->query_parts_values =
+ (char **)gpr_malloc(uri->num_query_parts * sizeof(char **));
for (size_t i = 0; i < uri->num_query_parts; i++) {
char **query_param_parts;
size_t num_query_param_parts;
@@ -269,7 +270,7 @@ grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
fragment_end = i;
}
- uri = gpr_zalloc(sizeof(*uri));
+ uri = (grpc_uri *)gpr_zalloc(sizeof(*uri));
uri->scheme =
decode_and_copy_component(exec_ctx, uri_text, scheme_begin, scheme_end);
uri->authority = decode_and_copy_component(exec_ctx, uri_text,
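
Most of the mechanical changes in this file (and in subchannel_index.c above) add explicit casts to the results of gpr_malloc()/gpr_zalloc(). The casts are no-ops in C but become necessary if these files are ever compiled as C++, where void * does not implicitly convert to other object-pointer types. A tiny illustration with plain malloc():

#include <stdlib.h>

int main(void) {
  /* Valid C, but a compile error in C++: no implicit conversion
     from void * to char *. */
  char *a = malloc(16);

  /* The explicitly cast form compiles in both languages, which is
     why the diff adds casts such as (char *)gpr_malloc(...). */
  char *b = (char *)malloc(16);

  free(a);
  free(b);
  return 0;
}
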
diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.c
index 6789903c95..1aed488077 100644
--- a/src/core/ext/filters/deadline/deadline_filter.c
+++ b/src/core/ext/filters/deadline/deadline_filter.c
@@ -34,22 +34,56 @@
// grpc_deadline_state
//
+// The on_complete callback used when sending a cancel_error batch down the
+// filter stack. Yields the call combiner when the batch returns.
+static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* ignored) {
+ grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+ "got on_complete from cancel_stream batch");
+ GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
+}
+
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
+static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
+ grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
+ deadline_state, grpc_schedule_on_exec_ctx));
+ batch->cancel_stream = true;
+ batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+}
+
// Timer callback.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
if (error != GRPC_ERROR_CANCELLED) {
- grpc_call_element_signal_error(
- exec_ctx, elem,
- grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED));
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
+ grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner,
+ GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
+ send_cancel_op_in_call_combiner, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
+ &deadline_state->timer_callback, error,
+ "deadline exceeded -- sending cancel_stream op");
+ } else {
+ GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack,
+ "deadline_timer");
}
- GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
// Starts the deadline timer.
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
@@ -58,51 +92,39 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
return;
}
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
- grpc_deadline_timer_state cur_state;
grpc_closure* closure = NULL;
-retry:
- cur_state =
- (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
- switch (cur_state) {
+ switch (deadline_state->timer_state) {
case GRPC_DEADLINE_STATE_PENDING:
       // Note: We do not start the timer if there is already a timer pending.
return;
case GRPC_DEADLINE_STATE_FINISHED:
- if (gpr_atm_rel_cas(&deadline_state->timer_state,
- GRPC_DEADLINE_STATE_FINISHED,
- GRPC_DEADLINE_STATE_PENDING)) {
- // If we've already created and destroyed a timer, we always create a
- // new closure: we have no other guarantee that the inlined closure is
- // not in use (it may hold a pending call to timer_callback)
- closure = GRPC_CLOSURE_CREATE(timer_callback, elem,
- grpc_schedule_on_exec_ctx);
- } else {
- goto retry;
- }
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
+ // If we've already created and destroyed a timer, we always create a
+ // new closure: we have no other guarantee that the inlined closure is
+ // not in use (it may hold a pending call to timer_callback)
+ closure =
+ GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx);
break;
case GRPC_DEADLINE_STATE_INITIAL:
- if (gpr_atm_rel_cas(&deadline_state->timer_state,
- GRPC_DEADLINE_STATE_INITIAL,
- GRPC_DEADLINE_STATE_PENDING)) {
- closure =
- GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
- elem, grpc_schedule_on_exec_ctx);
- } else {
- goto retry;
- }
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
+ closure =
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
+ elem, grpc_schedule_on_exec_ctx);
break;
}
- GPR_ASSERT(closure);
+ GPR_ASSERT(closure != NULL);
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
gpr_now(GPR_CLOCK_MONOTONIC));
}
// Cancels the deadline timer.
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
- if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
- GRPC_DEADLINE_STATE_FINISHED)) {
+ if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) {
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED;
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
} else {
     // timer was either in STATE_INITIAL (nothing to cancel)
@@ -131,22 +153,39 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
+ bool in_call_combiner;
grpc_call_element* elem;
gpr_timespec deadline;
grpc_closure closure;
};
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- struct start_timer_after_init_state* state = arg;
+ struct start_timer_after_init_state* state =
+ (struct start_timer_after_init_state*)arg;
+ grpc_deadline_state* deadline_state =
+ (grpc_deadline_state*)state->elem->call_data;
+ if (!state->in_call_combiner) {
+ // We are initially called without holding the call combiner, so we
+ // need to bounce ourselves into it.
+ state->in_call_combiner = true;
+ GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
+ &state->closure, GRPC_ERROR_REF(error),
+ "scheduling deadline timer");
+ return;
+ }
start_timer_if_needed(exec_ctx, state->elem, state->deadline);
gpr_free(state);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+ "done scheduling deadline timer");
}
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
deadline_state->call_stack = call_stack;
+ deadline_state->call_combiner = call_combiner;
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -158,7 +197,8 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
// call stack initialization is finished. To avoid that problem, we
// create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done.
- struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
+ struct start_timer_after_init_state* state =
+ (struct start_timer_after_init_state*)gpr_zalloc(sizeof(*state));
state->elem = elem;
state->deadline = deadline;
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
@@ -232,7 +272,8 @@ typedef struct server_call_data {
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_element_args* args) {
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack, args->deadline);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+ args->call_combiner, args->deadline);
return GRPC_ERROR_NONE;
}
@@ -310,7 +351,6 @@ const grpc_channel_filter grpc_client_deadline_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"deadline",
};
@@ -325,7 +365,6 @@ const grpc_channel_filter grpc_server_deadline_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"deadline",
};
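
With the call combiner now serializing all access to grpc_deadline_state, the CAS/retry loops above collapse into plain reads and writes of a three-state enum. A compressed sketch of the resulting state machine (the enum values match the diff; the harness around them is illustrative):

#include <assert.h>

typedef enum {
  GRPC_DEADLINE_STATE_INITIAL,  /* no timer has ever been armed */
  GRPC_DEADLINE_STATE_PENDING,  /* timer armed, not yet fired/cancelled */
  GRPC_DEADLINE_STATE_FINISHED, /* timer fired or was cancelled */
} grpc_deadline_timer_state;

/* Both functions run under the call combiner, so no atomics are needed. */
static void start_timer(grpc_deadline_timer_state *state) {
  switch (*state) {
    case GRPC_DEADLINE_STATE_PENDING:
      return; /* a timer is already armed; nothing to do */
    case GRPC_DEADLINE_STATE_FINISHED:
    case GRPC_DEADLINE_STATE_INITIAL:
      *state = GRPC_DEADLINE_STATE_PENDING;
      /* ... arm the timer here ... */
      break;
  }
}

static void cancel_timer(grpc_deadline_timer_state *state) {
  if (*state == GRPC_DEADLINE_STATE_PENDING) {
    *state = GRPC_DEADLINE_STATE_FINISHED;
    /* ... cancel the armed timer here ... */
  } /* else: INITIAL (nothing armed) or FINISHED (already done) */
}

int main(void) {
  grpc_deadline_timer_state s = GRPC_DEADLINE_STATE_INITIAL;
  start_timer(&s);
  assert(s == GRPC_DEADLINE_STATE_PENDING);
  start_timer(&s); /* no-op: already pending */
  cancel_timer(&s);
  assert(s == GRPC_DEADLINE_STATE_FINISHED);
  return 0;
}
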
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index 420bf7065a..3eb102ad28 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -31,7 +31,8 @@ typedef enum grpc_deadline_timer_state {
typedef struct grpc_deadline_state {
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
- gpr_atm timer_state;
+ grpc_call_combiner* call_combiner;
+ grpc_deadline_timer_state timer_state;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when the call is complete.
@@ -50,6 +51,7 @@ typedef struct grpc_deadline_state {
// assumes elem->call_data is zero'd
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner,
gpr_timespec deadline);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
@@ -61,6 +63,8 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
// to ensure that the timer callback is not invoked while it is in the
// process of being reset, which means that attempting to increase the
// deadline may result in the timer being called twice.
+//
+// Note: Must be called while holding the call combiner.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline);
@@ -70,6 +74,8 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
//
// Note: It is the caller's responsibility to chain to the next filter if
// necessary after this function returns.
+//
+// Note: Must be called while holding the call combiner.
void grpc_deadline_state_client_start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* op);
diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.c
index 3ca01a41b5..6208089f2e 100644
--- a/src/core/ext/filters/http/client/http_client_filter.c
+++ b/src/core/ext/filters/http/client/http_client_filter.c
@@ -36,6 +36,7 @@
static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
@@ -138,8 +139,8 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *user_data, grpc_error *error) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -153,8 +154,8 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void recv_trailing_metadata_on_complete(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_error *error) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_trailing_metadata);
@@ -215,13 +216,13 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
return;
}
error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
return;
}
// There may or may not be more to read, but we don't care. If we got
@@ -233,7 +234,7 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
}
static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) {
- char *payload_bytes = gpr_malloc(slice_buffer->length + 1);
+ char *payload_bytes = (char *)gpr_malloc(slice_buffer->length + 1);
size_t offset = 0;
for (size_t i = 0; i < slice_buffer->count; ++i) {
memcpy(payload_bytes + offset,
@@ -299,10 +300,9 @@ static void remove_if_present(grpc_exec_ctx *exec_ctx,
static void hc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
- GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
if (batch->recv_initial_metadata) {
/* substitute our callback for the higher callback */
@@ -414,7 +414,7 @@ static void hc_start_transport_stream_op_batch(
done:
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
} else if (!batch_will_be_handled_asynchronously) {
grpc_call_next_op(exec_ctx, elem, batch);
}
@@ -426,6 +426,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = (call_data *)elem->call_data;
+ calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -535,7 +536,7 @@ static grpc_slice user_agent_from_args(const grpc_channel_args *args,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != NULL);
chand->static_scheme = scheme_from_args(args->channel_args);
@@ -551,7 +552,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GRPC_MDELEM_UNREF(exec_ctx, chand->user_agent);
}
@@ -565,6 +566,5 @@ const grpc_channel_filter grpc_http_client_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"http-client"};
diff --git a/src/core/ext/filters/http/http_filters_plugin.c b/src/core/ext/filters/http/http_filters_plugin.c
index a5c1b92054..88bd2250f9 100644
--- a/src/core/ext/filters/http/http_filters_plugin.c
+++ b/src/core/ext/filters/http/http_filters_plugin.c
@@ -44,7 +44,7 @@ static bool maybe_add_optional_filter(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder *builder,
void *arg) {
if (!is_building_http_like_transport(builder)) return true;
- optional_filter *filtarg = arg;
+ optional_filter *filtarg = (optional_filter *)arg;
const grpc_channel_args *channel_args =
grpc_channel_stack_builder_get_channel_arguments(builder);
bool enable = grpc_channel_arg_get_bool(
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.c
index 20a3488115..f785e1355d 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.c
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.c
@@ -35,33 +35,29 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
-#define INITIAL_METADATA_UNSEEN 0
-#define HAS_COMPRESSION_ALGORITHM 2
-#define NO_COMPRESSION_ALGORITHM 4
-
-#define CANCELLED_BIT ((gpr_atm)1)
+typedef enum {
+ // Initial metadata not yet seen.
+ INITIAL_METADATA_UNSEEN = 0,
+ // Initial metadata seen; compression algorithm set.
+ HAS_COMPRESSION_ALGORITHM,
+ // Initial metadata seen; no compression algorithm set.
+ NO_COMPRESSION_ALGORITHM,
+} initial_metadata_state;
typedef struct call_data {
- grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
+ grpc_call_combiner *call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
+ grpc_linked_mdelem stream_compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
- uint32_t remaining_slice_bytes;
+ grpc_linked_mdelem accept_stream_encoding_storage;
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
-
- /* Atomic recording the state of initial metadata; allowed values:
- INITIAL_METADATA_UNSEEN - initial metadata op not seen
- HAS_COMPRESSION_ALGORITHM - initial metadata seen; compression algorithm
- set
- NO_COMPRESSION_ALGORITHM - initial metadata seen; no compression algorithm
- set
- pointer - a stalled op containing a send_message that's waiting on initial
- metadata
- pointer | CANCELLED_BIT - request was cancelled with error pointed to */
- gpr_atm send_initial_metadata_state;
-
+ initial_metadata_state send_initial_metadata_state;
+ grpc_error *cancel_error;
+ grpc_closure start_send_message_batch_in_call_combiner;
grpc_transport_stream_op_batch *send_message_batch;
+ grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_slice_buffer_stream replacement_stream;
grpc_closure *original_send_message_on_complete;
grpc_closure send_message_on_complete;
@@ -75,21 +71,28 @@ typedef struct channel_data {
uint32_t enabled_algorithms_bitset;
/** Supported compression algorithms */
uint32_t supported_compression_algorithms;
+
+ /** The default, channel-level, stream compression algorithm */
+ grpc_stream_compression_algorithm default_stream_compression_algorithm;
+ /** Bitset of enabled stream compression algorithms */
+ uint32_t enabled_stream_compression_algorithms_bitset;
+ /** Supported stream compression algorithms */
+ uint32_t supported_stream_compression_algorithms;
} channel_data;
static bool skip_compression(grpc_call_element *elem, uint32_t flags,
bool has_compression_algorithm) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
- return 1;
+ return true;
}
if (has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
- return 1;
+ return true;
}
- return 0; /* we have an actual call-specific algorithm */
+ return false; /* we have an actual call-specific algorithm */
}
/* no per-call compression override */
return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
@@ -103,34 +106,59 @@ static grpc_error *process_send_initial_metadata(
static grpc_error *process_send_initial_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata, bool *has_compression_algorithm) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
*has_compression_algorithm = false;
- /* Parse incoming request for compression. If any, it'll be available
- * at calld->compression_algorithm */
- if (initial_metadata->idx.named.grpc_internal_encoding_request != NULL) {
+ grpc_stream_compression_algorithm stream_compression_algorithm =
+ GRPC_STREAM_COMPRESS_NONE;
+ if (initial_metadata->idx.named.grpc_internal_stream_encoding_request !=
+ NULL) {
grpc_mdelem md =
- initial_metadata->idx.named.grpc_internal_encoding_request->md;
- if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md),
- &calld->compression_algorithm)) {
+ initial_metadata->idx.named.grpc_internal_stream_encoding_request->md;
+ if (!grpc_stream_compression_algorithm_parse(
+ GRPC_MDVALUE(md), &stream_compression_algorithm)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
- "Invalid compression algorithm: '%s' (unknown). Ignoring.", val);
+ "Invalid stream compression algorithm: '%s' (unknown). Ignoring.",
+ val);
gpr_free(val);
- calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE;
+ }
+ if (!GPR_BITGET(channeld->enabled_stream_compression_algorithms_bitset,
+ stream_compression_algorithm)) {
+ char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ gpr_log(
+ GPR_ERROR,
+ "Invalid stream compression algorithm: '%s' (previously disabled). "
+ "Ignoring.",
+ val);
+ gpr_free(val);
+ stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE;
+ }
+ *has_compression_algorithm = true;
+ grpc_metadata_batch_remove(
+ exec_ctx, initial_metadata,
+ initial_metadata->idx.named.grpc_internal_stream_encoding_request);
+ /* Disable message-wise compression */
+ calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ if (initial_metadata->idx.named.grpc_internal_encoding_request != NULL) {
+ grpc_metadata_batch_remove(
+ exec_ctx, initial_metadata,
+ initial_metadata->idx.named.grpc_internal_encoding_request);
}
- if (!GPR_BITGET(channeld->enabled_algorithms_bitset,
- calld->compression_algorithm)) {
+ } else if (initial_metadata->idx.named.grpc_internal_encoding_request !=
+ NULL) {
+ grpc_mdelem md =
+ initial_metadata->idx.named.grpc_internal_encoding_request->md;
+ if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md),
+ &calld->compression_algorithm)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
- "Invalid compression algorithm: '%s' (previously disabled). "
- "Ignoring.",
- val);
+ "Invalid compression algorithm: '%s' (unknown). Ignoring.", val);
gpr_free(val);
calld->compression_algorithm = GRPC_COMPRESS_NONE;
}
*has_compression_algorithm = true;
-
grpc_metadata_batch_remove(
exec_ctx, initial_metadata,
initial_metadata->idx.named.grpc_internal_encoding_request);
@@ -138,13 +166,25 @@ static grpc_error *process_send_initial_metadata(
/* If no algorithm was found in the metadata and we aren't
* exceptionally skipping compression, fall back to the channel
* default */
- calld->compression_algorithm = channeld->default_compression_algorithm;
+ if (channeld->default_stream_compression_algorithm !=
+ GRPC_STREAM_COMPRESS_NONE) {
+ stream_compression_algorithm =
+ channeld->default_stream_compression_algorithm;
+ calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ } else {
+ calld->compression_algorithm = channeld->default_compression_algorithm;
+ }
*has_compression_algorithm = true;
}
grpc_error *error = GRPC_ERROR_NONE;
/* hint compression algorithm */
- if (calld->compression_algorithm != GRPC_COMPRESS_NONE) {
+ if (stream_compression_algorithm != GRPC_STREAM_COMPRESS_NONE) {
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, initial_metadata,
+ &calld->stream_compression_algorithm_storage,
+ grpc_stream_compression_encoding_mdelem(stream_compression_algorithm));
+ } else if (calld->compression_algorithm != GRPC_COMPRESS_NONE) {
error = grpc_metadata_batch_add_tail(
exec_ctx, initial_metadata, &calld->compression_algorithm_storage,
grpc_compression_encoding_mdelem(calld->compression_algorithm));
@@ -158,6 +198,16 @@ static grpc_error *process_send_initial_metadata(
GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(
channeld->supported_compression_algorithms));
+ if (error != GRPC_ERROR_NONE) return error;
+
+  /* Do not overwrite the accept-encoding header if it is already present. */
+ if (!initial_metadata->idx.named.accept_encoding) {
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, initial_metadata, &calld->accept_stream_encoding_storage,
+ GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(
+ channeld->supported_stream_compression_algorithms));
+ }
+
return error;
}
@@ -170,6 +220,18 @@ static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
+static void send_message_batch_continue(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *calld = (call_data *)elem->call_data;
+ // Note: The call to grpc_call_next_op() results in yielding the
+ // call combiner, so we need to clear calld->send_message_batch
+ // before we do that.
+ grpc_transport_stream_op_batch *send_message_batch =
+ calld->send_message_batch;
+ calld->send_message_batch = NULL;
+ grpc_call_next_op(exec_ctx, elem, send_message_batch);
+}
+
static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
@@ -178,11 +240,11 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_init(&tmp);
uint32_t send_flags =
calld->send_message_batch->payload->send_message.send_message->flags;
- const bool did_compress = grpc_msg_compress(
- exec_ctx, calld->compression_algorithm, &calld->slices, &tmp);
+ bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm,
+ &calld->slices, &tmp);
if (did_compress) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name;
+ const char *algo_name;
const size_t before_size = calld->slices.length;
const size_t after_size = tmp.length;
const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
@@ -196,7 +258,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
} else {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name;
+ const char *algo_name;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
gpr_log(GPR_DEBUG,
@@ -217,7 +279,19 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
calld->original_send_message_on_complete =
calld->send_message_batch->on_complete;
calld->send_message_batch->on_complete = &calld->send_message_on_complete;
- grpc_call_next_op(exec_ctx, elem, calld->send_message_batch);
+ send_message_batch_continue(exec_ctx, elem);
+}
+
+static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error) {
+ call_data *calld = (call_data *)arg;
+ if (calld->send_message_batch != NULL) {
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
+ calld->call_combiner);
+ calld->send_message_batch = NULL;
+ }
}
// Pulls a slice from the send_message byte stream and adds it to calld->slices.
@@ -237,21 +311,25 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
// If all data has been read, invokes finish_send_message(). Otherwise,
// an async call to grpc_byte_stream_next() has been started, which will
// eventually result in calling on_send_message_next_done().
-static grpc_error *continue_reading_send_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
+static void continue_reading_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
while (grpc_byte_stream_next(
exec_ctx, calld->send_message_batch->payload->send_message.send_message,
~(size_t)0, &calld->on_send_message_next_done)) {
grpc_error *error = pull_slice_from_send_message(exec_ctx, calld);
- if (error != GRPC_ERROR_NONE) return error;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem);
break;
}
}
- return GRPC_ERROR_NONE;
}
// Async callback for grpc_byte_stream_next().
@@ -259,142 +337,118 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- if (error != GRPC_ERROR_NONE) goto fail;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ return;
+ }
error = pull_slice_from_send_message(exec_ctx, calld);
- if (error != GRPC_ERROR_NONE) goto fail;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem);
} else {
- // This will either finish reading all of the data and invoke
- // finish_send_message(), or else it will make an async call to
- // grpc_byte_stream_next(), which will eventually result in calling
- // this function again.
- error = continue_reading_send_message(exec_ctx, elem);
- if (error != GRPC_ERROR_NONE) goto fail;
+ continue_reading_send_message(exec_ctx, elem);
}
- return;
-fail:
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
}
-static void start_send_message_batch(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch,
- bool has_compression_algorithm) {
+static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *unused) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- if (!skip_compression(elem, batch->payload->send_message.send_message->flags,
- has_compression_algorithm)) {
- calld->send_message_batch = batch;
- // This will either finish reading all of the data and invoke
- // finish_send_message(), or else it will make an async call to
- // grpc_byte_stream_next(), which will eventually result in calling
- // on_send_message_next_done().
- grpc_error *error = continue_reading_send_message(exec_ctx, elem);
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
- }
+ if (skip_compression(
+ elem,
+ calld->send_message_batch->payload->send_message.send_message->flags,
+ calld->send_initial_metadata_state == HAS_COMPRESSION_ALGORITHM)) {
+ send_message_batch_continue(exec_ctx, elem);
} else {
- /* pass control down the stack */
- grpc_call_next_op(exec_ctx, elem, batch);
+ continue_reading_send_message(exec_ctx, elem);
}
}
static void compress_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
-
+ call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
-
+ // Handle cancel_stream.
if (batch->cancel_stream) {
- // TODO(roth): As part of the upcoming call combiner work, change
- // this to call grpc_byte_stream_shutdown() on the incoming byte
- // stream, to cancel any in-flight calls to grpc_byte_stream_next().
- GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
- gpr_atm cur = gpr_atm_full_xchg(
- &calld->send_initial_metadata_state,
- CANCELLED_BIT | (gpr_atm)batch->payload->cancel_stream.cancel_error);
- switch (cur) {
- case HAS_COMPRESSION_ALGORITHM:
- case NO_COMPRESSION_ALGORITHM:
- case INITIAL_METADATA_UNSEEN:
- break;
- default:
- if ((cur & CANCELLED_BIT) == 0) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, (grpc_transport_stream_op_batch *)cur,
- GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error));
- } else {
- GRPC_ERROR_UNREF((grpc_error *)(cur & ~CANCELLED_BIT));
- }
- break;
+ GRPC_ERROR_UNREF(calld->cancel_error);
+ calld->cancel_error =
+ GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ if (calld->send_message_batch != NULL) {
+ if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_REF(calld->cancel_error), "failing send_message op");
+ } else {
+ grpc_byte_stream_shutdown(
+ exec_ctx,
+ calld->send_message_batch->payload->send_message.send_message,
+ GRPC_ERROR_REF(calld->cancel_error));
+ }
}
+ } else if (calld->cancel_error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, batch, GRPC_ERROR_REF(calld->cancel_error),
+ calld->call_combiner);
+ goto done;
}
-
+ // Handle send_initial_metadata.
if (batch->send_initial_metadata) {
+ GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN);
bool has_compression_algorithm;
grpc_error *error = process_send_initial_metadata(
exec_ctx, elem,
batch->payload->send_initial_metadata.send_initial_metadata,
&has_compression_algorithm);
if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- error);
- return;
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+ calld->call_combiner);
+ goto done;
}
- gpr_atm cur;
- retry_send_im:
- cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
- GPR_ASSERT(cur != HAS_COMPRESSION_ALGORITHM &&
- cur != NO_COMPRESSION_ALGORITHM);
- if ((cur & CANCELLED_BIT) == 0) {
- if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
- has_compression_algorithm
- ? HAS_COMPRESSION_ALGORITHM
- : NO_COMPRESSION_ALGORITHM)) {
- goto retry_send_im;
- }
- if (cur != INITIAL_METADATA_UNSEEN) {
- start_send_message_batch(exec_ctx, elem,
- (grpc_transport_stream_op_batch *)cur,
- has_compression_algorithm);
- }
+ calld->send_initial_metadata_state = has_compression_algorithm
+ ? HAS_COMPRESSION_ALGORITHM
+ : NO_COMPRESSION_ALGORITHM;
+    // If we had previously received a batch containing a send_message op,
+    // handle it now.  Note that we need to re-enter the call combiner
+    // for this, because we can't send two batches down while holding the
+    // call combiner: the connected_channel filter (at the bottom of the
+    // call stack) releases the call combiner for each batch it sees.
+ if (calld->send_message_batch != NULL) {
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, calld->call_combiner,
+ &calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE,
+ "starting send_message after send_initial_metadata");
}
}
+ // Handle send_message.
if (batch->send_message) {
- gpr_atm cur;
- retry_send:
- cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
- switch (cur) {
- case INITIAL_METADATA_UNSEEN:
- if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
- (gpr_atm)batch)) {
- goto retry_send;
- }
- break;
- case HAS_COMPRESSION_ALGORITHM:
- case NO_COMPRESSION_ALGORITHM:
- start_send_message_batch(exec_ctx, elem, batch,
- cur == HAS_COMPRESSION_ALGORITHM);
- break;
- default:
- if (cur & CANCELLED_BIT) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch,
- GRPC_ERROR_REF((grpc_error *)(cur & ~CANCELLED_BIT)));
- } else {
- /* >1 send_message concurrently */
- GPR_UNREACHABLE_CODE(break);
- }
+ GPR_ASSERT(calld->send_message_batch == NULL);
+ calld->send_message_batch = batch;
+ // If we have not yet seen send_initial_metadata, then we have to
+ // wait. We save the batch in calld and then drop the call
+ // combiner, which we'll have to pick up again later when we get
+ // send_initial_metadata.
+ if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
+ GRPC_CALL_COMBINER_STOP(
+ exec_ctx, calld->call_combiner,
+ "send_message batch pending send_initial_metadata");
+ goto done;
}
+ start_send_message_batch(exec_ctx, elem, GRPC_ERROR_NONE);
} else {
- /* pass control down the stack */
+ // Pass control down the stack.
grpc_call_next_op(exec_ctx, elem, batch);
}
-
+done:
GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
}
@@ -402,16 +456,16 @@ static void compress_start_transport_stream_op_batch(
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
-
- /* initialize members */
+ call_data *calld = (call_data *)elem->call_data;
+ calld->call_combiner = args->call_combiner;
+ calld->cancel_error = GRPC_ERROR_NONE;
grpc_slice_buffer_init(&calld->slices);
+ GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner,
+ start_send_message_batch, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
elem, grpc_schedule_on_exec_ctx);
-
return GRPC_ERROR_NONE;
}
@@ -419,22 +473,18 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
- gpr_atm imstate =
- gpr_atm_no_barrier_load(&calld->send_initial_metadata_state);
- if (imstate & CANCELLED_BIT) {
- GRPC_ERROR_UNREF((grpc_error *)(imstate & ~CANCELLED_BIT));
- }
+ GRPC_ERROR_UNREF(calld->cancel_error);
}
/* Constructor for channel_data */
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *channeld = elem->channel_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
+ /* Configuration for message compression */
channeld->enabled_algorithms_bitset =
grpc_channel_args_compression_algorithm_get_states(args->channel_args);
@@ -449,16 +499,32 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
channeld->default_compression_algorithm = GRPC_COMPRESS_NONE;
}
- channeld->supported_compression_algorithms = 1; /* always support identity */
- for (grpc_compression_algorithm algo_idx = 1;
- algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
- /* skip disabled algorithms */
- if (!GPR_BITGET(channeld->enabled_algorithms_bitset, algo_idx)) {
- continue;
- }
- channeld->supported_compression_algorithms |= 1u << algo_idx;
+ channeld->supported_compression_algorithms =
+ (((1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1) &
+ channeld->enabled_algorithms_bitset) |
+ 1u;
+
+ /* Configuration for stream compression */
+ channeld->enabled_stream_compression_algorithms_bitset =
+ grpc_channel_args_stream_compression_algorithm_get_states(
+ args->channel_args);
+
+ channeld->default_stream_compression_algorithm =
+ grpc_channel_args_get_stream_compression_algorithm(args->channel_args);
+
+ if (!GPR_BITGET(channeld->enabled_stream_compression_algorithms_bitset,
+ channeld->default_stream_compression_algorithm)) {
+ gpr_log(GPR_DEBUG,
+ "stream compression algorithm %d not enabled: switching to none",
+ channeld->default_stream_compression_algorithm);
+ channeld->default_stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE;
}
+ channeld->supported_stream_compression_algorithms =
+ (((1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1) &
+ channeld->enabled_stream_compression_algorithms_bitset) |
+ 1u;
+
GPR_ASSERT(!args->is_last);
return GRPC_ERROR_NONE;
}
@@ -477,6 +543,5 @@ const grpc_channel_filter grpc_message_compress_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
- "compress"};
+ "message_compress"};
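
The rewritten filter replaces the packed gpr_atm state word with an explicit enum plus a parked batch pointer: a send_message that arrives before send_initial_metadata is stored in calld->send_message_batch and the combiner is released, then re-entered once the metadata (and thus the compression decision) is known. A self-contained toy of just that ordering logic; every helper here is a stand-in, not a gRPC call:

#include <stddef.h>
#include <stdio.h>

typedef struct { const char *name; } batch;

static void start_batch(batch *b) { printf("starting %s\n", b->name); }
static void yield_combiner(void) { printf("combiner released\n"); }
static void reenter_combiner(batch *b) {
  printf("combiner re-entered\n");
  start_batch(b);
}

typedef struct {
  int metadata_seen;           /* 0 until send_initial_metadata is seen */
  batch *pending_send_message; /* batch parked while waiting */
} compress_state;

static void on_send_message(compress_state *s, batch *b) {
  if (!s->metadata_seen) {
    s->pending_send_message = b; /* park the batch... */
    yield_combiner();            /* ...and let other callbacks run */
    return;
  }
  start_batch(b);
}

static void on_send_initial_metadata(compress_state *s) {
  s->metadata_seen = 1; /* the compression decision is now known */
  if (s->pending_send_message != NULL) {
    /* Two batches cannot go down while holding the combiner once, so
       the parked batch is restarted through the combiner. */
    reenter_combiner(s->pending_send_message);
    s->pending_send_message = NULL;
  }
}

int main(void) {
  compress_state s = {0, NULL};
  batch msg = {"send_message"};
  on_send_message(&s, &msg);    /* arrives first: parked */
  on_send_initial_metadata(&s); /* parked batch starts now */
  return 0;
}
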
diff --git a/src/core/ext/filters/http/server/http_server_filter.c b/src/core/ext/filters/http/server/http_server_filter.c
index b145f12aff..03958136b4 100644
--- a/src/core/ext/filters/http/server/http_server_filter.c
+++ b/src/core/ext/filters/http/server/http_server_filter.c
@@ -32,6 +32,8 @@
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
+
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
@@ -81,18 +83,18 @@ static grpc_error *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx,
}
static void add_error(const char *error_name, grpc_error **cumulative,
- grpc_error *new) {
- if (new == GRPC_ERROR_NONE) return;
+ grpc_error *new_err) {
+ if (new_err == GRPC_ERROR_NONE) return;
if (*cumulative == GRPC_ERROR_NONE) {
*cumulative = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_name);
}
- *cumulative = grpc_error_add_child(*cumulative, new);
+ *cumulative = grpc_error_add_child(*cumulative, new_err);
}
static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_metadata_batch *b) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_error *error = GRPC_ERROR_NONE;
static const char *error_name = "Failed processing incoming headers";
@@ -261,8 +263,8 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (err == GRPC_ERROR_NONE) {
err = server_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -274,14 +276,18 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
/* Call recv_message_ready if we got the payload via the path field */
if (calld->seen_path_with_query && calld->recv_message_ready != NULL) {
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
- GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
+ // Re-enter call combiner for recv_message_ready, since the surface
+ // code will release the call combiner for each callback it receives.
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ calld->recv_message_ready, GRPC_ERROR_REF(err),
+ "resuming recv_message_ready from on_complete");
calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true;
}
@@ -290,20 +296,25 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (calld->seen_path_with_query) {
- /* do nothing. This is probably a GET request, and payload will be returned
- in hs_on_complete callback. */
+    // Do nothing. This is probably a GET request, and the payload will be
+    // returned in the hs_on_complete callback.
+ // Note that we release the call combiner here, so that other
+ // callbacks can run.
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "pausing recv_message_ready until on_complete");
} else {
GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
}
}
-static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
/* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->send_initial_metadata) {
grpc_error *error = GRPC_ERROR_NONE;
@@ -323,10 +334,7 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_initial_metadata.send_initial_metadata));
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return;
- }
+ if (error != GRPC_ERROR_NONE) return error;
}
if (op->recv_initial_metadata) {
@@ -359,21 +367,25 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_error *error = server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_trailing_metadata.send_trailing_metadata);
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return;
- }
+ if (error != GRPC_ERROR_NONE) return error;
}
+
+ return GRPC_ERROR_NONE;
}
-static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- GPR_TIMER_BEGIN("hs_start_transport_op", 0);
- hs_mutate_op(exec_ctx, elem, op);
- grpc_call_next_op(exec_ctx, elem, op);
- GPR_TIMER_END("hs_start_transport_op", 0);
+static void hs_start_transport_stream_op_batch(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
+ call_data *calld = (call_data *)elem->call_data;
+ GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
+ grpc_error *error = hs_mutate_op(exec_ctx, elem, op);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error,
+ calld->call_combiner);
+ } else {
+ grpc_call_next_op(exec_ctx, elem, op);
+ }
+ GPR_TIMER_END("hs_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
@@ -381,8 +393,9 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
/* initialize members */
+ calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem,
@@ -397,7 +410,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer);
}
@@ -414,7 +427,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_http_server_filter = {
- hs_start_transport_op,
+ hs_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
@@ -423,6 +436,5 @@ const grpc_channel_filter grpc_http_server_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"http-server"};
diff --git a/src/core/ext/filters/load_reporting/load_reporting_filter.c b/src/core/ext/filters/load_reporting/server_load_reporting_filter.c
index 08474efb2e..ca8a3b2a13 100644
--- a/src/core/ext/filters/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.c
@@ -24,8 +24,8 @@
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
-#include "src/core/ext/filters/load_reporting/load_reporting.h"
-#include "src/core/ext/filters/load_reporting/load_reporting_filter.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_filter.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -56,8 +56,8 @@ typedef struct channel_data {
static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (err == GRPC_ERROR_NONE) {
if (calld->recv_initial_metadata->idx.named.path != NULL) {
@@ -88,7 +88,7 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
calld->id = (intptr_t)args->call_stack;
GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -111,7 +111,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
@@ -141,7 +141,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element_args *args) {
GPR_ASSERT(!args->is_last);
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
chand->id = (intptr_t)args->channel_stack;
/* TODO(dgq): do something with the data
@@ -176,8 +176,8 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_mdelem md) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) {
calld->trailing_md_string = GRPC_MDVALUE(md);
return GRPC_FILTERED_REMOVE();
@@ -189,7 +189,7 @@ static void lr_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
GPR_TIMER_BEGIN("lr_start_transport_stream_op_batch", 0);
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
@@ -213,7 +213,7 @@ static void lr_start_transport_stream_op_batch(
GPR_TIMER_END("lr_start_transport_stream_op_batch", 0);
}
-const grpc_channel_filter grpc_load_reporting_filter = {
+const grpc_channel_filter grpc_server_load_reporting_filter = {
lr_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
@@ -223,6 +223,5 @@ const grpc_channel_filter grpc_load_reporting_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"load_reporting"};
diff --git a/src/core/ext/filters/load_reporting/load_reporting_filter.h b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
index 1a5424e43a..9527868c9f 100644
--- a/src/core/ext/filters/load_reporting/load_reporting_filter.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
@@ -16,12 +16,13 @@
*
*/
-#ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_FILTER_H
-#define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_FILTER_H
+#ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H
+#define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H
-#include "src/core/ext/filters/load_reporting/load_reporting.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h"
#include "src/core/lib/channel/channel_stack.h"
-extern const grpc_channel_filter grpc_load_reporting_filter;
+extern const grpc_channel_filter grpc_server_load_reporting_filter;
-#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_FILTER_H */
+#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H \
+ */
diff --git a/src/core/ext/filters/load_reporting/load_reporting.c b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c
index 9745763c91..2486ead427 100644
--- a/src/core/ext/filters/load_reporting/load_reporting.c
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c
@@ -25,8 +25,8 @@
#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>
-#include "src/core/ext/filters/load_reporting/load_reporting.h"
-#include "src/core/ext/filters/load_reporting/load_reporting_filter.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_filter.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h"
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call.h"
@@ -37,28 +37,34 @@ static bool is_load_reporting_enabled(const grpc_channel_args *a) {
return grpc_channel_arg_get_bool(
    grpc_channel_args_find(a, GRPC_ARG_ENABLE_LOAD_REPORTING), false);
}
-static bool maybe_add_load_reporting_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg) {
+static bool maybe_add_server_load_reporting_filter(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
const grpc_channel_args *args =
grpc_channel_stack_builder_get_channel_arguments(builder);
- if (is_load_reporting_enabled(args)) {
- return grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ const grpc_channel_filter *filter = (const grpc_channel_filter *)arg;
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_iterator_find(builder, filter->name);
+ const bool already_has_load_reporting_filter =
+ !grpc_channel_stack_builder_iterator_is_end(it);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ if (is_load_reporting_enabled(args) && !already_has_load_reporting_filter) {
+ return grpc_channel_stack_builder_prepend_filter(builder, filter, NULL,
+ NULL);
}
return true;
}
grpc_arg grpc_load_reporting_enable_arg() {
- return grpc_channel_arg_integer_create(GRPC_ARG_ENABLE_LOAD_REPORTING, 1);
+ return grpc_channel_arg_integer_create((char *)GRPC_ARG_ENABLE_LOAD_REPORTING,
+ 1);
}
/* Plugin registration */
-void grpc_load_reporting_plugin_init(void) {
+void grpc_server_load_reporting_plugin_init(void) {
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
- maybe_add_load_reporting_filter,
- (void *)&grpc_load_reporting_filter);
+ maybe_add_server_load_reporting_filter,
+ (void *)&grpc_server_load_reporting_filter);
}
-void grpc_load_reporting_plugin_shutdown() {}
+void grpc_server_load_reporting_plugin_shutdown() {}
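
The reworked maybe_add_server_load_reporting_filter now probes the stack builder for an existing filter of the same name before prepending. A compressed sketch of that find-then-insert guard on a plain linked list (names and list shape are illustrative, not the stack-builder API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct node { const char *name; struct node *next; } node;

    static node *find(node *head, const char *name) {
      while (head != NULL && strcmp(head->name, name) != 0) head = head->next;
      return head; /* NULL plays the role of "iterator is at end" */
    }

    static bool maybe_prepend(node **head, node *filter) {
      if (find(*head, filter->name) != NULL) return true; /* already present */
      filter->next = *head;
      *head = filter;
      return true;
    }

    int main(void) {
      node lr = {"load_reporting", NULL};
      node *stack = NULL;
      maybe_prepend(&stack, &lr);
      maybe_prepend(&stack, &lr); /* second call is a no-op */
      printf("head=%s next=%p\n", stack->name, (void *)stack->next);
      return 0;
    }
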
diff --git a/src/core/ext/filters/load_reporting/load_reporting.h b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
index fc04d2826a..65a6d0900e 100644
--- a/src/core/ext/filters/load_reporting/load_reporting.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
@@ -16,8 +16,8 @@
*
*/
-#ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_H
-#define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_H
+#ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H
+#define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H
#include <grpc/impl/codegen/grpc_types.h>
@@ -55,4 +55,5 @@ typedef struct grpc_load_reporting_call_data {
/** Return a \a grpc_arg enabling load reporting */
grpc_arg grpc_load_reporting_enable_arg();
-#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_H */
+#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H \
+ */
diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c
index 7d748b9c32..0ac803ed41 100644
--- a/src/core/ext/filters/max_age/max_age_filter.c
+++ b/src/core/ext/filters/max_age/max_age_filter.c
@@ -273,7 +273,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- channel_data* chand = elem->channel_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
decrease_call_count(exec_ctx, chand);
}
@@ -391,7 +391,6 @@ const grpc_channel_filter grpc_max_age_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"max_age"};
@@ -403,7 +402,7 @@ static bool maybe_add_max_age_filter(grpc_exec_ctx* exec_ctx,
bool enable =
grpc_channel_arg_get_integer(
grpc_channel_args_find(channel_args, GRPC_ARG_MAX_CONNECTION_AGE_MS),
- MAX_CONNECTION_AGE_INTEGER_OPTIONS) != INT_MAX &&
+ MAX_CONNECTION_AGE_INTEGER_OPTIONS) != INT_MAX ||
grpc_channel_arg_get_integer(
grpc_channel_args_find(channel_args, GRPC_ARG_MAX_CONNECTION_IDLE_MS),
MAX_CONNECTION_IDLE_INTEGER_OPTIONS) != INT_MAX;
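
The one-character change above ('&&' to '||') fixes the enable condition: INT_MAX is the not-configured sentinel, and the filter should be added when either the max connection age or the max idle time is set, not only when both are. A runnable distillation:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool enable_max_age(int max_age_ms, int max_idle_ms) {
      /* Either configured limit is enough to want the filter installed. */
      return max_age_ms != INT_MAX || max_idle_ms != INT_MAX;
    }

    int main(void) {
      printf("%d\n", enable_max_age(30000, INT_MAX));   /* 1: age alone enables */
      printf("%d\n", enable_max_age(INT_MAX, INT_MAX)); /* 0: nothing set */
      return 0;
    }
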
diff --git a/src/core/ext/filters/message_size/message_size_filter.c b/src/core/ext/filters/message_size/message_size_filter.c
index 846c7df69a..47763b1deb 100644
--- a/src/core/ext/filters/message_size/message_size_filter.c
+++ b/src/core/ext/filters/message_size/message_size_filter.c
@@ -68,6 +68,7 @@ static void* message_size_limits_create_from_json(const grpc_json* json) {
}
typedef struct call_data {
+ grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
@@ -131,7 +132,8 @@ static void start_transport_stream_op_batch(
exec_ctx, op,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_RESOURCE_EXHAUSTED));
+ GRPC_STATUS_RESOURCE_EXHAUSTED),
+ calld->call_combiner);
gpr_free(message_string);
return;
}
@@ -152,6 +154,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
const grpc_call_element_args* args) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
+ calld->call_combiner = args->call_combiner;
calld->next_recv_message_ready = NULL;
GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -259,7 +262,6 @@ const grpc_channel_filter grpc_message_size_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"message_size"};
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
index b4d2cb4b8c..c8b2fe5f99 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
@@ -177,7 +177,6 @@ const grpc_channel_filter grpc_workaround_cronet_compression_filter = {
0,
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"workaround_cronet_compression"};
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
index 983691bbad..202bcd47f5 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -93,8 +93,8 @@ static void chttp2_connector_shutdown(grpc_exec_ctx *exec_ctx,
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_handshaker_args *args = arg;
- chttp2_connector *c = args->user_data;
+ grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
+ chttp2_connector *c = (chttp2_connector *)args->user_data;
gpr_mu_lock(&c->mu);
if (error != GRPC_ERROR_NONE || c->shutdown) {
if (error == GRPC_ERROR_NONE) {
@@ -143,7 +143,7 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
}
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- chttp2_connector *c = arg;
+ chttp2_connector *c = (chttp2_connector *)arg;
gpr_mu_lock(&c->mu);
GPR_ASSERT(c->connecting);
c->connecting = false;
@@ -161,7 +161,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error));
}
gpr_mu_unlock(&c->mu);
- chttp2_connector_unref(exec_ctx, arg);
+ chttp2_connector_unref(exec_ctx, (grpc_connector *)arg);
} else {
GPR_ASSERT(c->endpoint != NULL);
start_handshake_locked(exec_ctx, c);
@@ -198,7 +198,7 @@ static const grpc_connector_vtable chttp2_connector_vtable = {
chttp2_connector_connect};
grpc_connector *grpc_chttp2_connector_create() {
- chttp2_connector *c = gpr_zalloc(sizeof(*c));
+ chttp2_connector *c = (chttp2_connector *)gpr_zalloc(sizeof(*c));
c->base.vtable = &chttp2_connector_vtable;
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
index cccb347bf1..6410a6043d 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
@@ -55,7 +55,7 @@ static grpc_channel *client_channel_factory_create_channel(
}
// Add channel arg containing the server URI.
grpc_arg arg = grpc_channel_arg_string_create(
- GRPC_ARG_SERVER_URI,
+ (char *)GRPC_ARG_SERVER_URI,
grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
const char *to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args *new_args =
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
index 0346d50b6c..dd88136f7b 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
@@ -42,7 +42,7 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
(target, fd, args));
grpc_arg default_authority_arg = grpc_channel_arg_string_create(
- GRPC_ARG_DEFAULT_AUTHORITY, "test.authority");
+ (char *)GRPC_ARG_DEFAULT_AUTHORITY, (char *)"test.authority");
grpc_channel_args *final_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);
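
Both channel-create changes cast string constants to char * because grpc_channel_arg_string_create takes non-const pointers. In C the literal converts silently; the explicit cast is what keeps a C++ compilation of the same file from rejecting it. A sketch with an invented API of the same shape:

    #include <stdio.h>

    /* Stand-in for an API that takes char * for historical reasons but
       never mutates the strings. */
    typedef struct { char *key; char *value; } string_arg;

    static string_arg make_string_arg(char *key, char *value) {
      string_arg a = {key, value};
      return a;
    }

    int main(void) {
      string_arg a = make_string_arg((char *)"grpc.default_authority",
                                     (char *)"test.authority");
      printf("%s=%s\n", a.key, a.value);
      return 0;
    }
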
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index f207155900..60244e163b 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -52,7 +52,7 @@ typedef struct {
} server_state;
typedef struct {
- server_state *server_state;
+ server_state *svr_state;
grpc_pollset *accepting_pollset;
grpc_tcp_server_acceptor *acceptor;
grpc_handshake_manager *handshake_mgr;
@@ -60,10 +60,11 @@ typedef struct {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_handshaker_args *args = arg;
- server_connection_state *connection_state = args->user_data;
- gpr_mu_lock(&connection_state->server_state->mu);
- if (error != GRPC_ERROR_NONE || connection_state->server_state->shutdown) {
+ grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
+ server_connection_state *connection_state =
+ (server_connection_state *)args->user_data;
+ gpr_mu_lock(&connection_state->svr_state->mu);
+ if (error != GRPC_ERROR_NONE || connection_state->svr_state->shutdown) {
const char *error_str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Handshaking failed: %s", error_str);
@@ -88,7 +89,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport *transport =
grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0);
grpc_server_setup_transport(
- exec_ctx, connection_state->server_state->server, transport,
+ exec_ctx, connection_state->svr_state->server, transport,
connection_state->accepting_pollset, args->args);
grpc_chttp2_transport_start_reading(exec_ctx, transport,
args->read_buffer);
@@ -96,11 +97,11 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
}
grpc_handshake_manager_pending_list_remove(
- &connection_state->server_state->pending_handshake_mgrs,
+ &connection_state->svr_state->pending_handshake_mgrs,
connection_state->handshake_mgr);
- gpr_mu_unlock(&connection_state->server_state->mu);
+ gpr_mu_unlock(&connection_state->svr_state->mu);
grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
- grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp_server);
+ grpc_tcp_server_unref(exec_ctx, connection_state->svr_state->tcp_server);
gpr_free(connection_state->acceptor);
gpr_free(connection_state);
}
@@ -108,7 +109,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_pollset *accepting_pollset,
grpc_tcp_server_acceptor *acceptor) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
if (state->shutdown) {
gpr_mu_unlock(&state->mu);
@@ -123,8 +124,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
gpr_mu_unlock(&state->mu);
grpc_tcp_server_ref(state->tcp_server);
server_connection_state *connection_state =
- gpr_malloc(sizeof(*connection_state));
- connection_state->server_state = state;
+ (server_connection_state *)gpr_malloc(sizeof(*connection_state));
+ connection_state->svr_state = state;
connection_state->accepting_pollset = accepting_pollset;
connection_state->acceptor = acceptor;
connection_state->handshake_mgr = handshake_mgr;
@@ -143,7 +144,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
void *arg, grpc_pollset **pollsets,
size_t pollset_count) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
state->shutdown = false;
gpr_mu_unlock(&state->mu);
@@ -153,7 +154,7 @@ static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
/* ensure all threads have unlocked */
gpr_mu_lock(&state->mu);
grpc_closure *destroy_done = state->server_destroy_listener_done;
@@ -178,7 +179,7 @@ static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
grpc_server *server, void *arg,
grpc_closure *destroy_done) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
state->shutdown = true;
state->server_destroy_listener_done = destroy_done;
@@ -200,6 +201,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
grpc_error *err = GRPC_ERROR_NONE;
server_state *state = NULL;
grpc_error **errors = NULL;
+ size_t naddrs = 0;
*port_num = -1;
@@ -208,7 +210,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
if (err != GRPC_ERROR_NONE) {
goto error;
}
- state = gpr_zalloc(sizeof(*state));
+ state = (server_state *)gpr_zalloc(sizeof(*state));
GRPC_CLOSURE_INIT(&state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, state,
grpc_schedule_on_exec_ctx);
@@ -224,8 +226,8 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
state->shutdown = true;
gpr_mu_init(&state->mu);
- const size_t naddrs = resolved->naddrs;
- errors = gpr_malloc(sizeof(*errors) * naddrs);
+ naddrs = resolved->naddrs;
+ errors = (grpc_error **)gpr_malloc(sizeof(*errors) * naddrs);
for (i = 0; i < naddrs; i++) {
errors[i] =
grpc_tcp_server_add_port(tcp_server, &resolved->addrs[i], &port_temp);
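
The chttp2_server change is mostly mechanical: the member server_state, which shared the name of its own typedef, becomes svr_state (plausibly so the code stays unambiguous when compiled as C++, where the member name would hide the type; the motivation is an assumption). In miniature:

    typedef struct { int port; } server_state;

    typedef struct {
      server_state *svr_state; /* was "server_state *server_state;" */
    } server_connection_state;

    int main(void) {
      server_state st = {50051};
      server_connection_state conn = {&st};
      return conn.svr_state->port == 50051 ? 0 : 1;
    }
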
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
index 78551df9c3..6d09953830 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
@@ -23,6 +23,7 @@
void grpc_chttp2_plugin_init(void) {
grpc_register_tracer(&grpc_http_trace);
grpc_register_tracer(&grpc_flowctl_trace);
+ grpc_register_tracer(&grpc_trace_http2_stream_state);
#ifndef NDEBUG
grpc_register_tracer(&grpc_trace_chttp2_refcount);
#endif
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 7bad188f4e..acf49632ff 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -34,6 +34,7 @@
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/compression/stream_compression.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/timer.h"
@@ -63,6 +64,11 @@
#define DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS false
#define KEEPALIVE_TIME_BACKOFF_MULTIPLIER 2
+#define DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
+#define DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
+#define DEFAULT_MAX_PINGS_BETWEEN_DATA 0 /* unlimited */
+#define DEFAULT_MAX_PING_STRIKES 2
+
static int g_default_client_keepalive_time_ms =
DEFAULT_CLIENT_KEEPALIVE_TIME_MS;
static int g_default_client_keepalive_timeout_ms =
@@ -74,6 +80,13 @@ static int g_default_server_keepalive_timeout_ms =
static bool g_default_keepalive_permit_without_calls =
DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS;
+static int g_default_min_sent_ping_interval_without_data_ms =
+ DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS;
+static int g_default_min_recv_ping_interval_without_data_ms =
+ DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS;
+static int g_default_max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA;
+static int g_default_max_ping_strikes = DEFAULT_MAX_PING_STRIKES;
+
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
grpc_tracer_flag grpc_http_trace = GRPC_TRACER_INITIALIZER(false, "http");
grpc_tracer_flag grpc_flowctl_trace = GRPC_TRACER_INITIALIZER(false, "flowctl");
@@ -83,8 +96,6 @@ grpc_tracer_flag grpc_trace_chttp2_refcount =
GRPC_TRACER_INITIALIZER(false, "chttp2_refcount");
#endif
-static const grpc_transport_vtable vtable;
-
/* forward declarations of various callbacks that we'll build closures around */
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
@@ -145,18 +156,14 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
-static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type,
- grpc_closure *on_initiate,
- grpc_closure *on_complete);
+static void send_ping_locked(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate,
+ grpc_closure *on_complete,
+ grpc_chttp2_initiate_write_reason initiate_write_reason);
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error);
-#define DEFAULT_MIN_TIME_BETWEEN_PINGS_MS 0
-#define DEFAULT_MAX_PINGS_BETWEEN_DATA 3
-#define DEFAULT_MAX_PING_STRIKES 2
-#define DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
-
/** keepalive-relevant functions */
static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
@@ -247,6 +254,8 @@ void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
#endif
+static const grpc_transport_vtable *get_vtable(void);
+
static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const grpc_channel_args *channel_args,
grpc_endpoint *ep, bool is_client) {
@@ -256,7 +265,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
- t->base.vtable = &vtable;
+ t->base.vtable = get_vtable();
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
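
Here the forward-declared "static const grpc_transport_vtable vtable" disappears in favor of a get_vtable() accessor. A plausible reading, sketched with hypothetical names: C++ rejects an uninitialized const object declaration, so the table moves to a function-local static behind an accessor:

    #include <stdio.h>

    typedef struct { void (*op)(void); } vtable;

    static void real_op(void) { puts("op"); }

    /* Defining the table inside the accessor avoids a tentative forward
       declaration of a static const object. */
    static const vtable *get_vtable(void) {
      static const vtable v = {real_op};
      return &v;
    }

    int main(void) {
      get_vtable()->op();
      return 0;
    }
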
@@ -345,7 +354,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (is_client) {
grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
GRPC_CHTTP2_CLIENT_CONNECT_STRING));
- grpc_chttp2_initiate_write(exec_ctx, t, "initial_write");
}
/* configure http2 the way we like it */
@@ -361,14 +369,12 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
- t->ping_policy = (grpc_chttp2_repeated_ping_policy){
- .max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA,
- .min_time_between_pings =
- gpr_time_from_millis(DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, GPR_TIMESPAN),
- .max_ping_strikes = DEFAULT_MAX_PING_STRIKES,
- .min_ping_interval_without_data = gpr_time_from_millis(
- DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, GPR_TIMESPAN),
- };
+ t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
+ t->ping_policy.min_sent_ping_interval_without_data = gpr_time_from_millis(
+ g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN);
+ t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
+ t->ping_policy.min_recv_ping_interval_without_data = gpr_time_from_millis(
+ g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN);
/* Keepalive setting */
if (t->is_client) {
@@ -427,29 +433,37 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
t->ping_policy.max_pings_without_data = grpc_channel_arg_get_integer(
&channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_PINGS_BETWEEN_DATA, 0, INT_MAX});
+ (grpc_integer_options){g_default_max_pings_without_data, 0,
+ INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
t->ping_policy.max_ping_strikes = grpc_channel_arg_get_integer(
&channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_PING_STRIKES, 0, INT_MAX});
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS)) {
- t->ping_policy.min_time_between_pings = gpr_time_from_millis(
- grpc_channel_arg_get_integer(
- &channel_args->args[i],
- (grpc_integer_options){DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, 0,
- INT_MAX}),
- GPR_TIMESPAN);
+ (grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX});
} else if (0 ==
- strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS)) {
- t->ping_policy.min_ping_interval_without_data = gpr_time_from_millis(
- grpc_channel_arg_get_integer(
- &channel_args->args[i],
- (grpc_integer_options){
- DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, 0, INT_MAX}),
- GPR_TIMESPAN);
+ strcmp(
+ channel_args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ t->ping_policy.min_sent_ping_interval_without_data =
+ gpr_time_from_millis(
+ grpc_channel_arg_get_integer(
+ &channel_args->args[i],
+ (grpc_integer_options){
+ g_default_min_sent_ping_interval_without_data_ms, 0,
+ INT_MAX}),
+ GPR_TIMESPAN);
+ } else if (0 ==
+ strcmp(
+ channel_args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ t->ping_policy.min_recv_ping_interval_without_data =
+ gpr_time_from_millis(
+ grpc_channel_arg_get_integer(
+ &channel_args->args[i],
+ (grpc_integer_options){
+ g_default_min_recv_ping_interval_without_data_ms, 0,
+ INT_MAX}),
+ GPR_TIMESPAN);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
t->write_buffer_size = (uint32_t)grpc_channel_arg_get_integer(
@@ -556,13 +570,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
- GRPC_CLOSURE_INIT(&t->write_action, write_action, t,
- t->opt_target == GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT
- ? grpc_executor_scheduler
- : grpc_schedule_on_exec_ctx);
-
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
+ /* No pings allowed before receiving a header or data frame. */
+ t->ping_state.pings_before_data_required = 0;
t->ping_state.is_delayed_ping_timer_set = false;
t->ping_recv_state.last_ping_recv_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -582,13 +591,14 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
- grpc_chttp2_initiate_write(exec_ctx, t, "init");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
post_benign_reclaimer(exec_ctx, t);
}
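
The ping-policy defaults above now flow from g_default_* globals through grpc_channel_arg_get_integer with a (default, min, max) options triple. A self-contained sketch of that bounded-integer-argument pattern; the names and the clamp-versus-default choice are assumptions, not gRPC's exact semantics:

    #include <limits.h>
    #include <stdio.h>

    typedef struct { int default_value; int min_value; int max_value; } int_options;

    static int get_integer(int present, int raw, int_options o) {
      if (!present) return o.default_value;
      if (raw < o.min_value) return o.min_value; /* out of range: pin to bound */
      if (raw > o.max_value) return o.max_value;
      return raw;
    }

    int main(void) {
      int_options opts = {300000 /* default: 5 min */, 0, INT_MAX};
      printf("%d\n", get_integer(1, -5, opts)); /* pinned to 0 */
      printf("%d\n", get_integer(0, 0, opts));  /* absent: 300000 */
      return 0;
    }
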
static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->destroying = 1;
close_transport_locked(
exec_ctx, t,
@@ -628,6 +638,9 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
grpc_endpoint_shutdown(exec_ctx, t->ep, GRPC_ERROR_REF(error));
+ if (t->ping_state.is_delayed_ping_timer_set) {
+ grpc_timer_cancel(exec_ctx, &t->ping_state.delayed_ping_timer);
+ }
switch (t->keepalive_state) {
case GRPC_CHTTP2_KEEPALIVE_STATE_WAITING:
grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
@@ -693,7 +706,10 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_init(&s->frame_storage);
+ grpc_slice_buffer_init(&s->compressed_data_buffer);
+ grpc_slice_buffer_init(&s->decompressed_data_buffer);
s->pending_byte_stream = false;
+ s->decompressed_header_bytes = 0;
GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s,
grpc_combiner_scheduler(t->combiner));
@@ -714,7 +730,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
- grpc_chttp2_stream *s = sp;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
grpc_chttp2_transport *t = s->t;
GPR_TIMER_BEGIN("destroy_stream", 0);
@@ -727,14 +743,8 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_slice_buffer_destroy_internal(exec_ctx,
&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_destroy_internal(exec_ctx, &s->frame_storage);
- if (s->compressed_data_buffer) {
- grpc_slice_buffer_destroy_internal(exec_ctx, s->compressed_data_buffer);
- gpr_free(s->compressed_data_buffer);
- }
- if (s->decompressed_data_buffer) {
- grpc_slice_buffer_destroy_internal(exec_ctx, s->decompressed_data_buffer);
- gpr_free(s->decompressed_data_buffer);
- }
+ grpc_slice_buffer_destroy_internal(exec_ctx, &s->compressed_data_buffer);
+ grpc_slice_buffer_destroy_internal(exec_ctx, &s->decompressed_data_buffer);
grpc_chttp2_list_remove_stalled_by_transport(t, s);
grpc_chttp2_list_remove_stalled_by_stream(t, s);
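
The stream's compression buffers change from optional heap-allocated pointers to plain by-value members: init_stream initializes both unconditionally, and destroy_stream_locked drops the NULL guards and gpr_free calls. Schematically:

    #include <stdio.h>

    /* Before: optional heap buffers with guarded frees. After: by-value
       members initialized and destroyed unconditionally, removing NULL
       checks and a malloc/free pair per stream. Illustrative shapes only. */
    typedef struct { size_t length; } slice_buffer;

    typedef struct {
      slice_buffer compressed_data_buffer;   /* was slice_buffer * */
      slice_buffer decompressed_data_buffer; /* was slice_buffer * */
    } stream;

    static void stream_init(stream *s) {
      s->compressed_data_buffer.length = 0;
      s->decompressed_data_buffer.length = 0;
    }

    int main(void) {
      stream s;
      stream_init(&s); /* destroy is now unconditional: no NULL guards */
      printf("%zu\n", s.compressed_data_buffer.length);
      return 0;
    }
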
@@ -798,7 +808,7 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
uint32_t id) {
- return grpc_chttp2_stream_map_find(&t->stream_map, id);
+ return (grpc_chttp2_stream *)grpc_chttp2_stream_map_find(&t->stream_map, id);
}
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
@@ -850,13 +860,92 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
+static void inc_initiate_write_reason(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_initiate_write_reason reason) {
+ switch (reason) {
+ case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx);
+ break;
+ }
+}
+
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, const char *reason) {
+ grpc_chttp2_transport *t,
+ grpc_chttp2_initiate_write_reason reason) {
GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0);
switch (t->write_state) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
+ inc_initiate_write_reason(exec_ctx, reason);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+ grpc_chttp2_initiate_write_reason_string(reason));
+ t->is_first_write_in_batch = true;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_SCHED(
exec_ctx,
@@ -867,7 +956,7 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
- reason);
+ grpc_chttp2_initiate_write_reason_string(reason));
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
break;
@@ -875,52 +964,96 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
}
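
grpc_chttp2_initiate_write now takes a typed reason (bumping a per-reason stat) instead of a string, but the underlying write-state machine is unchanged. Collapsed to its transitions:

    #include <stdio.h>

    typedef enum { IDLE, WRITING, WRITING_WITH_MORE } write_state;

    /* A request to write either starts a write from IDLE or records that
       more work arrived while a write was already in flight. */
    static write_state initiate(write_state s) {
      switch (s) {
        case IDLE: return WRITING;
        case WRITING: return WRITING_WITH_MORE;
        case WRITING_WITH_MORE: return WRITING_WITH_MORE;
      }
      return s;
    }

    int main(void) {
      write_state s = IDLE;
      s = initiate(s); /* IDLE -> WRITING: schedules the write closure */
      s = initiate(s); /* WRITING -> WRITING_WITH_MORE */
      printf("%d\n", s);
      return 0;
    }
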
-void grpc_chttp2_become_writable(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- grpc_chttp2_stream_write_type stream_write_type, const char *reason) {
+void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
}
- switch (stream_write_type) {
- case GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK:
- break;
- case GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED:
- grpc_chttp2_initiate_write(exec_ctx, t, reason);
- break;
- case GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED:
- grpc_chttp2_initiate_write(exec_ctx, t, reason);
- break;
+}
+
+static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t,
+ bool early_results_scheduled,
+ bool partial_write) {
+ /* if it's not the first write in a batch, always offload to the executor:
+ we'll probably end up queuing against the kernel anyway, so we'll likely
+ get better latency overall if we switch writing work elsewhere and continue
+ with application work above */
+ if (!t->is_first_write_in_batch) {
+ return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
+ }
+ /* equivalently, if it's a partial write, we *know* we're going to be taking a
+ thread jump to write it because of the above, may as well do so
+ immediately */
+ if (partial_write) {
+ return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
+ }
+ switch (t->opt_target) {
+ case GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT:
+ /* executor gives us the largest probability of being able to batch a
+ * write with others on this transport */
+ return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
+ case GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY:
+ return grpc_schedule_on_exec_ctx;
+ }
+ GPR_UNREACHABLE_CODE(return NULL);
+}
+
+#define WRITE_STATE_TUPLE_TO_INT(p, i) (2 * (int)(p) + (int)(i))
+static const char *begin_writing_desc(bool partial, bool inlined) {
+ switch (WRITE_STATE_TUPLE_TO_INT(partial, inlined)) {
+ case WRITE_STATE_TUPLE_TO_INT(false, false):
+ return "begin write in background";
+ case WRITE_STATE_TUPLE_TO_INT(false, true):
+ return "begin write in current thread";
+ case WRITE_STATE_TUPLE_TO_INT(true, false):
+ return "begin partial write in background";
+ case WRITE_STATE_TUPLE_TO_INT(true, true):
+ return "begin partial write in current thread";
}
+ GPR_UNREACHABLE_CODE(return "bad state tuple");
}
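
WRITE_STATE_TUPLE_TO_INT packs the (partial, inlined) pair into 0..3 so begin_writing_desc can switch over every combination exhaustively. The same trick, runnable on its own:

    #include <stdio.h>

    #define TUPLE_TO_INT(p, i) (2 * (int)(p) + (int)(i))

    int main(void) {
      for (int p = 0; p < 2; p++) {
        for (int i = 0; i < 2; i++) {
          switch (TUPLE_TO_INT(p, i)) {
            case TUPLE_TO_INT(0, 0): puts("full write, background"); break;
            case TUPLE_TO_INT(0, 1): puts("full write, current thread"); break;
            case TUPLE_TO_INT(1, 0): puts("partial write, background"); break;
            case TUPLE_TO_INT(1, 1): puts("partial write, current thread"); break;
          }
        }
      }
      return 0;
    }
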
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("write_action_begin_locked", 0);
- grpc_chttp2_transport *t = gt;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
- switch (t->closed ? GRPC_CHTTP2_NOTHING_TO_WRITE
- : grpc_chttp2_begin_write(exec_ctx, t)) {
- case GRPC_CHTTP2_NOTHING_TO_WRITE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
- "begin writing nothing");
- GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
- break;
- case GRPC_CHTTP2_PARTIAL_WRITE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
- "begin writing partial");
- GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
- break;
- case GRPC_CHTTP2_FULL_WRITE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
- "begin writing");
- GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
- break;
+ grpc_chttp2_begin_write_result r;
+ if (t->closed) {
+ r.writing = false;
+ } else {
+ r = grpc_chttp2_begin_write(exec_ctx, t);
+ }
+ if (r.writing) {
+ if (r.partial) {
+ GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx);
+ }
+ if (!t->is_first_write_in_batch) {
+ GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx);
+ }
+ grpc_closure_scheduler *scheduler =
+ write_scheduler(t, r.early_results_scheduled, r.partial);
+ if (scheduler != grpc_schedule_on_exec_ctx) {
+ GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx);
+ }
+ set_write_state(
+ exec_ctx, t, r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
+ : GRPC_CHTTP2_WRITE_STATE_WRITING,
+ begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx));
+ GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_INIT(&t->write_action,
+ write_action, t, scheduler),
+ GRPC_ERROR_NONE);
+ } else {
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
+ "begin writing nothing");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
}
GPR_TIMER_END("write_action_begin_locked", 0);
}
static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
- grpc_chttp2_transport *t = gt;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_TIMER_BEGIN("write_action", 0);
grpc_endpoint_write(
exec_ctx, t->ep, &t->outbuf,
@@ -932,7 +1065,7 @@ static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (error != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
@@ -957,7 +1090,8 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
GPR_TIMER_MARK("state=writing_stale_no_poller", 0);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
- "continue writing [!covered]");
+ "continue writing");
+ t->is_first_write_in_batch = false;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_RUN(
exec_ctx,
@@ -1059,9 +1193,9 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
post_destructive_reclaimer(exec_ctx, t);
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "new_stream");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM);
}
/* cancel out streams that will never be started */
while (t->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@@ -1110,12 +1244,14 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACER_ON(grpc_http_trace)) {
const char *errstr = grpc_error_string(error);
- gpr_log(GPR_DEBUG,
- "complete_closure_step: %p refs=%d flags=0x%04x desc=%s err=%s",
- closure,
- (int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
- (int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT),
- desc, errstr);
+ gpr_log(
+ GPR_DEBUG,
+ "complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
+ "write_state=%s",
+ t, closure,
+ (int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
+ (int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT), desc,
+ errstr, write_state_name(t->write_state));
}
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
@@ -1156,9 +1292,9 @@ static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s) {
if (s->id != 0 && (!s->write_buffering ||
s->flow_controlled_buffer.length > t->write_buffer_size)) {
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "op.send_message");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE);
}
}
@@ -1190,15 +1326,19 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
} else {
grpc_chttp2_write_cb *cb = t->write_cb_pool;
if (cb == NULL) {
- cb = gpr_malloc(sizeof(*cb));
+ cb = (grpc_chttp2_write_cb *)gpr_malloc(sizeof(*cb));
} else {
t->write_cb_pool = cb->next;
}
cb->call_at_byte = notify_offset;
cb->closure = s->fetching_send_message_finished;
s->fetching_send_message_finished = NULL;
- cb->next = s->on_write_finished_cbs;
- s->on_write_finished_cbs = cb;
+ grpc_chttp2_write_cb **list =
+ s->fetching_send_message->flags & GRPC_WRITE_THROUGH
+ ? &s->on_write_finished_cbs
+ : &s->on_flow_controlled_cbs;
+ cb->next = *list;
+ *list = cb;
}
s->fetching_send_message = NULL;
return; /* early out */
@@ -1218,7 +1358,7 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
grpc_error *error) {
- grpc_chttp2_stream *s = gs;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
grpc_chttp2_transport *t = s->t;
if (error == GRPC_ERROR_NONE) {
error = grpc_byte_stream_pull(exec_ctx, s->fetching_send_message,
@@ -1253,11 +1393,14 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
- grpc_transport_stream_op_batch *op = stream_op;
- grpc_chttp2_stream *s = op->handler_private.extra_arg;
+ grpc_transport_stream_op_batch *op =
+ (grpc_transport_stream_op_batch *)stream_op;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)op->handler_private.extra_arg;
grpc_transport_stream_op_batch_payload *op_payload = op->payload;
grpc_chttp2_transport *t = s->t;
+ GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx);
+
if (GRPC_TRACER_ON(grpc_http_trace)) {
char *str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
@@ -1291,13 +1434,27 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
if (op->cancel_stream) {
+ GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx);
grpc_chttp2_cancel_stream(exec_ctx, t, s,
op_payload->cancel_stream.cancel_error);
}
if (op->send_initial_metadata) {
+ GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx);
GPR_ASSERT(s->send_initial_metadata_finished == NULL);
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
+
+ /* Identify stream compression */
+ if (op_payload->send_initial_metadata.send_initial_metadata->idx.named
+ .content_encoding == NULL ||
+ grpc_stream_compression_method_parse(
+ GRPC_MDVALUE(
+ op_payload->send_initial_metadata.send_initial_metadata->idx
+ .named.content_encoding->md),
+ true, &s->stream_compression_method) == 0) {
+ s->stream_compression_method = GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
+ }
+
s->send_initial_metadata_finished = add_closure_barrier(on_complete);
s->send_initial_metadata =
op_payload->send_initial_metadata.send_initial_metadata;
@@ -1341,15 +1498,13 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
} else {
GPR_ASSERT(s->id != 0);
- grpc_chttp2_stream_write_type write_type =
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED;
- if (op->send_message &&
- (op->payload->send_message.send_message->flags &
- GRPC_WRITE_BUFFER_HINT)) {
- write_type = GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK;
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ if (!(op->send_message &&
+ (op->payload->send_message.send_message->flags &
+ GRPC_WRITE_BUFFER_HINT))) {
+ grpc_chttp2_initiate_write(
+ exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA);
}
- grpc_chttp2_become_writable(exec_ctx, t, s, write_type,
- "op.send_initial_metadata");
}
} else {
s->send_initial_metadata = NULL;
@@ -1361,17 +1516,31 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
"send_initial_metadata_finished");
}
}
+ if (op_payload->send_initial_metadata.peer_string != NULL) {
+ gpr_atm_rel_store(op_payload->send_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ }
}
if (op->send_message) {
+ GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx);
+ GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(
+ exec_ctx, op->payload->send_message.send_message->length);
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
if (s->write_closed) {
+ // Return an error unless the client has already received trailing
+ // metadata from the server, since an application using a
+ // streaming call might send another message before getting a
+ // recv_message failure, breaking out of its loop, and then
+ // starting recv_trailing_metadata.
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Attempt to send message after stream was closed",
- &s->write_closed_error, 1),
+ t->is_client && s->received_trailing_metadata
+ ? GRPC_ERROR_NONE
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Attempt to send message after stream was closed",
+ &s->write_closed_error, 1),
"fetching_send_message_finished");
} else {
GPR_ASSERT(s->fetching_send_message == NULL);
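
The decision the comment above describes reduces to one predicate: a late send on a closed stream is forgiven only for a client that already holds the server's trailing metadata. As a tiny truth-table program:

    #include <stdbool.h>
    #include <stdio.h>

    static bool late_send_is_error(bool is_client, bool received_trailing_md) {
      /* The call's outcome is already known to such a client, so the late
         message is benign rather than an error. */
      return !(is_client && received_trailing_md);
    }

    int main(void) {
      printf("%d\n", late_send_is_error(true, true));  /* 0: swallow quietly */
      printf("%d\n", late_send_is_error(true, false)); /* 1: report error */
      printf("%d\n", late_send_is_error(false, true)); /* 1: servers always error */
      return 0;
    }
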
@@ -1401,6 +1570,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
if (op->send_trailing_metadata) {
+ GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx);
GPR_ASSERT(s->send_trailing_metadata_finished == NULL);
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->send_trailing_metadata_finished = add_closure_barrier(on_complete);
@@ -1442,14 +1612,15 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
} else if (s->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
bytes before going writable */
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "op.send_trailing_metadata");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(
+ exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA);
}
}
}
if (op->recv_initial_metadata) {
+ GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx);
GPR_ASSERT(s->recv_initial_metadata_ready == NULL);
s->recv_initial_metadata_ready =
op_payload->recv_initial_metadata.recv_initial_metadata_ready;
@@ -1457,10 +1628,15 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
op_payload->recv_initial_metadata.recv_initial_metadata;
s->trailing_metadata_available =
op_payload->recv_initial_metadata.trailing_metadata_available;
+ if (op_payload->recv_initial_metadata.peer_string != NULL) {
+ gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ }
grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
}
if (op->recv_message) {
+ GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx);
size_t already_received;
GPR_ASSERT(s->recv_message_ready == NULL);
GPR_ASSERT(!s->pending_byte_stream);
@@ -1482,6 +1658,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
if (op->recv_trailing_metadata) {
+ GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx);
GPR_ASSERT(s->recv_trailing_metadata_finished == NULL);
s->recv_trailing_metadata_finished = add_closure_barrier(on_complete);
s->recv_trailing_metadata =
@@ -1549,23 +1726,28 @@ static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ERROR_UNREF(error);
}
-static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type,
- grpc_closure *on_initiate, grpc_closure *on_ack) {
+static void send_ping_locked(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate,
+ grpc_closure *on_ack,
+ grpc_chttp2_initiate_write_reason initiate_write_reason) {
grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type];
grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE], on_initiate,
GRPC_ERROR_NONE);
if (grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
GRPC_ERROR_NONE)) {
- grpc_chttp2_initiate_write(exec_ctx, t, "send_ping");
+ grpc_chttp2_initiate_write(exec_ctx, t, initiate_write_reason);
}
}
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->ping_state.is_delayed_ping_timer_set = false;
- grpc_chttp2_initiate_write(exec_ctx, t, "retry_send_ping");
+ if (error == GRPC_ERROR_NONE) {
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
+ }
}
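
retry_initiate_ping_locked now checks the callback's error before initiating a write, so a delayed-ping timer cancelled at shutdown (see the new grpc_timer_cancel in close_transport_locked) no longer triggers a spurious write. The guard in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { OK, CANCELLED } status;

    static bool on_delayed_ping_timer(status st) {
      if (st != OK) return false; /* cancelled at shutdown: do nothing */
      return true;                /* fired cleanly: initiate the retry write */
    }

    int main(void) {
      printf("%d %d\n", on_delayed_ping_timer(OK),
             on_delayed_ping_timer(CANCELLED));
      return 0;
    }
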
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -1580,7 +1762,8 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
- grpc_chttp2_initiate_write(exec_ctx, t, "continue_pings");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS);
}
}
@@ -1593,7 +1776,8 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&slice, &http_error);
grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
grpc_slice_ref_internal(slice), &t->qbuf);
- grpc_chttp2_initiate_write(exec_ctx, t, "goaway_sent");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT);
GRPC_ERROR_UNREF(error);
}
@@ -1615,8 +1799,9 @@ void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
void *stream_op,
grpc_error *error_ignored) {
- grpc_transport_op *op = stream_op;
- grpc_chttp2_transport *t = op->handler_private.extra_arg;
+ grpc_transport_op *op = (grpc_transport_op *)stream_op;
+ grpc_chttp2_transport *t =
+ (grpc_chttp2_transport *)op->handler_private.extra_arg;
grpc_error *close_transport = op->disconnect_with_error;
if (op->goaway_error) {
@@ -1639,7 +1824,8 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
if (op->send_ping) {
send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, NULL,
- op->send_ping);
+ op->send_ping,
+ GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
}
if (op->on_connectivity_state_change != NULL) {
@@ -1715,20 +1901,20 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
&s->frame_storage);
s->unprocessed_incoming_frames_decompressed = false;
}
- if (s->stream_compression_recv_enabled &&
- !s->unprocessed_incoming_frames_decompressed) {
- GPR_ASSERT(s->decompressed_data_buffer->length == 0);
+ if (!s->unprocessed_incoming_frames_decompressed) {
+ GPR_ASSERT(s->decompressed_data_buffer.length == 0);
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx =
grpc_stream_compression_context_create(
- GRPC_STREAM_COMPRESSION_DECOMPRESS);
+ s->stream_decompression_method);
}
- if (!grpc_stream_decompress(s->stream_decompression_ctx,
- &s->unprocessed_incoming_frames_buffer,
- s->decompressed_data_buffer, NULL,
- GRPC_HEADER_SIZE_IN_BYTES,
- &end_of_context)) {
+ if (!grpc_stream_decompress(
+ s->stream_decompression_ctx,
+ &s->unprocessed_incoming_frames_buffer,
+ &s->decompressed_data_buffer, NULL,
+ GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes,
+ &end_of_context)) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
&s->frame_storage);
grpc_slice_buffer_reset_and_unref_internal(
@@ -1736,9 +1922,13 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Stream decompression error.");
} else {
+ s->decompressed_header_bytes += s->decompressed_data_buffer.length;
+ if (s->decompressed_header_bytes == GRPC_HEADER_SIZE_IN_BYTES) {
+ s->decompressed_header_bytes = 0;
+ }
error = grpc_deframe_unprocessed_incoming_frames(
- exec_ctx, &s->data_parser, s, s->decompressed_data_buffer, NULL,
- s->recv_message);
+ exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer,
+ NULL, s->recv_message);
if (end_of_context) {
grpc_stream_compression_context_destroy(
s->stream_decompression_ctx);
@@ -1787,15 +1977,14 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
bool pending_data = s->pending_byte_stream ||
s->unprocessed_incoming_frames_buffer.length > 0;
- if (s->stream_compression_recv_enabled && s->read_closed &&
- s->frame_storage.length > 0 && !pending_data && !s->seen_error &&
- s->recv_trailing_metadata_finished != NULL) {
+ if (s->read_closed && s->frame_storage.length > 0 && !pending_data &&
+ !s->seen_error && s->recv_trailing_metadata_finished != NULL) {
/* Maybe some SYNC_FLUSH data is left in frame_storage. Consume them and
* maybe decompress the next 5 bytes in the stream. */
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx = grpc_stream_compression_context_create(
- GRPC_STREAM_COMPRESSION_DECOMPRESS);
+ s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->frame_storage,
@@ -1808,6 +1997,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
} else {
if (s->unprocessed_incoming_frames_buffer.length > 0) {
s->unprocessed_incoming_frames_decompressed = true;
+ pending_data = true;
}
if (end_of_context) {
grpc_stream_compression_context_destroy(s->stream_decompression_ctx);
@@ -1815,8 +2005,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
}
}
- if (s->read_closed && s->frame_storage.length == 0 &&
- (!pending_data || s->seen_error) &&
+ if (s->read_closed && s->frame_storage.length == 0 && !pending_data &&
s->recv_trailing_metadata_finished != NULL) {
grpc_chttp2_incoming_metadata_buffer_publish(
exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata);
@@ -1829,7 +2018,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t id, grpc_error *error) {
- grpc_chttp2_stream *s = grpc_chttp2_stream_map_delete(&t->stream_map, id);
+ grpc_chttp2_stream *s =
+ (grpc_chttp2_stream *)grpc_chttp2_stream_map_delete(&t->stream_map, id);
GPR_ASSERT(s);
if (t->incoming_stream == s) {
t->incoming_stream = NULL;
@@ -1884,7 +2074,8 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
&s->stats.outgoing));
- grpc_chttp2_initiate_write(exec_ctx, t, "rst_stream");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
}
}
if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) {
@@ -1960,6 +2151,21 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
return error;
}
+static void flush_write_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_chttp2_write_cb **list,
+ grpc_error *error) {
+ while (*list) {
+ grpc_chttp2_write_cb *cb = *list;
+ *list = cb->next;
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
+ GRPC_ERROR_REF(error),
+ "on_write_finished_cb");
+ cb->next = t->write_cb_pool;
+ t->write_cb_pool = cb;
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
@@ -1979,16 +2185,9 @@ void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
"fetching_send_message_finished");
- while (s->on_write_finished_cbs) {
- grpc_chttp2_write_cb *cb = s->on_write_finished_cbs;
- s->on_write_finished_cbs = cb->next;
- grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
- GRPC_ERROR_REF(error),
- "on_write_finished_cb");
- cb->next = t->write_cb_pool;
- t->write_cb_pool = cb;
- }
- GRPC_ERROR_UNREF(error);
+ flush_write_list(exec_ctx, t, s, &s->on_write_finished_cbs,
+ GRPC_ERROR_REF(error));
+ flush_write_list(exec_ctx, t, s, &s->on_flow_controlled_cbs, error);
}
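
flush_write_list factors out the drain loop so grpc_chttp2_fail_pending_writes can run it twice, once per callback list; note it recycles nodes into t->write_cb_pool and consumes one error reference, hence the extra GRPC_ERROR_REF on the first call. The list mechanics alone:

    #include <stdio.h>

    typedef struct cb { struct cb *next; } cb;

    /* Pop every node off *list and push it onto the free pool, mirroring
       how write callbacks are recycled instead of freed. */
    static void flush_list(cb **list, cb **pool) {
      while (*list) {
        cb *c = *list;
        *list = c->next;
        c->next = *pool;
        *pool = c;
      }
    }

    int main(void) {
      cb nodes[3] = {{&nodes[1]}, {&nodes[2]}, {NULL}};
      cb *pending = &nodes[0], *pool = NULL;
      flush_list(&pending, &pool);
      printf("pending=%p pool head=%p\n", (void *)pending, (void *)pool);
      return 0;
    }
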
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
@@ -2197,7 +2396,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&s->stats.outgoing));
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error);
- grpc_chttp2_initiate_write(exec_ctx, t, "close_from_api");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API);
}
typedef struct {
@@ -2207,8 +2407,8 @@ typedef struct {
} cancel_stream_cb_args;
static void cancel_stream_cb(void *user_data, uint32_t key, void *stream) {
- cancel_stream_cb_args *args = user_data;
- grpc_chttp2_stream *s = stream;
+ cancel_stream_cb_args *args = (cancel_stream_cb_args *)user_data;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)stream;
grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s,
GRPC_ERROR_REF(args->error));
}
@@ -2232,21 +2432,20 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
break;
case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "immediate stream flowctl");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(
+ exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL);
break;
case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK,
- "queue stream flowctl");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
break;
}
switch (action.send_transport_update) {
case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
break;
case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
- grpc_chttp2_initiate_write(exec_ctx, t, "immediate transport flowctl");
+ grpc_chttp2_initiate_write(
+ exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL);
break;
      // this is the same as no action because every time the transport
      // enters the writing path it will send an update if one is needed
@@ -2264,7 +2463,8 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
(uint32_t)action.max_frame_size);
}
if (action.send_setting_update == GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY) {
- grpc_chttp2_initiate_write(exec_ctx, t, "immediate setting update");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS);
}
}
if (action.need_ping) {
@@ -2272,7 +2472,8 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
grpc_bdp_estimator_schedule_ping(&t->flow_control.bdp_estimator);
send_ping_locked(exec_ctx, t,
GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE,
- &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked);
+ &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked,
+ GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING);
}
}
@@ -2310,7 +2511,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("reading_action_locked", 0);
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
GRPC_ERROR_REF(error);
@@ -2351,9 +2552,10 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (t->flow_control.initial_window_update > 0) {
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
- grpc_chttp2_become_writable(
- exec_ctx, t, s, GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
- "unstalled");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(
+ exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING);
}
}
t->flow_control.initial_window_update = 0;
@@ -2395,7 +2597,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Start BDP ping", t->peer_string);
}
@@ -2408,7 +2610,7 @@ static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping", t->peer_string);
}
@@ -2450,6 +2652,36 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
&args->args[i],
(grpc_integer_options){g_default_keepalive_permit_without_calls,
0, 1});
+ } else if (0 ==
+ strcmp(args->args[i].key, GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
+ g_default_max_ping_strikes = grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX});
+ } else if (0 == strcmp(args->args[i].key,
+ GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
+ g_default_max_pings_without_data = grpc_channel_arg_get_integer(
+ &args->args[i], (grpc_integer_options){
+ g_default_max_pings_without_data, 0, INT_MAX});
+ } else if (0 ==
+ strcmp(
+ args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ g_default_min_sent_ping_interval_without_data_ms =
+ grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){
+ g_default_min_sent_ping_interval_without_data_ms, 0,
+ INT_MAX});
+ } else if (0 ==
+ strcmp(
+ args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ g_default_min_recv_ping_interval_without_data_ms =
+ grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){
+ g_default_min_recv_ping_interval_without_data_ms, 0,
+ INT_MAX});
}
}
}
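
Each new ping/keepalive knob above follows the same pattern: look the argument up by key and clamp its integer value to a [min, max] range around a default. A hedged sketch of that clamp follows; grpc_integer_options is the real struct, but the helper below is an illustrative reimplementation, not the library function.

    typedef struct {
      int default_value;
      int min_value;
      int max_value;
    } integer_options;

    /* Clamp a raw channel-arg integer into the allowed range, falling
       back to the default when the argument is absent. */
    static int arg_get_integer(int raw, int present, integer_options opts) {
      if (!present) return opts.default_value;
      if (raw < opts.min_value) return opts.min_value;
      if (raw > opts.max_value) return opts.max_value;
      return raw;
    }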
@@ -2457,7 +2689,7 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
if (t->destroying || t->closed) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@@ -2468,7 +2700,8 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE,
&t->start_keepalive_ping_locked,
- &t->finish_keepalive_ping_locked);
+ &t->finish_keepalive_ping_locked,
+ GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
} else {
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
grpc_timer_init(
@@ -2489,7 +2722,7 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
grpc_timer_init(
exec_ctx, &t->keepalive_watchdog_timer,
@@ -2499,7 +2732,7 @@ static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
@@ -2516,7 +2749,7 @@ static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@@ -2583,7 +2816,7 @@ static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_NONE;
grpc_chttp2_cancel_stream(exec_ctx, s->t, s, GRPC_ERROR_REF(error));
- s->byte_stream_error = error;
+ s->byte_stream_error = GRPC_ERROR_REF(error);
}
}
@@ -2597,7 +2830,8 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
void *argp,
grpc_error *error_ignored) {
- grpc_chttp2_incoming_byte_stream *bs = argp;
+ grpc_chttp2_incoming_byte_stream *bs =
+ (grpc_chttp2_incoming_byte_stream *)argp;
grpc_chttp2_transport *t = bs->transport;
grpc_chttp2_stream *s = bs->stream;
@@ -2680,29 +2914,31 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx,
grpc_error *error;
if (s->unprocessed_incoming_frames_buffer.length > 0) {
- if (s->stream_compression_recv_enabled &&
- !s->unprocessed_incoming_frames_decompressed) {
+ if (!s->unprocessed_incoming_frames_decompressed) {
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx = grpc_stream_compression_context_create(
- GRPC_STREAM_COMPRESSION_DECOMPRESS);
+ s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->unprocessed_incoming_frames_buffer,
- s->decompressed_data_buffer, NULL, MAX_SIZE_T,
- &end_of_context)) {
+ &s->decompressed_data_buffer, NULL,
+ MAX_SIZE_T, &end_of_context)) {
error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream decompression error.");
return error;
}
GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
grpc_slice_buffer_swap(&s->unprocessed_incoming_frames_buffer,
- s->decompressed_data_buffer);
+ &s->decompressed_data_buffer);
s->unprocessed_incoming_frames_decompressed = true;
if (end_of_context) {
grpc_stream_compression_context_destroy(s->stream_decompression_ctx);
s->stream_decompression_ctx = NULL;
}
+ if (s->unprocessed_incoming_frames_buffer.length == 0) {
+ *slice = grpc_empty_slice();
+ }
}
error = grpc_deframe_unprocessed_incoming_frames(
exec_ctx, &s->data_parser, s, &s->unprocessed_incoming_frames_buffer,
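
The pull path above now decompresses into a scratch buffer and then swaps it with the unprocessed-frames buffer, so the deframer always reads from the same buffer whether or not decompression ran; the added empty-buffer check returns an empty slice instead of deframing nothing. A sketch of the swap pattern with plain byte buffers (illustrative, not the grpc_slice_buffer API):

    #include <stddef.h>

    typedef struct {
      unsigned char *bytes;
      size_t len;
    } buf;

    /* Decompress src into scratch (stubbed out here), then swap the two
       so the caller keeps reading from src as before. */
    static void decompress_and_swap(buf *src, buf *scratch) {
      /* ... fill scratch from src, consuming all of src ... */
      buf tmp = *src;
      *src = *scratch;
      *scratch = tmp;  /* scratch now holds the drained input */
    }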
@@ -2804,7 +3040,8 @@ static const grpc_byte_stream_vtable grpc_chttp2_incoming_byte_stream_vtable = {
static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
void *byte_stream,
grpc_error *error_ignored) {
- grpc_chttp2_incoming_byte_stream *bs = byte_stream;
+ grpc_chttp2_incoming_byte_stream *bs =
+ (grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_chttp2_stream *s = bs->stream;
grpc_chttp2_transport *t = s->t;
@@ -2819,7 +3056,8 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
uint32_t frame_size, uint32_t flags) {
grpc_chttp2_incoming_byte_stream *incoming_byte_stream =
- gpr_malloc(sizeof(*incoming_byte_stream));
+ (grpc_chttp2_incoming_byte_stream *)gpr_malloc(
+ sizeof(*incoming_byte_stream));
incoming_byte_stream->base.length = frame_size;
incoming_byte_stream->remaining_bytes = frame_size;
incoming_byte_stream->base.flags = flags;
@@ -2860,7 +3098,7 @@ static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (error == GRPC_ERROR_NONE &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
    /* Channel with no active streams: send a goaway to try to make it
@@ -2890,11 +3128,12 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
t->destructive_reclaimer_registered = false;
if (error == GRPC_ERROR_NONE && n > 0) {
- grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
+ grpc_chttp2_stream *s =
+ (grpc_chttp2_stream *)grpc_chttp2_stream_map_rand(&t->stream_map);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id);
@@ -2920,16 +3159,58 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
/*******************************************************************************
- * INTEGRATION GLUE
+ * MONITORING
*/
-static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
+const char *grpc_chttp2_initiate_write_reason_string(
+ grpc_chttp2_initiate_write_reason reason) {
+ switch (reason) {
+ case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
+ return "INITIAL_WRITE";
+ case GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM:
+ return "START_NEW_STREAM";
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE:
+ return "SEND_MESSAGE";
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA:
+ return "SEND_INITIAL_METADATA";
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA:
+ return "SEND_TRAILING_METADATA";
+ case GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING:
+ return "RETRY_SEND_PING";
+ case GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS:
+ return "CONTINUE_PINGS";
+ case GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT:
+ return "GOAWAY_SENT";
+ case GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM:
+ return "RST_STREAM";
+ case GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API:
+ return "CLOSE_FROM_API";
+ case GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL:
+ return "STREAM_FLOW_CONTROL";
+ case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL:
+ return "TRANSPORT_FLOW_CONTROL";
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
+ return "SEND_SETTINGS";
+ case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING:
+ return "BDP_ESTIMATOR_PING";
+ case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
+ return "FLOW_CONTROL_UNSTALLED_BY_SETTING";
+ case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:
+ return "FLOW_CONTROL_UNSTALLED_BY_UPDATE";
+ case GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING:
+ return "APPLICATION_PING";
+ case GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING:
+ return "KEEPALIVE_PING";
+ case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED:
+ return "TRANSPORT_FLOW_CONTROL_UNSTALLED";
+ case GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE:
+ return "PING_RESPONSE";
+ case GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM:
+ return "FORCE_RST_STREAM";
+ }
+ GPR_UNREACHABLE_CODE(return "unknown");
}
-/*******************************************************************************
- * MONITORING
- */
static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *t) {
return ((grpc_chttp2_transport *)t)->ep;
@@ -2944,13 +3225,15 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
perform_transport_op,
destroy_stream,
destroy_transport,
- chttp2_get_peer,
chttp2_get_endpoint};
+static const grpc_transport_vtable *get_vtable(void) { return &vtable; }
+
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
grpc_endpoint *ep, int is_client) {
- grpc_chttp2_transport *t = gpr_zalloc(sizeof(grpc_chttp2_transport));
+ grpc_chttp2_transport *t =
+ (grpc_chttp2_transport *)gpr_zalloc(sizeof(grpc_chttp2_transport));
init_transport(exec_ctx, t, channel_args, ep, is_client != 0);
return &t->base;
}
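
The recurring (type *) casts on gpr_malloc/gpr_zalloc and stream-map results throughout this commit look redundant in C, where void * converts implicitly, but they are exactly what is required if these files are ever compiled as C++, where that conversion is an error. A small self-contained illustration:

    #include <stdlib.h>

    typedef struct { int x; } widget;

    int main(void) {
      /* Valid C with or without the cast; the cast is what also makes
         this line valid C++. */
      widget *w = (widget *)calloc(1, sizeof(*w));
      if (w == NULL) return 1;
      w->x = 42;
      free(w);
      return 0;
    }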
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
index 0c4e2a91c0..55fb1a8343 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
@@ -25,6 +25,7 @@
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
+extern grpc_tracer_flag grpc_trace_http2_stream_state;
#ifndef NDEBUG
extern grpc_tracer_flag grpc_trace_chttp2_refcount;
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.c b/src/core/ext/transport/chttp2/transport/flow_control.c
index 8dbdd1290a..569a6349d3 100644
--- a/src/core/ext/transport/chttp2/transport/flow_control.c
+++ b/src/core/ext/transport/chttp2/transport/flow_control.c
@@ -18,6 +18,7 @@
#include "src/core/ext/transport/chttp2/transport/internal.h"
+#include <limits.h>
#include <math.h>
#include <string.h>
@@ -59,24 +60,24 @@ static void pretrace(shadow_flow_control* shadow_fc,
#define TRACE_PADDING 30
-static char* fmt_int64_diff_str(int64_t old, int64_t new) {
+static char* fmt_int64_diff_str(int64_t old_val, int64_t new_val) {
char* str;
- if (old != new) {
- gpr_asprintf(&str, "%" PRId64 " -> %" PRId64 "", old, new);
+ if (old_val != new_val) {
+ gpr_asprintf(&str, "%" PRId64 " -> %" PRId64 "", old_val, new_val);
} else {
- gpr_asprintf(&str, "%" PRId64 "", old);
+ gpr_asprintf(&str, "%" PRId64 "", old_val);
}
char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
gpr_free(str);
return str_lp;
}
-static char* fmt_uint32_diff_str(uint32_t old, uint32_t new) {
+static char* fmt_uint32_diff_str(uint32_t old_val, uint32_t new_val) {
char* str;
- if (new > 0 && old != new) {
- gpr_asprintf(&str, "%" PRIu32 " -> %" PRIu32 "", old, new);
+ if (new_val > 0 && old_val != new_val) {
+ gpr_asprintf(&str, "%" PRIu32 " -> %" PRIu32 "", old_val, new_val);
} else {
- gpr_asprintf(&str, "%" PRIu32 "", old);
+ gpr_asprintf(&str, "%" PRIu32 "", old_val);
}
char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
gpr_free(str);
@@ -482,8 +483,9 @@ grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
double bw_dbl = -1;
if (grpc_bdp_estimator_get_bw(&tfc->bdp_estimator, &bw_dbl)) {
    // target the max of the BDP and the estimated bandwidth over one
    // millisecond
- int32_t frame_size =
- GPR_CLAMP(GPR_MAX((int32_t)bw_dbl / 1000, bdp), 16384, 16777215);
+ int32_t frame_size = (int32_t)GPR_CLAMP(
+ GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000, bdp), 16384,
+ 16777215);
grpc_chttp2_flowctl_urgency frame_size_urgency = delta_is_significant(
tfc, frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE);
if (frame_size_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
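
The old code cast bw_dbl straight to int32_t before clamping; a bandwidth estimate larger than INT_MAX makes that cast undefined behavior. The fix clamps in the double domain first, then clamps the result into the legal frame-size window. A standalone sketch of the two-stage clamp (the 16384..16777215 bounds mirror HTTP/2's SETTINGS_MAX_FRAME_SIZE limits):

    #include <limits.h>
    #include <stdint.h>

    #define CLAMP(a, lo, hi) ((a) < (lo) ? (lo) : (a) > (hi) ? (hi) : (a))

    static int32_t target_frame_size(double bw_bytes_per_sec, int32_t bdp) {
      /* Clamp the double into int range *before* casting... */
      int32_t bw_per_ms = (int32_t)CLAMP(bw_bytes_per_sec, 0, INT_MAX) / 1000;
      int32_t target = bw_per_ms > bdp ? bw_per_ms : bdp;
      /* ...then clamp the result into the legal frame-size window. */
      return (int32_t)CLAMP(target, 16384, 16777215);
    }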
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.c
index 222d2177b2..73aaab1802 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.c
+++ b/src/core/ext/transport/chttp2/transport/frame_data.c
@@ -210,7 +210,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
if (cur != end) {
grpc_slice_buffer_undo_take_first(
- &s->unprocessed_incoming_frames_buffer,
+ slices,
grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
}
grpc_slice_unref_internal(exec_ctx, slice);
@@ -277,7 +277,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
p->state = GRPC_CHTTP2_DATA_FH_0;
cur += p->frame_size;
grpc_slice_buffer_undo_take_first(
- &s->unprocessed_incoming_frames_buffer,
+ slices,
grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_slice_unref_internal(exec_ctx, slice);
return GRPC_ERROR_NONE;
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.c b/src/core/ext/transport/chttp2/transport/frame_goaway.c
index 4bce84f21c..78ec08e177 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.c
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.c
@@ -46,7 +46,7 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
gpr_free(p->debug_data);
p->debug_length = length - 8;
- p->debug_data = gpr_malloc(p->debug_length);
+ p->debug_data = (char *)gpr_malloc(p->debug_length);
p->debug_pos = 0;
p->state = GRPC_CHTTP2_GOAWAY_LSI0;
return GRPC_ERROR_NONE;
@@ -60,7 +60,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_goaway_parser *p = parser;
+ grpc_chttp2_goaway_parser *p = (grpc_chttp2_goaway_parser *)parser;
switch (p->state) {
case GRPC_CHTTP2_GOAWAY_LSI0:
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index 3d7c6fbfad..d431d6b2df 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -75,7 +75,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_ping_parser *p = parser;
+ grpc_chttp2_ping_parser *p = (grpc_chttp2_ping_parser *)parser;
while (p->byte != 8 && cur != end) {
p->opaque_8bytes |= (((uint64_t)*cur) << (56 - 8 * p->byte));
@@ -92,7 +92,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_allowed_ping =
gpr_time_add(t->ping_recv_state.last_ping_recv_time,
- t->ping_policy.min_ping_interval_without_data);
+ t->ping_policy.min_recv_ping_interval_without_data);
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
@@ -113,11 +113,12 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
if (!g_disable_ping_ack) {
if (t->ping_ack_count == t->ping_ack_capacity) {
t->ping_ack_capacity = GPR_MAX(t->ping_ack_capacity * 3 / 2, 3);
- t->ping_acks = gpr_realloc(
+ t->ping_acks = (uint64_t *)gpr_realloc(
t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks));
}
t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
- grpc_chttp2_initiate_write(exec_ctx, t, "ping response");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE);
}
}
}
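
The ping parser now rate-limits on min_recv_ping_interval_without_data, the interval the transport will tolerate between *received* pings, distinct from the interval it observes when sending. A sketch of the receive-side check, using plain millisecond timestamps in place of gpr_timespec (names and the strike policy shown are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
      int64_t last_ping_recv_ms;
      int64_t min_recv_ping_interval_ms;
      int ping_strikes;
      int max_ping_strikes;
    } ping_recv_state;

    /* Returns true when the peer pinged too early too often and should
       be punished, e.g. with GOAWAY(ENHANCE_YOUR_CALM). */
    static bool on_ping_received(ping_recv_state *st, int64_t now_ms) {
      if (now_ms < st->last_ping_recv_ms + st->min_recv_ping_interval_ms) {
        if (st->max_ping_strikes != 0 &&
            ++st->ping_strikes > st->max_ping_strikes) {
          return true;
        }
      } else {
        st->last_ping_recv_ms = now_ms;
      }
      return false;
    }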
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index 689dc8935c..0133b6efa2 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -77,7 +77,7 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_rst_stream_parser *p = parser;
+ grpc_chttp2_rst_stream_parser *p = (grpc_chttp2_rst_stream_parser *)parser;
while (p->byte != 4 && cur != end) {
p->reason_bytes[p->byte] = *cur;
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.c b/src/core/ext/transport/chttp2/transport/frame_settings.c
index 057d3d9ed3..2995bf7310 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.c
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.c
@@ -44,7 +44,8 @@ static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) {
return out;
}
-grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
+grpc_slice grpc_chttp2_settings_create(uint32_t *old_settings,
+ const uint32_t *new_settings,
uint32_t force_mask, size_t count) {
size_t i;
uint32_t n = 0;
@@ -52,21 +53,21 @@ grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
uint8_t *p;
for (i = 0; i < count; i++) {
- n += (new[i] != old[i] || (force_mask & (1u << i)) != 0);
+ n += (new_settings[i] != old_settings[i] || (force_mask & (1u << i)) != 0);
}
output = GRPC_SLICE_MALLOC(9 + 6 * n);
p = fill_header(GRPC_SLICE_START_PTR(output), 6 * n, 0);
for (i = 0; i < count; i++) {
- if (new[i] != old[i] || (force_mask & (1u << i)) != 0) {
+ if (new_settings[i] != old_settings[i] || (force_mask & (1u << i)) != 0) {
*p++ = (uint8_t)(grpc_setting_id_to_wire_id[i] >> 8);
*p++ = (uint8_t)(grpc_setting_id_to_wire_id[i]);
- *p++ = (uint8_t)(new[i] >> 24);
- *p++ = (uint8_t)(new[i] >> 16);
- *p++ = (uint8_t)(new[i] >> 8);
- *p++ = (uint8_t)(new[i]);
- old[i] = new[i];
+ *p++ = (uint8_t)(new_settings[i] >> 24);
+ *p++ = (uint8_t)(new_settings[i] >> 16);
+ *p++ = (uint8_t)(new_settings[i] >> 8);
+ *p++ = (uint8_t)(new_settings[i]);
+ old_settings[i] = new_settings[i];
}
}
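
settings_create serializes only the entries that changed (or are forced by the mask), each as a 2-byte identifier plus a 4-byte value, big-endian, after the 9-byte frame header. A minimal standalone encoder for one entry (the frame header and wire-id table are elided; this is an illustration, not the gRPC helper):

    #include <stdint.h>

    /* Write one SETTINGS entry: 16-bit identifier then 32-bit value,
       both big-endian, as RFC 7540 §6.5.1 specifies. */
    static uint8_t *put_setting(uint8_t *p, uint16_t id, uint32_t value) {
      *p++ = (uint8_t)(id >> 8);
      *p++ = (uint8_t)id;
      *p++ = (uint8_t)(value >> 24);
      *p++ = (uint8_t)(value >> 16);
      *p++ = (uint8_t)(value >> 8);
      *p++ = (uint8_t)value;
      return p;
    }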
@@ -111,7 +112,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last) {
- grpc_chttp2_settings_parser *parser = p;
+ grpc_chttp2_settings_parser *parser = (grpc_chttp2_settings_parser *)p;
const uint8_t *cur = GRPC_SLICE_START_PTR(slice);
const uint8_t *end = GRPC_SLICE_END_PTR(slice);
char *msg;
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.c b/src/core/ext/transport/chttp2/transport/frame_window_update.c
index 65f3b01d77..c9ab8d1b50 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.c
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.c
@@ -70,7 +70,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_window_update_parser *p = parser;
+ grpc_chttp2_window_update_parser *p =
+ (grpc_chttp2_window_update_parser *)parser;
while (p->byte != 4 && cur != end) {
p->amount |= ((uint32_t)*cur) << (8 * (3 - p->byte));
@@ -98,9 +99,10 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_chttp2_flowctl_recv_stream_update(
&t->flow_control, &s->flow_control, received_update);
if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) {
- grpc_chttp2_become_writable(
- exec_ctx, t, s, GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
- "stream.read_flow_control");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(
+ exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE);
}
}
} else {
@@ -109,7 +111,9 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
received_update);
bool is_zero = t->flow_control.remote_window <= 0;
if (was_zero && !is_zero) {
- grpc_chttp2_initiate_write(exec_ctx, t, "new_global_flow_control");
+ grpc_chttp2_initiate_write(
+ exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED);
}
}
}
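
Note that a transport-level WINDOW_UPDATE only initiates a write when the remote window transitions from exhausted to positive; updates arriving while the window is already open are absorbed without scheduling work. A sketch of that edge detection:

    #include <stdbool.h>
    #include <stdint.h>

    /* Apply an update and report whether the window just became usable,
       i.e. whether a write should now be initiated. */
    static bool apply_window_update(int64_t *remote_window, uint32_t update) {
      bool was_zero = *remote_window <= 0;
      *remote_window += update;
      bool is_zero = *remote_window <= 0;
      return was_zero && !is_zero;
    }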
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
index a0e748e7b1..a404b664e3 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
@@ -33,6 +33,7 @@
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
#include "src/core/ext/transport/chttp2/transport/hpack_table.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/transport/metadata.h"
@@ -51,8 +52,10 @@
#define MAX_DECODER_SPACE_USAGE 512
static grpc_slice_refcount terminal_slice_refcount = {NULL, NULL};
-static const grpc_slice terminal_slice = {&terminal_slice_refcount,
- .data.refcounted = {0, 0}};
+static const grpc_slice terminal_slice = {
+ &terminal_slice_refcount, /* refcount */
+ {{0, 0}} /* data.refcounted */
+};
extern grpc_tracer_flag grpc_http_trace;
@@ -269,8 +272,10 @@ static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
}
}
-static void emit_indexed(grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
+static void emit_indexed(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
framer_state *st) {
+ GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx);
uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
len);
@@ -282,30 +287,31 @@ typedef struct {
bool insert_null_before_wire_value;
} wire_value;
-static wire_value get_wire_value(grpc_mdelem elem, bool true_binary_enabled) {
+static wire_value get_wire_value(grpc_exec_ctx *exec_ctx, grpc_mdelem elem,
+ bool true_binary_enabled) {
+ wire_value wire_val;
if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
if (true_binary_enabled) {
- return (wire_value){
- .huffman_prefix = 0x00,
- .insert_null_before_wire_value = true,
- .data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)),
- };
+ GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx);
+ wire_val.huffman_prefix = 0x00;
+ wire_val.insert_null_before_wire_value = true;
+ wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
+
} else {
- return (wire_value){
- .huffman_prefix = 0x80,
- .insert_null_before_wire_value = false,
- .data = grpc_chttp2_base64_encode_and_huffman_compress(
- GRPC_MDVALUE(elem)),
- };
+ GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx);
+ wire_val.huffman_prefix = 0x80;
+ wire_val.insert_null_before_wire_value = false;
+ wire_val.data =
+ grpc_chttp2_base64_encode_and_huffman_compress(GRPC_MDVALUE(elem));
}
} else {
/* TODO(ctiller): opportunistically compress non-binary headers */
- return (wire_value){
- .huffman_prefix = 0x00,
- .insert_null_before_wire_value = false,
- .data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)),
- };
+ GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
+ wire_val.huffman_prefix = 0x00;
+ wire_val.insert_null_before_wire_value = false;
+ wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
}
+ return wire_val;
}
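
get_wire_value drops the designated-initializer compound literals in favor of plain field assignments: designated initializers are C99 and not portable C++, so this keeps the file compilable as both while the new stats counters record which encoding path each value took. For illustration (field semantics simplified):

    typedef struct {
      unsigned char huffman_prefix;
      int insert_null_before_wire_value;
    } wire_value;

    /* Field-by-field assignment: accepted by both C and C++ compilers,
       unlike (wire_value){.huffman_prefix = ...}. */
    static wire_value make_wire_value(int true_binary) {
      wire_value v;
      v.huffman_prefix = true_binary ? 0x00 : 0x80;
      v.insert_null_before_wire_value = true_binary;
      return v;
    }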
static size_t wire_value_length(wire_value v) {
@@ -317,11 +323,14 @@ static void add_wire_value(framer_state *st, wire_value v) {
add_header_data(st, v.data);
}
-static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_incidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
uint32_t key_index, grpc_mdelem elem,
framer_state *st) {
+ GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx);
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2);
- wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
+ wire_value value =
+ get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
size_t len_val = wire_value_length(value);
uint32_t len_val_len;
GPR_ASSERT(len_val <= UINT32_MAX);
@@ -333,11 +342,14 @@ static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
add_wire_value(st, value);
}
-static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_noidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
uint32_t key_index, grpc_mdelem elem,
framer_state *st) {
+ GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx);
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
- wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
+ wire_value value =
+ get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
size_t len_val = wire_value_length(value);
uint32_t len_val_len;
GPR_ASSERT(len_val <= UINT32_MAX);
@@ -349,10 +361,14 @@ static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
add_wire_value(st, value);
}
-static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
grpc_mdelem elem, framer_state *st) {
+ GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx);
+ GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
- wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
+ wire_value value =
+ get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
uint32_t len_val = (uint32_t)wire_value_length(value);
uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
@@ -367,10 +383,14 @@ static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
add_wire_value(st, value);
}
-static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_noidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
grpc_mdelem elem, framer_state *st) {
+ GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx);
+ GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
- wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
+ wire_value value =
+ get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
uint32_t len_val = (uint32_t)wire_value_length(value);
uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
@@ -423,7 +443,7 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
gpr_free(v);
}
if (!GRPC_MDELEM_IS_INTERNED(elem)) {
- emit_lithdr_noidx_v(c, elem, st);
+ emit_lithdr_noidx_v(exec_ctx, c, elem, st);
return;
}
@@ -445,16 +465,16 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem) &&
c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) {
/* HIT: complete element (first cuckoo hash) */
- emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
- st);
+ emit_indexed(exec_ctx, c,
+ dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]), st);
return;
}
if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], elem) &&
c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) {
/* HIT: complete element (second cuckoo hash) */
- emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
- st);
+ emit_indexed(exec_ctx, c,
+ dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]), st);
return;
}
@@ -472,11 +492,11 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
indices_key > c->tail_remote_index) {
/* HIT: key (first cuckoo hash) */
if (should_add_elem) {
- emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
+ emit_lithdr_incidx(exec_ctx, c, dynidx(c, indices_key), elem, st);
add_elem(exec_ctx, c, elem);
return;
} else {
- emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
+ emit_lithdr_noidx(exec_ctx, c, dynidx(c, indices_key), elem, st);
return;
}
GPR_UNREACHABLE_CODE(return );
@@ -488,11 +508,11 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
indices_key > c->tail_remote_index) {
       /* HIT: key (second cuckoo hash) */
if (should_add_elem) {
- emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
+ emit_lithdr_incidx(exec_ctx, c, dynidx(c, indices_key), elem, st);
add_elem(exec_ctx, c, elem);
return;
} else {
- emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
+ emit_lithdr_noidx(exec_ctx, c, dynidx(c, indices_key), elem, st);
return;
}
GPR_UNREACHABLE_CODE(return );
@@ -501,11 +521,11 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
/* no elem, key in the table... fall back to literal emission */
if (should_add_elem) {
- emit_lithdr_incidx_v(c, elem, st);
+ emit_lithdr_incidx_v(exec_ctx, c, elem, st);
add_elem(exec_ctx, c, elem);
return;
} else {
- emit_lithdr_noidx_v(c, elem, st);
+ emit_lithdr_noidx_v(exec_ctx, c, elem, st);
return;
}
GPR_UNREACHABLE_CODE(return );
@@ -536,7 +556,7 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) {
c->max_table_elems = c->cap_table_elems;
c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
c->table_elem_size =
- gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
+ (uint16_t *)gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
memset(c->table_elem_size, 0,
sizeof(*c->table_elem_size) * c->cap_table_elems);
for (size_t i = 0; i < GPR_ARRAY_SIZE(c->entries_keys); i++) {
@@ -564,7 +584,8 @@ void grpc_chttp2_hpack_compressor_set_max_usable_size(
}
static void rebuild_elems(grpc_chttp2_hpack_compressor *c, uint32_t new_cap) {
- uint16_t *table_elem_size = gpr_malloc(sizeof(*table_elem_size) * new_cap);
+ uint16_t *table_elem_size =
+ (uint16_t *)gpr_malloc(sizeof(*table_elem_size) * new_cap);
uint32_t i;
memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap);
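
For context on the emit_* call sites above: the encoder probes its header table at two independent hash fragments (a cuckoo-style scheme). A full-element hit emits an indexed field, a key-only hit emits a literal with an indexed key, and a double miss falls back to a fully literal header. A compact sketch of the two-probe lookup (hash derivation and table shape are illustrative):

    #include <stdint.h>

    #define TABLE_SIZE 256u

    typedef struct {
      uint64_t keys[TABLE_SIZE];
      uint32_t indices[TABLE_SIZE];
    } two_hash_table;

    /* Probe two slots derived from one hash; 0 means miss, in which
       case the caller emits a literal header instead. */
    static uint32_t probe(const two_hash_table *t, uint64_t key_hash) {
      uint32_t slot1 = (uint32_t)(key_hash % TABLE_SIZE);
      uint32_t slot2 = (uint32_t)((key_hash >> 32) % TABLE_SIZE);
      if (t->keys[slot1] == key_hash) return t->indices[slot1]; /* 1st hash */
      if (t->keys[slot2] == key_hash) return t->indices[slot2]; /* 2nd hash */
      return 0;
    }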
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 7f37365558..3d1df19bc3 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -30,6 +30,7 @@
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
@@ -777,8 +778,7 @@ static grpc_error *parse_stream_dep0(grpc_exec_ctx *exec_ctx,
return parse_stream_dep1(exec_ctx, p, cur + 1, end);
}
-/* emit an indexed field; for now just logs it to console; jumps to
- begin the next field on completion */
+/* emit an indexed field; jumps to begin the next field on completion */
static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
@@ -792,6 +792,7 @@ static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
GRPC_MDELEM_REF(md);
+ GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx);
grpc_error *err = on_hdr(exec_ctx, p, md, 0);
if (err != GRPC_ERROR_NONE) return err;
return parse_begin(exec_ctx, p, cur, end);
@@ -820,14 +821,14 @@ static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
return parse_value0(exec_ctx, p, cur + 1, end);
}
-/* finish a literal header with incremental indexing: just log, and jump to '
- begin */
+/* finish a literal header with incremental indexing */
static grpc_error *finish_lithdr_incidx(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
@@ -842,6 +843,7 @@ static grpc_error *finish_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
@@ -898,6 +900,7 @@ static grpc_error *finish_lithdr_notidx(grpc_exec_ctx *exec_ctx,
const uint8_t *end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
@@ -912,6 +915,7 @@ static grpc_error *finish_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
@@ -968,6 +972,7 @@ static grpc_error *finish_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
const uint8_t *end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
@@ -982,6 +987,7 @@ static grpc_error *finish_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
@@ -1284,7 +1290,7 @@ static void append_bytes(grpc_chttp2_hpack_parser_string *str,
GPR_ASSERT(str->data.copied.length + length <= UINT32_MAX);
str->data.copied.capacity = (uint32_t)(str->data.copied.length + length);
str->data.copied.str =
- gpr_realloc(str->data.copied.str, str->data.copied.capacity);
+ (char *)gpr_realloc(str->data.copied.str, str->data.copied.capacity);
}
memcpy(str->data.copied.str + str->data.copied.length, data, length);
GPR_ASSERT(length <= UINT32_MAX - str->data.copied.length);
@@ -1310,9 +1316,11 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
/* 'true-binary' case */
++cur;
p->binary = NOT_BINARY;
+ GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx);
append_bytes(str, cur, (size_t)(end - cur));
return GRPC_ERROR_NONE;
}
+ GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx);
/* fallthrough */
b64_byte0:
case B64_BYTE0:
@@ -1510,6 +1518,7 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser_string *str) {
if (!p->huff && binary == NOT_BINARY && (end - cur) >= (intptr_t)p->strlen &&
p->current_slice_refcount != NULL) {
+ GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx);
str->copied = false;
str->data.referenced.refcount = p->current_slice_refcount;
str->data.referenced.data.refcounted.bytes = (uint8_t *)cur;
@@ -1523,6 +1532,20 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
p->parsing.str = str;
p->huff_state = 0;
p->binary = binary;
+ switch (p->binary) {
+ case NOT_BINARY:
+ if (p->huff) {
+ GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx);
+ } else {
+ GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx);
+ }
+ break;
+ case BINARY_BEGIN:
+ /* stats incremented later: we don't yet know if it's true-binary */
+ break;
+ default:
+ abort();
+ }
return parse_string(exec_ctx, p, cur, end);
}
@@ -1643,24 +1666,38 @@ static const maybe_complete_func_type maybe_complete_funcs[] = {
static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
- grpc_chttp2_stream *s = sp;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
grpc_chttp2_transport *t = s->t;
if (!s->write_closed) {
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
- grpc_chttp2_initiate_write(exec_ctx, t, "force_rst_stream");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM);
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
}
+static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ grpc_metadata_batch *initial_metadata) {
+ if (initial_metadata->idx.named.content_encoding == NULL ||
+ grpc_stream_compression_method_parse(
+ GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), false,
+ &s->stream_decompression_method) == 0) {
+ s->stream_decompression_method =
+ GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
+ }
+}
+
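
parse_stream_compression_md defaults to identity decompression whenever the content-encoding element is absent or fails to parse, so an unknown encoding degrades to pass-through rather than a stream error. A sketch of that fallback shape (method names and the string comparison are illustrative; the real code parses a grpc_slice):

    #include <string.h>

    typedef enum { DECOMPRESS_IDENTITY, DECOMPRESS_GZIP } decompress_method;

    static decompress_method pick_method(const char *content_encoding) {
      if (content_encoding != NULL &&
          strcmp(content_encoding, "gzip") == 0) {
        return DECOMPRESS_GZIP;
      }
      return DECOMPRESS_IDENTITY;  /* absent or unknown: pass through */
    }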
grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last) {
- grpc_chttp2_hpack_parser *parser = hpack_parser;
+ grpc_chttp2_hpack_parser *parser = (grpc_chttp2_hpack_parser *)hpack_parser;
GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
if (s != NULL) {
s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);
@@ -1681,9 +1718,16 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
if (s != NULL) {
if (parser->is_boundary) {
if (s->header_frames_received == GPR_ARRAY_SIZE(s->metadata_buffer)) {
+ GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Too many trailer frames");
}
+ /* Process stream compression md element if it exists */
+ if (s->header_frames_received == 0) { /* only on initial metadata */
+ parse_stream_compression_md(exec_ctx, t, s,
+ &s->metadata_buffer[0].batch);
+ }
s->published_metadata[s->header_frames_received] =
GRPC_METADATA_PUBLISHED_FROM_WIRE;
maybe_complete_funcs[s->header_frames_received](exec_ctx, t, s);
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.c b/src/core/ext/transport/chttp2/transport/hpack_table.c
index 944d778011..bbd135a318 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.c
@@ -173,7 +173,7 @@ void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE;
tbl->max_entries = tbl->cap_entries =
entries_for_bytes(tbl->current_table_bytes);
- tbl->ents = gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
+ tbl->ents = (grpc_mdelem *)gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries);
for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
tbl->static_ents[i - 1] = grpc_mdelem_from_slices(
@@ -228,7 +228,7 @@ static void evict1(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
}
static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) {
- grpc_mdelem *ents = gpr_malloc(sizeof(*ents) * new_cap);
+ grpc_mdelem *ents = (grpc_mdelem *)gpr_malloc(sizeof(*ents) * new_cap);
uint32_t i;
for (i = 0; i < tbl->num_ents; i++) {
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.c b/src/core/ext/transport/chttp2/transport/incoming_metadata.c
index cf0a9ca920..ba680a89db 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.c
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.c
@@ -42,8 +42,9 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
grpc_mdelem elem) {
buffer->size += GRPC_MDELEM_LENGTH(elem);
return grpc_metadata_batch_add_tail(
- exec_ctx, &buffer->batch,
- gpr_arena_alloc(buffer->arena, sizeof(grpc_linked_mdelem)), elem);
+ exec_ctx, &buffer->batch, (grpc_linked_mdelem *)gpr_arena_alloc(
+ buffer->arena, sizeof(grpc_linked_mdelem)),
+ elem);
}
grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 3c41a8958f..49022155aa 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -79,16 +79,43 @@ typedef enum {
GRPC_CHTTP2_PCL_COUNT /* must be last */
} grpc_chttp2_ping_closure_list;
+typedef enum {
+ GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE,
+ GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA,
+ GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING,
+ GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS,
+ GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT,
+ GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM,
+ GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API,
+ GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
+ GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS,
+ GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING,
+ GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING,
+ GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
+ GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING,
+ GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING,
+ GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED,
+ GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE,
+ GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM,
+} grpc_chttp2_initiate_write_reason;
+
+const char *grpc_chttp2_initiate_write_reason_string(
+ grpc_chttp2_initiate_write_reason reason);
+
typedef struct {
grpc_closure_list lists[GRPC_CHTTP2_PCL_COUNT];
uint64_t inflight_id;
} grpc_chttp2_ping_queue;
typedef struct {
- gpr_timespec min_time_between_pings;
int max_pings_without_data;
int max_ping_strikes;
- gpr_timespec min_ping_interval_without_data;
+ gpr_timespec min_sent_ping_interval_without_data;
+ gpr_timespec min_recv_ping_interval_without_data;
} grpc_chttp2_repeated_ping_policy;
typedef struct {
@@ -262,6 +289,10 @@ struct grpc_chttp2_transport {
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
+ /** is this the first write in a series of writes?
+ set when we initiate writing from idle, cleared when we
+ initiate writing from writing+more */
+ bool is_first_write_in_batch;
/** is the transport destroying itself? */
uint8_t destroying;
@@ -483,6 +514,7 @@ struct grpc_chttp2_stream {
grpc_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
+ int64_t flow_controlled_bytes_flowed;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
@@ -509,6 +541,8 @@ struct grpc_chttp2_stream {
/** Are we buffering writes on this stream? If yes, we won't become writable
until there's enough queued up in the flow_controlled_buffer */
bool write_buffering;
+ /** Has trailing metadata been received? */
+ bool received_trailing_metadata;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
@@ -553,29 +587,32 @@ struct grpc_chttp2_stream {
grpc_slice_buffer flow_controlled_buffer;
+ grpc_chttp2_write_cb *on_flow_controlled_cbs;
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
- /** Whether stream compression send is enabled */
- bool stream_compression_recv_enabled;
- /** Whether stream compression recv is enabled */
- bool stream_compression_send_enabled;
- /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed
- */
- bool unprocessed_incoming_frames_decompressed;
+ /* Stream compression method to be used. */
+ grpc_stream_compression_method stream_compression_method;
+ /* Stream decompression method to be used. */
+ grpc_stream_compression_method stream_decompression_method;
/** Stream compression decompress context */
grpc_stream_compression_context *stream_decompression_ctx;
/** Stream compression compress context */
grpc_stream_compression_context *stream_compression_ctx;
/** Buffer storing data that is compressed but not sent */
- grpc_slice_buffer *compressed_data_buffer;
+ grpc_slice_buffer compressed_data_buffer;
/** Amount of uncompressed bytes sent out when compressed_data_buffer is
* emptied */
size_t uncompressed_data_size;
/** Temporary buffer storing decompressed data */
- grpc_slice_buffer *decompressed_data_buffer;
+ grpc_slice_buffer decompressed_data_buffer;
+ /** Whether the bytes stored in unprocessed_incoming_frames_buffer are
+ decompressed */
+ bool unprocessed_incoming_frames_decompressed;
+ /** gRPC header bytes that are already decompressed */
+ size_t decompressed_header_bytes;
};
/** Transport writing call flow:
@@ -591,12 +628,16 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, const char *reason);
+ grpc_chttp2_transport *t,
+ grpc_chttp2_initiate_write_reason reason);
-typedef enum {
- GRPC_CHTTP2_NOTHING_TO_WRITE,
- GRPC_CHTTP2_PARTIAL_WRITE,
- GRPC_CHTTP2_FULL_WRITE,
+typedef struct {
+ /** are we writing? */
+ bool writing;
+ /** if writing: was it a complete flush (false) or a partial flush (true) */
+ bool partial;
+ /** did we queue any completions as part of beginning the write */
+ bool early_results_scheduled;
} grpc_chttp2_begin_write_result;
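
begin_write's tri-state enum becomes a struct of independent booleans: whether a write happened, whether it was partial, and whether completions were scheduled early vary independently, and packing them into one enum forced callers to decode combinations. A sketch of how a caller branches on the new shape (state names match the transport's write states; the function is illustrative):

    #include <stdbool.h>

    typedef struct {
      bool writing;
      bool partial;
      bool early_results_scheduled;
    } begin_write_result;

    static const char *next_write_state(begin_write_result r) {
      if (!r.writing) return "IDLE";
      /* a partial flush means more work is already queued */
      return r.partial ? "WRITING_WITH_MORE" : "WRITING";
    }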
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
@@ -838,22 +879,11 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
-typedef enum {
- /* don't initiate a transport write, but piggyback on the next one */
- GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK,
- /* initiate a covered write */
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- /* initiate an uncovered write */
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED
-} grpc_chttp2_stream_write_type;
-
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
-void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
- grpc_chttp2_stream_write_type type,
- const char *reason);
+void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 18d163ee98..3db1ad4123 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -106,7 +106,8 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
return err;
}
++cur;
- ++t->deframe_state;
+ t->deframe_state =
+ (grpc_chttp2_deframe_transport_state)(1 + (int)t->deframe_state);
}
if (cur == end) {
return GRPC_ERROR_NONE;
@@ -382,6 +383,9 @@ error_handler:
t->parser = grpc_chttp2_data_parser_parse;
t->parser_data = &s->data_parser;
+ t->ping_state.pings_before_data_required =
+ t->ping_policy.max_pings_without_data;
+ t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
return GRPC_ERROR_NONE;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
/* handle stream errors by closing the stream */
@@ -402,7 +406,7 @@ static void free_timeout(void *p) { gpr_free(p); }
static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdelem md) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_initial_header", 0);
@@ -426,11 +430,12 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
}
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) {
- gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
+ gpr_timespec *cached_timeout =
+ (gpr_timespec *)grpc_mdelem_get_user_data(md, free_timeout);
gpr_timespec timeout;
if (cached_timeout == NULL) {
/* not already parsed: parse it now, and store the result away */
- cached_timeout = gpr_malloc(sizeof(gpr_timespec));
+ cached_timeout = (gpr_timespec *)gpr_malloc(sizeof(gpr_timespec));
if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
@@ -482,7 +487,7 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdelem md) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_trailing_header", 0);
@@ -557,6 +562,10 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
}
+ t->ping_state.pings_before_data_required =
+ t->ping_policy.max_pings_without_data;
+ t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+
/* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (s == NULL) {
@@ -623,6 +632,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
*s->trailing_metadata_available = true;
}
t->hpack_parser.on_header = on_trailing_header;
+ s->received_trailing_metadata = true;
} else {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
t->hpack_parser.on_header = on_initial_header;
@@ -631,6 +641,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
case 1:
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
t->hpack_parser.on_header = on_trailing_header;
+ s->received_trailing_metadata = true;
break;
case 2:
gpr_log(GPR_ERROR, "too many header frames received");
diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c
index 7cc85dea9c..47cd22d177 100644
--- a/src/core/ext/transport/chttp2/transport/stream_lists.c
+++ b/src/core/ext/transport/chttp2/transport/stream_lists.c
@@ -20,6 +20,27 @@
#include <grpc/support/log.h>
+static const char *stream_list_id_string(grpc_chttp2_stream_list_id id) {
+ switch (id) {
+ case GRPC_CHTTP2_LIST_WRITABLE:
+ return "writable";
+ case GRPC_CHTTP2_LIST_WRITING:
+ return "writing";
+ case GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT:
+ return "stalled_by_transport";
+ case GRPC_CHTTP2_LIST_STALLED_BY_STREAM:
+ return "stalled_by_stream";
+ case GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY:
+ return "waiting_for_concurrency";
+ case STREAM_LIST_COUNT:
+ GPR_UNREACHABLE_CODE(return "unknown");
+ }
+ GPR_UNREACHABLE_CODE(return "unknown");
+}
+
+grpc_tracer_flag grpc_trace_http2_stream_state =
+ GRPC_TRACER_INITIALIZER(false, "http2_stream_state");
+
/* core list management */
static bool stream_list_empty(grpc_chttp2_transport *t,
@@ -44,6 +65,10 @@ static bool stream_list_pop(grpc_chttp2_transport *t,
s->included[id] = 0;
}
*stream = s;
+ if (s && GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+ gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id,
+ t->is_client ? "cli" : "svr", stream_list_id_string(id));
+ }
return s != 0;
}
@@ -62,6 +87,10 @@ static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
} else {
t->lists[id].tail = s->links[id].prev;
}
+ if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+ gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id,
+ t->is_client ? "cli" : "svr", stream_list_id_string(id));
+ }
}
static bool stream_list_maybe_remove(grpc_chttp2_transport *t,
@@ -90,6 +119,10 @@ static void stream_list_add_tail(grpc_chttp2_transport *t,
}
t->lists[id].tail = s;
s->included[id] = 1;
+ if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+ gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id,
+ t->is_client ? "cli" : "svr", stream_list_id_string(id));
+ }
}
static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
@@ -150,17 +183,12 @@ void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t,
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "stream %u stalled by transport", s->id));
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
- bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
- GRPC_FLOW_CONTROL_IF_TRACING(if (ret) gpr_log(
- GPR_DEBUG, "stream %u un-stalled by transport", (*s)->id));
- return ret;
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
@@ -170,23 +198,15 @@ void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "stream %u stalled by stream", s->id));
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
- bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
- GRPC_FLOW_CONTROL_IF_TRACING(
- if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", (*s)->id));
- return ret;
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
- bool ret = stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
- GRPC_FLOW_CONTROL_IF_TRACING(
- if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", s->id));
- return ret;
+ return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.c b/src/core/ext/transport/chttp2/transport/stream_map.c
index e2f10bc208..d6079a9a33 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.c
+++ b/src/core/ext/transport/chttp2/transport/stream_map.c
@@ -27,8 +27,8 @@
void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map *map,
size_t initial_capacity) {
GPR_ASSERT(initial_capacity > 1);
- map->keys = gpr_malloc(sizeof(uint32_t) * initial_capacity);
- map->values = gpr_malloc(sizeof(void *) * initial_capacity);
+ map->keys = (uint32_t *)gpr_malloc(sizeof(uint32_t) * initial_capacity);
+ map->values = (void **)gpr_malloc(sizeof(void *) * initial_capacity);
map->count = 0;
map->free = 0;
map->capacity = initial_capacity;
@@ -72,8 +72,10 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
/* resize when less than 25% of the table is free, because compaction
won't help much */
map->capacity = capacity = 3 * capacity / 2;
- map->keys = keys = gpr_realloc(keys, capacity * sizeof(uint32_t));
- map->values = values = gpr_realloc(values, capacity * sizeof(void *));
+ map->keys = keys =
+ (uint32_t *)gpr_realloc(keys, capacity * sizeof(uint32_t));
+ map->values = values =
+ (void **)gpr_realloc(values, capacity * sizeof(void *));
}
}
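The stream-map change above keeps the existing growth policy: when less than a quarter of the table is free, capacity grows by 3/2. A standalone arithmetic sketch of that policy (illustrative only, not gRPC code):

    #include <stddef.h>
    #include <stdio.h>

    /* Shows how a map seeded at capacity 8 grows under the 3/2 policy
       used in grpc_chttp2_stream_map_add above. */
    int main(void) {
      size_t capacity = 8;
      for (int i = 0; i < 5; i++) {
        printf("capacity: %zu\n", capacity);
        capacity = 3 * capacity / 2; /* 8 -> 12 -> 18 -> 27 -> 40 */
      }
      return 0;
    }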
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index 410c154206..be1af16019 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -22,6 +22,7 @@
#include <grpc/support/log.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/http2_errors.h"
@@ -67,7 +68,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
}
if (t->ping_state.pings_before_data_required == 0 &&
t->ping_policy.max_pings_without_data != 0) {
- /* need to send something of substance before sending a ping again */
+ /* need to receive something of substance before sending a ping again */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d",
@@ -77,11 +78,18 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
return;
}
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec elapsed = gpr_time_sub(now, t->ping_state.last_ping_sent_time);
- /*gpr_log(GPR_DEBUG, "elapsed:%d.%09d min:%d.%09d", (int)elapsed.tv_sec,
- elapsed.tv_nsec, (int)t->ping_policy.min_time_between_pings.tv_sec,
- (int)t->ping_policy.min_time_between_pings.tv_nsec);*/
- if (gpr_time_cmp(elapsed, t->ping_policy.min_time_between_pings) < 0) {
+ gpr_timespec next_allowed_ping =
+ gpr_time_add(t->ping_state.last_ping_sent_time,
+ t->ping_policy.min_sent_ping_interval_without_data);
+ if (t->keepalive_permit_without_calls == 0 &&
+ grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ next_allowed_ping = gpr_time_add(t->ping_recv_state.last_ping_recv_time,
+ gpr_time_from_seconds(7200, GPR_TIMESPAN));
+ }
+ /* gpr_log(GPR_DEBUG, "next_allowed_ping:%d.%09d now:%d.%09d",
+ (int)next_allowed_ping.tv_sec, (int)next_allowed_ping.tv_nsec,
+ (int)now.tv_sec, (int)now.tv_nsec); */
+ if (gpr_time_cmp(next_allowed_ping, now) > 0) {
/* not enough elapsed time between successive pings */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
@@ -92,9 +100,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
if (!t->ping_state.is_delayed_ping_timer_set) {
t->ping_state.is_delayed_ping_timer_set = true;
grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer,
- gpr_time_add(t->ping_state.last_ping_sent_time,
- t->ping_policy.min_time_between_pings),
- &t->retry_initiate_ping_locked,
+ next_allowed_ping, &t->retry_initiate_ping_locked,
gpr_now(GPR_CLOCK_MONOTONIC));
}
return;
@@ -116,20 +122,30 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
grpc_slice_buffer_add(&t->outbuf,
grpc_chttp2_ping_create(false, pq->inflight_id));
+ GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx);
t->ping_state.last_ping_sent_time = now;
+ if (GRPC_TRACER_ON(grpc_http_trace) ||
+ GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "Ping sent [%p]: %d/%d", t->peer_string,
+ t->ping_state.pings_before_data_required,
+ t->ping_policy.max_pings_without_data);
+ }
t->ping_state.pings_before_data_required -=
(t->ping_state.pings_before_data_required != 0);
}
-static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int64_t send_bytes,
- grpc_chttp2_write_cb **list, grpc_error *error) {
+ grpc_chttp2_write_cb **list, int64_t *ctr,
+ grpc_error *error) {
+ bool sched_any = false;
grpc_chttp2_write_cb *cb = *list;
*list = NULL;
- s->flow_controlled_bytes_written += send_bytes;
+ *ctr += send_bytes;
while (cb) {
grpc_chttp2_write_cb *next = cb->next;
- if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
+ if (cb->call_at_byte <= *ctr) {
+ sched_any = true;
finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
} else {
add_to_write_list(list, cb);
@@ -137,6 +153,7 @@ static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
cb = next;
}
GRPC_ERROR_UNREF(error);
+ return sched_any;
}
static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
@@ -154,23 +171,23 @@ static uint32_t target_write_size(grpc_chttp2_transport *t) {
}
// Returns true if initial_metadata contains only default headers.
-//
-// TODO(roth): The fact that we hard-code these particular headers here
-// is fairly ugly. Need some better way to know which headers are
-// default, maybe via a bit in the static metadata table?
static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) {
- int num_default_fields =
- (initial_metadata->idx.named.status != NULL) +
- (initial_metadata->idx.named.content_type != NULL) +
- (initial_metadata->idx.named.grpc_encoding != NULL) +
- (initial_metadata->idx.named.grpc_accept_encoding != NULL);
- return (size_t)num_default_fields == initial_metadata->list.count;
+ return initial_metadata->list.default_count == initial_metadata->list.count;
}
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
+ /* stats histogram counters: we increment these throughout this function,
+ and at the end publish to the central stats histograms */
+ int flow_control_writes = 0;
+ int initial_metadata_writes = 0;
+ int trailing_metadata_writes = 0;
+ int message_writes = 0;
+
+ GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx);
+
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
if (t->dirtied_local_settings && !t->sent_local_settings) {
@@ -182,6 +199,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
t->force_send_settings = 0;
t->dirtied_local_settings = 0;
t->sent_local_settings = 1;
+ GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
}
/* simple writes are queued to qbuf, and flushed here */
@@ -194,20 +212,19 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (t->flow_control.remote_window > 0) {
while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
- if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s) &&
- stream_ref_if_not_destroyed(&s->refcount->refs)) {
- grpc_chttp2_initiate_write(exec_ctx, t, "transport.read_flow_control");
+ if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
+ stream_ref_if_not_destroyed(&s->refcount->refs);
}
}
}
- bool partial_write = false;
+ grpc_chttp2_begin_write_result result = {false, false, false};
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (true) {
if (t->outbuf.length > target_write_size(t)) {
- partial_write = true;
+ result.partial = true;
break;
}
@@ -252,13 +269,12 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
s->send_initial_metadata, &hopt, &t->outbuf);
now_writing = true;
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
}
+ initial_metadata_writes++;
} else {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
@@ -274,10 +290,15 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
[num_extra_headers_for_trailing_metadata++] =
&s->send_initial_metadata->idx.named.content_type->md;
}
+ trailing_metadata_writes++;
}
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
+ result.early_results_scheduled = true;
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_NONE,
+ "send_initial_metadata_finished");
}
/* send any window updates */
uint32_t stream_announce = grpc_chttp2_flowctl_maybe_send_stream_update(
@@ -286,19 +307,17 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_window_update_create(s->id, stream_announce,
&s->stats.outgoing));
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
}
+ flow_control_writes++;
}
if (sent_initial_metadata) {
/* send any body bytes, if allowed by flow control */
if (s->flow_controlled_buffer.length > 0 ||
- (s->stream_compression_send_enabled &&
- s->compressed_data_buffer->length > 0)) {
+ s->compressed_data_buffer.length > 0) {
uint32_t stream_remote_window = (uint32_t)GPR_MAX(
0,
s->flow_control.remote_window_delta +
@@ -311,77 +330,61 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (max_outgoing > 0) {
bool is_last_data_frame = false;
bool is_last_frame = false;
- if (s->stream_compression_send_enabled) {
- while ((s->flow_controlled_buffer.length > 0 ||
- s->compressed_data_buffer->length > 0) &&
- max_outgoing > 0) {
- if (s->compressed_data_buffer->length > 0) {
- uint32_t send_bytes = (uint32_t)GPR_MIN(
- max_outgoing, s->compressed_data_buffer->length);
- is_last_data_frame =
- (send_bytes == s->compressed_data_buffer->length &&
- s->flow_controlled_buffer.length == 0 &&
- s->fetching_send_message == NULL);
- if (is_last_data_frame && s->send_trailing_metadata != NULL &&
- s->stream_compression_ctx != NULL) {
- if (!grpc_stream_compress(
- s->stream_compression_ctx, &s->flow_controlled_buffer,
- s->compressed_data_buffer, NULL, MAX_SIZE_T,
- GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) {
- gpr_log(GPR_ERROR, "Stream compression failed.");
- }
- grpc_stream_compression_context_destroy(
- s->stream_compression_ctx);
- s->stream_compression_ctx = NULL;
- /* After finish, bytes in s->compressed_data_buffer may be
- * more than max_outgoing. Start another round of the current
- * while loop so that send_bytes and is_last_data_frame are
- * recalculated. */
- continue;
- }
- is_last_frame =
- is_last_data_frame && s->send_trailing_metadata != NULL &&
- grpc_metadata_batch_is_empty(s->send_trailing_metadata);
- grpc_chttp2_encode_data(s->id, s->compressed_data_buffer,
- send_bytes, is_last_frame,
- &s->stats.outgoing, &t->outbuf);
- grpc_chttp2_flowctl_sent_data(&t->flow_control,
- &s->flow_control, send_bytes);
- max_outgoing -= send_bytes;
- if (s->compressed_data_buffer->length == 0) {
- s->sending_bytes += s->uncompressed_data_size;
+ size_t sending_bytes_before = s->sending_bytes;
+ while ((s->flow_controlled_buffer.length > 0 ||
+ s->compressed_data_buffer.length > 0) &&
+ max_outgoing > 0) {
+ if (s->compressed_data_buffer.length > 0) {
+ uint32_t send_bytes = (uint32_t)GPR_MIN(
+ max_outgoing, s->compressed_data_buffer.length);
+ is_last_data_frame =
+ (send_bytes == s->compressed_data_buffer.length &&
+ s->flow_controlled_buffer.length == 0 &&
+ s->fetching_send_message == NULL);
+ if (is_last_data_frame && s->send_trailing_metadata != NULL &&
+ s->stream_compression_ctx != NULL) {
+ if (!grpc_stream_compress(
+ s->stream_compression_ctx, &s->flow_controlled_buffer,
+ &s->compressed_data_buffer, NULL, MAX_SIZE_T,
+ GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) {
+ gpr_log(GPR_ERROR, "Stream compression failed.");
}
- } else {
- if (s->stream_compression_ctx == NULL) {
- s->stream_compression_ctx =
- grpc_stream_compression_context_create(
- GRPC_STREAM_COMPRESSION_COMPRESS);
- }
- s->uncompressed_data_size = s->flow_controlled_buffer.length;
- GPR_ASSERT(grpc_stream_compress(
- s->stream_compression_ctx, &s->flow_controlled_buffer,
- s->compressed_data_buffer, NULL, MAX_SIZE_T,
- GRPC_STREAM_COMPRESSION_FLUSH_SYNC));
- GPR_ASSERT(s->compressed_data_buffer->length > 0);
+ grpc_stream_compression_context_destroy(
+ s->stream_compression_ctx);
+ s->stream_compression_ctx = NULL;
+ /* After finish, bytes in s->compressed_data_buffer may be
+ * more than max_outgoing. Start another round of the current
+ * while loop so that send_bytes and is_last_data_frame are
+ * recalculated. */
+ continue;
+ }
+ is_last_frame =
+ is_last_data_frame && s->send_trailing_metadata != NULL &&
+ grpc_metadata_batch_is_empty(s->send_trailing_metadata);
+ grpc_chttp2_encode_data(s->id, &s->compressed_data_buffer,
+ send_bytes, is_last_frame,
+ &s->stats.outgoing, &t->outbuf);
+ grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control,
+ send_bytes);
+ max_outgoing -= send_bytes;
+ if (s->compressed_data_buffer.length == 0) {
+ s->sending_bytes += s->uncompressed_data_size;
+ }
+ } else {
+ if (s->stream_compression_ctx == NULL) {
+ s->stream_compression_ctx =
+ grpc_stream_compression_context_create(
+ s->stream_compression_method);
+ }
+ s->uncompressed_data_size = s->flow_controlled_buffer.length;
+ if (!grpc_stream_compress(
+ s->stream_compression_ctx, &s->flow_controlled_buffer,
+ &s->compressed_data_buffer, NULL, MAX_SIZE_T,
+ GRPC_STREAM_COMPRESSION_FLUSH_SYNC)) {
+ gpr_log(GPR_ERROR, "Stream compression failed.");
}
}
- } else {
- uint32_t send_bytes = (uint32_t)GPR_MIN(
- max_outgoing, s->flow_controlled_buffer.length);
- is_last_data_frame = s->fetching_send_message == NULL &&
- send_bytes == s->flow_controlled_buffer.length;
- is_last_frame =
- is_last_data_frame && s->send_trailing_metadata != NULL &&
- grpc_metadata_batch_is_empty(s->send_trailing_metadata);
- grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer,
- send_bytes, is_last_frame,
- &s->stats.outgoing, &t->outbuf);
- grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control,
- send_bytes);
- s->sending_bytes += send_bytes;
}
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -395,14 +398,21 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
}
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
+ GRPC_ERROR_NONE);
}
+ result.early_results_scheduled |=
+ update_list(exec_ctx, t, s,
+ (int64_t)(s->sending_bytes - sending_bytes_before),
+ &s->on_flow_controlled_cbs,
+ &s->flow_controlled_bytes_flowed, GRPC_ERROR_NONE);
now_writing = true;
if (s->flow_controlled_buffer.length > 0 ||
- (s->stream_compression_send_enabled &&
- s->compressed_data_buffer->length > 0)) {
+ s->compressed_data_buffer.length > 0) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
+ message_writes++;
} else if (t->flow_control.remote_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
@@ -414,8 +424,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (s->send_trailing_metadata != NULL &&
s->fetching_send_message == NULL &&
s->flow_controlled_buffer.length == 0 &&
- (!s->stream_compression_send_enabled ||
- s->compressed_data_buffer->length == 0)) {
+ s->compressed_data_buffer.length == 0) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
@@ -438,6 +447,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
num_extra_headers_for_trailing_metadata,
s->send_trailing_metadata, &hopt,
&t->outbuf);
+ trailing_metadata_writes++;
}
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
@@ -446,11 +456,25 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
&t->outbuf, grpc_chttp2_rst_stream_create(
s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing));
}
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
+ GRPC_ERROR_NONE);
now_writing = true;
+ result.early_results_scheduled = true;
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->send_trailing_metadata_finished,
+ GRPC_ERROR_NONE, "send_trailing_metadata_finished");
}
}
if (now_writing) {
+ GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
+ exec_ctx, initial_metadata_writes);
+ GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes);
+ GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(
+ exec_ctx, trailing_metadata_writes);
+ GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx,
+ flow_control_writes);
+
if (!grpc_chttp2_list_add_writing_stream(t, s)) {
/* already in writing list: drop ref */
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
@@ -469,8 +493,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
&throwaway_stats));
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -488,9 +510,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
- return t->outbuf.count > 0 ? (partial_write ? GRPC_CHTTP2_PARTIAL_WRITE
- : GRPC_CHTTP2_FULL_WRITE)
- : GRPC_CHTTP2_NOTHING_TO_WRITE;
+ result.writing = t->outbuf.count > 0;
+ return result;
}
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -499,23 +520,12 @@ void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
- if (s->sent_initial_metadata) {
- grpc_chttp2_complete_closure_step(
- exec_ctx, t, s, &s->send_initial_metadata_finished,
- GRPC_ERROR_REF(error), "send_initial_metadata_finished");
- }
if (s->sending_bytes != 0) {
update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
- &s->on_write_finished_cbs, GRPC_ERROR_REF(error));
+ &s->on_write_finished_cbs, &s->flow_controlled_bytes_written,
+ GRPC_ERROR_REF(error));
s->sending_bytes = 0;
}
- if (s->sent_trailing_metadata) {
- grpc_chttp2_complete_closure_step(
- exec_ctx, t, s, &s->send_trailing_metadata_finished,
- GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
- grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
- GRPC_ERROR_REF(error));
- }
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
}
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &t->outbuf);
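The writing.c hunks above replace the old elapsed-time check with an absolute next_allowed_ping deadline. A condensed, self-contained sketch of that decision using the public gpr time helpers; the parameters are hypothetical stand-ins for fields of grpc_chttp2_transport:

    #include <stdbool.h>
    #include <stddef.h>
    #include <grpc/support/time.h>

    /* Sketch: may a ping be sent now? Mirrors maybe_initiate_ping above: the
       next ping is allowed min_sent_ping_interval_without_data after the last
       sent ping, or 7200s after the last received ping when there are no
       active streams and keepalives are not permitted without calls. */
    static bool may_send_ping_example(gpr_timespec last_ping_sent_time,
                                      gpr_timespec last_ping_recv_time,
                                      gpr_timespec min_sent_interval,
                                      bool keepalive_permit_without_calls,
                                      size_t active_streams) {
      gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
      gpr_timespec next_allowed =
          gpr_time_add(last_ping_sent_time, min_sent_interval);
      if (!keepalive_permit_without_calls && active_streams == 0) {
        next_allowed = gpr_time_add(last_ping_recv_time,
                                    gpr_time_from_seconds(7200, GPR_TIMESPAN));
      }
      return gpr_time_cmp(next_allowed, now) <= 0;
    }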
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index abb558982b..587a3b83b5 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -187,9 +187,34 @@ struct stream_obj {
/* Mutex to protect storage */
gpr_mu mu;
+
+ /* Refcount object of the stream */
+ grpc_stream_refcount *refcount;
};
typedef struct stream_obj stream_obj;
+#ifndef NDEBUG
+#define GRPC_CRONET_STREAM_REF(stream, reason) \
+ grpc_cronet_stream_ref((stream), (reason))
+#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \
+ grpc_cronet_stream_unref((exec_ctx), (stream), (reason))
+void grpc_cronet_stream_ref(stream_obj *s, const char *reason) {
+ grpc_stream_ref(s->refcount, reason);
+}
+void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s,
+ const char *reason) {
+ grpc_stream_unref(exec_ctx, s->refcount, reason);
+}
+#else
+#define GRPC_CRONET_STREAM_REF(stream, reason) grpc_cronet_stream_ref((stream))
+#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \
+ grpc_cronet_stream_unref((exec_ctx), (stream))
+void grpc_cronet_stream_ref(stream_obj *s) { grpc_stream_ref(s->refcount); }
+void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s) {
+ grpc_stream_unref(exec_ctx, s->refcount);
+}
+#endif
+
static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
struct op_and_state *oas);
@@ -346,13 +371,12 @@ static void remove_from_storage(struct stream_obj *s,
This can get executed from the Cronet network thread via cronet callback
or on the application supplied thread via the perform_stream_op function.
*/
-static void execute_from_storage(stream_obj *s) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+static void execute_from_storage(grpc_exec_ctx *exec_ctx, stream_obj *s) {
gpr_mu_lock(&s->mu);
for (struct op_and_state *curr = s->storage.head; curr != NULL;) {
CRONET_LOG(GPR_DEBUG, "calling op at %p. done = %d", curr, curr->done);
GPR_ASSERT(curr->done == 0);
- enum e_op_result result = execute_stream_op(&exec_ctx, curr);
+ enum e_op_result result = execute_stream_op(exec_ctx, curr);
CRONET_LOG(GPR_DEBUG, "execute_stream_op[%p] returns %s", curr,
op_result_string(result));
/* if this op is done, then remove it and free memory */
@@ -369,7 +393,6 @@ static void execute_from_storage(stream_obj *s) {
}
}
gpr_mu_unlock(&s->mu);
- grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -377,6 +400,8 @@ static void execute_from_storage(stream_obj *s) {
*/
static void on_failed(bidirectional_stream *stream, int net_error) {
CRONET_LOG(GPR_DEBUG, "on_failed(%p, %d)", stream, net_error);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
bidirectional_stream_destroy(s->cbs);
@@ -392,7 +417,9 @@ static void on_failed(bidirectional_stream *stream, int net_error) {
}
null_and_maybe_free_read_buffer(s);
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -400,6 +427,8 @@ static void on_failed(bidirectional_stream *stream, int net_error) {
*/
static void on_canceled(bidirectional_stream *stream) {
CRONET_LOG(GPR_DEBUG, "on_canceled(%p)", stream);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
bidirectional_stream_destroy(s->cbs);
@@ -415,7 +444,9 @@ static void on_canceled(bidirectional_stream *stream) {
}
null_and_maybe_free_read_buffer(s);
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -423,6 +454,8 @@ static void on_canceled(bidirectional_stream *stream) {
*/
static void on_succeeded(bidirectional_stream *stream) {
CRONET_LOG(GPR_DEBUG, "on_succeeded(%p)", stream);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
bidirectional_stream_destroy(s->cbs);
@@ -430,7 +463,9 @@ static void on_succeeded(bidirectional_stream *stream) {
s->cbs = NULL;
null_and_maybe_free_read_buffer(s);
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -438,6 +473,7 @@ static void on_succeeded(bidirectional_stream *stream) {
*/
static void on_stream_ready(bidirectional_stream *stream) {
CRONET_LOG(GPR_DEBUG, "W: on_stream_ready(%p)", stream);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
stream_obj *s = (stream_obj *)stream->annotation;
grpc_cronet_transport *t = (grpc_cronet_transport *)s->curr_ct;
gpr_mu_lock(&s->mu);
@@ -457,7 +493,8 @@ static void on_stream_ready(bidirectional_stream *stream) {
}
}
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -513,14 +550,15 @@ static void on_response_headers_received(
s->state.pending_read_from_cronet = true;
}
gpr_mu_unlock(&s->mu);
+ execute_from_storage(&exec_ctx, s);
grpc_exec_ctx_finish(&exec_ctx);
- execute_from_storage(s);
}
/*
Cronet callback
*/
static void on_write_completed(bidirectional_stream *stream, const char *data) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
stream_obj *s = (stream_obj *)stream->annotation;
CRONET_LOG(GPR_DEBUG, "W: on_write_completed(%p, %s)", stream, data);
gpr_mu_lock(&s->mu);
@@ -530,7 +568,8 @@ static void on_write_completed(bidirectional_stream *stream, const char *data) {
}
s->state.state_callback_received[OP_SEND_MESSAGE] = true;
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -538,6 +577,7 @@ static void on_write_completed(bidirectional_stream *stream, const char *data) {
*/
static void on_read_completed(bidirectional_stream *stream, char *data,
int count) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
stream_obj *s = (stream_obj *)stream->annotation;
CRONET_LOG(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream, data,
count);
@@ -563,14 +603,15 @@ static void on_read_completed(bidirectional_stream *stream, char *data,
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
}
} else {
null_and_maybe_free_read_buffer(s);
s->state.rs.read_stream_closed = true;
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
}
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -625,12 +666,11 @@ static void on_response_trailers_received(
s->state.state_op_done[OP_SEND_TRAILING_METADATA] = true;
gpr_mu_unlock(&s->mu);
- grpc_exec_ctx_finish(&exec_ctx);
} else {
gpr_mu_unlock(&s->mu);
- grpc_exec_ctx_finish(&exec_ctx);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
}
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -1313,6 +1353,9 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_stream_refcount *refcount,
const void *server_data, gpr_arena *arena) {
stream_obj *s = (stream_obj *)gs;
+
+ s->refcount = refcount;
+ GRPC_CRONET_STREAM_REF(s, "cronet transport");
memset(&s->storage, 0, sizeof(s->storage));
s->storage.head = NULL;
memset(&s->state, 0, sizeof(s->state));
@@ -1370,7 +1413,7 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
}
stream_obj *s = (stream_obj *)gs;
add_to_storage(s, op);
- execute_from_storage(s);
+ execute_from_storage(exec_ctx, s);
}
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
@@ -1386,10 +1429,6 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
- return NULL;
-}
-
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *gt) {
return NULL;
@@ -1408,7 +1447,6 @@ static const grpc_transport_vtable grpc_cronet_vtable = {
perform_op,
destroy_stream,
destroy_transport,
- get_peer,
get_endpoint};
grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
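Each Cronet network-thread callback above now owns its own exec_ctx: create it at entry, thread it through execute_from_storage, and finish it before returning so scheduled closures are flushed on this thread. A minimal sketch of the shape of that pattern; the names are hypothetical and the real callbacks also take Cronet arguments:

    #include "src/core/lib/iomgr/exec_ctx.h"

    /* Sketch of a callback-owned exec_ctx. */
    static void example_cronet_callback(void *arg) {
      (void)arg;
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      /* ... locked state updates, then run pending ops with &exec_ctx ... */
      grpc_exec_ctx_finish(&exec_ctx); /* flush closures before returning */
    }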
diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.c
index 6f4b429ee2..31739d07dd 100644
--- a/src/core/ext/transport/inproc/inproc_transport.c
+++ b/src/core/ext/transport/inproc/inproc_transport.c
@@ -37,7 +37,6 @@
if (GRPC_TRACER_ON(grpc_inproc_trace)) gpr_log(__VA_ARGS__); \
} while (0)
-static const grpc_transport_vtable inproc_vtable;
static grpc_slice g_empty_slice;
static grpc_slice g_fake_path_key;
static grpc_slice g_fake_path_value;
@@ -120,7 +119,7 @@ static void slice_buffer_list_append_entry(slice_buffer_list *l,
}
static grpc_slice_buffer *slice_buffer_list_append(slice_buffer_list *l) {
- sb_list_entry *next = gpr_malloc(sizeof(*next));
+ sb_list_entry *next = (sb_list_entry *)gpr_malloc(sizeof(*next));
grpc_slice_buffer_init(&next->sb);
slice_buffer_list_append_entry(l, next);
return &next->sb;
@@ -327,7 +326,8 @@ static grpc_error *fill_in_metadata(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_error *error = GRPC_ERROR_NONE;
for (grpc_linked_mdelem *elem = metadata->list.head;
(elem != NULL) && (error == GRPC_ERROR_NONE); elem = elem->next) {
- grpc_linked_mdelem *nelem = gpr_arena_alloc(s->arena, sizeof(*nelem));
+ grpc_linked_mdelem *nelem =
+ (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*nelem));
nelem->md = grpc_mdelem_from_slices(
exec_ctx, grpc_slice_intern(GRPC_MDKEY(elem->md)),
grpc_slice_intern(GRPC_MDVALUE(elem->md)));
@@ -531,12 +531,14 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
// since it expects that as well as no error yet
grpc_metadata_batch fake_md;
grpc_metadata_batch_init(&fake_md);
- grpc_linked_mdelem *path_md = gpr_arena_alloc(s->arena, sizeof(*path_md));
+ grpc_linked_mdelem *path_md =
+ (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*path_md));
path_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_path_key, g_fake_path_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, path_md) ==
GRPC_ERROR_NONE);
- grpc_linked_mdelem *auth_md = gpr_arena_alloc(s->arena, sizeof(*auth_md));
+ grpc_linked_mdelem *auth_md =
+ (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*auth_md));
auth_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_auth_key, g_fake_auth_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, auth_md) ==
@@ -1164,6 +1166,55 @@ static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
}
/*******************************************************************************
+ * INTEGRATION GLUE
+ */
+
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_pollset *pollset) {
+ // Nothing to do here
+}
+
+static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_pollset_set *pollset_set) {
+ // Nothing to do here
+}
+
+static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
+ return NULL;
+}
+
+/*******************************************************************************
+ * GLOBAL INIT AND DESTROY
+ */
+static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
+
+void grpc_inproc_transport_init(void) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, NULL,
+ grpc_schedule_on_exec_ctx);
+ g_empty_slice = grpc_slice_from_static_buffer(NULL, 0);
+
+ grpc_slice key_tmp = grpc_slice_from_static_string(":path");
+ g_fake_path_key = grpc_slice_intern(key_tmp);
+ grpc_slice_unref_internal(&exec_ctx, key_tmp);
+
+ g_fake_path_value = grpc_slice_from_static_string("/");
+
+ grpc_slice auth_tmp = grpc_slice_from_static_string(":authority");
+ g_fake_auth_key = grpc_slice_intern(auth_tmp);
+ grpc_slice_unref_internal(&exec_ctx, auth_tmp);
+
+ g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static const grpc_transport_vtable inproc_vtable = {
+ sizeof(inproc_stream), "inproc", init_stream,
+ set_pollset, set_pollset_set, perform_stream_op,
+ perform_transport_op, destroy_stream, destroy_transport,
+ get_endpoint};
+
+/*******************************************************************************
* Main inproc transport functions
*/
static void inproc_transports_create(grpc_exec_ctx *exec_ctx,
@@ -1172,10 +1223,10 @@ static void inproc_transports_create(grpc_exec_ctx *exec_ctx,
grpc_transport **client_transport,
const grpc_channel_args *client_args) {
INPROC_LOG(GPR_DEBUG, "inproc_transports_create");
- inproc_transport *st = gpr_zalloc(sizeof(*st));
- inproc_transport *ct = gpr_zalloc(sizeof(*ct));
+ inproc_transport *st = (inproc_transport *)gpr_zalloc(sizeof(*st));
+ inproc_transport *ct = (inproc_transport *)gpr_zalloc(sizeof(*ct));
// Share one lock between both sides since both sides get affected
- st->mu = ct->mu = gpr_malloc(sizeof(*st->mu));
+ st->mu = ct->mu = (shared_mu *)gpr_malloc(sizeof(*st->mu));
gpr_mu_init(&st->mu->mu);
gpr_ref_init(&st->mu->refs, 2);
st->base.vtable = &inproc_vtable;
@@ -1212,8 +1263,8 @@ grpc_channel *grpc_inproc_channel_create(grpc_server *server,
grpc_arg default_authority_arg;
default_authority_arg.type = GRPC_ARG_STRING;
- default_authority_arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
- default_authority_arg.value.string = "inproc.authority";
+ default_authority_arg.key = (char *)GRPC_ARG_DEFAULT_AUTHORITY;
+ default_authority_arg.value.string = (char *)"inproc.authority";
grpc_channel_args *client_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);
@@ -1237,61 +1288,6 @@ grpc_channel *grpc_inproc_channel_create(grpc_server *server,
return channel;
}
-/*******************************************************************************
- * INTEGRATION GLUE
- */
-
-static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_pollset *pollset) {
- // Nothing to do here
-}
-
-static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_pollset_set *pollset_set) {
- // Nothing to do here
-}
-
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return gpr_strdup("inproc");
-}
-
-static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return NULL;
-}
-
-static const grpc_transport_vtable inproc_vtable = {
- sizeof(inproc_stream), "inproc",
- init_stream, set_pollset,
- set_pollset_set, perform_stream_op,
- perform_transport_op, destroy_stream,
- destroy_transport, get_peer,
- get_endpoint};
-
-/*******************************************************************************
- * GLOBAL INIT AND DESTROY
- */
-static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
-
-void grpc_inproc_transport_init(void) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, NULL,
- grpc_schedule_on_exec_ctx);
- g_empty_slice = grpc_slice_from_static_buffer(NULL, 0);
-
- grpc_slice key_tmp = grpc_slice_from_static_string(":path");
- g_fake_path_key = grpc_slice_intern(key_tmp);
- grpc_slice_unref_internal(&exec_ctx, key_tmp);
-
- g_fake_path_value = grpc_slice_from_static_string("/");
-
- grpc_slice auth_tmp = grpc_slice_from_static_string(":authority");
- g_fake_auth_key = grpc_slice_intern(auth_tmp);
- grpc_slice_unref_internal(&exec_ctx, auth_tmp);
-
- g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
- grpc_exec_ctx_finish(&exec_ctx);
-}
-
void grpc_inproc_transport_shutdown(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_slice_unref_internal(&exec_ctx, g_empty_slice);
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index 8fdef0bc64..30248b3c60 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -86,13 +86,14 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
}
}
// Create result.
- grpc_channel_args *dst = gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args *dst =
+ (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
dst->num_args = num_args_to_copy + num_to_add;
if (dst->num_args == 0) {
dst->args = NULL;
return dst;
}
- dst->args = gpr_malloc(sizeof(grpc_arg) * dst->num_args);
+ dst->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * dst->num_args);
// Copy args from src that are not being removed.
size_t dst_idx = 0;
if (src != NULL) {
@@ -117,7 +118,7 @@ grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src) {
grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a,
const grpc_channel_args *b) {
const size_t max_out = (a->num_args + b->num_args);
- grpc_arg *uniques = gpr_malloc(sizeof(*uniques) * max_out);
+ grpc_arg *uniques = (grpc_arg *)gpr_malloc(sizeof(*uniques) * max_out);
for (size_t i = 0; i < a->num_args; ++i) uniques[i] = a->args[i];
size_t uniques_idx = a->num_args;
@@ -160,24 +161,25 @@ static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
/* stabilizing comparison function: since channel_args ordering matters for
* keys with the same name, we need to preserve that ordering */
static int cmp_key_stable(const void *ap, const void *bp) {
- const grpc_arg *const *a = ap;
- const grpc_arg *const *b = bp;
+ const grpc_arg *const *a = (const grpc_arg *const *)ap;
+ const grpc_arg *const *b = (const grpc_arg *const *)bp;
int c = strcmp((*a)->key, (*b)->key);
if (c == 0) c = GPR_ICMP(*a, *b);
return c;
}
grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
- grpc_arg **args = gpr_malloc(sizeof(grpc_arg *) * a->num_args);
+ grpc_arg **args = (grpc_arg **)gpr_malloc(sizeof(grpc_arg *) * a->num_args);
for (size_t i = 0; i < a->num_args; i++) {
args[i] = &a->args[i];
}
if (a->num_args > 1)
qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable);
- grpc_channel_args *b = gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args *b =
+ (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
b->num_args = a->num_args;
- b->args = gpr_malloc(sizeof(grpc_arg) * b->num_args);
+ b->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * b->num_args);
for (size_t i = 0; i < a->num_args; i++) {
b->args[i] = copy_arg(args[i]);
}
@@ -210,7 +212,7 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) {
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
const grpc_channel_args *a) {
size_t i;
- if (a == NULL) return 0;
+ if (a == NULL) return GRPC_COMPRESS_NONE;
for (i = 0; i < a->num_args; ++i) {
if (a->args[i].type == GRPC_ARG_INTEGER &&
!strcmp(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, a->args[i].key)) {
@@ -221,12 +223,37 @@ grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
return GRPC_COMPRESS_NONE;
}
+grpc_stream_compression_algorithm
+grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a) {
+ size_t i;
+ if (a == NULL) return GRPC_STREAM_COMPRESS_NONE;
+ for (i = 0; i < a->num_args; ++i) {
+ if (a->args[i].type == GRPC_ARG_INTEGER &&
+ !strcmp(GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
+ a->args[i].key)) {
+ return (grpc_stream_compression_algorithm)a->args[i].value.integer;
+ }
+ }
+ return GRPC_STREAM_COMPRESS_NONE;
+}
+
grpc_channel_args *grpc_channel_args_set_compression_algorithm(
grpc_channel_args *a, grpc_compression_algorithm algorithm) {
GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+ tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+ tmp.value.integer = algorithm;
+ return grpc_channel_args_copy_and_add(a, &tmp, 1);
+}
+
+grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
+ grpc_channel_args *a, grpc_stream_compression_algorithm algorithm) {
+ GPR_ASSERT(algorithm < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
+ grpc_arg tmp;
+ tmp.type = GRPC_ARG_INTEGER;
+ tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
@@ -251,6 +278,26 @@ static int find_compression_algorithm_states_bitset(const grpc_channel_args *a,
return 0; /* GPR_FALSE */
}
+/** Returns 1 if the argument for the stream compression algorithm's enabled
+ * states bitset was found in \a a, returning the arg's value in \a states_arg.
+ * Otherwise, returns 0. */
+static int find_stream_compression_algorithm_states_bitset(
+ const grpc_channel_args *a, int **states_arg) {
+ if (a != NULL) {
+ size_t i;
+ for (i = 0; i < a->num_args; ++i) {
+ if (a->args[i].type == GRPC_ARG_INTEGER &&
+ !strcmp(GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET,
+ a->args[i].key)) {
+ *states_arg = &a->args[i].value.integer;
+ **states_arg |= 0x1; /* forcefully enable support for no compression */
+ return 1;
+ }
+ }
+ }
+ return 0; /* GPR_FALSE */
+}
+
grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
grpc_compression_algorithm algorithm, int state) {
@@ -261,7 +308,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
if (grpc_channel_args_get_compression_algorithm(*a) == algorithm &&
state == 0) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algo_name) != 0);
gpr_log(GPR_ERROR,
"Tried to disable default compression algorithm '%s'. The "
@@ -277,7 +324,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+ tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
/* all enabled by default */
tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
@@ -292,6 +339,48 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
return result;
}
+grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
+ grpc_stream_compression_algorithm algorithm, int state) {
+ int *states_arg = NULL;
+ grpc_channel_args *result = *a;
+ const int states_arg_found =
+ find_stream_compression_algorithm_states_bitset(*a, &states_arg);
+
+ if (grpc_channel_args_get_stream_compression_algorithm(*a) == algorithm &&
+ state == 0) {
+ const char *algo_name = NULL;
+ GPR_ASSERT(grpc_stream_compression_algorithm_name(algorithm, &algo_name) !=
+ 0);
+ gpr_log(GPR_ERROR,
+ "Tried to disable default stream compression algorithm '%s'. The "
+ "operation has been ignored.",
+ algo_name);
+ } else if (states_arg_found) {
+ if (state != 0) {
+ GPR_BITSET((unsigned *)states_arg, algorithm);
+ } else if (algorithm != GRPC_STREAM_COMPRESS_NONE) {
+ GPR_BITCLEAR((unsigned *)states_arg, algorithm);
+ }
+ } else {
+ /* create a new arg */
+ grpc_arg tmp;
+ tmp.type = GRPC_ARG_INTEGER;
+ tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+ /* all enabled by default */
+ tmp.value.integer = (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1;
+ if (state != 0) {
+ GPR_BITSET((unsigned *)&tmp.value.integer, algorithm);
+ } else if (algorithm != GRPC_STREAM_COMPRESS_NONE) {
+ GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm);
+ }
+ result = grpc_channel_args_copy_and_add(*a, &tmp, 1);
+ grpc_channel_args_destroy(exec_ctx, *a);
+ *a = result;
+ }
+ return result;
+}
+
uint32_t grpc_channel_args_compression_algorithm_get_states(
const grpc_channel_args *a) {
int *states_arg;
@@ -302,6 +391,17 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
}
}
+uint32_t grpc_channel_args_stream_compression_algorithm_get_states(
+ const grpc_channel_args *a) {
+ int *states_arg;
+ if (find_stream_compression_algorithm_states_bitset(a, &states_arg)) {
+ return (uint32_t)*states_arg;
+ } else {
+ return (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) -
+ 1; /* All algs. enabled */
+ }
+}
+
grpc_channel_args *grpc_channel_args_set_socket_mutator(
grpc_channel_args *a, grpc_socket_mutator *mutator) {
grpc_arg tmp = grpc_socket_mutator_to_arg(mutator);
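A brief usage sketch for the stream-compression channel-arg helpers added above. GRPC_STREAM_COMPRESS_GZIP is assumed here to be the non-NONE member of the grpc_stream_compression_algorithm enum:

    #include <grpc/grpc.h>
    #include <grpc/support/log.h>
    #include "src/core/lib/channel/channel_args.h"

    /* Sketch: set a channel-wide default stream compression algorithm and
       read back the enabled-algorithms bitset (all enabled by default). */
    static grpc_channel_args *stream_compression_args_example(void) {
      grpc_channel_args *args =
          grpc_channel_args_set_stream_compression_algorithm(
              NULL, GRPC_STREAM_COMPRESS_GZIP);
      uint32_t states =
          grpc_channel_args_stream_compression_algorithm_get_states(args);
      GPR_ASSERT(states & (1u << GRPC_STREAM_COMPRESS_NONE));
      return args; /* caller frees with grpc_channel_args_destroy */
    }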
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index f649a8d9ec..0599e189c3 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -59,12 +59,24 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a);
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
const grpc_channel_args *a);
+/** Returns the stream compression algorithm set in \a a. */
+grpc_stream_compression_algorithm
+grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a);
+
/** Returns a channel arg instance with compression enabled. If \a a is
* non-NULL, its args are copied. N.B. GRPC_COMPRESS_NONE disables compression
* for the channel. */
grpc_channel_args *grpc_channel_args_set_compression_algorithm(
grpc_channel_args *a, grpc_compression_algorithm algorithm);
+/** Returns a channel arg instance with stream compression enabled. If \a a is
+ * non-NULL, its args are copied. N.B. GRPC_STREAM_COMPRESS_NONE disables
+ * stream compression for the channel. If a value other than
+ * GRPC_STREAM_COMPRESS_NONE is set, it takes precedence over message-level
+ * compression algorithms. */
+grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
+ grpc_channel_args *a, grpc_stream_compression_algorithm algorithm);
+
/** Sets the support for the given compression algorithm. By default, all
* compression algorithms are enabled. It's an error to disable an algorithm set
* by grpc_channel_args_set_compression_algorithm.
@@ -76,6 +88,17 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
grpc_compression_algorithm algorithm, int enabled);
+/** Sets the support for the given stream compression algorithm. By default, all
+ * stream compression algorithms are enabled. It's an error to disable an
+ * algorithm set by grpc_channel_args_set_stream_compression_algorithm.
+ *
+ * Returns an instance with the updated algorithm states. The \a a pointer is
+ * modified to point to the returned instance (which may be different from the
+ * input value of \a a). */
+grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
+ grpc_stream_compression_algorithm algorithm, int enabled);
+
/** Returns the bitset representing the support state (true for enabled, false
* for disabled) for compression algorithms.
*
@@ -84,6 +107,14 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
uint32_t grpc_channel_args_compression_algorithm_get_states(
const grpc_channel_args *a);
+/** Returns the bitset representing the support state (true for enabled, false
+ * for disabled) for stream compression algorithms.
+ *
+ * The i-th bit of the returned bitset corresponds to the i-th entry in the
+ * grpc_stream_compression_algorithm enum. */
+uint32_t grpc_channel_args_stream_compression_algorithm_get_states(
+ const grpc_channel_args *a);
+
int grpc_channel_args_compare(const grpc_channel_args *a,
const grpc_channel_args *b);
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 0f8e33c4be..775c8bc667 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -233,15 +233,10 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
grpc_call_element *next_elem = elem + 1;
+ GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op);
next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op);
}
-char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- grpc_call_element *next_elem = elem + 1;
- return next_elem->filter->get_peer(exec_ctx, next_elem);
-}
-
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *channel_info) {
@@ -265,12 +260,3 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_call_stack)));
}
-
-void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(NULL);
- op->cancel_stream = true;
- op->payload->cancel_stream.cancel_error = error;
- elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
-}
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index a80f8aa826..f0de80f0c0 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -40,6 +40,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/transport.h"
@@ -71,6 +72,7 @@ typedef struct {
gpr_timespec start_time;
gpr_timespec deadline;
gpr_arena *arena;
+ grpc_call_combiner *call_combiner;
} grpc_call_element_args;
typedef struct {
@@ -150,9 +152,6 @@ typedef struct {
void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem);
- /* Implement grpc_call_get_peer() */
- char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
-
/* Implement grpc_channel_get_info() */
void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
const grpc_channel_info *channel_info);
@@ -271,8 +270,6 @@ void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
stack */
void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_transport_op *op);
-/* Pass through a request to get_peer to the next child element */
-char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* Pass through a request to get_channel_info() to the next child element */
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -284,14 +281,10 @@ grpc_channel_stack *grpc_channel_stack_from_top_element(
/* Given the top element of a call stack, get the call stack itself */
grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
-void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op);
-void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
- grpc_call_element *cur_elem,
- grpc_error *error);
-
extern grpc_tracer_flag grpc_trace_channel;
#define GRPC_CALL_LOG_OP(sev, elem, op) \
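Since grpc_call_element_args now carries the call's grpc_call_combiner, a filter typically captures it in init_call_elem so later closures can be rescheduled on it (connected_channel.c below does exactly this). A minimal hypothetical filter sketch:

    #include "src/core/lib/channel/channel_stack.h"

    typedef struct {
      grpc_call_combiner *call_combiner; /* borrowed from element args */
    } example_call_data;

    static grpc_error *example_init_call_elem(
        grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
        const grpc_call_element_args *args) {
      example_call_data *calld = (example_call_data *)elem->call_data;
      calld->call_combiner = args->call_combiner;
      return GRPC_ERROR_NONE;
    }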
diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c
index c369e33073..b663ebfb52 100644
--- a/src/core/lib/channel/channel_stack_builder.c
+++ b/src/core/lib/channel/channel_stack_builder.c
@@ -51,7 +51,8 @@ struct grpc_channel_stack_builder_iterator {
};
grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
- grpc_channel_stack_builder *b = gpr_zalloc(sizeof(*b));
+ grpc_channel_stack_builder *b =
+ (grpc_channel_stack_builder *)gpr_zalloc(sizeof(*b));
b->begin.filter = NULL;
b->end.filter = NULL;
@@ -76,7 +77,8 @@ const char *grpc_channel_stack_builder_get_target(
static grpc_channel_stack_builder_iterator *create_iterator_at_filter_node(
grpc_channel_stack_builder *builder, filter_node *node) {
- grpc_channel_stack_builder_iterator *it = gpr_malloc(sizeof(*it));
+ grpc_channel_stack_builder_iterator *it =
+ (grpc_channel_stack_builder_iterator *)gpr_malloc(sizeof(*it));
it->builder = builder;
it->node = node;
return it;
@@ -124,6 +126,20 @@ bool grpc_channel_stack_builder_move_prev(
return true;
}
+grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find(
+ grpc_channel_stack_builder *builder, const char *filter_name) {
+ GPR_ASSERT(filter_name != NULL);
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_create_iterator_at_first(builder);
+ while (grpc_channel_stack_builder_move_next(it)) {
+ if (grpc_channel_stack_builder_iterator_is_end(it)) break;
+ const char *filter_name_at_it =
+ grpc_channel_stack_builder_iterator_filter_name(it);
+ if (strcmp(filter_name, filter_name_at_it) == 0) break;
+ }
+ return it;
+}
+
bool grpc_channel_stack_builder_move_prev(
grpc_channel_stack_builder_iterator *iterator);
@@ -169,6 +185,21 @@ bool grpc_channel_stack_builder_append_filter(
return ok;
}
+bool grpc_channel_stack_builder_remove_filter(
+ grpc_channel_stack_builder *builder, const char *filter_name) {
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_iterator_find(builder, filter_name);
+ if (grpc_channel_stack_builder_iterator_is_end(it)) {
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return false;
+ }
+ it->node->prev->next = it->node->next;
+ it->node->next->prev = it->node->prev;
+ gpr_free(it->node);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return true;
+}
+
bool grpc_channel_stack_builder_prepend_filter(
grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
grpc_post_filter_create_init_func post_init_func, void *user_data) {
@@ -183,13 +214,13 @@ bool grpc_channel_stack_builder_prepend_filter(
static void add_after(filter_node *before, const grpc_channel_filter *filter,
grpc_post_filter_create_init_func post_init_func,
void *user_data) {
- filter_node *new = gpr_malloc(sizeof(*new));
- new->next = before->next;
- new->prev = before;
- new->next->prev = new->prev->next = new;
- new->filter = filter;
- new->init = post_init_func;
- new->init_arg = user_data;
+ filter_node *new_node = (filter_node *)gpr_malloc(sizeof(*new_node));
+ new_node->next = before->next;
+ new_node->prev = before;
+ new_node->next->prev = new_node->prev->next = new_node;
+ new_node->filter = filter;
+ new_node->init = post_init_func;
+ new_node->init_arg = user_data;
}
bool grpc_channel_stack_builder_add_filter_before(
@@ -237,7 +268,7 @@ grpc_error *grpc_channel_stack_builder_finish(
// create an array of filters
const grpc_channel_filter **filters =
- gpr_malloc(sizeof(*filters) * num_filters);
+ (const grpc_channel_filter **)gpr_malloc(sizeof(*filters) * num_filters);
size_t i = 0;
for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
filters[i++] = p->filter;
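A usage sketch for the new builder helpers above; the lookup/removal target is whatever filter name the caller supplies:

    #include <stdbool.h>
    #include "src/core/lib/channel/channel_stack_builder.h"

    /* Sketch: query and unlink a named filter from a stack under
       construction. remove returns true only when a match was removed. */
    static bool has_filter_example(grpc_channel_stack_builder *builder,
                                   const char *name) {
      grpc_channel_stack_builder_iterator *it =
          grpc_channel_stack_builder_iterator_find(builder, name);
      bool found = !grpc_channel_stack_builder_iterator_is_end(it);
      grpc_channel_stack_builder_iterator_destroy(it);
      return found;
    }

    static bool drop_filter_example(grpc_channel_stack_builder *builder,
                                    const char *name) {
      return grpc_channel_stack_builder_remove_filter(builder, name);
    }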
diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h
index d43e427962..fdff2a2b6d 100644
--- a/src/core/lib/channel/channel_stack_builder.h
+++ b/src/core/lib/channel/channel_stack_builder.h
@@ -95,6 +95,11 @@ bool grpc_channel_stack_builder_move_next(
bool grpc_channel_stack_builder_move_prev(
grpc_channel_stack_builder_iterator *iterator);
+/// Return an iterator at \a filter_name, or at the end of the list if not
+/// found.
+grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find(
+ grpc_channel_stack_builder *builder, const char *filter_name);
+
typedef void (*grpc_post_filter_create_init_func)(
grpc_channel_stack *channel_stack, grpc_channel_element *elem, void *arg);
@@ -132,6 +137,11 @@ bool grpc_channel_stack_builder_append_filter(
grpc_post_filter_create_init_func post_init_func,
void *user_data) GRPC_MUST_USE_RESULT;
+/// Remove any filter whose name is \a filter_name from \a builder. Returns
+/// true if a matching filter was found and removed.
+bool grpc_channel_stack_builder_remove_filter(
+ grpc_channel_stack_builder *builder, const char *filter_name);
+
/// Terminate iteration and destroy \a iterator
void grpc_channel_stack_builder_iterator_destroy(
grpc_channel_stack_builder_iterator *iterator);
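
The new find/remove pair lets plugins strip a filter out of a stack under
construction by name. A minimal usage sketch, assuming the internal header
above is on the include path; the wrapper name has_filter is invented here:

    #include <stdbool.h>

    #include "src/core/lib/channel/channel_stack_builder.h"

    /* Check whether a filter is present without removing it. The iterator
       returned by _iterator_find must always be destroyed, even when it is
       positioned at the end of the list. */
    static bool has_filter(grpc_channel_stack_builder *builder,
                           const char *filter_name) {
      grpc_channel_stack_builder_iterator *it =
          grpc_channel_stack_builder_iterator_find(builder, filter_name);
      bool found = !grpc_channel_stack_builder_iterator_is_end(it);
      grpc_channel_stack_builder_iterator_destroy(it);
      return found;
    }
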
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index af06ca802e..4f37908958 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -36,7 +36,57 @@ typedef struct connected_channel_channel_data {
grpc_transport *transport;
} channel_data;
-typedef struct connected_channel_call_data { void *unused; } call_data;
+typedef struct {
+ grpc_closure closure;
+ grpc_closure *original_closure;
+ grpc_call_combiner *call_combiner;
+ const char *reason;
+} callback_state;
+
+typedef struct connected_channel_call_data {
+ grpc_call_combiner *call_combiner;
+ // Closures used for returning results on the call combiner.
+ callback_state on_complete[6]; // Max number of pending batches.
+ callback_state recv_initial_metadata_ready;
+ callback_state recv_message_ready;
+} call_data;
+
+static void run_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ callback_state *state = (callback_state *)arg;
+ GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner,
+ state->original_closure, GRPC_ERROR_REF(error),
+ state->reason);
+}
+
+static void run_cancel_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ run_in_call_combiner(exec_ctx, arg, error);
+ gpr_free(arg);
+}
+
+static void intercept_callback(call_data *calld, callback_state *state,
+ bool free_when_done, const char *reason,
+ grpc_closure **original_closure) {
+ state->original_closure = *original_closure;
+ state->call_combiner = calld->call_combiner;
+ state->reason = reason;
+ *original_closure = GRPC_CLOSURE_INIT(
+ &state->closure,
+ free_when_done ? run_cancel_in_call_combiner : run_in_call_combiner,
+ state, grpc_schedule_on_exec_ctx);
+}
+
+static callback_state *get_state_for_batch(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ if (batch->send_initial_metadata) return &calld->on_complete[0];
+ if (batch->send_message) return &calld->on_complete[1];
+ if (batch->send_trailing_metadata) return &calld->on_complete[2];
+ if (batch->recv_initial_metadata) return &calld->on_complete[3];
+ if (batch->recv_message) return &calld->on_complete[4];
+ if (batch->recv_trailing_metadata) return &calld->on_complete[5];
+ GPR_UNREACHABLE_CODE(return NULL);
+}
/* We perform a small hack to locate transport data alongside the connected
channel data in call allocations, to allow everything to be pulled in minimal
@@ -49,19 +99,44 @@ typedef struct connected_channel_call_data { void *unused; } call_data;
into transport stream operations */
static void con_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-
+ grpc_transport_stream_op_batch *batch) {
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ if (batch->recv_initial_metadata) {
+ callback_state *state = &calld->recv_initial_metadata_ready;
+ intercept_callback(
+ calld, state, false, "recv_initial_metadata_ready",
+ &batch->payload->recv_initial_metadata.recv_initial_metadata_ready);
+ }
+ if (batch->recv_message) {
+ callback_state *state = &calld->recv_message_ready;
+ intercept_callback(calld, state, false, "recv_message_ready",
+ &batch->payload->recv_message.recv_message_ready);
+ }
+ if (batch->cancel_stream) {
+ // There can be more than one cancellation batch in flight at any
+ // given time, so we can't just pick out a fixed index into
+ // calld->on_complete like we can for the other ops. However,
+ // cancellation isn't in the fast path, so we just allocate a new
+ // closure for each one.
+ callback_state *state = (callback_state *)gpr_malloc(sizeof(*state));
+ intercept_callback(calld, state, true, "on_complete (cancel_stream)",
+ &batch->on_complete);
+ } else {
+ callback_state *state = get_state_for_batch(calld, batch);
+ intercept_callback(calld, state, false, "on_complete", &batch->on_complete);
+ }
grpc_transport_perform_stream_op(exec_ctx, chand->transport,
- TRANSPORT_STREAM_FROM_CALL_DATA(calld), op);
+ TRANSPORT_STREAM_FROM_CALL_DATA(calld),
+ batch);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "passed batch to transport");
}
static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_perform_op(exec_ctx, chand->transport, op);
}
@@ -69,8 +144,9 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
&args->call_stack->refcount, args->server_transport_data, args->arena);
@@ -82,8 +158,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_set_pops(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
}
@@ -92,8 +168,8 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
then_schedule_closure);
@@ -118,11 +194,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
}
-static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- return grpc_transport_get_peer(exec_ctx, chand->transport);
-}
-
/* No-op. */
static void con_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -138,7 +209,6 @@ const grpc_channel_filter grpc_connected_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- con_get_peer,
con_get_channel_info,
"connected",
};
@@ -148,7 +218,7 @@ static void bind_transport(grpc_channel_stack *channel_stack,
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_filter);
GPR_ASSERT(cd->transport == NULL);
- cd->transport = t;
+ cd->transport = (grpc_transport *)t;
/* HACK(ctiller): increase call stack size for the channel to make space
for channel data. We need a cleaner (but performant) way to do this,
@@ -156,7 +226,8 @@ static void bind_transport(grpc_channel_stack *channel_stack,
This is only "safe" because call stacks place no additional data after
the last call element, and the last call element MUST be the connected
channel. */
- channel_stack->call_stack_size += grpc_transport_stream_size(t);
+ channel_stack->call_stack_size +=
+ grpc_transport_stream_size((grpc_transport *)t);
}
bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
@@ -170,6 +241,6 @@ bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
}
grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}
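
The interception above never runs the surface layer's closure inline: each
transport callback is swapped for a wrapper that, when it fires, re-enters the
call combiner and only then schedules the original closure. A plain-C
reduction of that trampoline shape, with every gRPC type replaced by an
illustrative stand-in:

    #include <stdio.h>

    typedef void (*cb_fn)(void *arg, int error);

    /* Wrapper state: remembers the callback the surface layer asked for. */
    typedef struct {
      cb_fn original_cb;
      void *original_arg;
    } trampoline;

    /* Stand-in for GRPC_CALL_COMBINER_START: a real combiner queues the
       callback for serialized execution; here we simply run it. */
    static void combiner_start(cb_fn cb, void *arg, int error) {
      cb(arg, error);
    }

    /* What the transport actually invokes in place of the original. */
    static void trampoline_cb(void *arg, int error) {
      trampoline *t = (trampoline *)arg;
      combiner_start(t->original_cb, t->original_arg, error);
    }

    static void original(void *arg, int error) {
      printf("original callback ran with error=%d\n", error);
    }

    int main(void) {
      trampoline t = {original, NULL};
      trampoline_cb(&t, 0); /* as the transport would */
      return 0;
    }
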
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 2cb83f4114..1753da5721 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -84,7 +84,8 @@ struct grpc_handshake_manager {
};
grpc_handshake_manager* grpc_handshake_manager_create() {
- grpc_handshake_manager* mgr = gpr_zalloc(sizeof(grpc_handshake_manager));
+ grpc_handshake_manager* mgr =
+ (grpc_handshake_manager*)gpr_zalloc(sizeof(grpc_handshake_manager));
gpr_mu_init(&mgr->mu);
gpr_ref_init(&mgr->refs, 1);
return mgr;
@@ -137,8 +138,8 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
realloc_count = mgr->count * 2;
}
if (realloc_count > 0) {
- mgr->handshakers =
- gpr_realloc(mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
+ mgr->handshakers = (grpc_handshaker**)gpr_realloc(
+ mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
}
mgr->handshakers[mgr->count++] = handshaker;
gpr_mu_unlock(&mgr->mu);
@@ -205,7 +206,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
// handshakers together.
static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- grpc_handshake_manager* mgr = arg;
+ grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
gpr_mu_lock(&mgr->mu);
bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error));
gpr_mu_unlock(&mgr->mu);
@@ -219,7 +220,7 @@ static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked when deadline is exceeded.
static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
- grpc_handshake_manager* mgr = arg;
+ grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled.
grpc_handshake_manager_shutdown(
exec_ctx, mgr,
@@ -241,7 +242,8 @@ void grpc_handshake_manager_do_handshake(
mgr->args.endpoint = endpoint;
mgr->args.args = grpc_channel_args_copy(channel_args);
mgr->args.user_data = user_data;
- mgr->args.read_buffer = gpr_malloc(sizeof(*mgr->args.read_buffer));
+ mgr->args.read_buffer =
+ (grpc_slice_buffer*)gpr_malloc(sizeof(*mgr->args.read_buffer));
grpc_slice_buffer_init(mgr->args.read_buffer);
// Initialize state needed for calling handshakers.
mgr->acceptor = acceptor;
diff --git a/src/core/lib/channel/handshaker_registry.c b/src/core/lib/channel/handshaker_registry.c
index 8c4bc3aa00..c6bc87d704 100644
--- a/src/core/lib/channel/handshaker_registry.c
+++ b/src/core/lib/channel/handshaker_registry.c
@@ -34,7 +34,7 @@ typedef struct {
static void grpc_handshaker_factory_list_register(
grpc_handshaker_factory_list* list, bool at_start,
grpc_handshaker_factory* factory) {
- list->list = gpr_realloc(
+ list->list = (grpc_handshaker_factory**)gpr_realloc(
list->list, (list->num_factories + 1) * sizeof(grpc_handshaker_factory*));
if (at_start) {
memmove(list->list + 1, list->list,
diff --git a/src/core/lib/compression/algorithm_metadata.h b/src/core/lib/compression/algorithm_metadata.h
index 4717af6e2b..08feafc1bb 100644
--- a/src/core/lib/compression/algorithm_metadata.h
+++ b/src/core/lib/compression/algorithm_metadata.h
@@ -26,13 +26,27 @@
grpc_slice grpc_compression_algorithm_slice(
grpc_compression_algorithm algorithm);
+/** Return stream compression algorithm based metadata value */
+grpc_slice grpc_stream_compression_algorithm_slice(
+ grpc_stream_compression_algorithm algorithm);
+
/** Return compression algorithm based metadata element (grpc-encoding: xxx) */
grpc_mdelem grpc_compression_encoding_mdelem(
grpc_compression_algorithm algorithm);
+/** Return stream compression algorithm based metadata element
+ * (content-encoding: xxx) */
+grpc_mdelem grpc_stream_compression_encoding_mdelem(
+ grpc_stream_compression_algorithm algorithm);
+
/** Find compression algorithm based on passed in mdstr - returns
* GRPC_COMPRESS_ALGORITHM_COUNT on failure */
grpc_compression_algorithm grpc_compression_algorithm_from_slice(
grpc_slice str);
+/** Find stream compression algorithm based on passed in mdstr - returns
+ * GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT on failure */
+grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
+ grpc_slice str);
+
#endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */
diff --git a/src/core/lib/compression/compression.c b/src/core/lib/compression/compression.c
index 8deae2798f..1cfac23129 100644
--- a/src/core/lib/compression/compression.c
+++ b/src/core/lib/compression/compression.c
@@ -46,8 +46,21 @@ int grpc_compression_algorithm_parse(grpc_slice name,
}
}
+int grpc_stream_compression_algorithm_parse(
+ grpc_slice name, grpc_stream_compression_algorithm *algorithm) {
+ if (grpc_slice_eq(name, GRPC_MDSTR_IDENTITY)) {
+ *algorithm = GRPC_STREAM_COMPRESS_NONE;
+ return 1;
+ } else if (grpc_slice_eq(name, GRPC_MDSTR_GZIP)) {
+ *algorithm = GRPC_STREAM_COMPRESS_GZIP;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
- char **name) {
+ const char **name) {
GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
((int)algorithm, name));
switch (algorithm) {
@@ -66,6 +79,24 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
return 0;
}
+int grpc_stream_compression_algorithm_name(
+ grpc_stream_compression_algorithm algorithm, const char **name) {
+ GRPC_API_TRACE(
+ "grpc_stream_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
+ ((int)algorithm, name));
+ switch (algorithm) {
+ case GRPC_STREAM_COMPRESS_NONE:
+ *name = "identity";
+ return 1;
+ case GRPC_STREAM_COMPRESS_GZIP:
+ *name = "gzip";
+ return 1;
+ case GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT:
+ return 0;
+ }
+ return 0;
+}
+
grpc_compression_algorithm grpc_compression_algorithm_from_slice(
grpc_slice str) {
if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_COMPRESS_NONE;
@@ -74,6 +105,13 @@ grpc_compression_algorithm grpc_compression_algorithm_from_slice(
return GRPC_COMPRESS_ALGORITHMS_COUNT;
}
+grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
+ grpc_slice str) {
+ if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_STREAM_COMPRESS_NONE;
+ if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_STREAM_COMPRESS_GZIP;
+ return GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT;
+}
+
grpc_slice grpc_compression_algorithm_slice(
grpc_compression_algorithm algorithm) {
switch (algorithm) {
@@ -89,6 +127,19 @@ grpc_slice grpc_compression_algorithm_slice(
return grpc_empty_slice();
}
+grpc_slice grpc_stream_compression_algorithm_slice(
+ grpc_stream_compression_algorithm algorithm) {
+ switch (algorithm) {
+ case GRPC_STREAM_COMPRESS_NONE:
+ return GRPC_MDSTR_IDENTITY;
+ case GRPC_STREAM_COMPRESS_GZIP:
+ return GRPC_MDSTR_GZIP;
+ case GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT:
+ return grpc_empty_slice();
+ }
+ return grpc_empty_slice();
+}
+
grpc_mdelem grpc_compression_encoding_mdelem(
grpc_compression_algorithm algorithm) {
switch (algorithm) {
@@ -104,10 +155,25 @@ grpc_mdelem grpc_compression_encoding_mdelem(
return GRPC_MDNULL;
}
+grpc_mdelem grpc_stream_compression_encoding_mdelem(
+ grpc_stream_compression_algorithm algorithm) {
+ switch (algorithm) {
+ case GRPC_STREAM_COMPRESS_NONE:
+ return GRPC_MDELEM_CONTENT_ENCODING_IDENTITY;
+ case GRPC_STREAM_COMPRESS_GZIP:
+ return GRPC_MDELEM_CONTENT_ENCODING_GZIP;
+ default:
+ break;
+ }
+ return GRPC_MDNULL;
+}
+
void grpc_compression_options_init(grpc_compression_options *opts) {
memset(opts, 0, sizeof(*opts));
/* all enabled by default */
opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
+ opts->enabled_stream_compression_algorithms_bitset =
+ (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1;
}
void grpc_compression_options_enable_algorithm(
@@ -126,6 +192,13 @@ int grpc_compression_options_is_algorithm_enabled(
return GPR_BITGET(opts->enabled_algorithms_bitset, algorithm);
}
+int grpc_compression_options_is_stream_compression_algorithm_enabled(
+ const grpc_compression_options *opts,
+ grpc_stream_compression_algorithm algorithm) {
+ return GPR_BITGET(opts->enabled_stream_compression_algorithms_bitset,
+ algorithm);
+}
+
/* TODO(dgq): Add the ability to specify parameters to the individual
* compression algorithms */
grpc_compression_algorithm grpc_compression_algorithm_for_level(
@@ -181,3 +254,30 @@ grpc_compression_algorithm grpc_compression_algorithm_for_level(
abort();
};
}
+
+GRPCAPI grpc_stream_compression_algorithm
+grpc_stream_compression_algorithm_for_level(
+ grpc_stream_compression_level level, uint32_t accepted_stream_encodings) {
+ GRPC_API_TRACE("grpc_stream_compression_algorithm_for_level(level=%d)", 1,
+ ((int)level));
+ if (level > GRPC_STREAM_COMPRESS_LEVEL_HIGH) {
+ gpr_log(GPR_ERROR, "Unknown compression level %d.", (int)level);
+ abort();
+ }
+
+ switch (level) {
+ case GRPC_STREAM_COMPRESS_LEVEL_NONE:
+ return GRPC_STREAM_COMPRESS_NONE;
+ case GRPC_STREAM_COMPRESS_LEVEL_LOW:
+ case GRPC_STREAM_COMPRESS_LEVEL_MED:
+ case GRPC_STREAM_COMPRESS_LEVEL_HIGH:
+ if (GPR_BITGET(accepted_stream_encodings, GRPC_STREAM_COMPRESS_GZIP) ==
+ 1) {
+ return GRPC_STREAM_COMPRESS_GZIP;
+ } else {
+ return GRPC_STREAM_COMPRESS_NONE;
+ }
+ default:
+ abort();
+ }
+}
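
A usage sketch for the new level-to-algorithm mapping: the second argument is
a bitset indexed by grpc_stream_compression_algorithm, as the GPR_BITGET call
above implies. Assuming the enums are visible via <grpc/compression.h>:

    #include <stdint.h>

    #include <grpc/compression.h>

    /* Any level above NONE maps to GZIP when the peer accepts it, and falls
       back to NONE otherwise. */
    static grpc_stream_compression_algorithm choose_stream_algorithm(void) {
      uint32_t accepted = (1u << GRPC_STREAM_COMPRESS_NONE) |
                          (1u << GRPC_STREAM_COMPRESS_GZIP);
      return grpc_stream_compression_algorithm_for_level(
          GRPC_STREAM_COMPRESS_LEVEL_HIGH, accepted);
    }
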
diff --git a/src/core/lib/compression/stream_compression.c b/src/core/lib/compression/stream_compression.c
index df13d53e06..411489f029 100644
--- a/src/core/lib/compression/stream_compression.c
+++ b/src/core/lib/compression/stream_compression.c
@@ -16,176 +16,62 @@
*
*/
-#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/compression/stream_compression.h"
-#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/compression/stream_compression_gzip.h"
-#define OUTPUT_BLOCK_SIZE (1024)
-
-static bool gzip_flate(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size, int flush,
- bool *end_of_context) {
- GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH);
- /* Full flush is not allowed when inflating. */
- GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
-
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- int r;
- bool eoc = false;
- size_t original_max_output_size = max_output_size;
- while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) {
- size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size
- : OUTPUT_BLOCK_SIZE;
- grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size);
- ctx->zs.avail_out = (uInt)slice_size;
- ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out);
- while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) {
- grpc_slice slice = grpc_slice_buffer_take_first(in);
- ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice);
- ctx->zs.next_in = GRPC_SLICE_START_PTR(slice);
- r = ctx->flate(&ctx->zs, Z_NO_FLUSH);
- if (r < 0 && r != Z_BUF_ERROR) {
- gpr_log(GPR_ERROR, "zlib error (%d)", r);
- grpc_slice_unref_internal(&exec_ctx, slice_out);
- grpc_exec_ctx_finish(&exec_ctx);
- return false;
- } else if (r == Z_STREAM_END && ctx->flate == inflate) {
- eoc = true;
- }
- if (ctx->zs.avail_in > 0) {
- grpc_slice_buffer_undo_take_first(
- in,
- grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in,
- GRPC_SLICE_LENGTH(slice)));
- }
- grpc_slice_unref_internal(&exec_ctx, slice);
- }
- if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) {
- GPR_ASSERT(in->length == 0);
- r = ctx->flate(&ctx->zs, flush);
- if (flush == Z_SYNC_FLUSH) {
- switch (r) {
- case Z_OK:
- /* Maybe flush is not complete; just made some partial progress. */
- if (ctx->zs.avail_out > 0) {
- flush = 0;
- }
- break;
- case Z_BUF_ERROR:
- case Z_STREAM_END:
- flush = 0;
- break;
- default:
- gpr_log(GPR_ERROR, "zlib error (%d)", r);
- grpc_slice_unref_internal(&exec_ctx, slice_out);
- grpc_exec_ctx_finish(&exec_ctx);
- return false;
- }
- } else if (flush == Z_FINISH) {
- switch (r) {
- case Z_OK:
- case Z_BUF_ERROR:
- /* Wait for the next loop to assign additional output space. */
- GPR_ASSERT(ctx->zs.avail_out == 0);
- break;
- case Z_STREAM_END:
- flush = 0;
- break;
- default:
- gpr_log(GPR_ERROR, "zlib error (%d)", r);
- grpc_slice_unref_internal(&exec_ctx, slice_out);
- grpc_exec_ctx_finish(&exec_ctx);
- return false;
- }
- }
- }
-
- if (ctx->zs.avail_out == 0) {
- grpc_slice_buffer_add(out, slice_out);
- } else if (ctx->zs.avail_out < slice_size) {
- slice_out.data.refcounted.length -= ctx->zs.avail_out;
- grpc_slice_buffer_add(out, slice_out);
- } else {
- grpc_slice_unref_internal(&exec_ctx, slice_out);
- }
- max_output_size -= (slice_size - ctx->zs.avail_out);
- }
- grpc_exec_ctx_finish(&exec_ctx);
- if (end_of_context) {
- *end_of_context = eoc;
- }
- if (output_size) {
- *output_size = original_max_output_size - max_output_size;
- }
- return true;
-}
+extern const grpc_stream_compression_vtable
+ grpc_stream_compression_identity_vtable;
bool grpc_stream_compress(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
grpc_stream_compression_flush flush) {
- GPR_ASSERT(ctx->flate == deflate);
- int gzip_flush;
- switch (flush) {
- case GRPC_STREAM_COMPRESSION_FLUSH_NONE:
- gzip_flush = 0;
- break;
- case GRPC_STREAM_COMPRESSION_FLUSH_SYNC:
- gzip_flush = Z_SYNC_FLUSH;
- break;
- case GRPC_STREAM_COMPRESSION_FLUSH_FINISH:
- gzip_flush = Z_FINISH;
- break;
- default:
- gzip_flush = 0;
- }
- return gzip_flate(ctx, in, out, output_size, max_output_size, gzip_flush,
- NULL);
+ return ctx->vtable->compress(ctx, in, out, output_size, max_output_size,
+ flush);
}
bool grpc_stream_decompress(grpc_stream_compression_context *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size,
bool *end_of_context) {
- GPR_ASSERT(ctx->flate == inflate);
- return gzip_flate(ctx, in, out, output_size, max_output_size, Z_SYNC_FLUSH,
- end_of_context);
+ return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size,
+ end_of_context);
}
grpc_stream_compression_context *grpc_stream_compression_context_create(
grpc_stream_compression_method method) {
- grpc_stream_compression_context *ctx =
- gpr_zalloc(sizeof(grpc_stream_compression_context));
- int r;
- if (ctx == NULL) {
- return NULL;
- }
- if (method == GRPC_STREAM_COMPRESSION_DECOMPRESS) {
- r = inflateInit2(&ctx->zs, 0x1F);
- ctx->flate = inflate;
- } else {
- r = deflateInit2(&ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
- Z_DEFAULT_STRATEGY);
- ctx->flate = deflate;
- }
- if (r != Z_OK) {
- gpr_free(ctx);
- return NULL;
+ switch (method) {
+ case GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS:
+ case GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS:
+ return grpc_stream_compression_identity_vtable.context_create(method);
+ case GRPC_STREAM_COMPRESSION_GZIP_COMPRESS:
+ case GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS:
+ return grpc_stream_compression_gzip_vtable.context_create(method);
+ default:
+ gpr_log(GPR_ERROR, "Unknown stream compression method: %d", method);
+ return NULL;
}
-
- return ctx;
}
void grpc_stream_compression_context_destroy(
grpc_stream_compression_context *ctx) {
- if (ctx->flate == inflate) {
- inflateEnd(&ctx->zs);
+ ctx->vtable->context_destroy(ctx);
+}
+
+int grpc_stream_compression_method_parse(
+ grpc_slice value, bool is_compress,
+ grpc_stream_compression_method *method) {
+ if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) {
+ *method = is_compress ? GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS
+ : GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
+ return 1;
+ } else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) {
+ *method = is_compress ? GRPC_STREAM_COMPRESSION_GZIP_COMPRESS
+ : GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS;
+ return 1;
} else {
- deflateEnd(&ctx->zs);
+ return 0;
}
- gpr_free(ctx);
}
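
After this refactor, callers no longer pick gzip or identity directly: they
parse a metadata value into a method and let context_create dispatch through
the matching vtable. A sketch, assuming the internal header is available; the
helper name context_for is invented here:

    #include <stdbool.h>
    #include <stddef.h>

    #include "src/core/lib/compression/stream_compression.h"

    static grpc_stream_compression_context *context_for(grpc_slice encoding,
                                                        bool is_compress) {
      grpc_stream_compression_method method;
      if (!grpc_stream_compression_method_parse(encoding, is_compress,
                                                &method)) {
        return NULL; /* unrecognized content-encoding value */
      }
      /* Dispatches to the gzip or identity vtable's context_create. */
      return grpc_stream_compression_context_create(method);
    }
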
diff --git a/src/core/lib/compression/stream_compression.h b/src/core/lib/compression/stream_compression.h
index 844dff81a3..6d073280fa 100644
--- a/src/core/lib/compression/stream_compression.h
+++ b/src/core/lib/compression/stream_compression.h
@@ -24,15 +24,20 @@
#include <grpc/slice_buffer.h>
#include <zlib.h>
+#include "src/core/lib/transport/static_metadata.h"
+
+typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable;
+
/* Stream compression/decompression context */
typedef struct grpc_stream_compression_context {
- z_stream zs;
- int (*flate)(z_stream *zs, int flush);
+ const grpc_stream_compression_vtable *vtable;
} grpc_stream_compression_context;
typedef enum grpc_stream_compression_method {
- GRPC_STREAM_COMPRESSION_COMPRESS = 0,
- GRPC_STREAM_COMPRESSION_DECOMPRESS,
+ GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS = 0,
+ GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS,
+ GRPC_STREAM_COMPRESSION_GZIP_COMPRESS,
+ GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS,
GRPC_STREAM_COMPRESSION_METHOD_COUNT
} grpc_stream_compression_method;
@@ -43,6 +48,19 @@ typedef enum grpc_stream_compression_flush {
GRPC_STREAM_COMPRESSION_FLUSH_COUNT
} grpc_stream_compression_flush;
+struct grpc_stream_compression_vtable {
+ bool (*compress)(grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
+ grpc_slice_buffer *out, size_t *output_size,
+ size_t max_output_size, grpc_stream_compression_flush flush);
+ bool (*decompress)(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in, grpc_slice_buffer *out,
+ size_t *output_size, size_t max_output_size,
+ bool *end_of_context);
+ grpc_stream_compression_context *(*context_create)(
+ grpc_stream_compression_method method);
+ void (*context_destroy)(grpc_stream_compression_context *ctx);
+};
+
/**
* Compress bytes provided in \a in with a given context, with an optional flush
* at the end of compression. Emits at most \a max_output_size compressed bytes
@@ -87,4 +105,10 @@ grpc_stream_compression_context *grpc_stream_compression_context_create(
void grpc_stream_compression_context_destroy(
grpc_stream_compression_context *ctx);
+/**
+ * Parse stream compression method based on algorithm name
+ */
+int grpc_stream_compression_method_parse(
+ grpc_slice value, bool is_compress, grpc_stream_compression_method *method);
+
#endif
diff --git a/src/core/lib/compression/stream_compression_gzip.c b/src/core/lib/compression/stream_compression_gzip.c
new file mode 100644
index 0000000000..abcbdb3a91
--- /dev/null
+++ b/src/core/lib/compression/stream_compression_gzip.c
@@ -0,0 +1,228 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/compression/stream_compression_gzip.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/slice/slice_internal.h"
+
+#define OUTPUT_BLOCK_SIZE (1024)
+
+typedef struct grpc_stream_compression_context_gzip {
+ grpc_stream_compression_context base;
+
+ z_stream zs;
+ int (*flate)(z_stream *zs, int flush);
+} grpc_stream_compression_context_gzip;
+
+static bool gzip_flate(grpc_stream_compression_context_gzip *ctx,
+ grpc_slice_buffer *in, grpc_slice_buffer *out,
+ size_t *output_size, size_t max_output_size, int flush,
+ bool *end_of_context) {
+ GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH);
+ /* Full flush is not allowed when inflating. */
+ GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
+
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ int r;
+ bool eoc = false;
+ size_t original_max_output_size = max_output_size;
+ while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) {
+ size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size
+ : OUTPUT_BLOCK_SIZE;
+ grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size);
+ ctx->zs.avail_out = (uInt)slice_size;
+ ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out);
+ while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) {
+ grpc_slice slice = grpc_slice_buffer_take_first(in);
+ ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice);
+ ctx->zs.next_in = GRPC_SLICE_START_PTR(slice);
+ r = ctx->flate(&ctx->zs, Z_NO_FLUSH);
+ if (r < 0 && r != Z_BUF_ERROR) {
+ gpr_log(GPR_ERROR, "zlib error (%d)", r);
+ grpc_slice_unref_internal(&exec_ctx, slice_out);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return false;
+ } else if (r == Z_STREAM_END && ctx->flate == inflate) {
+ eoc = true;
+ }
+ if (ctx->zs.avail_in > 0) {
+ grpc_slice_buffer_undo_take_first(
+ in,
+ grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in,
+ GRPC_SLICE_LENGTH(slice)));
+ }
+ grpc_slice_unref_internal(&exec_ctx, slice);
+ }
+ if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) {
+ GPR_ASSERT(in->length == 0);
+ r = ctx->flate(&ctx->zs, flush);
+ if (flush == Z_SYNC_FLUSH) {
+ switch (r) {
+ case Z_OK:
+ /* Maybe flush is not complete; just made some partial progress. */
+ if (ctx->zs.avail_out > 0) {
+ flush = 0;
+ }
+ break;
+ case Z_BUF_ERROR:
+ case Z_STREAM_END:
+ flush = 0;
+ break;
+ default:
+ gpr_log(GPR_ERROR, "zlib error (%d)", r);
+ grpc_slice_unref_internal(&exec_ctx, slice_out);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return false;
+ }
+ } else if (flush == Z_FINISH) {
+ switch (r) {
+ case Z_OK:
+ case Z_BUF_ERROR:
+ /* Wait for the next loop to assign additional output space. */
+ GPR_ASSERT(ctx->zs.avail_out == 0);
+ break;
+ case Z_STREAM_END:
+ flush = 0;
+ break;
+ default:
+ gpr_log(GPR_ERROR, "zlib error (%d)", r);
+ grpc_slice_unref_internal(&exec_ctx, slice_out);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return false;
+ }
+ }
+ }
+
+ if (ctx->zs.avail_out == 0) {
+ grpc_slice_buffer_add(out, slice_out);
+ } else if (ctx->zs.avail_out < slice_size) {
+ slice_out.data.refcounted.length -= ctx->zs.avail_out;
+ grpc_slice_buffer_add(out, slice_out);
+ } else {
+ grpc_slice_unref_internal(&exec_ctx, slice_out);
+ }
+ max_output_size -= (slice_size - ctx->zs.avail_out);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ if (end_of_context) {
+ *end_of_context = eoc;
+ }
+ if (output_size) {
+ *output_size = original_max_output_size - max_output_size;
+ }
+ return true;
+}
+
+static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size,
+ grpc_stream_compression_flush flush) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)ctx;
+ GPR_ASSERT(gzip_ctx->flate == deflate);
+ int gzip_flush;
+ switch (flush) {
+ case GRPC_STREAM_COMPRESSION_FLUSH_NONE:
+ gzip_flush = 0;
+ break;
+ case GRPC_STREAM_COMPRESSION_FLUSH_SYNC:
+ gzip_flush = Z_SYNC_FLUSH;
+ break;
+ case GRPC_STREAM_COMPRESSION_FLUSH_FINISH:
+ gzip_flush = Z_FINISH;
+ break;
+ default:
+ gzip_flush = 0;
+ }
+ return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, gzip_flush,
+ NULL);
+}
+
+static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size,
+ bool *end_of_context) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)ctx;
+ GPR_ASSERT(gzip_ctx->flate == inflate);
+ return gzip_flate(gzip_ctx, in, out, output_size, max_output_size,
+ Z_SYNC_FLUSH, end_of_context);
+}
+
+static grpc_stream_compression_context *
+grpc_stream_compression_context_create_gzip(
+ grpc_stream_compression_method method) {
+ GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS ||
+ method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)gpr_zalloc(
+ sizeof(grpc_stream_compression_context_gzip));
+ int r;
+ if (gzip_ctx == NULL) {
+ return NULL;
+ }
+ if (method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS) {
+ r = inflateInit2(&gzip_ctx->zs, 0x1F);
+ gzip_ctx->flate = inflate;
+ } else {
+ r = deflateInit2(&gzip_ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
+ Z_DEFAULT_STRATEGY);
+ gzip_ctx->flate = deflate;
+ }
+ if (r != Z_OK) {
+ gpr_free(gzip_ctx);
+ return NULL;
+ }
+
+ gzip_ctx->base.vtable = &grpc_stream_compression_gzip_vtable;
+ return (grpc_stream_compression_context *)gzip_ctx;
+}
+
+static void grpc_stream_compression_context_destroy_gzip(
+ grpc_stream_compression_context *ctx) {
+ if (ctx == NULL) {
+ return;
+ }
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)ctx;
+ if (gzip_ctx->flate == inflate) {
+ inflateEnd(&gzip_ctx->zs);
+ } else {
+ deflateEnd(&gzip_ctx->zs);
+ }
+ gpr_free(ctx);
+}
+
+const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable = {
+ .compress = grpc_stream_compress_gzip,
+ .decompress = grpc_stream_decompress_gzip,
+ .context_create = grpc_stream_compression_context_create_gzip,
+ .context_destroy = grpc_stream_compression_context_destroy_gzip};
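
A round-trip sketch against the gzip vtable: compress with a FINISH flush,
then decompress the result. Error handling is minimal, output_size is left
NULL (the implementation tolerates that), and the slice buffers are assumed
to be caller-initialized:

    #include <stdbool.h>
    #include <stddef.h>

    #include "src/core/lib/compression/stream_compression.h"

    static bool gzip_round_trip(grpc_slice_buffer *in, grpc_slice_buffer *out) {
      grpc_slice_buffer compressed;
      grpc_slice_buffer_init(&compressed);
      grpc_stream_compression_context *c =
          grpc_stream_compression_context_create(
              GRPC_STREAM_COMPRESSION_GZIP_COMPRESS);
      bool ok = grpc_stream_compress(c, in, &compressed, NULL /* output_size */,
                                     ~(size_t)0 /* max_output_size */,
                                     GRPC_STREAM_COMPRESSION_FLUSH_FINISH);
      grpc_stream_compression_context_destroy(c);
      grpc_stream_compression_context *d =
          grpc_stream_compression_context_create(
              GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
      bool end_of_context = false;
      ok = ok && grpc_stream_decompress(d, &compressed, out, NULL, ~(size_t)0,
                                        &end_of_context);
      grpc_stream_compression_context_destroy(d);
      grpc_slice_buffer_destroy(&compressed);
      return ok;
    }
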
diff --git a/src/core/lib/support/thd_internal.h b/src/core/lib/compression/stream_compression_gzip.h
index cc468c7846..7cf49a0de9 100644
--- a/src/core/lib/support/thd_internal.h
+++ b/src/core/lib/compression/stream_compression_gzip.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,9 +16,11 @@
*
*/
-#ifndef GRPC_CORE_LIB_SUPPORT_THD_INTERNAL_H
-#define GRPC_CORE_LIB_SUPPORT_THD_INTERNAL_H
+#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
+#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
-/* Internal interfaces between modules within the gpr support library. */
+#include "src/core/lib/compression/stream_compression.h"
-#endif /* GRPC_CORE_LIB_SUPPORT_THD_INTERNAL_H */
+extern const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable;
+
+#endif
diff --git a/src/core/lib/compression/stream_compression_identity.c b/src/core/lib/compression/stream_compression_identity.c
new file mode 100644
index 0000000000..3dfcf53b85
--- /dev/null
+++ b/src/core/lib/compression/stream_compression_identity.c
@@ -0,0 +1,94 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/compression/stream_compression_identity.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/slice/slice_internal.h"
+
+#define OUTPUT_BLOCK_SIZE (1024)
+
+/* Singleton context used for all identity streams. */
+static grpc_stream_compression_context identity_ctx = {
+ .vtable = &grpc_stream_compression_identity_vtable};
+
+static void grpc_stream_compression_pass_through(grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size) {
+ if (max_output_size >= in->length) {
+ if (output_size) {
+ *output_size = in->length;
+ }
+ grpc_slice_buffer_move_into(in, out);
+ } else {
+ if (output_size) {
+ *output_size = max_output_size;
+ }
+ grpc_slice_buffer_move_first(in, max_output_size, out);
+ }
+}
+
+static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size,
+ grpc_stream_compression_flush flush) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
+ return true;
+}
+
+static bool grpc_stream_decompress_identity(
+ grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
+ grpc_slice_buffer *out, size_t *output_size, size_t max_output_size,
+ bool *end_of_context) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
+ if (end_of_context) {
+ *end_of_context = false;
+ }
+ return true;
+}
+
+static grpc_stream_compression_context *
+grpc_stream_compression_context_create_identity(
+ grpc_stream_compression_method method) {
+ GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS ||
+ method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS);
+  /* No per-stream state is needed; hand back the shared singleton context. */
+ return (grpc_stream_compression_context *)&identity_ctx;
+}
+
+static void grpc_stream_compression_context_destroy_identity(
+ grpc_stream_compression_context *ctx) {
+ return;
+}
+
+const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable = {
+ .compress = grpc_stream_compress_identity,
+ .decompress = grpc_stream_decompress_identity,
+ .context_create = grpc_stream_compression_context_create_identity,
+ .context_destroy = grpc_stream_compression_context_destroy_identity};
diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h b/src/core/lib/compression/stream_compression_identity.h
index 2ca68cc9d1..41926e949e 100644
--- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h
+++ b/src/core/lib/compression/stream_compression_identity.h
@@ -16,13 +16,12 @@
*
*/
-#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H
-#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H
+#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
+#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/port.h"
+#include "src/core/lib/compression/stream_compression.h"
-const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
- bool requested_explicitly);
+extern const grpc_stream_compression_vtable
+ grpc_stream_compression_identity_vtable;
-#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H */
+#endif
diff --git a/src/core/lib/debug/stats.c b/src/core/lib/debug/stats.c
new file mode 100644
index 0000000000..4096384dd9
--- /dev/null
+++ b/src/core/lib/debug/stats.c
@@ -0,0 +1,174 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/debug/stats.h"
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/support/string.h"
+
+grpc_stats_data *grpc_stats_per_cpu_storage = NULL;
+static size_t g_num_cores;
+
+void grpc_stats_init(void) {
+ g_num_cores = GPR_MAX(1, gpr_cpu_num_cores());
+ grpc_stats_per_cpu_storage =
+ (grpc_stats_data *)gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
+}
+
+void grpc_stats_shutdown(void) { gpr_free(grpc_stats_per_cpu_storage); }
+
+void grpc_stats_collect(grpc_stats_data *output) {
+ memset(output, 0, sizeof(*output));
+ for (size_t core = 0; core < g_num_cores; core++) {
+ for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
+ output->counters[i] += gpr_atm_no_barrier_load(
+ &grpc_stats_per_cpu_storage[core].counters[i]);
+ }
+ for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_BUCKETS; i++) {
+ output->histograms[i] += gpr_atm_no_barrier_load(
+ &grpc_stats_per_cpu_storage[core].histograms[i]);
+ }
+ }
+}
+
+void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
+ grpc_stats_data *c) {
+ for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
+ c->counters[i] = b->counters[i] - a->counters[i];
+ }
+ for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_BUCKETS; i++) {
+ c->histograms[i] = b->histograms[i] - a->histograms[i];
+ }
+}
+
+int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value,
+ const int *table, int table_size) {
+ GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx);
+ const int *const start = table;
+ while (table_size > 0) {
+ int step = table_size / 2;
+ const int *it = table + step;
+ if (value >= *it) {
+ table = it + 1;
+ table_size -= step + 1;
+ } else {
+ table_size = step;
+ }
+ }
+ return (int)(table - start) - 1;
+}
+
+size_t grpc_stats_histo_count(const grpc_stats_data *stats,
+ grpc_stats_histograms histogram) {
+ size_t sum = 0;
+ for (int i = 0; i < grpc_stats_histo_buckets[histogram]; i++) {
+ sum += (size_t)stats->histograms[grpc_stats_histo_start[histogram] + i];
+ }
+ return sum;
+}
+
+static double threshold_for_count_below(const gpr_atm *bucket_counts,
+ const int *bucket_boundaries,
+ int num_buckets, double count_below) {
+ double count_so_far;
+ double lower_bound;
+ double upper_bound;
+ int lower_idx;
+ int upper_idx;
+
+ /* find the lowest bucket that gets us above count_below */
+ count_so_far = 0.0;
+ for (lower_idx = 0; lower_idx < num_buckets; lower_idx++) {
+ count_so_far += (double)bucket_counts[lower_idx];
+ if (count_so_far >= count_below) {
+ break;
+ }
+ }
+ if (count_so_far == count_below) {
+ /* this bucket hits the threshold exactly... we should be midway through
+ any run of zero values following the bucket */
+ for (upper_idx = lower_idx + 1; upper_idx < num_buckets; upper_idx++) {
+ if (bucket_counts[upper_idx]) {
+ break;
+ }
+ }
+ return (bucket_boundaries[lower_idx] + bucket_boundaries[upper_idx]) / 2.0;
+ } else {
+ /* treat values as uniform throughout the bucket, and find where this value
+ should lie */
+ lower_bound = bucket_boundaries[lower_idx];
+ upper_bound = bucket_boundaries[lower_idx + 1];
+ return upper_bound -
+ (upper_bound - lower_bound) * (count_so_far - count_below) /
+ (double)bucket_counts[lower_idx];
+ }
+}
+
+double grpc_stats_histo_percentile(const grpc_stats_data *stats,
+ grpc_stats_histograms histogram,
+ double percentile) {
+ size_t count = grpc_stats_histo_count(stats, histogram);
+ if (count == 0) return 0.0;
+ return threshold_for_count_below(
+ stats->histograms + grpc_stats_histo_start[histogram],
+ grpc_stats_histo_bucket_boundaries[histogram],
+ grpc_stats_histo_buckets[histogram], (double)count * percentile / 100.0);
+}
+
+char *grpc_stats_data_as_json(const grpc_stats_data *data) {
+ gpr_strvec v;
+ char *tmp;
+ bool is_first = true;
+ gpr_strvec_init(&v);
+ gpr_strvec_add(&v, gpr_strdup("{"));
+ for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
+ gpr_asprintf(&tmp, "%s\"%s\": %" PRIdPTR, is_first ? "" : ", ",
+ grpc_stats_counter_name[i], data->counters[i]);
+ gpr_strvec_add(&v, tmp);
+ is_first = false;
+ }
+ for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
+ gpr_asprintf(&tmp, "%s\"%s\": [", is_first ? "" : ", ",
+ grpc_stats_histogram_name[i]);
+ gpr_strvec_add(&v, tmp);
+ for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
+ gpr_asprintf(&tmp, "%s%" PRIdPTR, j == 0 ? "" : ",",
+ data->histograms[grpc_stats_histo_start[i] + j]);
+ gpr_strvec_add(&v, tmp);
+ }
+ gpr_asprintf(&tmp, "], \"%s_bkt\": [", grpc_stats_histogram_name[i]);
+ gpr_strvec_add(&v, tmp);
+ for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
+ gpr_asprintf(&tmp, "%s%d", j == 0 ? "" : ",",
+ grpc_stats_histo_bucket_boundaries[i][j]);
+ gpr_strvec_add(&v, tmp);
+ }
+ gpr_strvec_add(&v, gpr_strdup("]"));
+ is_first = false;
+ }
+ gpr_strvec_add(&v, gpr_strdup("}"));
+ tmp = gpr_strvec_flatten(&v, NULL);
+ gpr_strvec_destroy(&v);
+ return tmp;
+}
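
Typical use of the snapshot API above: collect once before and once after a
workload, diff the two (grpc_stats_diff(b, a, c) computes c = b - a), and
render the delta as JSON. The workload placeholder below is illustrative; the
returned string is freed by the caller with gpr_free:

    #include "src/core/lib/debug/stats.h"

    static char *stats_delta_json(void) {
      grpc_stats_data before, after, delta;
      grpc_stats_collect(&before);
      /* ... run some workload of interest ... */
      grpc_stats_collect(&after);
      grpc_stats_diff(&after, &before, &delta); /* delta = after - before */
      return grpc_stats_data_as_json(&delta);
    }
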
diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h
new file mode 100644
index 0000000000..09d190d488
--- /dev/null
+++ b/src/core/lib/debug/stats.h
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_DEBUG_STATS_H
+#define GRPC_CORE_LIB_DEBUG_STATS_H
+
+#include <grpc/support/atm.h>
+#include "src/core/lib/debug/stats_data.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+typedef struct grpc_stats_data {
+ gpr_atm counters[GRPC_STATS_COUNTER_COUNT];
+ gpr_atm histograms[GRPC_STATS_HISTOGRAM_BUCKETS];
+} grpc_stats_data;
+
+extern grpc_stats_data *grpc_stats_per_cpu_storage;
+
+#define GRPC_THREAD_STATS_DATA(exec_ctx) \
+ (&grpc_stats_per_cpu_storage[(exec_ctx)->starting_cpu])
+
+#define GRPC_STATS_INC_COUNTER(exec_ctx, ctr) \
+ (gpr_atm_no_barrier_fetch_add( \
+ &GRPC_THREAD_STATS_DATA((exec_ctx))->counters[(ctr)], 1))
+
+#define GRPC_STATS_INC_HISTOGRAM(exec_ctx, histogram, index) \
+ (gpr_atm_no_barrier_fetch_add( \
+ &GRPC_THREAD_STATS_DATA((exec_ctx)) \
+ ->histograms[histogram##_FIRST_SLOT + (index)], \
+ 1))
+
+void grpc_stats_init(void);
+void grpc_stats_shutdown(void);
+void grpc_stats_collect(grpc_stats_data *output);
+// c = b-a
+void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
+ grpc_stats_data *c);
+char *grpc_stats_data_as_json(const grpc_stats_data *data);
+int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value,
+ const int *table, int table_size);
+double grpc_stats_histo_percentile(const grpc_stats_data *data,
+ grpc_stats_histograms histogram,
+ double percentile);
+size_t grpc_stats_histo_count(const grpc_stats_data *data,
+ grpc_stats_histograms histogram);
+
+#endif
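
At a call site, a counter increment is a single relaxed atomic add into the
shard owned by the current exec_ctx, per GRPC_THREAD_STATS_DATA; readers sum
the shards lazily via grpc_stats_collect. A sketch of such a call site; the
enum value GRPC_STATS_COUNTER_SYSCALL_POLL follows the naming the generated
stats_data.h implies but is not shown in this diff, so treat it as an
assumption:

    #include "src/core/lib/debug/stats.h"

    static void count_poll(grpc_exec_ctx *exec_ctx) {
      /* Relaxed fetch-add on this thread's per-CPU shard. */
      GRPC_STATS_INC_COUNTER(exec_ctx, GRPC_STATS_COUNTER_SYSCALL_POLL);
    }
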
diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c
new file mode 100644
index 0000000000..fb6055f795
--- /dev/null
+++ b/src/core/lib/debug/stats_data.c
@@ -0,0 +1,735 @@
+/*
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Automatically generated by tools/codegen/core/gen_stats_data.py
+ */
+
+#include "src/core/lib/debug/stats_data.h"
+#include <grpc/support/useful.h>
+#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
+ "client_calls_created",
+ "server_calls_created",
+ "cqs_created",
+ "client_channels_created",
+ "client_subchannels_created",
+ "server_channels_created",
+ "syscall_poll",
+ "syscall_wait",
+ "pollset_kick",
+ "pollset_kicked_without_poller",
+ "pollset_kicked_again",
+ "pollset_kick_wakeup_fd",
+ "pollset_kick_wakeup_cv",
+ "pollset_kick_own_thread",
+ "histogram_slow_lookups",
+ "syscall_write",
+ "syscall_read",
+ "tcp_backup_pollers_created",
+ "tcp_backup_poller_polls",
+ "http2_op_batches",
+ "http2_op_cancel",
+ "http2_op_send_initial_metadata",
+ "http2_op_send_message",
+ "http2_op_send_trailing_metadata",
+ "http2_op_recv_initial_metadata",
+ "http2_op_recv_message",
+ "http2_op_recv_trailing_metadata",
+ "http2_settings_writes",
+ "http2_pings_sent",
+ "http2_writes_begun",
+ "http2_writes_offloaded",
+ "http2_writes_continued",
+ "http2_partial_writes",
+ "http2_initiate_write_due_to_initial_write",
+ "http2_initiate_write_due_to_start_new_stream",
+ "http2_initiate_write_due_to_send_message",
+ "http2_initiate_write_due_to_send_initial_metadata",
+ "http2_initiate_write_due_to_send_trailing_metadata",
+ "http2_initiate_write_due_to_retry_send_ping",
+ "http2_initiate_write_due_to_continue_pings",
+ "http2_initiate_write_due_to_goaway_sent",
+ "http2_initiate_write_due_to_rst_stream",
+ "http2_initiate_write_due_to_close_from_api",
+ "http2_initiate_write_due_to_stream_flow_control",
+ "http2_initiate_write_due_to_transport_flow_control",
+ "http2_initiate_write_due_to_send_settings",
+ "http2_initiate_write_due_to_bdp_estimator_ping",
+ "http2_initiate_write_due_to_flow_control_unstalled_by_setting",
+ "http2_initiate_write_due_to_flow_control_unstalled_by_update",
+ "http2_initiate_write_due_to_application_ping",
+ "http2_initiate_write_due_to_keepalive_ping",
+ "http2_initiate_write_due_to_transport_flow_control_unstalled",
+ "http2_initiate_write_due_to_ping_response",
+ "http2_initiate_write_due_to_force_rst_stream",
+ "hpack_recv_indexed",
+ "hpack_recv_lithdr_incidx",
+ "hpack_recv_lithdr_incidx_v",
+ "hpack_recv_lithdr_notidx",
+ "hpack_recv_lithdr_notidx_v",
+ "hpack_recv_lithdr_nvridx",
+ "hpack_recv_lithdr_nvridx_v",
+ "hpack_recv_uncompressed",
+ "hpack_recv_huffman",
+ "hpack_recv_binary",
+ "hpack_recv_binary_base64",
+ "hpack_send_indexed",
+ "hpack_send_lithdr_incidx",
+ "hpack_send_lithdr_incidx_v",
+ "hpack_send_lithdr_notidx",
+ "hpack_send_lithdr_notidx_v",
+ "hpack_send_lithdr_nvridx",
+ "hpack_send_lithdr_nvridx_v",
+ "hpack_send_uncompressed",
+ "hpack_send_huffman",
+ "hpack_send_binary",
+ "hpack_send_binary_base64",
+ "combiner_locks_initiated",
+ "combiner_locks_scheduled_items",
+ "combiner_locks_scheduled_final_items",
+ "combiner_locks_offloaded",
+ "executor_scheduled_short_items",
+ "executor_scheduled_long_items",
+ "executor_scheduled_to_self",
+ "executor_wakeup_initiated",
+ "executor_queue_drained",
+ "executor_push_retries",
+ "executor_threads_created",
+ "executor_threads_used",
+ "server_requested_calls",
+ "server_slowpath_requests_queued",
+};
+const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
+ "Number of client side calls created by this process",
+ "Number of server side calls created by this process",
+ "Number of completion queues created", "Number of client channels created",
+ "Number of client subchannels created", "Number of server channels created",
+ "Number of polling syscalls (epoll_wait, poll, etc) made by this process",
+ "Number of sleeping syscalls made by this process",
+ "How many polling wakeups were performed by the process (only valid for "
+ "epoll1 right now)",
+ "How many times was a polling wakeup requested without an active poller "
+ "(only valid for epoll1 right now)",
+ "How many times was the same polling worker awoken repeatedly before "
+ "waking up (only valid for epoll1 right now)",
+ "How many times was an eventfd used as the wakeup vector for a polling "
+ "wakeup (only valid for epoll1 right now)",
+ "How many times was a condition variable used as the wakeup vector for a "
+ "polling wakeup (only valid for epoll1 right now)",
+ "How many times could a polling wakeup be satisfied by keeping the waking "
+ "thread awake? (only valid for epoll1 right now)",
+ "Number of times histogram increments went through the slow (binary "
+ "search) path",
+ "Number of write syscalls (or equivalent - eg sendmsg) made by this "
+ "process",
+ "Number of read syscalls (or equivalent - eg recvmsg) made by this process",
+ "Number of times a backup poller has been created (this can be expensive)",
+ "Number of polls performed on the backup poller",
+ "Number of batches received by HTTP2 transport",
+ "Number of cancelations received by HTTP2 transport",
+ "Number of batches containing send initial metadata",
+ "Number of batches containing send message",
+ "Number of batches containing send trailing metadata",
+ "Number of batches containing receive initial metadata",
+ "Number of batches containing receive message",
+ "Number of batches containing receive trailing metadata",
+ "Number of settings frames sent", "Number of HTTP2 pings sent by process",
+ "Number of HTTP2 writes initiated",
+ "Number of HTTP2 writes offloaded to the executor from application threads",
+ "Number of HTTP2 writes that finished seeing more data needed to be "
+ "written",
+ "Number of HTTP2 writes that were made knowing there was still more data "
+ "to be written (we cap maximum write size to syscall_write)",
+ "Number of HTTP2 writes initiated due to 'initial_write'",
+ "Number of HTTP2 writes initiated due to 'start_new_stream'",
+ "Number of HTTP2 writes initiated due to 'send_message'",
+ "Number of HTTP2 writes initiated due to 'send_initial_metadata'",
+ "Number of HTTP2 writes initiated due to 'send_trailing_metadata'",
+ "Number of HTTP2 writes initiated due to 'retry_send_ping'",
+ "Number of HTTP2 writes initiated due to 'continue_pings'",
+ "Number of HTTP2 writes initiated due to 'goaway_sent'",
+ "Number of HTTP2 writes initiated due to 'rst_stream'",
+ "Number of HTTP2 writes initiated due to 'close_from_api'",
+ "Number of HTTP2 writes initiated due to 'stream_flow_control'",
+ "Number of HTTP2 writes initiated due to 'transport_flow_control'",
+ "Number of HTTP2 writes initiated due to 'send_settings'",
+ "Number of HTTP2 writes initiated due to 'bdp_estimator_ping'",
+ "Number of HTTP2 writes initiated due to "
+ "'flow_control_unstalled_by_setting'",
+ "Number of HTTP2 writes initiated due to "
+ "'flow_control_unstalled_by_update'",
+ "Number of HTTP2 writes initiated due to 'application_ping'",
+ "Number of HTTP2 writes initiated due to 'keepalive_ping'",
+ "Number of HTTP2 writes initiated due to "
+ "'transport_flow_control_unstalled'",
+ "Number of HTTP2 writes initiated due to 'ping_response'",
+ "Number of HTTP2 writes initiated due to 'force_rst_stream'",
+ "Number of HPACK indexed fields received",
+ "Number of HPACK literal headers received with incremental indexing",
+ "Number of HPACK literal headers received with incremental indexing and "
+ "literal keys",
+ "Number of HPACK literal headers received with no indexing",
+ "Number of HPACK literal headers received with no indexing and literal "
+ "keys",
+ "Number of HPACK literal headers received with never-indexing",
+ "Number of HPACK literal headers received with never-indexing and literal "
+ "keys",
+ "Number of uncompressed strings received in metadata",
+ "Number of huffman encoded strings received in metadata",
+ "Number of binary strings received in metadata",
+ "Number of binary strings received encoded in base64 in metadata",
+ "Number of HPACK indexed fields sent",
+ "Number of HPACK literal headers sent with incremental indexing",
+ "Number of HPACK literal headers sent with incremental indexing and "
+ "literal keys",
+ "Number of HPACK literal headers sent with no indexing",
+ "Number of HPACK literal headers sent with no indexing and literal keys",
+ "Number of HPACK literal headers sent with never-indexing",
+ "Number of HPACK literal headers sent with never-indexing and literal keys",
+ "Number of uncompressed strings sent in metadata",
+ "Number of huffman encoded strings sent in metadata",
+ "Number of binary strings received in metadata",
+ "Number of binary strings received encoded in base64 in metadata",
+ "Number of combiner lock entries by process (first items queued to a "
+ "combiner)",
+ "Number of items scheduled against combiner locks",
+ "Number of final items scheduled against combiner locks",
+ "Number of combiner locks offloaded to different threads",
+ "Number of finite runtime closures scheduled against the executor (gRPC "
+ "thread pool)",
+ "Number of potentially infinite runtime closures scheduled against the "
+ "executor (gRPC thread pool)",
+ "Number of closures scheduled by the executor to the executor",
+ "Number of thread wakeups initiated within the executor",
+ "Number of times an executor queue was drained",
+ "Number of times we raced and were forced to retry pushing a closure to "
+ "the executor",
+ "Size of the backing thread pool for overflow gRPC Core work",
+ "How many executor threads actually got used",
+ "How many calls were requested (not necessarily received) by the server",
+ "How many times was the server slow path taken (indicates too few "
+ "outstanding requests)",
+};
+const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
+ "call_initial_size",
+ "poll_events_returned",
+ "tcp_write_size",
+ "tcp_write_iov_size",
+ "tcp_read_size",
+ "tcp_read_offer",
+ "tcp_read_offer_iov_size",
+ "http2_send_message_size",
+ "http2_send_initial_metadata_per_write",
+ "http2_send_message_per_write",
+ "http2_send_trailing_metadata_per_write",
+ "http2_send_flowctl_per_write",
+ "executor_closures_per_wakeup",
+ "server_cqs_checked",
+};
+const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
+ "Initial size of the grpc_call arena created at call start",
+ "How many events are called for each syscall_poll",
+ "Number of bytes offered to each syscall_write",
+ "Number of byte segments offered to each syscall_write",
+ "Number of bytes received by each syscall_read",
+ "Number of bytes offered to each syscall_read",
+ "Number of byte segments offered to each syscall_read",
+ "Size of messages received by HTTP2 transport",
+ "Number of streams initiated written per TCP write",
+ "Number of streams whose payload was written per TCP write",
+ "Number of streams terminated per TCP write",
+ "Number of flow control updates written per TCP write",
+ "Number of closures executed each time an executor wakes up",
+ "How many completion queues were checked looking for a CQ that had "
+ "requested the incoming call",
+};
+const int grpc_stats_table_0[65] = {
+ 0, 1, 2, 3, 4, 5, 7, 9, 11, 14,
+ 17, 21, 26, 32, 39, 47, 57, 68, 82, 98,
+ 117, 140, 167, 199, 238, 284, 339, 404, 482, 575,
+ 685, 816, 972, 1158, 1380, 1644, 1959, 2334, 2780, 3312,
+ 3945, 4699, 5597, 6667, 7941, 9459, 11267, 13420, 15984, 19038,
+ 22676, 27009, 32169, 38315, 45635, 54353, 64737, 77104, 91834, 109378,
+ 130273, 155159, 184799, 220100, 262144};
+const uint8_t grpc_stats_table_1[124] = {
+ 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6,
+ 7, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
+ 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 22, 23, 24,
+ 24, 25, 25, 26, 26, 26, 27, 27, 28, 29, 29, 30, 30, 30, 31, 31, 32, 33,
+ 33, 34, 34, 34, 35, 35, 36, 37, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+ 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50,
+ 51, 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58};
+const int grpc_stats_table_2[129] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 30,
+ 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60,
+ 63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 94, 98, 102, 106, 110,
+ 114, 118, 122, 126, 131, 136, 141, 146, 151, 156, 162, 168, 174, 180, 186,
+ 192, 199, 206, 213, 220, 228, 236, 244, 252, 260, 269, 278, 287, 297, 307,
+ 317, 327, 338, 349, 360, 372, 384, 396, 409, 422, 436, 450, 464, 479, 494,
+ 510, 526, 543, 560, 578, 596, 615, 634, 654, 674, 695, 717, 739, 762, 785,
+ 809, 834, 859, 885, 912, 939, 967, 996, 1024};
+const uint8_t grpc_stats_table_3[166] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16,
+ 17, 17, 18, 19, 19, 20, 21, 21, 22, 23, 23, 24, 25, 25, 26, 26, 27, 27, 28,
+ 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 39,
+ 40, 40, 41, 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51,
+ 51, 52, 52, 53, 53, 54, 54, 55, 56, 57, 58, 59, 59, 60, 61, 62, 63, 63, 64,
+ 65, 65, 66, 67, 67, 68, 69, 69, 70, 71, 71, 72, 72, 73, 73, 74, 75, 75, 76,
+ 76, 77, 78, 79, 79, 80, 81, 82, 83, 84, 85, 85, 86, 87, 88, 88, 89, 90, 90,
+ 91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97, 98, 98, 99};
+const int grpc_stats_table_4[65] = {
+ 0, 1, 2, 3, 4, 6, 8, 11,
+ 15, 20, 26, 34, 44, 57, 73, 94,
+ 121, 155, 199, 255, 327, 419, 537, 688,
+ 881, 1128, 1444, 1848, 2365, 3026, 3872, 4954,
+ 6338, 8108, 10373, 13270, 16976, 21717, 27782, 35541,
+ 45467, 58165, 74409, 95189, 121772, 155778, 199281, 254933,
+ 326126, 417200, 533707, 682750, 873414, 1117323, 1429345, 1828502,
+ 2339127, 2992348, 3827987, 4896985, 6264509, 8013925, 10251880, 13114801,
+ 16777216};
+const uint8_t grpc_stats_table_5[87] = {
+ 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11,
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23,
+ 24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36,
+ 36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 45, 46, 47, 48, 48,
+ 49, 50, 51, 51, 52, 53, 53, 54, 55, 56, 56, 57, 58, 58, 59};
+const int grpc_stats_table_6[65] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 43, 47,
+ 51, 56, 61, 66, 72, 78, 85, 92, 100, 109, 118, 128, 139,
+ 151, 164, 178, 193, 209, 226, 244, 264, 285, 308, 333, 359, 387,
+ 418, 451, 486, 524, 565, 609, 656, 707, 762, 821, 884, 952, 1024};
+const uint8_t grpc_stats_table_7[102] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23,
+ 23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
+ 32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+ 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
+const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
+const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
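+/* All of the increment helpers below share one fast-path trick: for
+   non-negative IEEE-754 doubles, the raw 64-bit pattern orders the same way
+   as the value itself. Each helper stores the sample into a double, shifts
+   the bit pattern right to form a coarse exponent/mantissa index, and maps
+   that index through one of the small uint8_t tables above to a candidate
+   bucket in the matching boundary table. The statement
+   bucket -= (_val.uint < _bkt.uint) then nudges the candidate down by one
+   when the sample falls below that bucket's lower boundary, and samples
+   outside the precomputed range take grpc_stats_histo_find_bucket_slow(), a
+   slow-path search over the boundary table. A minimal sketch of the idea,
+   with hypothetical names standing in for the generated constants (in
+   grpc_stats_inc_call_initial_size below: coarse_table is grpc_stats_table_1,
+   first_bound is 6, first_bound_bits is 4618441417868443648ull, the bit
+   pattern of the double 6.0, shift is 49, and boundaries is
+   grpc_stats_table_0):
+
+     static int fast_bucket_sketch(int value) {
+       union { double dbl; uint64_t bits; } v, lo;
+       v.dbl = value;  // the bit pattern is monotone for value >= 0
+       int bucket =
+           coarse_table[(v.bits - first_bound_bits) >> shift] + first_bound;
+       lo.dbl = boundaries[bucket];  // candidate bucket's lower boundary
+       return bucket - (v.bits < lo.bits);
+     }
+*/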
+void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 262144);
+ if (value < 6) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4651092515166879744ull) {
+ int bucket =
+ grpc_stats_table_1[((_val.uint - 4618441417868443648ull) >> 49)] + 6;
+ _bkt.dbl = grpc_stats_table_0[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_0, 64));
+}
+void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 29) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4642789003353915392ull) {
+ int bucket =
+ grpc_stats_table_3[((_val.uint - 4628855992006737920ull) >> 47)] + 29;
+ _bkt.dbl = grpc_stats_table_2[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_2, 128));
+}
+void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 16777216);
+ if (value < 5) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4683743612465315840ull) {
+ int bucket =
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 16777216);
+ if (value < 5) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4683743612465315840ull) {
+ int bucket =
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 16777216);
+ if (value < 5) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4683743612465315840ull) {
+ int bucket =
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
+ int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
+ int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 16777216);
+ if (value < 5) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4683743612465315840ull) {
+ int bucket =
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_http2_send_initial_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
+ 64));
+}
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+ int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
+ 64));
+}
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
+ int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
+ int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
+ /* Automatically generated by tools/codegen/core/gen_stats_data.py */
+ value = GPR_CLAMP(value, 0, 64);
+ if (value < 3) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4625196817309499392ull) {
+ int bucket =
+ grpc_stats_table_9[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
+ _bkt.dbl = grpc_stats_table_8[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_8, 8));
+}
+const int grpc_stats_histo_buckets[14] = {64, 128, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 8};
+const int grpc_stats_histo_start[14] = {0, 64, 192, 256, 320, 384, 448,
+ 512, 576, 640, 704, 768, 832, 896};
+const int *const grpc_stats_histo_bucket_boundaries[14] = {
+ grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
+ grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
+ grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
+ grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
+ grpc_stats_table_6, grpc_stats_table_8};
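+/* The dispatch table below pairs each grpc_stats_histograms enum value with
+   its typed helper above, so callers can increment a histogram chosen by
+   index at runtime (declared as grpc_stats_inc_histogram in stats_data.h). */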
+void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = {
+ grpc_stats_inc_call_initial_size,
+ grpc_stats_inc_poll_events_returned,
+ grpc_stats_inc_tcp_write_size,
+ grpc_stats_inc_tcp_write_iov_size,
+ grpc_stats_inc_tcp_read_size,
+ grpc_stats_inc_tcp_read_offer,
+ grpc_stats_inc_tcp_read_offer_iov_size,
+ grpc_stats_inc_http2_send_message_size,
+ grpc_stats_inc_http2_send_initial_metadata_per_write,
+ grpc_stats_inc_http2_send_message_per_write,
+ grpc_stats_inc_http2_send_trailing_metadata_per_write,
+ grpc_stats_inc_http2_send_flowctl_per_write,
+ grpc_stats_inc_executor_closures_per_wakeup,
+ grpc_stats_inc_server_cqs_checked};
diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h
new file mode 100644
index 0000000000..6c0ad30543
--- /dev/null
+++ b/src/core/lib/debug/stats_data.h
@@ -0,0 +1,484 @@
+/*
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Automatically generated by tools/codegen/core/gen_stats_data.py
+ */
+
+#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H
+#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H
+
+#include <inttypes.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+typedef enum {
+ GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED,
+ GRPC_STATS_COUNTER_SERVER_CALLS_CREATED,
+ GRPC_STATS_COUNTER_CQS_CREATED,
+ GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED,
+ GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED,
+ GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED,
+ GRPC_STATS_COUNTER_SYSCALL_POLL,
+ GRPC_STATS_COUNTER_SYSCALL_WAIT,
+ GRPC_STATS_COUNTER_POLLSET_KICK,
+ GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER,
+ GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN,
+ GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD,
+ GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV,
+ GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD,
+ GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
+ GRPC_STATS_COUNTER_SYSCALL_WRITE,
+ GRPC_STATS_COUNTER_SYSCALL_READ,
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED,
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS,
+ GRPC_STATS_COUNTER_HTTP2_OP_BATCHES,
+ GRPC_STATS_COUNTER_HTTP2_OP_CANCEL,
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE,
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE,
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES,
+ GRPC_STATS_COUNTER_HTTP2_PINGS_SENT,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED,
+ GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM,
+ GRPC_STATS_COUNTER_HPACK_RECV_INDEXED,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V,
+ GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED,
+ GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN,
+ GRPC_STATS_COUNTER_HPACK_RECV_BINARY,
+ GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64,
+ GRPC_STATS_COUNTER_HPACK_SEND_INDEXED,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V,
+ GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED,
+ GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN,
+ GRPC_STATS_COUNTER_HPACK_SEND_BINARY,
+ GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64,
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF,
+ GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
+ GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
+ GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
+ GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED,
+ GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED,
+ GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
+ GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
+ GRPC_STATS_COUNTER_COUNT
+} grpc_stats_counters;
+extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
+extern const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT];
+typedef enum {
+ GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
+ GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
+ GRPC_STATS_HISTOGRAM_COUNT
+} grpc_stats_histograms;
+extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
+extern const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT];
+typedef enum {
+ GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_FIRST_SLOT = 0,
+ GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_FIRST_SLOT = 64,
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_BUCKETS = 128,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 192,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 256,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 320,
+ GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 384,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 448,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 512,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 576,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 640,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 704,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 832,
+ GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 896,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
+ GRPC_STATS_HISTOGRAM_BUCKETS = 904
+} grpc_stats_histogram_constants;
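+/* The *_FIRST_SLOT/*_BUCKETS constants above describe a single flat array of
+   GRPC_STATS_HISTOGRAM_BUCKETS (904) bucket counters: histogram h owns slots
+   [FIRST_SLOT, FIRST_SLOT + BUCKETS). A minimal sketch of walking that
+   layout through the tables declared at the bottom of this header, assuming
+   some caller that can read per-bucket counts (no such accessor is declared
+   in this file):
+
+     for (int h = 0; h < GRPC_STATS_HISTOGRAM_COUNT; h++) {
+       int first = grpc_stats_histo_start[h];  // == *_FIRST_SLOT
+       int n = grpc_stats_histo_buckets[h];    // == *_BUCKETS
+       const int *bounds = grpc_stats_histo_bucket_boundaries[h];
+       // bucket i of histogram h counts values in [bounds[i], bounds[i+1])
+       // and lives at flat slot first + i
+     }
+*/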
+#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
+#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED)
+#define GRPC_STATS_INC_CQS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CQS_CREATED)
+#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED)
+#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED)
+#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED)
+#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL)
+#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT)
+#define GRPC_STATS_INC_POLLSET_KICK(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK)
+#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER)
+#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN)
+#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD)
+#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV)
+#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD)
+#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
+#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
+#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
+#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
+#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_CANCEL)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
+#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
+#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
+#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
+#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
+#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
+#define GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_INDEXED)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED)
+#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN)
+#define GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_BINARY)
+#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64)
+#define GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_INDEXED)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED)
+#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN)
+#define GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_BINARY)
+#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64)
+#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
+#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS)
+#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS)
+#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
+#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
+#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
+#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
+#define GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED)
+#define GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED)
+#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
+#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
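+/* Histogram wrappers: unlike the counter macros above, each of the macros
+   below takes a value, casts it to int, and forwards to a generated helper
+   in stats_data.c that chooses which bucket to increment. */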
+#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \
+ grpc_stats_inc_call_initial_size((exec_ctx), (int)(value))
+void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, value) \
+ grpc_stats_inc_poll_events_returned((exec_ctx), (int)(value))
+void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
+ grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \
+ grpc_stats_inc_tcp_write_iov_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \
+ grpc_stats_inc_tcp_read_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, value) \
+ grpc_stats_inc_tcp_read_offer((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, value) \
+ grpc_stats_inc_tcp_read_offer_iov_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_initial_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+ int x);
+#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx), \
+ (int)(value))
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
+ int x);
+#define GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, value) \
+ grpc_stats_inc_executor_closures_per_wakeup((exec_ctx), (int)(value))
+void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
+ int x);
+#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
+ grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
+extern const int grpc_stats_histo_buckets[14];
+extern const int grpc_stats_histo_start[14];
+extern const int *const grpc_stats_histo_bucket_boundaries[14];
+extern void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx,
+ int x);
+
+#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml
new file mode 100644
index 0000000000..de575f01c7
--- /dev/null
+++ b/src/core/lib/debug/stats_data.yaml
@@ -0,0 +1,280 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stats data declaration
+# use tools/codegen/core/gen_stats_data.py to turn this into stats_data.h
+
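+# Each entry below declares either a counter or a histogram. A counter takes
+# just a name and a doc string; a histogram additionally takes max (the
+# largest tracked value; the generated helpers clamp larger samples) and
+# buckets (how many roughly log-spaced buckets the generator lays out). A
+# hypothetical entry, shown for illustration only, would look like:
+#
+#   - histogram: example_latency_us
+#     max: 1048576
+#     buckets: 64
+#     doc: Hypothetical example; not one of the real declarations below
+#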
+# overall
+- counter: client_calls_created
+ doc: Number of client side calls created by this process
+- counter: server_calls_created
+ doc: Number of server side calls created by this process
+- histogram: call_initial_size
+ max: 262144
+ buckets: 64
+ doc: Initial size of the grpc_call arena created at call start
+- counter: cqs_created
+ doc: Number of completion queues created
+- counter: client_channels_created
+ doc: Number of client channels created
+- counter: client_subchannels_created
+ doc: Number of client subchannels created
+- counter: server_channels_created
+ doc: Number of server channels created
+# polling
+- counter: syscall_poll
+ doc: Number of polling syscalls (epoll_wait, poll, etc) made by this process
+- counter: syscall_wait
+ doc: Number of sleeping syscalls made by this process
+- histogram: poll_events_returned
+ max: 1024
+ buckets: 128
+ doc: How many events are returned by each syscall_poll
+- counter: pollset_kick
+ doc: How many polling wakeups were performed by the process
+ (only valid for epoll1 right now)
+- counter: pollset_kicked_without_poller
+ doc: How many times was a polling wakeup requested without an active poller
+ (only valid for epoll1 right now)
+- counter: pollset_kicked_again
+ doc: How many times was the same polling worker awoken repeatedly before
+ waking up
+ (only valid for epoll1 right now)
+- counter: pollset_kick_wakeup_fd
+ doc: How many times was an eventfd used as the wakeup vector for a polling
+ wakeup
+ (only valid for epoll1 right now)
+- counter: pollset_kick_wakeup_cv
+ doc: How many times was a condition variable used as the wakeup vector for a
+ polling wakeup
+ (only valid for epoll1 right now)
+- counter: pollset_kick_own_thread
+ doc: How many times could a polling wakeup be satisfied by keeping the waking
+ thread awake?
+ (only valid for epoll1 right now)
+# stats system
+- counter: histogram_slow_lookups
+ doc: Number of times histogram increments went through the slow
+ (binary search) path
+# tcp
+- counter: syscall_write
+ doc: Number of write syscalls (or equivalent - eg sendmsg) made by this process
+- counter: syscall_read
+ doc: Number of read syscalls (or equivalent - eg recvmsg) made by this process
+- histogram: tcp_write_size
+ max: 16777216 # 16 meg max write tracked
+ buckets: 64
+ doc: Number of bytes offered to each syscall_write
+- histogram: tcp_write_iov_size
+ max: 1024
+ buckets: 64
+ doc: Number of byte segments offered to each syscall_write
+- histogram: tcp_read_size
+ max: 16777216
+ buckets: 64
+ doc: Number of bytes received by each syscall_read
+- histogram: tcp_read_offer
+ max: 16777216
+ buckets: 64
+ doc: Number of bytes offered to each syscall_read
+- histogram: tcp_read_offer_iov_size
+ max: 1024
+ buckets: 64
+ doc: Number of byte segments offered to each syscall_read
+- counter: tcp_backup_pollers_created
+ doc: Number of times a backup poller has been created (this can be expensive)
+- counter: tcp_backup_poller_polls
+ doc: Number of polls performed on the backup poller
+# chttp2
+- counter: http2_op_batches
+ doc: Number of batches received by HTTP2 transport
+- counter: http2_op_cancel
+ doc: Number of cancelations received by HTTP2 transport
+- counter: http2_op_send_initial_metadata
+ doc: Number of batches containing send initial metadata
+- counter: http2_op_send_message
+ doc: Number of batches containing send message
+- counter: http2_op_send_trailing_metadata
+ doc: Number of batches containing send trailing metadata
+- counter: http2_op_recv_initial_metadata
+ doc: Number of batches containing receive initial metadata
+- counter: http2_op_recv_message
+ doc: Number of batches containing receive message
+- counter: http2_op_recv_trailing_metadata
+ doc: Number of batches containing receive trailing metadata
+- histogram: http2_send_message_size
+ max: 16777216
+ buckets: 64
+ doc: Size of messages sent by HTTP2 transport
+- histogram: http2_send_initial_metadata_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams whose initial metadata was written per TCP write
+- histogram: http2_send_message_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams whose payload was written per TCP write
+- histogram: http2_send_trailing_metadata_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams terminated per TCP write
+- histogram: http2_send_flowctl_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of flow control updates written per TCP write
+- counter: http2_settings_writes
+ doc: Number of settings frames sent
+- counter: http2_pings_sent
+ doc: Number of HTTP2 pings sent by process
+- counter: http2_writes_begun
+ doc: Number of HTTP2 writes initiated
+- counter: http2_writes_offloaded
+ doc: Number of HTTP2 writes offloaded to the executor from application threads
+- counter: http2_writes_continued
+ doc: Number of HTTP2 writes that finished and found more data still needed
+ to be written
+- counter: http2_partial_writes
+ doc: Number of HTTP2 writes that were made knowing there was still more data
+ to be written (we cap the maximum size of each syscall_write)
+- counter: http2_initiate_write_due_to_initial_write
+ doc: Number of HTTP2 writes initiated due to 'initial_write'
+- counter: http2_initiate_write_due_to_start_new_stream
+ doc: Number of HTTP2 writes initiated due to 'start_new_stream'
+- counter: http2_initiate_write_due_to_send_message
+ doc: Number of HTTP2 writes initiated due to 'send_message'
+- counter: http2_initiate_write_due_to_send_initial_metadata
+ doc: Number of HTTP2 writes initiated due to 'send_initial_metadata'
+- counter: http2_initiate_write_due_to_send_trailing_metadata
+ doc: Number of HTTP2 writes initiated due to 'send_trailing_metadata'
+- counter: http2_initiate_write_due_to_retry_send_ping
+ doc: Number of HTTP2 writes initiated due to 'retry_send_ping'
+- counter: http2_initiate_write_due_to_continue_pings
+ doc: Number of HTTP2 writes initiated due to 'continue_pings'
+- counter: http2_initiate_write_due_to_goaway_sent
+ doc: Number of HTTP2 writes initiated due to 'goaway_sent'
+- counter: http2_initiate_write_due_to_rst_stream
+ doc: Number of HTTP2 writes initiated due to 'rst_stream'
+- counter: http2_initiate_write_due_to_close_from_api
+ doc: Number of HTTP2 writes initiated due to 'close_from_api'
+- counter: http2_initiate_write_due_to_stream_flow_control
+ doc: Number of HTTP2 writes initiated due to 'stream_flow_control'
+- counter: http2_initiate_write_due_to_transport_flow_control
+ doc: Number of HTTP2 writes initiated due to 'transport_flow_control'
+- counter: http2_initiate_write_due_to_send_settings
+ doc: Number of HTTP2 writes initiated due to 'send_settings'
+- counter: http2_initiate_write_due_to_bdp_estimator_ping
+ doc: Number of HTTP2 writes initiated due to 'bdp_estimator_ping'
+- counter: http2_initiate_write_due_to_flow_control_unstalled_by_setting
+ doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_setting'
+- counter: http2_initiate_write_due_to_flow_control_unstalled_by_update
+ doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_update'
+- counter: http2_initiate_write_due_to_application_ping
+ doc: Number of HTTP2 writes initiated due to 'application_ping'
+- counter: http2_initiate_write_due_to_keepalive_ping
+ doc: Number of HTTP2 writes initiated due to 'keepalive_ping'
+- counter: http2_initiate_write_due_to_transport_flow_control_unstalled
+ doc: Number of HTTP2 writes initiated due to 'transport_flow_control_unstalled'
+- counter: http2_initiate_write_due_to_ping_response
+ doc: Number of HTTP2 writes initiated due to 'ping_response'
+- counter: http2_initiate_write_due_to_force_rst_stream
+ doc: Number of HTTP2 writes initiated due to 'force_rst_stream'
+- counter: hpack_recv_indexed
+ doc: Number of HPACK indexed fields received
+- counter: hpack_recv_lithdr_incidx
+ doc: Number of HPACK literal headers received with incremental indexing
+- counter: hpack_recv_lithdr_incidx_v
+ doc: Number of HPACK literal headers received with incremental indexing and literal keys
+- counter: hpack_recv_lithdr_notidx
+ doc: Number of HPACK literal headers received with no indexing
+- counter: hpack_recv_lithdr_notidx_v
+ doc: Number of HPACK literal headers received with no indexing and literal keys
+- counter: hpack_recv_lithdr_nvridx
+ doc: Number of HPACK literal headers received with never-indexing
+- counter: hpack_recv_lithdr_nvridx_v
+ doc: Number of HPACK literal headers received with never-indexing and literal keys
+- counter: hpack_recv_uncompressed
+ doc: Number of uncompressed strings received in metadata
+- counter: hpack_recv_huffman
+ doc: Number of Huffman-encoded strings received in metadata
+- counter: hpack_recv_binary
+ doc: Number of binary strings received in metadata
+- counter: hpack_recv_binary_base64
+ doc: Number of binary strings received encoded in base64 in metadata
+- counter: hpack_send_indexed
+ doc: Number of HPACK indexed fields sent
+- counter: hpack_send_lithdr_incidx
+ doc: Number of HPACK literal headers sent with incremental indexing
+- counter: hpack_send_lithdr_incidx_v
+ doc: Number of HPACK literal headers sent with incremental indexing and literal keys
+- counter: hpack_send_lithdr_notidx
+ doc: Number of HPACK literal headers sent with no indexing
+- counter: hpack_send_lithdr_notidx_v
+ doc: Number of HPACK literal headers sent with no indexing and literal keys
+- counter: hpack_send_lithdr_nvridx
+ doc: Number of HPACK literal headers sent with never-indexing
+- counter: hpack_send_lithdr_nvridx_v
+ doc: Number of HPACK literal headers sent with never-indexing and literal keys
+- counter: hpack_send_uncompressed
+ doc: Number of uncompressed strings sent in metadata
+- counter: hpack_send_huffman
+ doc: Number of Huffman-encoded strings sent in metadata
+- counter: hpack_send_binary
+ doc: Number of binary strings sent in metadata
+- counter: hpack_send_binary_base64
+ doc: Number of binary strings sent encoded in base64 in metadata
+# combiner locks
+- counter: combiner_locks_initiated
+ doc: Number of combiner lock entries by process
+ (first items queued to a combiner)
+- counter: combiner_locks_scheduled_items
+ doc: Number of items scheduled against combiner locks
+- counter: combiner_locks_scheduled_final_items
+ doc: Number of final items scheduled against combiner locks
+- counter: combiner_locks_offloaded
+ doc: Number of combiner locks offloaded to different threads
+# executor
+- counter: executor_scheduled_short_items
+ doc: Number of finite runtime closures scheduled against the executor
+ (gRPC thread pool)
+- counter: executor_scheduled_long_items
+ doc: Number of potentially infinite runtime closures scheduled against the
+ executor (gRPC thread pool)
+- counter: executor_scheduled_to_self
+ doc: Number of closures scheduled by the executor to the executor
+- counter: executor_wakeup_initiated
+ doc: Number of thread wakeups initiated within the executor
+- counter: executor_queue_drained
+ doc: Number of times an executor queue was drained
+- counter: executor_push_retries
+ doc: Number of times we raced and were forced to retry pushing a closure to
+ the executor
+- counter: executor_threads_created
+ doc: Size of the backing thread pool for overflow gRPC Core work
+- counter: executor_threads_used
+ doc: Number of executor threads actually used
+- histogram: executor_closures_per_wakeup
+ max: 1024
+ buckets: 64
+ doc: Number of closures executed each time an executor wakes up
+# server
+- counter: server_requested_calls
+ doc: Number of calls requested (not necessarily received) by the server
+- histogram: server_cqs_checked
+ buckets: 8
+ max: 64
+ doc: Number of completion queues checked while looking for a CQ that had
+ requested the incoming call
+- counter: server_slowpath_requests_queued
+ doc: Number of times the server slow path was taken (indicates too few
+ outstanding requests)
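The declarations above are consumed by a code generator that emits one `GRPC_STATS_INC_<NAME>` macro per counter; several of those macros appear verbatim in the combiner and epoll hunks later in this diff. As a minimal sketch of how a counter is bumped — the macro name for `server_slowpath_requests_queued` is inferred from that naming convention, not quoted from the tree:

```c
/* Sketch only: GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED is inferred
 * from the NAME -> GRPC_STATS_INC_<NAME> convention visible elsewhere in
 * this commit (e.g. GRPC_STATS_INC_COMBINER_LOCKS_INITIATED). */
#include "src/core/lib/debug/stats.h"

static void note_slowpath_request(grpc_exec_ctx *exec_ctx) {
  /* The macros take the exec_ctx, matching the GRPC_STATS_INC_* calls
   * added in combiner.c and ev_epoll1_linux.c below. */
  GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
}
```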
diff --git a/src/core/lib/debug/stats_data_bq_schema.sql b/src/core/lib/debug/stats_data_bq_schema.sql
new file mode 100644
index 0000000000..0611ccaff0
--- /dev/null
+++ b/src/core/lib/debug/stats_data_bq_schema.sql
@@ -0,0 +1,90 @@
+client_calls_created_per_iteration:FLOAT,
+server_calls_created_per_iteration:FLOAT,
+cqs_created_per_iteration:FLOAT,
+client_channels_created_per_iteration:FLOAT,
+client_subchannels_created_per_iteration:FLOAT,
+server_channels_created_per_iteration:FLOAT,
+syscall_poll_per_iteration:FLOAT,
+syscall_wait_per_iteration:FLOAT,
+pollset_kick_per_iteration:FLOAT,
+pollset_kicked_without_poller_per_iteration:FLOAT,
+pollset_kicked_again_per_iteration:FLOAT,
+pollset_kick_wakeup_fd_per_iteration:FLOAT,
+pollset_kick_wakeup_cv_per_iteration:FLOAT,
+pollset_kick_own_thread_per_iteration:FLOAT,
+histogram_slow_lookups_per_iteration:FLOAT,
+syscall_write_per_iteration:FLOAT,
+syscall_read_per_iteration:FLOAT,
+tcp_backup_pollers_created_per_iteration:FLOAT,
+tcp_backup_poller_polls_per_iteration:FLOAT,
+http2_op_batches_per_iteration:FLOAT,
+http2_op_cancel_per_iteration:FLOAT,
+http2_op_send_initial_metadata_per_iteration:FLOAT,
+http2_op_send_message_per_iteration:FLOAT,
+http2_op_send_trailing_metadata_per_iteration:FLOAT,
+http2_op_recv_initial_metadata_per_iteration:FLOAT,
+http2_op_recv_message_per_iteration:FLOAT,
+http2_op_recv_trailing_metadata_per_iteration:FLOAT,
+http2_settings_writes_per_iteration:FLOAT,
+http2_pings_sent_per_iteration:FLOAT,
+http2_writes_begun_per_iteration:FLOAT,
+http2_writes_offloaded_per_iteration:FLOAT,
+http2_writes_continued_per_iteration:FLOAT,
+http2_partial_writes_per_iteration:FLOAT,
+http2_initiate_write_due_to_initial_write_per_iteration:FLOAT,
+http2_initiate_write_due_to_start_new_stream_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_message_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_initial_metadata_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_trailing_metadata_per_iteration:FLOAT,
+http2_initiate_write_due_to_retry_send_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_continue_pings_per_iteration:FLOAT,
+http2_initiate_write_due_to_goaway_sent_per_iteration:FLOAT,
+http2_initiate_write_due_to_rst_stream_per_iteration:FLOAT,
+http2_initiate_write_due_to_close_from_api_per_iteration:FLOAT,
+http2_initiate_write_due_to_stream_flow_control_per_iteration:FLOAT,
+http2_initiate_write_due_to_transport_flow_control_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_settings_per_iteration:FLOAT,
+http2_initiate_write_due_to_bdp_estimator_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_flow_control_unstalled_by_setting_per_iteration:FLOAT,
+http2_initiate_write_due_to_flow_control_unstalled_by_update_per_iteration:FLOAT,
+http2_initiate_write_due_to_application_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_keepalive_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_transport_flow_control_unstalled_per_iteration:FLOAT,
+http2_initiate_write_due_to_ping_response_per_iteration:FLOAT,
+http2_initiate_write_due_to_force_rst_stream_per_iteration:FLOAT,
+hpack_recv_indexed_per_iteration:FLOAT,
+hpack_recv_lithdr_incidx_per_iteration:FLOAT,
+hpack_recv_lithdr_incidx_v_per_iteration:FLOAT,
+hpack_recv_lithdr_notidx_per_iteration:FLOAT,
+hpack_recv_lithdr_notidx_v_per_iteration:FLOAT,
+hpack_recv_lithdr_nvridx_per_iteration:FLOAT,
+hpack_recv_lithdr_nvridx_v_per_iteration:FLOAT,
+hpack_recv_uncompressed_per_iteration:FLOAT,
+hpack_recv_huffman_per_iteration:FLOAT,
+hpack_recv_binary_per_iteration:FLOAT,
+hpack_recv_binary_base64_per_iteration:FLOAT,
+hpack_send_indexed_per_iteration:FLOAT,
+hpack_send_lithdr_incidx_per_iteration:FLOAT,
+hpack_send_lithdr_incidx_v_per_iteration:FLOAT,
+hpack_send_lithdr_notidx_per_iteration:FLOAT,
+hpack_send_lithdr_notidx_v_per_iteration:FLOAT,
+hpack_send_lithdr_nvridx_per_iteration:FLOAT,
+hpack_send_lithdr_nvridx_v_per_iteration:FLOAT,
+hpack_send_uncompressed_per_iteration:FLOAT,
+hpack_send_huffman_per_iteration:FLOAT,
+hpack_send_binary_per_iteration:FLOAT,
+hpack_send_binary_base64_per_iteration:FLOAT,
+combiner_locks_initiated_per_iteration:FLOAT,
+combiner_locks_scheduled_items_per_iteration:FLOAT,
+combiner_locks_scheduled_final_items_per_iteration:FLOAT,
+combiner_locks_offloaded_per_iteration:FLOAT,
+executor_scheduled_short_items_per_iteration:FLOAT,
+executor_scheduled_long_items_per_iteration:FLOAT,
+executor_scheduled_to_self_per_iteration:FLOAT,
+executor_wakeup_initiated_per_iteration:FLOAT,
+executor_queue_drained_per_iteration:FLOAT,
+executor_push_retries_per_iteration:FLOAT,
+executor_threads_created_per_iteration:FLOAT,
+executor_threads_used_per_iteration:FLOAT,
+server_requested_calls_per_iteration:FLOAT,
+server_slowpath_requests_queued_per_iteration:FLOAT
diff --git a/src/core/lib/debug/trace.c b/src/core/lib/debug/trace.c
index c6c1853e20..7cb2789a19 100644
--- a/src/core/lib/debug/trace.c
+++ b/src/core/lib/debug/trace.c
@@ -39,7 +39,7 @@ static tracer *tracers;
#endif
void grpc_register_tracer(grpc_tracer_flag *flag) {
- tracer *t = gpr_malloc(sizeof(*t));
+ tracer *t = (tracer *)gpr_malloc(sizeof(*t));
t->flag = flag;
t->next = tracers;
TRACER_SET(*flag, false);
@@ -53,10 +53,10 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
- s = gpr_malloc(len + 1);
+ s = (char *)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
- *ss = gpr_realloc(*ss, sizeof(char **) * np);
+ *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}
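The casts added here follow a pattern repeated throughout this commit: `gpr_malloc`/`gpr_realloc` return `void *`, which C converts implicitly but C++ does not, so every allocation site gains an explicit cast to keep the sources compilable as C++. A standalone illustration of the difference (plain `malloc`, not gRPC code):

```c
/* Valid C either way; only the second line also compiles as C++. */
#include <stdlib.h>

typedef struct { int x; } widget;

widget *make_widget(void) {
  /* widget *w = malloc(sizeof(*w));             C only */
  widget *w = (widget *)malloc(sizeof(*w)); /* C and C++ */
  if (w != NULL) w->x = 0;
  return w;
}
```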
diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h
index dd9e6a30fe..64f2e3fc33 100644
--- a/src/core/lib/debug/trace.h
+++ b/src/core/lib/debug/trace.h
@@ -35,7 +35,7 @@ typedef struct {
#else
bool value;
#endif
- char *name;
+ const char *name;
} grpc_tracer_flag;
#ifdef GRPC_THREADSAFE_TRACER
diff --git a/src/core/lib/http/format_request.c b/src/core/lib/http/format_request.c
index f887726eea..88fb0ab0b6 100644
--- a/src/core/lib/http/format_request.c
+++ b/src/core/lib/http/format_request.c
@@ -98,7 +98,7 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
gpr_strvec_destroy(&out);
if (body_bytes) {
- tmp = gpr_realloc(tmp, out_len + body_size);
+ tmp = (char *)gpr_realloc(tmp, out_len + body_size);
memcpy(tmp + out_len, body_bytes, body_size);
out_len += body_size;
}
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 77af7b7c08..db995943a9 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -130,7 +130,7 @@ static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
- internal_request *req = user_data;
+ internal_request *req = (internal_request *)user_data;
size_t i;
for (i = 0; i < req->incoming.count; i++) {
@@ -159,7 +159,7 @@ static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
}
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (error == GRPC_ERROR_NONE) {
on_written(exec_ctx, req);
} else {
@@ -175,7 +175,7 @@ static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *ep) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (!ep) {
next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -189,7 +189,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (!req->ep) {
next_address(exec_ctx, req, GRPC_ERROR_REF(error));
@@ -217,7 +217,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
grpc_schedule_on_exec_ctx);
grpc_arg arg = grpc_channel_arg_pointer_create(
- GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
+ (char *)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
grpc_resource_quota_arg_vtable());
grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
@@ -226,7 +226,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (error != GRPC_ERROR_NONE) {
finish(exec_ctx, req, GRPC_ERROR_REF(error));
return;
@@ -243,7 +243,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
const char *name, grpc_slice request_text) {
- internal_request *req = gpr_malloc(sizeof(internal_request));
+ internal_request *req =
+ (internal_request *)gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
req->request_text = request_text;
grpc_http_parser_init(&req->parser, GRPC_HTTP_RESPONSE, response);
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 97c2886525..c553fa3981 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -43,7 +43,8 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx,
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
if (c->handshaker_factory != NULL) {
- tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
+ tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory);
+ c->handshaker_factory = NULL;
}
if (c->secure_peer_name != NULL) gpr_free(c->secure_peer_name);
gpr_free(sc);
diff --git a/src/core/lib/http/parser.c b/src/core/lib/http/parser.c
index 9c5e93f4e5..0950bd655e 100644
--- a/src/core/lib/http/parser.c
+++ b/src/core/lib/http/parser.c
@@ -28,7 +28,7 @@
grpc_tracer_flag grpc_http1_trace = GRPC_TRACER_INITIALIZER(false, "http1");
static char *buf2str(void *buffer, size_t length) {
- char *out = gpr_malloc(length + 1);
+ char *out = (char *)gpr_malloc(length + 1);
memcpy(out, buffer, length);
out[length] = 0;
return out;
@@ -197,7 +197,8 @@ static grpc_error *add_header(grpc_http_parser *parser) {
if (*hdr_count == parser->hdr_capacity) {
parser->hdr_capacity =
GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2);
- *hdrs = gpr_realloc(*hdrs, parser->hdr_capacity * sizeof(**hdrs));
+ *hdrs = (grpc_http_header *)gpr_realloc(
+ *hdrs, parser->hdr_capacity * sizeof(**hdrs));
}
(*hdrs)[(*hdr_count)++] = hdr;
@@ -255,7 +256,7 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
if (*body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
- *body = gpr_realloc((void *)*body, parser->body_capacity);
+ *body = (char *)gpr_realloc((void *)*body, parser->body_capacity);
}
(*body)[*body_length] = (char)byte;
(*body_length)++;
diff --git a/src/core/lib/iomgr/call_combiner.c b/src/core/lib/iomgr/call_combiner.c
new file mode 100644
index 0000000000..48d8eaec18
--- /dev/null
+++ b/src/core/lib/iomgr/call_combiner.c
@@ -0,0 +1,202 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/iomgr/call_combiner.h"
+
+#include <grpc/support/log.h>
+
+grpc_tracer_flag grpc_call_combiner_trace =
+ GRPC_TRACER_INITIALIZER(false, "call_combiner");
+
+static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) {
+ if (cancel_state & 1) {
+ return (grpc_error*)(cancel_state & ~(gpr_atm)1);
+ }
+ return GRPC_ERROR_NONE;
+}
+
+static gpr_atm encode_cancel_state_error(grpc_error* error) {
+ return (gpr_atm)1 | (gpr_atm)error;
+}
+
+void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
+ gpr_mpscq_init(&call_combiner->queue);
+}
+
+void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
+ gpr_mpscq_destroy(&call_combiner->queue);
+ GRPC_ERROR_UNREF(decode_cancel_state_error(call_combiner->cancel_state));
+}
+
+#ifndef NDEBUG
+#define DEBUG_ARGS , const char *file, int line
+#define DEBUG_FMT_STR "%s:%d: "
+#define DEBUG_FMT_ARGS , file, line
+#else
+#define DEBUG_ARGS
+#define DEBUG_FMT_STR
+#define DEBUG_FMT_ARGS
+#endif
+
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure,
+ grpc_error* error DEBUG_ARGS,
+ const char* reason) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
+ "%s] error=%s",
+ call_combiner, closure DEBUG_FMT_ARGS, reason,
+ grpc_error_string(error));
+ }
+ size_t prev_size =
+ (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ prev_size + 1);
+ }
+ if (prev_size == 0) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY");
+ }
+ // Queue was empty, so execute this closure immediately.
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
+ } else {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " QUEUING");
+ }
+ // Queue was not empty, so add closure to queue.
+ closure->error_data.error = error;
+ gpr_mpscq_push(&call_combiner->queue, (gpr_mpscq_node*)closure);
+ }
+}
+
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner DEBUG_ARGS,
+ const char* reason) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
+ call_combiner DEBUG_FMT_ARGS, reason);
+ }
+ size_t prev_size =
+ (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ prev_size - 1);
+ }
+ GPR_ASSERT(prev_size >= 1);
+ if (prev_size > 1) {
+ while (true) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " checking queue");
+ }
+ bool empty;
+ grpc_closure* closure = (grpc_closure*)gpr_mpscq_pop_and_check_end(
+ &call_combiner->queue, &empty);
+ if (closure == NULL) {
+ // This can happen either due to a race condition within the mpscq
+ // code or because of a race with grpc_call_combiner_start().
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " queue returned no result; checking again");
+ }
+ continue;
+ }
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s",
+ closure, grpc_error_string(closure->error_data.error));
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error);
+ break;
+ }
+ } else if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " queue empty");
+ }
+}
+
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure) {
+ while (true) {
+ // Decode original state.
+ gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
+ grpc_error* original_error = decode_cancel_state_error(original_state);
+ // If error is set, invoke the cancellation closure immediately.
+ // Otherwise, store the new closure.
+ if (original_error != GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling notify_on_cancel callback=%p "
+ "for pre-existing cancellation",
+ call_combiner, closure);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error));
+ break;
+ } else {
+ if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
+ (gpr_atm)closure)) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p",
+ call_combiner, closure);
+ }
+ // If we replaced an earlier closure, invoke the original
+ // closure with GRPC_ERROR_NONE. This allows callers to clean
+ // up any resources they may be holding for the callback.
+ if (original_state != 0) {
+ closure = (grpc_closure*)original_state;
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling old cancel callback=%p",
+ call_combiner, closure);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+ }
+ break;
+ }
+ }
+ // cas failed, try again.
+ }
+}
+
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_error* error) {
+ while (true) {
+ gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
+ grpc_error* original_error = decode_cancel_state_error(original_state);
+ if (original_error != GRPC_ERROR_NONE) {
+ GRPC_ERROR_UNREF(error);
+ break;
+ }
+ if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
+ encode_cancel_state_error(error))) {
+ if (original_state != 0) {
+ grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling notify_on_cancel callback=%p",
+ call_combiner, notify_on_cancel);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error));
+ }
+ break;
+ }
+ // cas failed, try again.
+ }
+}
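`cancel_state` above packs two pointer types into a single `gpr_atm` by tagging the lowest bit: bit set means the word holds a `grpc_error*`, bit clear means a `grpc_closure*` (or 0 for neither). This relies on the assumption that both allocations are at least 2-byte aligned, leaving the low bit free. A standalone sketch of the same trick with stand-in types:

```c
/* Stand-in types, not the real grpc_error/grpc_closure; assumes
 * allocations are at least 2-byte aligned so the low bit is free to use
 * as a tag, exactly as cancel_state does above. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct fake_error { int code; } fake_error;

static intptr_t encode_error(fake_error *e) {
  return (intptr_t)e | 1; /* bit 0 set -> "this word holds an error" */
}

static fake_error *decode_error(intptr_t state) {
  return (state & 1) ? (fake_error *)(state & ~(intptr_t)1) : NULL;
}

int main(void) {
  fake_error e = {5};
  intptr_t state = encode_error(&e);
  assert(decode_error(state) == &e); /* tag stripped, pointer recovered */
  return 0;
}
```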
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
new file mode 100644
index 0000000000..5cfb3f0c07
--- /dev/null
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
+#define GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
+
+#include <stddef.h>
+
+#include <grpc/support/atm.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/support/mpscq.h"
+
+// A simple, lock-free mechanism for serializing activity related to a
+// single call. This is similar to a combiner but is more lightweight.
+//
+// It requires the callback (or, in the common case where the callback
+// actually kicks off a chain of callbacks, the last callback in that
+// chain) to explicitly indicate (by calling GRPC_CALL_COMBINER_STOP())
+// when it is done with the action that was kicked off by the original
+// callback.
+
+extern grpc_tracer_flag grpc_call_combiner_trace;
+
+typedef struct {
+ gpr_atm size; // size_t, num closures in queue or currently executing
+ gpr_mpscq queue;
+ // Either 0 (if not cancelled and no cancellation closure set),
+ // a grpc_closure* (if the lowest bit is 0),
+ // or a grpc_error* (if the lowest bit is 1).
+ gpr_atm cancel_state;
+} grpc_call_combiner;
+
+// Assumes memory was initialized to zero.
+void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
+
+void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner);
+
+#ifndef NDEBUG
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ __FILE__, __LINE__, (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \
+ (reason))
+/// Starts processing \a closure on \a call_combiner.
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure, grpc_error* error,
+ const char* file, int line, const char* reason);
+/// Yields the call combiner to the next closure in the queue, if any.
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ const char* file, int line, const char* reason);
+#else
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason))
+/// Starts processing \a closure on \a call_combiner.
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure, grpc_error* error,
+ const char* reason);
+/// Yields the call combiner to the next closure in the queue, if any.
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ const char* reason);
+#endif
+
+/// Registers \a closure to be invoked by \a call_combiner when
+/// grpc_call_combiner_cancel() is called.
+///
+/// Once a closure is registered, it will always be scheduled exactly
+/// once; this allows the closure to hold references that will be freed
+/// regardless of whether or not the call was cancelled. If a cancellation
+/// does occur, the closure will be scheduled with the cancellation error;
+/// otherwise, it will be scheduled with GRPC_ERROR_NONE.
+///
+/// The closure will be scheduled in the following cases:
+/// - If grpc_call_combiner_cancel() was called prior to registering the
+/// closure, it will be scheduled immediately with the cancellation error.
+/// - If grpc_call_combiner_cancel() is called after registering the
+/// closure, the closure will be scheduled with the cancellation error.
+/// - If grpc_call_combiner_set_notify_on_cancel() is called again to
+/// register a new cancellation closure, the previous cancellation
+/// closure will be scheduled with GRPC_ERROR_NONE.
+///
+/// If \a closure is NULL, then no closure will be invoked on
+/// cancellation; this effectively unregisters the previously set closure.
+/// However, most filters will not need to explicitly unregister their
+/// callbacks, as this is done automatically when the call is destroyed.
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure);
+
+/// Indicates that the call has been cancelled.
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_error* error);
+
+#endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */
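A hedged usage sketch of this API from a filter's perspective (the `my_filter_*` names and call data layout are invented for illustration): work is kicked off with GRPC_CALL_COMBINER_START, and the last closure in the resulting chain must hand the combiner back with GRPC_CALL_COMBINER_STOP, as the header comment requires.

```c
#include "src/core/lib/iomgr/call_combiner.h"

typedef struct {
  grpc_call_combiner call_combiner; /* zeroed, then grpc_call_combiner_init()ed */
  grpc_closure on_work_done;
} my_call_data;

static void my_filter_work_done(grpc_exec_ctx *exec_ctx, void *arg,
                                grpc_error *error) {
  my_call_data *calld = (my_call_data *)arg;
  /* Last callback in the chain: yield the combiner to the next closure. */
  GRPC_CALL_COMBINER_STOP(exec_ctx, &calld->call_combiner,
                          "my_filter work complete");
}

static void my_filter_start(grpc_exec_ctx *exec_ctx, my_call_data *calld) {
  GRPC_CLOSURE_INIT(&calld->on_work_done, my_filter_work_done, calld,
                    grpc_schedule_on_exec_ctx);
  /* Runs immediately if the combiner is idle, otherwise queues. */
  GRPC_CALL_COMBINER_START(exec_ctx, &calld->call_combiner,
                           &calld->on_work_done, GRPC_ERROR_NONE,
                           "my_filter start work");
}
```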
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index 26f9cbe0fa..00edefc6ae 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -109,7 +109,7 @@ typedef struct {
static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- wrapped_closure *wc = arg;
+ wrapped_closure *wc = (wrapped_closure *)arg;
grpc_iomgr_cb_func cb = wc->cb;
void *cb_arg = wc->cb_arg;
gpr_free(wc);
@@ -124,7 +124,7 @@ grpc_closure *grpc_closure_create(const char *file, int line,
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler) {
#endif
- wrapped_closure *wc = gpr_malloc(sizeof(*wc));
+ wrapped_closure *wc = (wrapped_closure *)gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
#ifndef NDEBUG
@@ -167,7 +167,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != NULL) {
#ifndef NDEBUG
- GPR_ASSERT(!c->scheduled);
+ if (c->scheduled) {
+ gpr_log(GPR_ERROR,
+ "Closure already scheduled. (closure: %p, created: [%s:%d], "
+ "previously scheduled at: [%s: %d] run?: %s",
+ c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated, c->run ? "true" : "false");
+ abort();
+ }
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
@@ -191,7 +198,14 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
#ifndef NDEBUG
- GPR_ASSERT(!c->scheduled);
+ if (c->scheduled) {
+ gpr_log(GPR_ERROR,
+ "Closure already scheduled. (closure: %p, created: [%s:%d], "
+ "previously scheduled at: [%s: %d] run?: %s",
+ c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated, c->run ? "true" : "false");
+ abort();
+ }
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
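In debug builds each closure now carries its creation and last-schedule sites, so the bug class below — rescheduling a closure before it has run — dies with a log naming both schedule points instead of a bare assertion. A contrived sketch of the offending pattern:

```c
/* Contrived double-schedule: in a !NDEBUG build, the second SCHED now
 * logs where the closure was created and previously scheduled, then
 * aborts, instead of failing a bare GPR_ASSERT(!c->scheduled). */
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg,
                       grpc_error *error) {}

static void buggy(grpc_exec_ctx *exec_ctx, grpc_closure *c) {
  GRPC_CLOSURE_INIT(c, do_nothing, NULL, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_SCHED(exec_ctx, c, GRPC_ERROR_NONE);
  GRPC_CLOSURE_SCHED(exec_ctx, c, GRPC_ERROR_NONE); /* aborts, with context */
}
```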
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 9b66987b68..f899b25f10 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -24,6 +24,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
@@ -73,14 +74,15 @@ static const grpc_closure_scheduler_vtable finally_scheduler = {
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
grpc_combiner *grpc_combiner_create(void) {
- grpc_combiner *lock = gpr_zalloc(sizeof(*lock));
+ grpc_combiner *lock = (grpc_combiner *)gpr_zalloc(sizeof(*lock));
gpr_ref_init(&lock->refs, 1);
lock->scheduler.vtable = &scheduler;
lock->finally_scheduler.vtable = &finally_scheduler;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
- GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler);
+ GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -153,6 +155,7 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
grpc_error *error) {
+ GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
GPR_TIMER_BEGIN("combiner.execute", 0);
grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
@@ -160,6 +163,7 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
lock, cl, last));
if (last == 1) {
+ GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx);
gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
(gpr_atm)exec_ctx);
// first element on this list: add it to the list of combiner locks
@@ -190,11 +194,12 @@ static void move_next(grpc_exec_ctx *exec_ctx) {
}
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_combiner *lock = arg;
+ grpc_combiner *lock = (grpc_combiner *)arg;
push_last_on_exec_ctx(exec_ctx, lock);
}
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+ GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx);
move_next(exec_ctx);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
@@ -325,6 +330,7 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_error *error) {
+ GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx);
grpc_combiner *lock =
COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
@@ -350,7 +356,8 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error) {
- combiner_finally_exec(exec_ctx, closure, GRPC_ERROR_REF(error));
+ combiner_finally_exec(exec_ctx, (grpc_closure *)closure,
+ GRPC_ERROR_REF(error));
}
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {
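Besides the stats hooks, the notable change in this file is that `grpc_executor_scheduler` is now a function taking a job-length hint rather than a single global scheduler; the combiner offload asks for GRPC_EXECUTOR_SHORT. A minimal sketch of scheduling onto the executor under the new API — GRPC_EXECUTOR_LONG is assumed here as the counterpart for potentially unbounded work, matching the executor_scheduled_long_items counter declared earlier:

```c
/* Sketch: pick the short-job executor pool, as the combiner offload path
 * above does; GRPC_EXECUTOR_LONG (assumed counterpart) would be used for
 * potentially long-running closures. */
static void offload_to_executor(grpc_exec_ctx *exec_ctx, grpc_closure *c,
                                grpc_iomgr_cb_func cb, void *arg) {
  GRPC_CLOSURE_INIT(c, cb, arg,
                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
  GRPC_CLOSURE_SCHED(exec_ctx, c, GRPC_ERROR_NONE);
}
```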
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 3759dda992..aa05501537 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -211,7 +211,7 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
#ifndef NDEBUG
grpc_error *orig = *err;
#endif
- *err = gpr_realloc(
+ *err = (grpc_error *)gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
@@ -278,13 +278,13 @@ static void internal_set_time(grpc_error **err, grpc_error_times which,
memcpy((*err)->arena + slot, &value, sizeof(value));
}
-static void internal_add_error(grpc_error **err, grpc_error *new) {
- grpc_linked_error new_last = {new, UINT8_MAX};
+static void internal_add_error(grpc_error **err, grpc_error *new_err) {
+ grpc_linked_error new_last = {new_err, UINT8_MAX};
uint8_t slot = get_placement(err, sizeof(grpc_linked_error));
if (slot == UINT8_MAX) {
- gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err, new,
- grpc_error_string(new));
- GRPC_ERROR_UNREF(new);
+ gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err,
+ new_err, grpc_error_string(new_err));
+ GRPC_ERROR_UNREF(new_err);
return;
}
if ((*err)->first_err == UINT8_MAX) {
@@ -321,8 +321,8 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
uint8_t initial_arena_capacity = (uint8_t)(
DEFAULT_ERROR_CAPACITY +
(uint8_t)(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY);
- grpc_error *err =
- gpr_malloc(sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
+ grpc_error *err = (grpc_error *)gpr_malloc(
+ sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL
return GRPC_ERROR_OOM;
}
@@ -406,7 +406,8 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) {
new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
}
- out = gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t));
+ out = (grpc_error *)gpr_malloc(sizeof(*in) +
+ new_arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
@@ -431,10 +432,10 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
intptr_t value) {
GPR_TIMER_BEGIN("grpc_error_set_int", 0);
- grpc_error *new = copy_error_and_unref(src);
- internal_set_int(&new, which, value);
+ grpc_error *new_err = copy_error_and_unref(src);
+ internal_set_int(&new_err, which, value);
GPR_TIMER_END("grpc_error_set_int", 0);
- return new;
+ return new_err;
}
typedef struct {
@@ -476,10 +477,10 @@ bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
grpc_slice str) {
GPR_TIMER_BEGIN("grpc_error_set_str", 0);
- grpc_error *new = copy_error_and_unref(src);
- internal_set_str(&new, which, str);
+ grpc_error *new_err = copy_error_and_unref(src);
+ internal_set_str(&new_err, which, str);
GPR_TIMER_END("grpc_error_set_str", 0);
- return new;
+ return new_err;
}
bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
@@ -506,10 +507,10 @@ bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) {
GPR_TIMER_BEGIN("grpc_error_add_child", 0);
- grpc_error *new = copy_error_and_unref(src);
- internal_add_error(&new, child);
+ grpc_error *new_err = copy_error_and_unref(src);
+ internal_add_error(&new_err, child);
GPR_TIMER_END("grpc_error_add_child", 0);
- return new;
+ return new_err;
}
static const char *no_error_string = "\"No Error\"";
@@ -530,7 +531,7 @@ typedef struct {
static void append_chr(char c, char **s, size_t *sz, size_t *cap) {
if (*sz == *cap) {
*cap = GPR_MAX(8, 3 * *cap / 2);
- *s = gpr_realloc(*s, *cap);
+ *s = (char *)gpr_realloc(*s, *cap);
}
(*s)[(*sz)++] = c;
}
@@ -582,7 +583,8 @@ static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
static void append_kv(kv_pairs *kvs, char *key, char *value) {
if (kvs->num_kvs == kvs->cap_kvs) {
kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4);
- kvs->kvs = gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
+ kvs->kvs =
+ (kv_pair *)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
}
kvs->kvs[kvs->num_kvs].key = key;
kvs->kvs[kvs->num_kvs].value = value;
@@ -639,7 +641,7 @@ static char *key_time(grpc_error_times which) {
static char *fmt_time(gpr_timespec tm) {
char *out;
- char *pfx = "!!";
+ const char *pfx = "!!";
switch (tm.clock_type) {
case GPR_CLOCK_MONOTONIC:
pfx = "@monotonic:";
@@ -695,8 +697,8 @@ static char *errs_string(grpc_error *err) {
}
static int cmp_kvs(const void *a, const void *b) {
- const kv_pair *ka = a;
- const kv_pair *kb = b;
+ const kv_pair *ka = (const kv_pair *)a;
+ const kv_pair *kb = (const kv_pair *)b;
return strcmp(ka->key, kb->key);
}
@@ -731,7 +733,7 @@ const char *grpc_error_string(grpc_error *err) {
void *p = (void *)gpr_atm_acq_load(&err->atomics.error_string);
if (p != NULL) {
GPR_TIMER_END("grpc_error_string", 0);
- return p;
+ return (const char *)p;
}
kv_pairs kvs;
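The `new` → `new_err` renames in this file exist for the same C++-compatibility reason as the casts: `new` is an ordinary identifier in C but a keyword in C++. A standalone illustration:

```c
/* The commented-out prototype is valid C but a syntax error in C++,
 * where `new` is a keyword; the rename compiles under both. */
struct my_error;

/* void add_child(struct my_error **list, struct my_error *new);  C only */
void add_child(struct my_error **list, struct my_error *new_err);
```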
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c
index 6b034ca960..3ac12ab56f 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.c
+++ b/src/core/lib/iomgr/ev_epoll1_linux.c
@@ -39,6 +39,7 @@
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
@@ -48,7 +49,60 @@
#include "src/core/lib/support/string.h"
static grpc_wakeup_fd global_wakeup_fd;
-static int g_epfd;
+
+/*******************************************************************************
+ * Singleton epoll set related fields
+ */
+
+#define MAX_EPOLL_EVENTS 100
+#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1
+
+/* NOTE ON SYNCHRONIZATION:
+ * - Fields in this struct are only modified by the designated poller. Hence
+ * there is no need for any locks to protect the struct.
+ * - The num_events and cursor fields have to be of atomic type to provide
+ * memory visibility guarantees only, i.e. in case of multiple pollers, the
+ * designated polling thread keeps changing; the thread that wrote these
+ * values may be different from the thread reading them.
+ */
+typedef struct epoll_set {
+ int epfd;
+
+ /* The epoll_events after the last call to epoll_wait() */
+ struct epoll_event events[MAX_EPOLL_EVENTS];
+
+ /* The number of epoll_events after the last call to epoll_wait() */
+ gpr_atm num_events;
+
+ /* Index of the first event in epoll_events that has to be processed. This
+ * field is only valid if num_events > 0 */
+ gpr_atm cursor;
+} epoll_set;
+
+/* The global singleton epoll set */
+static epoll_set g_epoll_set;
+
+/* Must be called *only* once */
+static bool epoll_set_init() {
+ g_epoll_set.epfd = epoll_create1(EPOLL_CLOEXEC);
+ if (g_epoll_set.epfd < 0) {
+ gpr_log(GPR_ERROR, "epoll unavailable");
+ return false;
+ }
+
+ gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
+ gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
+ gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
+ return true;
+}
+
+/* epoll_set_init() MUST be called before calling this. */
+static void epoll_set_shutdown() {
+ if (g_epoll_set.epfd >= 0) {
+ close(g_epoll_set.epfd);
+ g_epoll_set.epfd = -1;
+ }
+}
/*******************************************************************************
* Fd Declarations
@@ -91,7 +145,7 @@ static const char *kick_state_string(kick_state st) {
}
struct grpc_pollset_worker {
- kick_state kick_state;
+ kick_state state;
int kick_state_mutator; // which line of code last changed kick state
bool initialized_cv;
grpc_pollset_worker *next;
@@ -100,29 +154,29 @@ struct grpc_pollset_worker {
grpc_closure_list schedule_on_end_work;
};
-#define SET_KICK_STATE(worker, state) \
+#define SET_KICK_STATE(worker, kick_state) \
do { \
- (worker)->kick_state = (state); \
+ (worker)->state = (kick_state); \
(worker)->kick_state_mutator = __LINE__; \
} while (false)
-#define MAX_NEIGHBOURHOODS 1024
+#define MAX_NEIGHBORHOODS 1024
-typedef struct pollset_neighbourhood {
+typedef struct pollset_neighborhood {
gpr_mu mu;
grpc_pollset *active_root;
char pad[GPR_CACHELINE_SIZE];
-} pollset_neighbourhood;
+} pollset_neighborhood;
struct grpc_pollset {
gpr_mu mu;
- pollset_neighbourhood *neighbourhood;
- bool reassigning_neighbourhood;
+ pollset_neighborhood *neighborhood;
+ bool reassigning_neighborhood;
grpc_pollset_worker *root_worker;
bool kicked_without_poller;
/* Set to true if the pollset is observed to have no workers available to
- * poll */
+ poll */
bool seen_inactive;
bool shutting_down; /* Is the pollset shutting down ? */
grpc_closure *shutdown_closure; /* Called after after shutdown is complete */
@@ -206,7 +260,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
new_fd->fd = fd;
@@ -226,9 +280,10 @@ static grpc_fd *fd_create(int fd, const char *name) {
#endif
gpr_free(fd_name);
- struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
- .data.ptr = new_fd};
- if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
+ struct epoll_event ev;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
+ ev.data.ptr = new_fd;
+ if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
}
@@ -326,9 +381,12 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);
+
+/* The designated poller */
static gpr_atm g_active_poller;
-static pollset_neighbourhood *g_neighbourhoods;
-static size_t g_num_neighbourhoods;
+
+static pollset_neighborhood *g_neighborhoods;
+static size_t g_num_neighborhoods;
/* Return true if first in list */
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
@@ -367,8 +425,8 @@ static worker_remove_result worker_remove(grpc_pollset *pollset,
}
}
-static size_t choose_neighbourhood(void) {
- return (size_t)gpr_cpu_current_cpu() % g_num_neighbourhoods;
+static size_t choose_neighborhood(void) {
+ return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
}
static grpc_error *pollset_global_init(void) {
@@ -378,16 +436,18 @@ static grpc_error *pollset_global_init(void) {
global_wakeup_fd.read_fd = -1;
grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
if (err != GRPC_ERROR_NONE) return err;
- struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
- .data.ptr = &global_wakeup_fd};
- if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd, &ev) != 0) {
+ struct epoll_event ev;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.data.ptr = &global_wakeup_fd;
+ if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
+ &ev) != 0) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
- g_num_neighbourhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBOURHOODS);
- g_neighbourhoods =
- gpr_zalloc(sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
- for (size_t i = 0; i < g_num_neighbourhoods; i++) {
- gpr_mu_init(&g_neighbourhoods[i].mu);
+ g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
+ g_neighborhoods = (pollset_neighborhood *)gpr_zalloc(
+ sizeof(*g_neighborhoods) * g_num_neighborhoods);
+ for (size_t i = 0; i < g_num_neighborhoods; i++) {
+ gpr_mu_init(&g_neighborhoods[i].mu);
}
return GRPC_ERROR_NONE;
}
@@ -396,17 +456,17 @@ static void pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_pollset);
gpr_tls_destroy(&g_current_thread_worker);
if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
- for (size_t i = 0; i < g_num_neighbourhoods; i++) {
- gpr_mu_destroy(&g_neighbourhoods[i].mu);
+ for (size_t i = 0; i < g_num_neighborhoods; i++) {
+ gpr_mu_destroy(&g_neighborhoods[i].mu);
}
- gpr_free(g_neighbourhoods);
+ gpr_free(g_neighborhoods);
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
gpr_mu_init(&pollset->mu);
*mu = &pollset->mu;
- pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
- pollset->reassigning_neighbourhood = false;
+ pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
+ pollset->reassigning_neighborhood = false;
pollset->root_worker = NULL;
pollset->kicked_without_poller = false;
pollset->seen_inactive = true;
@@ -419,46 +479,52 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
- pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
+ pollset_neighborhood *neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
- retry_lock_neighbourhood:
- gpr_mu_lock(&neighbourhood->mu);
+ retry_lock_neighborhood:
+ gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
- if (pollset->neighbourhood != neighbourhood) {
- gpr_mu_unlock(&neighbourhood->mu);
- neighbourhood = pollset->neighbourhood;
+ if (pollset->neighborhood != neighborhood) {
+ gpr_mu_unlock(&neighborhood->mu);
+ neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
- goto retry_lock_neighbourhood;
+ goto retry_lock_neighborhood;
}
pollset->prev->next = pollset->next;
pollset->next->prev = pollset->prev;
- if (pollset == pollset->neighbourhood->active_root) {
- pollset->neighbourhood->active_root =
+ if (pollset == pollset->neighborhood->active_root) {
+ pollset->neighborhood->active_root =
pollset->next == pollset ? NULL : pollset->next;
}
}
- gpr_mu_unlock(&pollset->neighbourhood->mu);
+ gpr_mu_unlock(&pollset->neighborhood->mu);
}
gpr_mu_unlock(&pollset->mu);
gpr_mu_destroy(&pollset->mu);
}
-static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
+static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset) {
+ GPR_TIMER_BEGIN("pollset_kick_all", 0);
grpc_error *error = GRPC_ERROR_NONE;
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
- switch (worker->kick_state) {
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ switch (worker->state) {
case KICKED:
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
break;
case UNKICKED:
SET_KICK_STATE(worker, KICKED);
if (worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->cv);
}
break;
case DESIGNATED_POLLER:
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
SET_KICK_STATE(worker, KICKED);
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
"pollset_kick_all");
@@ -470,7 +536,7 @@ static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
}
// TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
// in the else case
-
+ GPR_TIMER_END("pollset_kick_all", 0);
return error;
}
@@ -478,6 +544,7 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset) {
if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
pollset->begin_refs == 0) {
+ GPR_TIMER_MARK("pollset_finish_shutdown", 0);
GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
pollset->shutdown_closure = NULL;
}
@@ -485,16 +552,16 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
+ GPR_TIMER_BEGIN("pollset_shutdown", 0);
GPR_ASSERT(pollset->shutdown_closure == NULL);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutdown_closure = closure;
pollset->shutting_down = true;
- GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
+ GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
pollset_maybe_finish_shutdown(exec_ctx, pollset);
+ GPR_TIMER_END("pollset_shutdown", 0);
}
-#define MAX_EPOLL_EVENTS 100
-
static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now) {
gpr_timespec timeout;
@@ -507,58 +574,105 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
}
static const gpr_timespec round_up = {
- .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
+ 0, /* tv_sec */
+ GPR_NS_PER_MS - 1, /* tv_nsec */
+ GPR_TIMESPAN /* clock_type */
+ };
timeout = gpr_time_sub(deadline, now);
int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
return millis >= 1 ? millis : 1;
}
-static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- gpr_timespec now, gpr_timespec deadline) {
- struct epoll_event events[MAX_EPOLL_EVENTS];
- static const char *err_desc = "pollset_poll";
-
- int timeout = poll_deadline_to_millis_timeout(deadline, now);
-
- if (timeout != 0) {
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- }
- int r;
- do {
- r = epoll_wait(g_epfd, events, MAX_EPOLL_EVENTS, timeout);
- } while (r < 0 && errno == EINTR);
- if (timeout != 0) {
- GRPC_SCHEDULING_END_BLOCKING_REGION;
- }
+/* Process the epoll events found by do_epoll_wait().
+ - g_epoll_set.cursor points to the index of the first event to be processed
+ - This function then processes up to MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION
+ events and updates g_epoll_set.cursor
+
+ NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
+ called by the g_active_poller thread. So there is no need for
+ synchronization when accessing fields in g_epoll_set */
+static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset) {
+ static const char *err_desc = "process_events";
+ grpc_error *error = GRPC_ERROR_NONE;
- if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
+ GPR_TIMER_BEGIN("process_epoll_events", 0);
+ long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
+ long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
+ for (int idx = 0;
+ (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
+ idx++) {
+ long c = cursor++;
+ struct epoll_event *ev = &g_epoll_set.events[c];
+ void *data_ptr = ev->data.ptr;
- grpc_error *error = GRPC_ERROR_NONE;
- for (int i = 0; i < r; i++) {
- void *data_ptr = events[i].data.ptr;
if (data_ptr == &global_wakeup_fd) {
append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
} else {
grpc_fd *fd = (grpc_fd *)(data_ptr);
- bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
- bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
- bool write_ev = (events[i].events & EPOLLOUT) != 0;
+ bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
+ bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
+ bool write_ev = (ev->events & EPOLLOUT) != 0;
+
if (read_ev || cancel) {
fd_become_readable(exec_ctx, fd, pollset);
}
+
if (write_ev || cancel) {
fd_become_writable(exec_ctx, fd);
}
}
}
-
+ gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
+ GPR_TIMER_END("process_epoll_events", 0);
return error;
}
+/* Do epoll_wait and store the events in the g_epoll_set.events field. This
+ does not "process" any of the events yet; that is done in
+ process_epoll_events(). See process_epoll_events() for more details.
+
+ NOTE ON SYNCHRONIZATION: At any point in time, only the g_active_poller
+ (i.e. the designated poller thread) will be calling this function. So there
+ is no need for any synchronization when accessing fields in g_epoll_set */
+static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
+ gpr_timespec now, gpr_timespec deadline) {
+ GPR_TIMER_BEGIN("do_epoll_wait", 0);
+
+ int r;
+ int timeout = poll_deadline_to_millis_timeout(deadline, now);
+ if (timeout != 0) {
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ }
+ do {
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
+ r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
+ timeout);
+ } while (r < 0 && errno == EINTR);
+ if (timeout != 0) {
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
+ }
+
+ if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
+
+ GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
+
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
+ }
+
+ gpr_atm_rel_store(&g_epoll_set.num_events, r);
+ gpr_atm_rel_store(&g_epoll_set.cursor, 0);
+
+ GPR_TIMER_END("do_epoll_wait", 0);
+ return GRPC_ERROR_NONE;
+}
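Polling is now split into two phases: do_epoll_wait() fills the cached g_epoll_set.events array once, and process_epoll_events() drains at most MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION entries per call, publishing progress through the atomic cursor. A simplified standalone sketch of the consumer side (plain longs stand in for the gpr_atm acquire/release operations):

```c
/* Simplified model of process_epoll_events(): consume at most K cached
 * events per call and remember where to resume. In the real code the
 * loads are gpr_atm_acq_load and the store is gpr_atm_rel_store, so a
 * thread that later becomes the designated poller sees a consistent
 * num_events/cursor pair. */
#define K 1 /* MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION */

typedef struct {
  long num_events; /* filled by the epoll_wait phase */
  long cursor;     /* next event to hand out */
  int events[100]; /* stands in for struct epoll_event[] */
} evset;

static int drain_some(evset *s) {
  int handled = 0;
  long cursor = s->cursor;
  while (handled < K && cursor != s->num_events) {
    int ev = s->events[cursor++];
    (void)ev; /* fd_become_readable()/fd_become_writable() happen here */
    ++handled;
  }
  s->cursor = cursor;
  return handled;
}
```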
+
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl, gpr_timespec *now,
gpr_timespec deadline) {
+ GPR_TIMER_BEGIN("begin_worker", 0);
if (worker_hdl != NULL) *worker_hdl = worker;
worker->initialized_cv = false;
SET_KICK_STATE(worker, UNKICKED);
@@ -573,69 +687,77 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
// pollset has been observed to be inactive, we need to move back to the
// active list
bool is_reassigning = false;
- if (!pollset->reassigning_neighbourhood) {
+ if (!pollset->reassigning_neighborhood) {
is_reassigning = true;
- pollset->reassigning_neighbourhood = true;
- pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
+ pollset->reassigning_neighborhood = true;
+ pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
}
- pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
+ pollset_neighborhood *neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
// pollset unlocked: state may change (even worker->kick_state)
- retry_lock_neighbourhood:
- gpr_mu_lock(&neighbourhood->mu);
+ retry_lock_neighborhood:
+ gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
- pollset, worker, kick_state_string(worker->kick_state),
+ pollset, worker, kick_state_string(worker->state),
is_reassigning);
}
if (pollset->seen_inactive) {
- if (neighbourhood != pollset->neighbourhood) {
- gpr_mu_unlock(&neighbourhood->mu);
- neighbourhood = pollset->neighbourhood;
+ if (neighborhood != pollset->neighborhood) {
+ gpr_mu_unlock(&neighborhood->mu);
+ neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
- goto retry_lock_neighbourhood;
+ goto retry_lock_neighborhood;
}
- pollset->seen_inactive = false;
- if (neighbourhood->active_root == NULL) {
- neighbourhood->active_root = pollset->next = pollset->prev = pollset;
- /* TODO: sreek. Why would this worker state be other than UNKICKED
- * here ? (since the worker isn't added to the pollset yet, there is no
- * way it can be "found" by other threads to get kicked). */
-
- /* If there is no designated poller, make this the designated poller */
- if (worker->kick_state == UNKICKED &&
- gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
- SET_KICK_STATE(worker, DESIGNATED_POLLER);
+
+ /* In the brief time we released the pollset locks above, the worker MAY
+ have been kicked. In this case, the worker should get out of this
+ pollset ASAP and hence this should neither add the pollset to
+ neighborhood nor mark the pollset as active.
+
+ On a side note, the only way a worker's kick state could have changed
+ at this point is if it were "kicked specifically". Since the worker has
+ not added itself to the pollset yet (by calling worker_insert()), it is
+ not visible in the "kick any" path yet */
+ if (worker->state == UNKICKED) {
+ pollset->seen_inactive = false;
+ if (neighborhood->active_root == NULL) {
+ neighborhood->active_root = pollset->next = pollset->prev = pollset;
+ /* Make this the designated poller if there isn't one already */
+ if (worker->state == UNKICKED &&
+ gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
+ SET_KICK_STATE(worker, DESIGNATED_POLLER);
+ }
+ } else {
+ pollset->next = neighborhood->active_root;
+ pollset->prev = pollset->next->prev;
+ pollset->next->prev = pollset->prev->next = pollset;
}
- } else {
- pollset->next = neighbourhood->active_root;
- pollset->prev = pollset->next->prev;
- pollset->next->prev = pollset->prev->next = pollset;
}
}
if (is_reassigning) {
- GPR_ASSERT(pollset->reassigning_neighbourhood);
- pollset->reassigning_neighbourhood = false;
+ GPR_ASSERT(pollset->reassigning_neighborhood);
+ pollset->reassigning_neighborhood = false;
}
- gpr_mu_unlock(&neighbourhood->mu);
+ gpr_mu_unlock(&neighborhood->mu);
}
worker_insert(pollset, worker);
pollset->begin_refs--;
- if (worker->kick_state == UNKICKED && !pollset->kicked_without_poller) {
+ if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
- while (worker->kick_state == UNKICKED && !pollset->shutting_down) {
+ while (worker->state == UNKICKED && !pollset->shutting_down) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
- pollset, worker, kick_state_string(worker->kick_state),
+ pollset, worker, kick_state_string(worker->state),
pollset->shutting_down);
}
if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
- worker->kick_state == UNKICKED) {
+ worker->state == UNKICKED) {
/* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
received a kick */
SET_KICK_STATE(worker, KICKED);
@@ -648,12 +770,12 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_log(GPR_ERROR,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
- pollset, worker, kick_state_string(worker->kick_state),
+ pollset, worker, kick_state_string(worker->state),
pollset->shutting_down, pollset->kicked_without_poller);
}
/* We release pollset lock in this function at a couple of places:
- * 1. Briefly when assigning pollset to a neighbourhood
+ * 1. Briefly when assigning pollset to a neighborhood
* 2. When doing gpr_cv_wait()
* It is possible that 'kicked_without_poller' was set to true during (1) and
* 'shutting_down' is set to true during (1) or (2). If either of them is
@@ -663,17 +785,20 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
if (pollset->kicked_without_poller) {
pollset->kicked_without_poller = false;
+ GPR_TIMER_END("begin_worker", 0);
return false;
}
- return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down;
+ GPR_TIMER_END("begin_worker", 0);
+ return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}
-static bool check_neighbourhood_for_available_poller(
- pollset_neighbourhood *neighbourhood) {
+static bool check_neighborhood_for_available_poller(
+ grpc_exec_ctx *exec_ctx, pollset_neighborhood *neighborhood) {
+ GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
bool found_worker = false;
do {
- grpc_pollset *inspect = neighbourhood->active_root;
+ grpc_pollset *inspect = neighborhood->active_root;
if (inspect == NULL) {
break;
}
@@ -682,7 +807,7 @@ static bool check_neighbourhood_for_available_poller(
grpc_pollset_worker *inspect_worker = inspect->root_worker;
if (inspect_worker != NULL) {
do {
- switch (inspect_worker->kick_state) {
+ switch (inspect_worker->state) {
case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
@@ -692,6 +817,8 @@ static bool check_neighbourhood_for_available_poller(
}
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
+ GPR_TIMER_MARK("signal worker", 0);
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&inspect_worker->cv);
}
} else {
@@ -717,8 +844,8 @@ static bool check_neighbourhood_for_available_poller(
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
}
inspect->seen_inactive = true;
- if (inspect == neighbourhood->active_root) {
- neighbourhood->active_root =
+ if (inspect == neighborhood->active_root) {
+ neighborhood->active_root =
inspect->next == inspect ? NULL : inspect->next;
}
inspect->next->prev = inspect->prev;
@@ -727,12 +854,14 @@ static bool check_neighbourhood_for_available_poller(
}
gpr_mu_unlock(&inspect->mu);
} while (!found_worker);
+ GPR_TIMER_END("check_neighborhood_for_available_poller", 0);
return found_worker;
}
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl) {
+ GPR_TIMER_BEGIN("end_worker", 0);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
}
@@ -742,13 +871,14 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure_list_move(&worker->schedule_on_end_work,
&exec_ctx->closure_list);
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
- if (worker->next != worker && worker->next->kick_state == UNKICKED) {
+ if (worker->next != worker && worker->next->state == UNKICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
}
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->next->cv);
if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu);
@@ -757,32 +887,33 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
gpr_atm_no_barrier_store(&g_active_poller, 0);
- size_t poller_neighbourhood_idx =
- (size_t)(pollset->neighbourhood - g_neighbourhoods);
+ size_t poller_neighborhood_idx =
+ (size_t)(pollset->neighborhood - g_neighborhoods);
gpr_mu_unlock(&pollset->mu);
bool found_worker = false;
- bool scan_state[MAX_NEIGHBOURHOODS];
- for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
- pollset_neighbourhood *neighbourhood =
- &g_neighbourhoods[(poller_neighbourhood_idx + i) %
- g_num_neighbourhoods];
- if (gpr_mu_trylock(&neighbourhood->mu)) {
+ bool scan_state[MAX_NEIGHBORHOODS];
+ for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
+ pollset_neighborhood *neighborhood =
+ &g_neighborhoods[(poller_neighborhood_idx + i) %
+ g_num_neighborhoods];
+ if (gpr_mu_trylock(&neighborhood->mu)) {
found_worker =
- check_neighbourhood_for_available_poller(neighbourhood);
- gpr_mu_unlock(&neighbourhood->mu);
+ check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+ gpr_mu_unlock(&neighborhood->mu);
scan_state[i] = true;
} else {
scan_state[i] = false;
}
}
- for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
+ for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
if (scan_state[i]) continue;
- pollset_neighbourhood *neighbourhood =
- &g_neighbourhoods[(poller_neighbourhood_idx + i) %
- g_num_neighbourhoods];
- gpr_mu_lock(&neighbourhood->mu);
- found_worker = check_neighbourhood_for_available_poller(neighbourhood);
- gpr_mu_unlock(&neighbourhood->mu);
+ pollset_neighborhood *neighborhood =
+ &g_neighborhoods[(poller_neighborhood_idx + i) %
+ g_num_neighborhoods];
+ gpr_mu_lock(&neighborhood->mu);
+ found_worker =
+ check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+ gpr_mu_unlock(&neighborhood->mu);
}
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
@@ -802,42 +933,72 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
+ GPR_TIMER_END("end_worker", 0);
}
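/* Editor's sketch of the two-pass neighborhood scan above: the first pass
   uses trylock so a busy neighborhood never blocks the search; the second
   pass revisits only the neighborhoods that were skipped. Types and names
   here (nbhd_t, NUM_NBHDS, scan_one) are hypothetical. */
#include <pthread.h>
#include <stdbool.h>

#define NUM_NBHDS 4
typedef struct { pthread_mutex_t mu; } nbhd_t;
static nbhd_t nbhds[NUM_NBHDS];
static bool scan_one(nbhd_t *n) { (void)n; return false; /* stub */ }

static bool scan_all(void) {
  bool found = false;
  bool scanned[NUM_NBHDS] = {false};
  for (int i = 0; !found && i < NUM_NBHDS; i++) { /* opportunistic pass */
    if (pthread_mutex_trylock(&nbhds[i].mu) == 0) {
      found = scan_one(&nbhds[i]);
      pthread_mutex_unlock(&nbhds[i].mu);
      scanned[i] = true;
    }
  }
  for (int i = 0; !found && i < NUM_NBHDS; i++) { /* blocking pass */
    if (scanned[i]) continue;
    pthread_mutex_lock(&nbhds[i].mu);
    found = scan_one(&nbhds[i]);
    pthread_mutex_unlock(&nbhds[i].mu);
  }
  return found;
}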
/* pollset->po.mu lock must be held by the caller before calling this.
The function pollset_work() may temporarily release the lock (pollset->po.mu)
during the course of its execution but it will always re-acquire the lock and
ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
grpc_pollset_worker **worker_hdl,
gpr_timespec now, gpr_timespec deadline) {
grpc_pollset_worker worker;
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "pollset_work";
- if (pollset->kicked_without_poller) {
- pollset->kicked_without_poller = false;
+ GPR_TIMER_BEGIN("pollset_work", 0);
+ if (ps->kicked_without_poller) {
+ ps->kicked_without_poller = false;
+ GPR_TIMER_END("pollset_work", 0);
return GRPC_ERROR_NONE;
}
- if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
+
+ if (begin_worker(ps, &worker, worker_hdl, &now, deadline)) {
+ gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
- GPR_ASSERT(!pollset->shutting_down);
- GPR_ASSERT(!pollset->seen_inactive);
- gpr_mu_unlock(&pollset->mu);
- append_error(&error, pollset_epoll(exec_ctx, pollset, now, deadline),
- err_desc);
- gpr_mu_lock(&pollset->mu);
+ GPR_ASSERT(!ps->shutting_down);
+ GPR_ASSERT(!ps->seen_inactive);
+
+ gpr_mu_unlock(&ps->mu); /* unlock */
+ /* This is the designated polling thread at this point and should ideally do
+ polling. However, if there are unprocessed events left from a previous
+ call to do_epoll_wait(), skip calling epoll_wait() in this iteration and
+ process the pending epoll events.
+
+ The reason for decoupling do_epoll_wait and process_epoll_events is to
+ better distribute the work (i.e. handling epoll events) across multiple
+ threads.
+
+ process_epoll_events() returns very quickly: it just queues the work on
+ the exec_ctx but does not execute it (the actual execution, or more
+ accurately grpc_exec_ctx_flush(), happens in end_worker() AFTER selecting
+ a designated poller). So we are not waiting long periods without a
+ designated poller */
+ if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
+ gpr_atm_acq_load(&g_epoll_set.num_events)) {
+ append_error(&error, do_epoll_wait(exec_ctx, ps, now, deadline),
+ err_desc);
+ }
+ append_error(&error, process_epoll_events(exec_ctx, ps), err_desc);
+
+ gpr_mu_lock(&ps->mu); /* lock */
+
gpr_tls_set(&g_current_thread_worker, 0);
} else {
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
+ gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
}
- end_worker(exec_ctx, pollset, &worker, worker_hdl);
+ end_worker(exec_ctx, ps, &worker, worker_hdl);
+
gpr_tls_set(&g_current_thread_pollset, 0);
+ GPR_TIMER_END("pollset_work", 0);
return error;
}
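/* Editor's sketch of the cursor/num_events handshake assumed above: a new
   epoll_wait() is only issued once every event from the previous batch has
   been handed out, so one syscall's worth of events can be drained across
   several workers. Field names mirror g_epoll_set, but this code is
   illustrative only. */
#include <stdatomic.h>

static atomic_long cursor;     /* index of the next event to hand out */
static atomic_long num_events; /* events returned by the last epoll_wait */

static int need_epoll_wait(void) {
  /* true once all previously fetched events have been consumed */
  return atomic_load_explicit(&cursor, memory_order_acquire) ==
         atomic_load_explicit(&num_events, memory_order_acquire);
}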
-static grpc_error *pollset_kick(grpc_pollset *pollset,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
+ GPR_TIMER_BEGIN("pollset_kick", 0);
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ grpc_error *ret_err = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_strvec log;
gpr_strvec_init(&log);
@@ -849,14 +1010,14 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_strvec_add(&log, tmp);
if (pollset->root_worker != NULL) {
gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
- kick_state_string(pollset->root_worker->kick_state),
+ kick_state_string(pollset->root_worker->state),
pollset->root_worker->next,
- kick_state_string(pollset->root_worker->next->kick_state));
+ kick_state_string(pollset->root_worker->next->state));
gpr_strvec_add(&log, tmp);
}
if (specific_worker != NULL) {
gpr_asprintf(&tmp, " worker_kick_state=%s",
- kick_state_string(specific_worker->kick_state));
+ kick_state_string(specific_worker->state));
gpr_strvec_add(&log, tmp);
}
tmp = gpr_strvec_flatten(&log, NULL);
@@ -864,49 +1025,56 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_log(GPR_ERROR, "%s", tmp);
gpr_free(tmp);
}
+
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
grpc_pollset_worker *root_worker = pollset->root_worker;
if (root_worker == NULL) {
+ GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
pollset->kicked_without_poller = true;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked_without_poller");
}
- return GRPC_ERROR_NONE;
+ goto done;
}
grpc_pollset_worker *next_worker = root_worker->next;
- if (root_worker->kick_state == KICKED) {
+ if (root_worker->state == KICKED) {
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
- return GRPC_ERROR_NONE;
- } else if (next_worker->kick_state == KICKED) {
+ goto done;
+ } else if (next_worker->state == KICKED) {
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
}
SET_KICK_STATE(next_worker, KICKED);
- return GRPC_ERROR_NONE;
+ goto done;
} else if (root_worker ==
next_worker && // only try and wake up a poller if
// there is no next worker
root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
&g_active_poller)) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
- return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
- } else if (next_worker->kick_state == UNKICKED) {
+ ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
+ goto done;
+ } else if (next_worker->state == UNKICKED) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
}
GPR_ASSERT(next_worker->initialized_cv);
SET_KICK_STATE(next_worker, KICKED);
gpr_cv_signal(&next_worker->cv);
- return GRPC_ERROR_NONE;
- } else if (next_worker->kick_state == DESIGNATED_POLLER) {
- if (root_worker->kick_state != DESIGNATED_POLLER) {
+ goto done;
+ } else if (next_worker->state == DESIGNATED_POLLER) {
+ if (root_worker->state != DESIGNATED_POLLER) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(
GPR_ERROR,
@@ -915,61 +1083,78 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
}
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&root_worker->cv);
}
- return GRPC_ERROR_NONE;
+ goto done;
} else {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
}
SET_KICK_STATE(next_worker, KICKED);
- return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
+ ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
+ goto done;
}
} else {
- GPR_ASSERT(next_worker->kick_state == KICKED);
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+ GPR_ASSERT(next_worker->state == KICKED);
SET_KICK_STATE(next_worker, KICKED);
- return GRPC_ERROR_NONE;
+ goto done;
}
} else {
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked while waking up");
}
- return GRPC_ERROR_NONE;
+ goto done;
}
- } else if (specific_worker->kick_state == KICKED) {
+
+ GPR_UNREACHABLE_CODE(goto done);
+ }
+
+ if (specific_worker->state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. specific worker already kicked");
}
- return GRPC_ERROR_NONE;
+ goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
}
SET_KICK_STATE(specific_worker, KICKED);
- return GRPC_ERROR_NONE;
+ goto done;
} else if (specific_worker ==
(grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick active poller");
}
SET_KICK_STATE(specific_worker, KICKED);
- return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
+ ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
+ goto done;
} else if (specific_worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
gpr_cv_signal(&specific_worker->cv);
- return GRPC_ERROR_NONE;
+ goto done;
} else {
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick non-waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
- return GRPC_ERROR_NONE;
+ goto done;
}
+done:
+ GPR_TIMER_END("pollset_kick", 0);
+ return ret_err;
}
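/* The rewrite above turns every early 'return' into 'goto done' so that the
   GPR_TIMER_END/stats bookkeeping has a single exit point. Editor's minimal
   sketch of the pattern (timer macros elided; fast_path is a stub): */
#include <stdbool.h>

static bool fast_path(void) { return false; /* stub */ }

static int do_kick(void) {
  int ret = 0;
  /* GPR_TIMER_BEGIN("do_kick", 0); */
  if (fast_path()) {
    ret = 1;
    goto done; /* was: return 1 -- which would skip the timer end */
  }
  /* ... more branches, all funneled through done: ... */
done:
  /* GPR_TIMER_END("do_kick", 0); */
  return ret;
}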
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@@ -1013,67 +1198,60 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
static void shutdown_engine(void) {
fd_global_shutdown();
pollset_global_shutdown();
- close(g_epfd);
+ epoll_set_shutdown();
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
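/* The vtable above switches from C99 designated initializers to positional
   ones (plausibly for compilers without designated-initializer support).
   With the positional form the order must match the struct declaration
   exactly -- note the patch also moves fd_is_shutdown to its declaration
   position. Editor's illustration: */
#include <stddef.h>

typedef struct {
  size_t size;
  int (*init)(void);
} engine_vtable;

static int my_init(void) { return 0; }

static const engine_vtable v1 = {.init = my_init, .size = 8}; /* any order */
static const engine_vtable v2 = {8, my_init}; /* must follow declaration order */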
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
- * Create a dummy epoll_fd to make sure epoll support is available */
+ * Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll
+ * support is available */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
- if (!explicit_request) {
- return NULL;
- }
-
if (!grpc_has_wakeup_fd()) {
return NULL;
}
- g_epfd = epoll_create1(EPOLL_CLOEXEC);
- if (g_epfd < 0) {
- gpr_log(GPR_ERROR, "epoll unavailable");
+ if (!epoll_set_init()) {
return NULL;
}
fd_global_init();
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
- close(g_epfd);
fd_global_shutdown();
+ epoll_set_shutdown();
return NULL;
}
- gpr_log(GPR_ERROR, "grpc epoll fd: %d", g_epfd);
-
return &vtable;
}
diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
deleted file mode 100644
index f2f3e15704..0000000000
--- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
+++ /dev/null
@@ -1,1957 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-/* This polling engine is only relevant on linux kernels supporting epoll() */
-#ifdef GRPC_LINUX_EPOLL
-
-#include "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h"
-
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-#include <poll.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include <sys/epoll.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/tls.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/lockfree_event.h"
-#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
-#include "src/core/lib/support/env.h"
-
-#define GRPC_POLLING_TRACE(fmt, ...) \
- if (GRPC_TRACER_ON(grpc_polling_trace)) { \
- gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
- }
-
-#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
-
-/* The maximum number of polling threads per polling island. By default no
- limit */
-static int g_max_pollers_per_pi = INT_MAX;
-
-static int grpc_wakeup_signal = -1;
-static bool is_grpc_wakeup_signal_initialized = false;
-
-/* Implements the function defined in grpc_posix.h. This function might be
- * called before even calling grpc_init() to set a different signal to use.
- * If signum == -1, then the use of signals is disabled */
-static void grpc_use_signal(int signum) {
- grpc_wakeup_signal = signum;
- is_grpc_wakeup_signal_initialized = true;
-
- if (grpc_wakeup_signal < 0) {
- gpr_log(GPR_INFO,
- "Use of signals is disabled. Epoll engine will not be used");
- } else {
- gpr_log(GPR_INFO, "epoll engine will be using signal: %d",
- grpc_wakeup_signal);
- }
-}
-
-struct polling_island;
-
-typedef enum {
- POLL_OBJ_FD,
- POLL_OBJ_POLLSET,
- POLL_OBJ_POLLSET_SET
-} poll_obj_type;
-
-typedef struct poll_obj {
-#ifndef NDEBUG
- poll_obj_type obj_type;
-#endif
- gpr_mu mu;
- struct polling_island *pi;
-} poll_obj;
-
-static const char *poll_obj_string(poll_obj_type po_type) {
- switch (po_type) {
- case POLL_OBJ_FD:
- return "fd";
- case POLL_OBJ_POLLSET:
- return "pollset";
- case POLL_OBJ_POLLSET_SET:
- return "pollset_set";
- }
-
- GPR_UNREACHABLE_CODE(return "UNKNOWN");
-}
-
-/*******************************************************************************
- * Fd Declarations
- */
-
-#define FD_FROM_PO(po) ((grpc_fd *)(po))
-
-struct grpc_fd {
- poll_obj po;
-
- int fd;
- /* refst format:
- bit 0 : 1=Active / 0=Orphaned
- bits 1-n : refcount
- Ref/Unref by two to avoid altering the orphaned bit */
- gpr_atm refst;
-
- /* The fd is either closed or we relinquished control of it. In either
- cases, this indicates that the 'fd' on this structure is no longer
- valid */
- bool orphaned;
-
- gpr_atm read_closure;
- gpr_atm write_closure;
-
- struct grpc_fd *freelist_next;
- grpc_closure *on_done_closure;
-
- /* The pollset that last noticed that the fd is readable. The actual type
- * stored in this is (grpc_pollset *) */
- gpr_atm read_notifier_pollset;
-
- grpc_iomgr_object iomgr_object;
-};
-
-/* Reference counting for fds */
-#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
- int line);
-#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
-#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
-#else
-static void fd_ref(grpc_fd *fd);
-static void fd_unref(grpc_fd *fd);
-#define GRPC_FD_REF(fd, reason) fd_ref(fd)
-#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
-#endif
-
-static void fd_global_init(void);
-static void fd_global_shutdown(void);
-
-/*******************************************************************************
- * Polling island Declarations
- */
-
-#ifndef NDEBUG
-
-#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
-#define PI_UNREF(exec_ctx, p, r) \
- pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-
-#else
-
-#define PI_ADD_REF(p, r) pi_add_ref((p))
-#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
-
-#endif
-
-typedef struct worker_node {
- struct worker_node *next;
- struct worker_node *prev;
-} worker_node;
-
-/* This is also used as grpc_workqueue (by directly casting it) */
-typedef struct polling_island {
- gpr_mu mu;
- /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
- the refcount.
- Once the ref count becomes zero, this structure is destroyed which means
- we should ensure that there is never a scenario where a PI_ADD_REF() is
- racing with a PI_UNREF() that just made the ref_count zero. */
- gpr_atm ref_count;
-
- /* Pointer to the polling_island this merged into.
- * merged_to value is only set once in polling_island's lifetime (and that too
- * only if the island is merged with another island). Because of this, we can
- * use gpr_atm type here so that we can do atomic access on this and reduce
- * lock contention on 'mu' mutex.
- *
- * Note that if this field is not NULL (i.e not 0), all the remaining fields
- * (except mu and ref_count) are invalid and must be ignored. */
- gpr_atm merged_to;
-
- /* Number of threads currently polling on this island */
- gpr_atm poller_count;
-
- /* The list of workers waiting to do polling on this polling island */
- gpr_mu worker_list_mu;
- worker_node worker_list_head;
-
- /* The fd of the underlying epoll set */
- int epoll_fd;
-
- /* The file descriptors in the epoll set */
- size_t fd_cnt;
- size_t fd_capacity;
- grpc_fd **fds;
-} polling_island;
-
-/*******************************************************************************
- * Pollset Declarations
- */
-#define WORKER_FROM_WORKER_LIST_NODE(p) \
- (struct grpc_pollset_worker *)(((char *)(p)) - \
- offsetof(grpc_pollset_worker, pi_list_link))
-struct grpc_pollset_worker {
- /* Thread id of this worker */
- pthread_t pt_id;
-
- /* Used to prevent a worker from getting kicked multiple times */
- gpr_atm is_kicked;
-
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
-
- /* Indicates if it is this worker's turn to do epoll */
- gpr_atm is_polling_turn;
-
- /* Node in the polling island's worker list. */
- worker_node pi_list_link;
-};
-
-struct grpc_pollset {
- poll_obj po;
-
- grpc_pollset_worker root_worker;
- bool kicked_without_pollers;
-
- bool shutting_down; /* Is the pollset shutting down? */
- bool finish_shutdown_called; /* Has 'finish_shutdown_locked()' been called? */
- grpc_closure *shutdown_done; /* Called after shutdown is complete */
-};
-
-/*******************************************************************************
- * Pollset-set Declarations
- */
-struct grpc_pollset_set {
- poll_obj po;
-};
-
-/*******************************************************************************
- * Common helpers
- */
-
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
- if (error == GRPC_ERROR_NONE) return true;
- if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
- }
- *composite = grpc_error_add_child(*composite, error);
- return false;
-}
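/* Editor's sketch of the append_error() usage pattern above: fold several
   fallible steps into one composite error and check it once at the end
   (the boolean return doubles as an early-out signal). Assumes the
   grpc_error API in this file; step_one/step_two are hypothetical. */
static grpc_error *init_all(void) {
  grpc_error *composite = GRPC_ERROR_NONE;
  append_error(&composite, step_one(), "init");
  append_error(&composite, step_two(), "init");
  return composite; /* GRPC_ERROR_NONE iff every step succeeded */
}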
-
-/*******************************************************************************
- * Polling island Definitions
- */
-
-/* The wakeup fd that is used to wake up all threads in a Polling island. This
- is useful in the polling island merge operation where we need to wakeup all
- the threads currently polling the smaller polling island (so that they can
- start polling the new/merged polling island)
-
- NOTE: This fd is initialized to be readable and MUST NOT be consumed, i.e. the
- threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
-static grpc_wakeup_fd polling_island_wakeup_fd;
-
-/* The polling island being polled right now.
- See comments in workqueue_maybe_wakeup for why this is tracked. */
-static __thread polling_island *g_current_thread_polling_island;
-
-/* Forward declaration */
-static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
-
-#ifdef GRPC_TSAN
-/* Currently TSAN may incorrectly flag data races between epoll_ctl and
- epoll_wait for any grpc_fd structs that are added to the epoll set via
- epoll_ctl and are returned (within a very short window) via epoll_wait().
-
- To work around this race, we establish a happens-before relation between
- the code just before epoll_ctl() and the code after epoll_wait() by using
- this atomic */
-gpr_atm g_epoll_sync;
-#endif /* defined(GRPC_TSAN) */
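/* The g_epoll_sync workaround pairs a release store issued just before
   epoll_ctl() with an acquire load issued just after epoll_wait(), giving
   TSAN (and the memory model) a happens-before edge between the two
   threads. Editor's minimal sketch using C11 atomics: */
#include <stdatomic.h>

static atomic_int epoll_sync;

static void before_epoll_ctl(void) {
  atomic_store_explicit(&epoll_sync, 1, memory_order_release);
  /* epoll_ctl(...) */
}

static void after_epoll_wait(void) {
  /* epoll_wait(...) returned */
  (void)atomic_load_explicit(&epoll_sync, memory_order_acquire);
}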
-
-static void pi_add_ref(polling_island *pi);
-static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-
-#ifndef NDEBUG
-static void pi_add_ref_dbg(polling_island *pi, const char *reason,
- const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- pi, old_cnt, old_cnt + 1, reason, file, line);
- }
- pi_add_ref(pi);
-}
-
-static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
- const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- pi, old_cnt, (old_cnt - 1), reason, file, line);
- }
- pi_unref(exec_ctx, pi);
-}
-#endif
-
-static void pi_add_ref(polling_island *pi) {
- gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1);
-}
-
-static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
- /* If ref count went to zero, delete the polling island.
- Note that this deletion need not be done under a lock. Once the ref count goes
- to zero, we are guaranteed that no one else holds a reference to the
- polling island (and that there is no racing pi_add_ref() call either).
-
- Also, if we are deleting the polling island and the merged_to field is
- non-empty, we should remove a ref to the merged_to polling island
- */
- if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- polling_island_delete(exec_ctx, pi);
- if (next != NULL) {
- PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
- }
- }
-}
-
-static void worker_node_init(worker_node *node) {
- node->next = node->prev = node;
-}
-
-/* Not thread safe. Do under a list-level lock */
-static void push_back_worker_node(worker_node *head, worker_node *node) {
- node->next = head;
- node->prev = head->prev;
- head->prev->next = node;
- head->prev = node;
-}
-
-/* Not thread safe. Do under a list-level lock */
-static void remove_worker_node(worker_node *node) {
- node->next->prev = node->prev;
- node->prev->next = node->next;
- /* If the node's next and prev point to itself, the node is considered
- detached from the list */
- node->next = node->prev = node;
-}
-
-/* Not thread safe. Do under a list-level lock */
-static worker_node *pop_front_worker_node(worker_node *head) {
- worker_node *node = head->next;
- if (node != head) {
- remove_worker_node(node);
- } else {
- node = NULL;
- }
-
- return node;
-}
-
-/* Returns true if the node's next and prev are pointing to itself (which
- indicates that the node is not in the list) */
-static bool is_worker_node_detached(worker_node *node) {
- return (node->next == node->prev && node->next == node);
-}
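/* Editor's usage sketch for the sentinel list above (assumes the
   worker_node helpers defined in this file): a detached node points at
   itself, which is why remove_worker_node() is safe to call twice. */
#include <assert.h>

static void worker_list_demo(void) {
  worker_node head, a;
  worker_node_init(&head);
  worker_node_init(&a);
  push_back_worker_node(&head, &a);
  assert(!is_worker_node_detached(&a));
  remove_worker_node(&a);
  assert(is_worker_node_detached(&a));
  remove_worker_node(&a); /* safe no-op: node already points at itself */
}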
-
-/* The caller is expected to hold pi->mu lock before calling this function
- */
-static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
- size_t fd_count, bool add_fd_refs,
- grpc_error **error) {
- int err;
- size_t i;
- struct epoll_event ev;
- char *err_msg;
- const char *err_desc = "polling_island_add_fds";
-
-#ifdef GRPC_TSAN
- /* See the definition of g_epoll_sync for more context */
- gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0);
-#endif /* defined(GRPC_TSAN) */
-
- for (i = 0; i < fd_count; i++) {
- ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
- ev.data.ptr = fds[i];
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev);
-
- if (err < 0) {
- if (errno != EEXIST) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)",
- pi->epoll_fd, fds[i]->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-
- continue;
- }
-
- if (pi->fd_cnt == pi->fd_capacity) {
- pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
- pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
- }
-
- pi->fds[pi->fd_cnt++] = fds[i];
- if (add_fd_refs) {
- GRPC_FD_REF(fds[i], "polling_island");
- }
- }
-}
-
-/* The caller is expected to hold pi->mu before calling this */
-static void polling_island_add_wakeup_fd_locked(polling_island *pi,
- grpc_wakeup_fd *wakeup_fd,
- grpc_error **error) {
- struct epoll_event ev;
- int err;
- char *err_msg;
- const char *err_desc = "polling_island_add_wakeup_fd";
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
- ev.data.ptr = wakeup_fd;
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD,
- GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
- if (err < 0 && errno != EEXIST) {
- gpr_asprintf(&err_msg,
- "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
- "error: %d (%s)",
- pi->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno,
- strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-}
-
-/* The caller is expected to hold pi->mu lock before calling this function */
-static void polling_island_remove_all_fds_locked(polling_island *pi,
- bool remove_fd_refs,
- grpc_error **error) {
- int err;
- size_t i;
- char *err_msg;
- const char *err_desc = "polling_island_remove_fds";
-
- for (i = 0; i < pi->fd_cnt; i++) {
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(&err_msg,
- "epoll_ctl (epoll_fd: %d) delete fds[%zu]: %d failed with "
- "error: %d (%s)",
- pi->epoll_fd, i, pi->fds[i]->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-
- if (remove_fd_refs) {
- GRPC_FD_UNREF(pi->fds[i], "polling_island");
- }
- }
-
- pi->fd_cnt = 0;
-}
-
-/* The caller is expected to hold pi->mu lock before calling this function */
-static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd,
- bool is_fd_closed,
- grpc_error **error) {
- int err;
- size_t i;
- char *err_msg;
- const char *err_desc = "polling_island_remove_fd";
-
- /* If fd is already closed, then it would have automatically been removed
- from the epoll set */
- if (!is_fd_closed) {
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
- pi->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
- }
-
- for (i = 0; i < pi->fd_cnt; i++) {
- if (pi->fds[i] == fd) {
- pi->fds[i] = pi->fds[--pi->fd_cnt];
- GRPC_FD_UNREF(fd, "polling_island");
- break;
- }
- }
-}
-
-/* Might return NULL in case of an error */
-static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
- grpc_fd *initial_fd,
- grpc_error **error) {
- polling_island *pi = NULL;
- const char *err_desc = "polling_island_create";
-
- *error = GRPC_ERROR_NONE;
-
- pi = gpr_malloc(sizeof(*pi));
- gpr_mu_init(&pi->mu);
- pi->fd_cnt = 0;
- pi->fd_capacity = 0;
- pi->fds = NULL;
- pi->epoll_fd = -1;
-
- gpr_atm_rel_store(&pi->ref_count, 0);
- gpr_atm_rel_store(&pi->poller_count, 0);
- gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
-
- gpr_mu_init(&pi->worker_list_mu);
- worker_node_init(&pi->worker_list_head);
-
- pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
-
- if (pi->epoll_fd < 0) {
- append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc);
- goto done;
- }
-
- if (initial_fd != NULL) {
- polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
- }
-
-done:
- if (*error != GRPC_ERROR_NONE) {
- polling_island_delete(exec_ctx, pi);
- pi = NULL;
- }
- return pi;
-}
-
-static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
- GPR_ASSERT(pi->fd_cnt == 0);
-
- if (pi->epoll_fd >= 0) {
- close(pi->epoll_fd);
- }
- gpr_mu_destroy(&pi->mu);
- gpr_mu_destroy(&pi->worker_list_mu);
- GPR_ASSERT(is_worker_node_detached(&pi->worker_list_head));
-
- gpr_free(pi->fds);
- gpr_free(pi);
-}
-
-/* Attempts to get the last polling island in the linked list (linked by the
- * 'merged_to' field). Since this does not lock the polling island, there are no
- * guarantees that the island returned is the last island */
-static polling_island *polling_island_maybe_get_latest(polling_island *pi) {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- while (next != NULL) {
- pi = next;
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- }
-
- return pi;
-}
-
-/* Gets the lock on the *latest* polling island i.e the last polling island in
- the linked list (linked by the 'merged_to' field). Call gpr_mu_unlock on the
- returned polling island's mu.
- Usage: To lock/unlock polling island "pi", do the following:
- polling_island *pi_latest = polling_island_lock(pi);
- ...
- ... critical section ..
- ...
- gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. NOT pi->mu */
-static polling_island *polling_island_lock(polling_island *pi) {
- polling_island *next = NULL;
-
- while (true) {
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- if (next == NULL) {
- /* Looks like 'pi' is the last node in the linked list, but unless we check
- this by holding the pi->mu lock, we cannot be sure (i.e. without the
- pi->mu lock, we don't prevent island merges).
- To be absolutely sure, check once more by holding the pi->mu lock */
- gpr_mu_lock(&pi->mu);
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- if (next == NULL) {
- /* pi is in fact the last node and we have the pi->mu lock. We're done */
- break;
- }
-
- /* pi->merged_to is not NULL, i.e. pi isn't the last node anymore. pi->mu
- * isn't the lock we are interested in. Continue traversing the list */
- gpr_mu_unlock(&pi->mu);
- }
-
- pi = next;
- }
-
- return pi;
-}
-
-/* Gets the lock on the *latest* polling islands in the linked lists pointed by
- *p and *q (and also updates *p and *q to point to the latest polling islands)
-
- This function is needed because calling the following block of code to obtain
- locks on polling islands (*p and *q) is prone to deadlocks.
- {
- polling_island_lock(*p, true);
- polling_island_lock(*q, true);
- }
-
- Usage/example:
- polling_island *p1;
- polling_island *p2;
- ..
- polling_island_lock_pair(&p1, &p2);
- ..
- .. Critical section with both p1 and p2 locked
- ..
- // Release locks: Always call polling_island_unlock_pair() to release locks
- polling_island_unlock_pair(p1, p2);
-*/
-static void polling_island_lock_pair(polling_island **p, polling_island **q) {
- polling_island *pi_1 = *p;
- polling_island *pi_2 = *q;
- polling_island *next_1 = NULL;
- polling_island *next_2 = NULL;
-
- /* The algorithm is simple:
- - Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and
- keep updating pi_1 and pi_2)
- - Then obtain locks on the islands by following a lock order rule of
- locking polling_island with lower address first
- Special case: Before obtaining the locks, check if pi_1 and pi_2 are
- pointing to the same island. If that is the case, we can just call
- polling_island_lock()
- - After obtaining both the locks, double check that the polling islands
- are still the last polling islands in their respective linked lists
- (this is because there might have been polling island merges before
- we got the lock)
- - If the polling islands are the last islands, we are done. If not,
- release the locks and continue the process from the first step */
- while (true) {
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- while (next_1 != NULL) {
- pi_1 = next_1;
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- }
-
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
- while (next_2 != NULL) {
- pi_2 = next_2;
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
- }
-
- if (pi_1 == pi_2) {
- pi_1 = pi_2 = polling_island_lock(pi_1);
- break;
- }
-
- if (pi_1 < pi_2) {
- gpr_mu_lock(&pi_1->mu);
- gpr_mu_lock(&pi_2->mu);
- } else {
- gpr_mu_lock(&pi_2->mu);
- gpr_mu_lock(&pi_1->mu);
- }
-
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
- if (next_1 == NULL && next_2 == NULL) {
- break;
- }
-
- gpr_mu_unlock(&pi_1->mu);
- gpr_mu_unlock(&pi_2->mu);
- }
-
- *p = pi_1;
- *q = pi_2;
-}
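/* The pair-lock above avoids deadlock by always acquiring the mutex at the
   lower address first (with a fast path when both pointers reach the same
   island). Editor's generic sketch of address-ordered locking: */
#include <pthread.h>

static void lock_pair_ordered(pthread_mutex_t *a, pthread_mutex_t *b) {
  if (a == b) {
    pthread_mutex_lock(a); /* matches the single-island fast path */
  } else if (a < b) {
    pthread_mutex_lock(a);
    pthread_mutex_lock(b);
  } else {
    pthread_mutex_lock(b);
    pthread_mutex_lock(a);
  }
}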
-
-static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
- if (p == q) {
- gpr_mu_unlock(&p->mu);
- } else {
- gpr_mu_unlock(&p->mu);
- gpr_mu_unlock(&q->mu);
- }
-}
-
-static polling_island *polling_island_merge(polling_island *p,
- polling_island *q,
- grpc_error **error) {
- /* Get locks on both the polling islands */
- polling_island_lock_pair(&p, &q);
-
- if (p != q) {
- /* Make sure that p points to the polling island with fewer fds than q */
- if (p->fd_cnt > q->fd_cnt) {
- GPR_SWAP(polling_island *, p, q);
- }
-
- /* Merge p with q, i.e. move all the fds from p (the one with fewer fds) to q.
- Note that the refcounts on the fds being moved will not change here.
- This is why the last param in the following two functions is 'false' */
- polling_island_add_fds_locked(q, p->fds, p->fd_cnt, false, error);
- polling_island_remove_all_fds_locked(p, false, error);
-
- /* Wakeup all the pollers (if any) on p so that they pickup this change */
- polling_island_add_wakeup_fd_locked(p, &polling_island_wakeup_fd, error);
-
- /* Add the 'merged_to' link from p --> q */
- gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
- PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
- }
- /* else if p == q, nothing needs to be done */
-
- polling_island_unlock_pair(p, q);
-
- /* Return the merged polling island (Note that no merge would have happened
- if p == q which is ok) */
- return q;
-}
-
-static grpc_error *polling_island_global_init() {
- grpc_error *error = GRPC_ERROR_NONE;
-
- error = grpc_wakeup_fd_init(&polling_island_wakeup_fd);
- if (error == GRPC_ERROR_NONE) {
- error = grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd);
- }
-
- return error;
-}
-
-static void polling_island_global_shutdown() {
- grpc_wakeup_fd_destroy(&polling_island_wakeup_fd);
-}
-
-/*******************************************************************************
- * Fd Definitions
- */
-
-/* We need to keep a freelist not because of any concerns about malloc performance
- * but instead so that implementations with multiple threads in (for example)
- * epoll_wait deal with the race between pollset removal and incoming poll
- * notifications.
- *
- * The problem is that the poller ultimately holds a reference to this
- * object, so it is very difficult to know when it is safe to free it, at least
- * without some expensive synchronization.
- *
- * If we keep the object freelisted, in the worst case losing this race just
- * becomes a spurious read notification on a reused fd.
- */
-
-/* The alarm system needs to be able to wake up 'some poller' sometimes
- * (specifically when a new alarm needs to be triggered earlier than the next
- * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
- * case occurs. */
-
-static grpc_fd *fd_freelist = NULL;
-static gpr_mu fd_freelist_mu;
-
-#ifndef NDEBUG
-#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
-static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
- int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG,
- "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
- fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
- }
-#else
-#define REF_BY(fd, n, reason) ref_by(fd, n)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n)
-static void ref_by(grpc_fd *fd, int n) {
-#endif
- GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
-}
-
-#ifndef NDEBUG
-static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
- int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG,
- "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
- fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
- }
-#else
-static void unref_by(grpc_fd *fd, int n) {
-#endif
- gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
- if (old == n) {
- /* Add the fd to the freelist */
- gpr_mu_lock(&fd_freelist_mu);
- fd->freelist_next = fd_freelist;
- fd_freelist = fd;
- grpc_iomgr_unregister_object(&fd->iomgr_object);
-
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
-
- gpr_mu_unlock(&fd_freelist_mu);
- } else {
- GPR_ASSERT(old > n);
- }
-}
-
-/* Increment refcount by two to avoid changing the orphan bit */
-#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
- int line) {
- ref_by(fd, 2, reason, file, line);
-}
-
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
- int line) {
- unref_by(fd, 2, reason, file, line);
-}
-#else
-static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
-static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
-#endif
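/* Editor's sketch of the refst encoding described above: bit 0 is the
   active/orphaned flag and the remaining bits hold the reference count, so
   stepping the counter by 2 never disturbs the flag. C11-atomics stand-in
   for the gpr_atm field: */
#include <stdatomic.h>

static atomic_long refst = 1; /* active (bit 0 set), zero extra refs */

static void take_ref(void) { atomic_fetch_add(&refst, 2); }
static void drop_ref(void) { atomic_fetch_sub(&refst, 2); }
static int is_active(void) { return (int)(atomic_load(&refst) & 1); }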
-
-static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
-
-static void fd_global_shutdown(void) {
- gpr_mu_lock(&fd_freelist_mu);
- gpr_mu_unlock(&fd_freelist_mu);
- while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- gpr_mu_destroy(&fd->po.mu);
- gpr_free(fd);
- }
- gpr_mu_destroy(&fd_freelist_mu);
-}
-
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = NULL;
-
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist != NULL) {
- new_fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- }
- gpr_mu_unlock(&fd_freelist_mu);
-
- if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&new_fd->po.mu);
- }
-
- /* Note: It is not really needed to get the new_fd->po.mu lock here. If this
- * is a newly created fd (or an fd we got from the freelist), no one else
- * would be holding a lock to it anyway. */
- gpr_mu_lock(&new_fd->po.mu);
- new_fd->po.pi = NULL;
-#ifndef NDEBUG
- new_fd->po.obj_type = POLL_OBJ_FD;
-#endif
-
- gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
- new_fd->fd = fd;
- new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
- gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
-
- new_fd->freelist_next = NULL;
- new_fd->on_done_closure = NULL;
-
- gpr_mu_unlock(&new_fd->po.mu);
-
- char *fd_name;
- gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
- grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
- }
-#endif
- gpr_free(fd_name);
- return new_fd;
-}
-
-static int fd_wrapped_fd(grpc_fd *fd) {
- int ret_fd = -1;
- gpr_mu_lock(&fd->po.mu);
- if (!fd->orphaned) {
- ret_fd = fd->fd;
- }
- gpr_mu_unlock(&fd->po.mu);
-
- return ret_fd;
-}
-
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
- grpc_error *error = GRPC_ERROR_NONE;
- polling_island *unref_pi = NULL;
-
- gpr_mu_lock(&fd->po.mu);
- fd->on_done_closure = on_done;
-
- /* Remove the active status but keep referenced. We want this grpc_fd struct
- to be alive (and not added to freelist) until the end of this function */
- REF_BY(fd, 1, reason);
-
- /* Remove the fd from the polling island:
- - Get a lock on the latest polling island (i.e the last island in the
- linked list pointed by fd->po.pi). This is the island that
- would actually contain the fd
- - Remove the fd from the latest polling island
- - Unlock the latest polling island
- - Set fd->po.pi to NULL (but remove the ref on the polling island
- before doing this.) */
- if (fd->po.pi != NULL) {
- polling_island *pi_latest = polling_island_lock(fd->po.pi);
- polling_island_remove_fd_locked(pi_latest, fd, already_closed, &error);
- gpr_mu_unlock(&pi_latest->mu);
-
- unref_pi = fd->po.pi;
- fd->po.pi = NULL;
- }
-
- /* If release_fd is not NULL, we should be relinquishing control of the file
- descriptor fd->fd (but we still own the grpc_fd structure). */
- if (release_fd != NULL) {
- *release_fd = fd->fd;
- } else {
- close(fd->fd);
- }
-
- fd->orphaned = true;
-
- GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
-
- gpr_mu_unlock(&fd->po.mu);
- UNREF_BY(fd, 2, reason); /* Drop the reference */
- if (unref_pi != NULL) {
- /* Unref stale polling island here, outside the fd lock above.
- The polling island owns a workqueue which owns an fd, and unreffing
- inside the lock can cause an eventual lock loop that makes TSAN very
- unhappy. */
- PI_UNREF(exec_ctx, unref_pi, "fd_orphan");
- }
- GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
- GRPC_ERROR_UNREF(error);
-}
-
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
- gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
- return (grpc_pollset *)notifier;
-}
-
-static bool fd_is_shutdown(grpc_fd *fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
-}
-
-/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
- shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
- }
- GRPC_ERROR_UNREF(why);
-}
-
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
-}
-
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
-}
-
-/*******************************************************************************
- * Pollset Definitions
- */
-GPR_TLS_DECL(g_current_thread_pollset);
-GPR_TLS_DECL(g_current_thread_worker);
-static __thread bool g_initialized_sigmask;
-static __thread sigset_t g_orig_sigmask;
-static __thread sigset_t g_wakeup_sig_set;
-
-static void sig_handler(int sig_num) {
-#ifdef GRPC_EPOLL_DEBUG
- gpr_log(GPR_INFO, "Received signal %d", sig_num);
-#endif
-}
-
-static void pollset_worker_init(grpc_pollset_worker *worker) {
- worker->pt_id = pthread_self();
- worker->next = worker->prev = NULL;
- gpr_atm_no_barrier_store(&worker->is_kicked, (gpr_atm)0);
- gpr_atm_no_barrier_store(&worker->is_polling_turn, (gpr_atm)0);
- worker_node_init(&worker->pi_list_link);
-}
-
-static void poller_kick_init() { signal(grpc_wakeup_signal, sig_handler); }
-
-/* Global state management */
-static grpc_error *pollset_global_init(void) {
- gpr_tls_init(&g_current_thread_pollset);
- gpr_tls_init(&g_current_thread_worker);
- poller_kick_init();
- return GRPC_ERROR_NONE;
-}
-
-static void pollset_global_shutdown(void) {
- gpr_tls_destroy(&g_current_thread_pollset);
- gpr_tls_destroy(&g_current_thread_worker);
-}
-
-static grpc_error *worker_kick(grpc_pollset_worker *worker,
- gpr_atm *is_kicked) {
- grpc_error *err = GRPC_ERROR_NONE;
-
- /* Kick the worker only if it was not already kicked */
- if (gpr_atm_no_barrier_cas(is_kicked, (gpr_atm)0, (gpr_atm)1)) {
- GRPC_POLLING_TRACE(
- "pollset_worker_kick: Kicking worker: %p (thread id: %ld)",
- (void *)worker, (long int)worker->pt_id);
- int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal);
- if (err_num != 0) {
- err = GRPC_OS_ERROR(err_num, "pthread_kill");
- }
- }
- return err;
-}
-
-static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
- return worker_kick(worker, &worker->is_kicked);
-}
-
-static grpc_error *poller_kick(grpc_pollset_worker *worker) {
- return worker_kick(worker, &worker->is_polling_turn);
-}
-
-/* Return 1 if the pollset has active threads in pollset_work (pollset must
- * be locked) */
-static int pollset_has_workers(grpc_pollset *p) {
- return p->root_worker.next != &p->root_worker;
-}
-
-static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev->next = worker->next;
- worker->next->prev = worker->prev;
-}
-
-static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
- if (pollset_has_workers(p)) {
- grpc_pollset_worker *w = p->root_worker.next;
- remove_worker(p, w);
- return w;
- } else {
- return NULL;
- }
-}
-
-static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->next = &p->root_worker;
- worker->prev = worker->next->prev;
- worker->prev->next = worker->next->prev = worker;
-}
-
-static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev = &p->root_worker;
- worker->next = worker->prev->next;
- worker->prev->next = worker->next->prev = worker;
-}
-
-/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
- GPR_TIMER_BEGIN("pollset_kick", 0);
- grpc_error *error = GRPC_ERROR_NONE;
- const char *err_desc = "Kick Failure";
- grpc_pollset_worker *worker = specific_worker;
- if (worker != NULL) {
- if (worker == GRPC_POLLSET_KICK_BROADCAST) {
- if (pollset_has_workers(p)) {
- GPR_TIMER_BEGIN("pollset_kick.broadcast", 0);
- for (worker = p->root_worker.next; worker != &p->root_worker;
- worker = worker->next) {
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- GPR_TIMER_END("pollset_kick.broadcast", 0);
- } else {
- p->kicked_without_pollers = true;
- }
- } else {
- GPR_TIMER_MARK("kicked_specifically", 0);
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) {
- /* Since worker == NULL, it means that we can kick "any" worker on this
- pollset 'p'. If 'p' happens to be the same pollset this thread is
- currently polling (i.e. in the pollset_work() function), then there is no
- need to kick any other worker since the current thread can just absorb
- the kick. This is the reason why we enter this case only when
- g_current_thread_pollset != p */
-
- GPR_TIMER_MARK("kick_anonymous", 0);
- worker = pop_front_worker(p);
- if (worker != NULL) {
- GPR_TIMER_MARK("finally_kick", 0);
- push_back_worker(p, worker);
- append_error(&error, pollset_worker_kick(worker), err_desc);
- } else {
- GPR_TIMER_MARK("kicked_no_pollers", 0);
- p->kicked_without_pollers = true;
- }
- }
-
- GPR_TIMER_END("pollset_kick", 0);
- GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error));
- return error;
-}
-
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- gpr_mu_init(&pollset->po.mu);
- *mu = &pollset->po.mu;
- pollset->po.pi = NULL;
-#ifndef NDEBUG
- pollset->po.obj_type = POLL_OBJ_POLLSET;
-#endif
-
- pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
- pollset->kicked_without_pollers = false;
-
- pollset->shutting_down = false;
- pollset->finish_shutdown_called = false;
- pollset->shutdown_done = NULL;
-}
-
-/* Convert millis to timespec (clock-type is assumed to be GPR_TIMESPAN) */
-static struct timespec millis_to_timespec(int millis) {
- struct timespec linux_ts;
- gpr_timespec gpr_ts;
-
- if (millis == -1) {
- gpr_ts = gpr_inf_future(GPR_TIMESPAN);
- } else {
- gpr_ts = gpr_time_from_millis(millis, GPR_TIMESPAN);
- }
-
- linux_ts.tv_sec = (time_t)gpr_ts.tv_sec;
- linux_ts.tv_nsec = gpr_ts.tv_nsec;
- return linux_ts;
-}
-
-/* Convert a timespec to milliseconds:
- - Very small or negative poll times are clamped to zero to do a non-blocking
- poll (which becomes spin polling)
- - Other small values are rounded up to one millisecond
- - Longer than a millisecond polls are rounded up to the next nearest
- millisecond to avoid spinning
- - Infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
-
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
- return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- int millis = gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
- return millis >= 1 ? millis : 1;
-}
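/* Editor's worked example of the rounding rules above: timeouts are
   ceiling-divided into milliseconds, and small positive values become 1 so
   a pending deadline never turns into a busy spin. This helper mirrors
   only the final rounding step (the spin-polling clamp to 0 is handled
   earlier in the function). */
#include <stdint.h>

static int ns_to_millis_roundup(int64_t ns) {
  const int64_t NS_PER_MS = 1000 * 1000;
  int64_t ms = (ns + NS_PER_MS - 1) / NS_PER_MS; /* ceiling division */
  return ms >= 1 ? (int)ms : 1; /* e.g. 250us -> 1ms, 3.2ms -> 4ms */
}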
-
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_pollset *notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
-
- /* Note, it is possible that fd_become_readable might be called twice with
- different 'notifier's when an fd becomes readable and it is in two epoll
- sets (This can happen briefly during polling island merges). In such cases
- it does not really matter which notifier is set as the read_notifier_pollset
- (They would both point to the same polling island anyway) */
- /* Use release store to match with acquire load in fd_get_read_notifier */
- gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
-}
-
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
-}
-
-static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
- grpc_pollset *ps, char *reason) {
- if (ps->po.pi != NULL) {
- PI_UNREF(exec_ctx, ps->po.pi, reason);
- }
- ps->po.pi = NULL;
-}
-
-static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- /* The pollset cannot have any workers if we are at this stage */
- GPR_ASSERT(!pollset_has_workers(pollset));
-
- pollset->finish_shutdown_called = true;
-
- /* Release the ref and set pollset->po.pi to NULL */
- pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
- GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
-}
-
-/* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
- GPR_TIMER_BEGIN("pollset_shutdown", 0);
- GPR_ASSERT(!pollset->shutting_down);
- pollset->shutting_down = true;
- pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
-
- /* If the pollset has any workers, we cannot call finish_shutdown_locked()
- because it would release the underlying polling island. In such a case, we
- let the last worker call finish_shutdown_locked() from pollset_work() */
- if (!pollset_has_workers(pollset)) {
- GPR_ASSERT(!pollset->finish_shutdown_called);
- GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
- }
- GPR_TIMER_END("pollset_shutdown", 0);
-}
-
-/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
- * than destroying the mutexes, there is nothing special that needs to be done
- * here */
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- GPR_ASSERT(!pollset_has_workers(pollset));
- gpr_mu_destroy(&pollset->po.mu);
-}
-
-/* NOTE: This function may modify 'now' */
-static bool acquire_polling_lease(grpc_pollset_worker *worker,
- polling_island *pi, gpr_timespec deadline,
- gpr_timespec *now) {
- bool is_lease_acquired = false;
-
- gpr_mu_lock(&pi->worker_list_mu); // LOCK
- long num_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
-
- if (num_pollers >= g_max_pollers_per_pi) {
- push_back_worker_node(&pi->worker_list_head, &worker->pi_list_link);
- gpr_mu_unlock(&pi->worker_list_mu); // UNLOCK
-
- bool is_timeout = false;
- int ret;
- int timeout_ms = poll_deadline_to_millis_timeout(deadline, *now);
- if (timeout_ms == -1) {
- ret = sigwaitinfo(&g_wakeup_sig_set, NULL);
- } else {
- struct timespec sigwait_timeout = millis_to_timespec(timeout_ms);
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- ret = sigtimedwait(&g_wakeup_sig_set, NULL, &sigwait_timeout);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
- }
-
- if (ret == -1) {
- if (errno == EAGAIN) {
- is_timeout = true;
- } else {
- /* NOTE: This should not happen. If we see these log messages, it means
- we are most likely doing something incorrect in the setup needed
- for sigwaitinfo/sigtimedwait */
- gpr_log(GPR_ERROR,
- "sigtimedwait failed with retcode: %d (timeout_ms: %d)", errno,
- timeout_ms);
- }
- }
-
- /* Did the worker come out of sigtimedwait because a thread that just
- exited epoll kicked it (in the release_polling_lease function)? */
- bool is_polling_turn = gpr_atm_acq_load(&worker->is_polling_turn);
-
- /* Did the worker come out of sigtimedwait due to a thread alerting it that
- some completion event was (likely) available in the completion queue? */
- bool is_kicked = gpr_atm_no_barrier_load(&worker->is_kicked);
-
- if (is_kicked || is_timeout) {
- *now = deadline; /* Essentially make the epoll timeout = 0 */
- } else if (is_polling_turn) {
- *now = gpr_now(GPR_CLOCK_MONOTONIC); /* Reduce the epoll timeout */
- }
-
- gpr_mu_lock(&pi->worker_list_mu); // LOCK
- /* The node might have already been removed from the list by the poller
- that kicked this. However it is safe to call 'remove_worker_node' on
- an already detached node */
- remove_worker_node(&worker->pi_list_link);
- /* It is important to read the num_pollers again under the lock so that we
- * have the latest num_pollers value that doesn't change while we are doing
- * the "(num_pollers < g_max_pollers_per_pi)" a a few lines below */
- num_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
- }
-
- if (num_pollers < g_max_pollers_per_pi) {
- gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
- is_lease_acquired = true;
- }
-
- gpr_mu_unlock(&pi->worker_list_mu); // UNLOCK
- return is_lease_acquired;
-}
-
-static void release_polling_lease(polling_island *pi, grpc_error **error) {
- gpr_mu_lock(&pi->worker_list_mu);
-
- gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
- worker_node *node = pop_front_worker_node(&pi->worker_list_head);
- if (node != NULL) {
- grpc_pollset_worker *next_worker = WORKER_FROM_WORKER_LIST_NODE(node);
- append_error(error, poller_kick(next_worker), "poller kick error");
- }
-
- gpr_mu_unlock(&pi->worker_list_mu);
-}
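Taken together, acquire_polling_lease() and release_polling_lease() form a bounded-concurrency lease: at most g_max_pollers_per_pi threads poll an island at once, and a thread leaving epoll hands the turn to the oldest waiter. A minimal sketch of the same pattern with a condition variable in place of the signal-based handoff (names hypothetical; the deadline handling of the real code is omitted):

#include <pthread.h>

typedef struct {
  pthread_mutex_t mu;
  pthread_cond_t turn_cv; /* stands in for the sigtimedwait handoff */
  int pollers;            /* threads currently holding the lease */
  int max_pollers;        /* analogous to g_max_pollers_per_pi */
} lease;

static void lease_acquire(lease *l) {
  pthread_mutex_lock(&l->mu);
  while (l->pollers >= l->max_pollers) {
    pthread_cond_wait(&l->turn_cv, &l->mu); /* wait for a polling turn */
  }
  l->pollers++;
  pthread_mutex_unlock(&l->mu);
}

static void lease_release(lease *l) {
  pthread_mutex_lock(&l->mu);
  l->pollers--;
  pthread_cond_signal(&l->turn_cv); /* kick the next waiting worker */
  pthread_mutex_unlock(&l->mu);
}

The signal-based version above is more involved because a waiter must also respect its poll deadline, which is why acquire_polling_lease() may adjust 'now' before the caller computes its epoll timeout.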
-
-#define GRPC_EPOLL_MAX_EVENTS 100
-static void pollset_do_epoll_pwait(grpc_exec_ctx *exec_ctx, int epoll_fd,
- grpc_pollset *pollset, polling_island *pi,
- grpc_pollset_worker *worker,
- gpr_timespec now, gpr_timespec deadline,
- sigset_t *sig_mask, grpc_error **error) {
- /* Only g_max_pollers_per_pi threads can be doing polling in parallel.
- If we cannot get a lease, we cannot continue to do epoll_pwait() */
- if (!acquire_polling_lease(worker, pi, deadline, &now)) {
- return;
- }
-
- struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
- int ep_rv;
- char *err_msg;
- const char *err_desc = "pollset_work_and_unlock";
-
- /* timeout_ms is the time between 'now' and 'deadline' */
- int timeout_ms = poll_deadline_to_millis_timeout(deadline, now);
-
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- ep_rv =
- epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
-
- /* Give back the lease right away so that some other thread can enter */
- release_polling_lease(pi, error);
-
- if (ep_rv < 0) {
- if (errno != EINTR) {
- gpr_asprintf(&err_msg,
- "epoll_pwait() epoll fd: %d failed with error: %d (%s)",
- epoll_fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- } else {
- /* We were interrupted. Save an iteration by doing a zero timeout
- epoll_wait to see if there are any other events of interest */
- GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker: %p received kick",
- (void *)pollset, (void *)worker);
- ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
- }
- }
-
-#ifdef GRPC_TSAN
- /* See the definition of g_epoll_sync for more details */
- gpr_atm_acq_load(&g_epoll_sync);
-#endif /* defined(GRPC_TSAN) */
-
- for (int i = 0; i < ep_rv; ++i) {
- void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &polling_island_wakeup_fd) {
- GRPC_POLLING_TRACE(
- "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
- "%d) got merged",
- (void *)pollset, (void *)worker, epoll_fd);
- /* This means that our polling island is merged with a different
- island. We do not have to do anything here since the subsequent call
- to the function pollset_work_and_unlock() will pick up the correct
- epoll_fd */
- } else {
- grpc_fd *fd = data_ptr;
- int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
- int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
- int write_ev = ep_ev[i].events & EPOLLOUT;
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd, pollset);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
- }
- }
-}
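The EINTR branch above is worth noting: instead of losing the poll cycle to a kick, the worker immediately re-polls with a zero timeout so that events that were already ready still get delivered. The same pattern in isolation (plain epoll, no gRPC types):

#include <errno.h>
#include <sys/epoll.h>

/* Poll once; if interrupted by a signal (e.g. a worker kick), do one
   non-blocking re-poll so ready events are not left behind. Returns the
   number of events, or -1 on a real error. */
static int poll_once(int epfd, struct epoll_event *evs, int max_evs,
                     int timeout_ms) {
  int n = epoll_wait(epfd, evs, max_evs, timeout_ms);
  if (n < 0 && errno == EINTR) {
    n = epoll_wait(epfd, evs, max_evs, 0); /* zero timeout: just drain */
  }
  return n;
}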
-
-/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
-static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- gpr_timespec now, gpr_timespec deadline,
- sigset_t *sig_mask, grpc_error **error) {
- int epoll_fd = -1;
- polling_island *pi = NULL;
- GPR_TIMER_BEGIN("pollset_work_and_unlock", 0);
-
- /* We need to get the epoll_fd to wait on. The epoll_fd is inside the
- latest polling island pointed to by pollset->po.pi.
-
- Since epoll_fd is immutable, it is safe to read it without a lock on the
- polling island. There is, however, a possibility that the polling island
- from which we got the epoll_fd got merged with another island in the
- meantime. This is okay because in such a case we will wake up right away
- from epoll_pwait() (any merge poisons the old polling island's epoll set
- via 'polling_island_wakeup_fd') and then pick up the latest polling island
- the next time this function (pollset_work_and_unlock()) is called */
-
- if (pollset->po.pi == NULL) {
- pollset->po.pi = polling_island_create(exec_ctx, NULL, error);
- if (pollset->po.pi == NULL) {
- GPR_TIMER_END("pollset_work_and_unlock", 0);
- return; /* Fatal error. Cannot continue */
- }
-
- PI_ADD_REF(pollset->po.pi, "ps");
- GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p",
- (void *)pollset, (void *)pollset->po.pi);
- }
-
- pi = polling_island_maybe_get_latest(pollset->po.pi);
- epoll_fd = pi->epoll_fd;
-
- /* Update pollset->po.pi since the island pointed to by pollset->po.pi
- may be older than the one pointed to by pi */
- if (pollset->po.pi != pi) {
- /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
- polling island to be deleted */
- PI_ADD_REF(pi, "ps");
- PI_UNREF(exec_ctx, pollset->po.pi, "ps");
- pollset->po.pi = pi;
- }
-
- /* Add an extra ref so that the island does not get destroyed (which means
- the epoll_fd won't be closed) while we are doing an epoll_wait() on the
- epoll_fd */
- PI_ADD_REF(pi, "ps_work");
- gpr_mu_unlock(&pollset->po.mu);
-
- g_current_thread_polling_island = pi;
- pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now, deadline,
- sig_mask, error);
- g_current_thread_polling_island = NULL;
-
- GPR_ASSERT(pi != NULL);
-
- /* Before leaving, release the extra ref we added to the polling island. It
- is important to use "pi" here (i.e our old copy of pollset->po.pi
- that we got before releasing the polling island lock). This is because
- pollset->po.pi pointer might get updated in other parts of the
- code when there is an island merge while we are doing epoll_wait() above */
- PI_UNREF(exec_ctx, pi, "ps_work");
-
- GPR_TIMER_END("pollset_work_and_unlock", 0);
-}
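The 'PI_ADD_REF before PI_UNREF' ordering used twice above is the general idiom for retargeting a ref-counted pointer: take the new reference before dropping the old one, so the new target can never be destroyed mid-swap. A self-contained sketch with a hypothetical (single-threaded) refcount:

#include <stdlib.h>

typedef struct obj { int refs; } obj;

static void obj_ref(obj *o) { o->refs++; }
static void obj_unref(obj *o) {
  if (--o->refs == 0) free(o); /* destruction happens on the last unref */
}

/* Retarget *slot to 'latest': ref the new object first, then unref the
   old one, so 'latest' stays alive even if the two share ownership. */
static void retarget(obj **slot, obj *latest) {
  if (*slot == latest) return;
  obj_ref(latest);
  if (*slot != NULL) obj_unref(*slot);
  *slot = latest;
}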
-
-/* pollset->po.mu lock must be held by the caller before calling this.
- The function pollset_work() may temporarily release the lock (pollset->po.mu)
- during the course of its execution but it will always re-acquire the lock and
- ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
- GPR_TIMER_BEGIN("pollset_work", 0);
- grpc_error *error = GRPC_ERROR_NONE;
-
- grpc_pollset_worker worker;
- pollset_worker_init(&worker);
-
- if (worker_hdl) *worker_hdl = &worker;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
-
- if (pollset->kicked_without_pollers) {
- /* If the pollset was kicked without pollers, pretend that the current
- worker got the kick and skip polling. A kick indicates that there is some
- work that needs attention like an event on the completion queue or an
- alarm */
- GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
- pollset->kicked_without_pollers = 0;
- } else if (!pollset->shutting_down) {
- /* We use the posix-signal with number 'grpc_wakeup_signal' for waking up
- (i.e 'kicking') a worker in the pollset. A 'kick' is a way to inform the
- worker that there is some pending work that needs immediate attention
- (like an event on the completion queue, or a polling island merge that
- results in a new epoll-fd to wait on) and that the worker should not
- spend time waiting in epoll_pwait().
-
- A worker can be kicked anytime from the point it is added to the pollset
- via push_front_worker() (or push_back_worker()) to the point it is
- removed via remove_worker().
- If the worker is kicked before/during its call to epoll_pwait(), it should
- immediately exit from epoll_pwait(). If the worker is kicked after it
- returns from epoll_pwait(), then nothing really needs to be done.
-
- To accomplish this, we mask 'grpc_wakeup_signal' on this thread at all
- times *except* when it is in epoll_pwait(). This way, the worker never
- misses acting on a kick */
-
- if (!g_initialized_sigmask) {
- sigemptyset(&g_wakeup_sig_set);
- sigaddset(&g_wakeup_sig_set, grpc_wakeup_signal);
- pthread_sigmask(SIG_BLOCK, &g_wakeup_sig_set, &g_orig_sigmask);
- sigdelset(&g_orig_sigmask, grpc_wakeup_signal);
- g_initialized_sigmask = true;
- /* new_mask: The new thread mask which blocks 'grpc_wakeup_signal'.
- This is the mask used at all times *except during
- epoll_wait()*"
- g_orig_sigmask: The thread mask which allows 'grpc_wakeup_signal' and
- this is the mask to use *during epoll_wait()*
-
- The new_mask is set on the worker before it is added to the pollset
- (i.e before it can be kicked) */
- }
-
- push_front_worker(pollset, &worker); /* Add worker to pollset */
-
- pollset_work_and_unlock(exec_ctx, pollset, &worker, now, deadline,
- &g_orig_sigmask, &error);
- grpc_exec_ctx_flush(exec_ctx);
-
- gpr_mu_lock(&pollset->po.mu);
-
- /* Note: There is no need to reset worker.is_kicked to 0 since we are no
- longer going to use this worker */
- remove_worker(pollset, &worker);
- }
-
- /* If we are the last worker on the pollset (i.e pollset_has_workers() is
- false at this point) and the pollset is shutting down, we may have to
- finish the shutdown process by calling finish_shutdown_locked().
- See pollset_shutdown() for more details.
-
- Note: Continuing to access pollset here is safe; it is the caller's
- responsibility to not destroy a pollset when it has outstanding calls to
- pollset_work() */
- if (pollset->shutting_down && !pollset_has_workers(pollset) &&
- !pollset->finish_shutdown_called) {
- GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
-
- gpr_mu_unlock(&pollset->po.mu);
- grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->po.mu);
- }
-
- if (worker_hdl) *worker_hdl = NULL;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)0);
-
- GPR_TIMER_END("pollset_work", 0);
-
- GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
- return error;
-}
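The one-time sigmask setup inside pollset_work() reduces to: block the wakeup signal during normal execution, and hand epoll_pwait() a mask with only that signal unblocked, so a kick can interrupt the wait and nothing else can. In isolation (SIGRTMIN is just a stand-in for grpc_wakeup_signal here):

#include <pthread.h>
#include <signal.h>

static sigset_t g_wakeup_set; /* contains only the wakeup signal */
static sigset_t g_orig_mask;  /* prior mask with the wakeup signal allowed */

/* Block the wakeup signal for normal execution and keep a copy of the
   prior mask (minus the wakeup signal) to install only while inside
   epoll_pwait(), so a kick delivered at any moment interrupts the wait. */
static void init_wakeup_mask(int wakeup_sig) {
  sigemptyset(&g_wakeup_set);
  sigaddset(&g_wakeup_set, wakeup_sig);
  pthread_sigmask(SIG_BLOCK, &g_wakeup_set, &g_orig_mask);
  sigdelset(&g_orig_mask, wakeup_sig);
  /* later: epoll_pwait(epfd, evs, n, timeout_ms, &g_orig_mask); */
}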
-
-static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag,
- poll_obj_type bag_type, poll_obj *item,
- poll_obj_type item_type) {
- GPR_TIMER_BEGIN("add_poll_object", 0);
-
-#ifndef NDEBUG
- GPR_ASSERT(item->obj_type == item_type);
- GPR_ASSERT(bag->obj_type == bag_type);
-#endif
-
- grpc_error *error = GRPC_ERROR_NONE;
- polling_island *pi_new = NULL;
-
- gpr_mu_lock(&bag->mu);
- gpr_mu_lock(&item->mu);
-
-retry:
- /*
- * 1) If item->pi and bag->pi are both non-NULL and equal, do nothing
- * 2) If item->pi and bag->pi are both NULL, create a new polling island (with
- * a refcount of 2) and point item->pi and bag->pi to the new island
- * 3) If exactly one of item->pi or bag->pi is NULL, update it to point to
- * the other's non-NULL pi
- * 4) Finally, if item->pi and bag->pi are non-NULL and not equal, merge the
- * polling islands and update item->pi and bag->pi to point to the new
- * island
- */
-
- /* Early out if we are trying to add an 'fd' to a 'bag' but the fd is already
- * orphaned */
- if (item_type == POLL_OBJ_FD && (FD_FROM_PO(item))->orphaned) {
- gpr_mu_unlock(&item->mu);
- gpr_mu_unlock(&bag->mu);
- return;
- }
-
- if (item->pi == bag->pi) {
- pi_new = item->pi;
- if (pi_new == NULL) {
- /* GPR_ASSERT(item->pi == bag->pi == NULL) */
-
- /* If we are adding an fd to a bag (i.e pollset or pollset_set), then
- * we need to do some extra work to make TSAN happy */
- if (item_type == POLL_OBJ_FD) {
- /* Unlock before creating a new polling island: the polling island will
- create a workqueue which creates a file descriptor, and holding an fd
- lock here can eventually cause a loop to appear to TSAN (making it
- unhappy). We don't think it's a real loop (there's an epoch point
- where that loop possibility disappears), but the advantages of
- keeping TSAN happy outweigh any performance advantage we might have
- by keeping the lock held. */
- gpr_mu_unlock(&item->mu);
- pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error);
- gpr_mu_lock(&item->mu);
-
- /* Need to reverify any assumptions made between the initial lock and
- getting to this branch: if they've changed, we need to throw away our
- work and figure things out again. */
- if (item->pi != NULL) {
- GRPC_POLLING_TRACE(
- "add_poll_object: Raced creating new polling island. pi_new: %p "
- "(fd: %d, %s: %p)",
- (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
- (void *)bag);
- /* No need to lock 'pi_new' here since this is a new polling island
- and no one has a reference to it yet */
- polling_island_remove_all_fds_locked(pi_new, true, &error);
-
- /* Ref and unref so that the polling island gets deleted during unref
- */
- PI_ADD_REF(pi_new, "dance_of_destruction");
- PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
- goto retry;
- }
- } else {
- pi_new = polling_island_create(exec_ctx, NULL, &error);
- }
-
- GRPC_POLLING_TRACE(
- "add_poll_object: Created new polling island. pi_new: %p (%s: %p, "
- "%s: %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- } else {
- GRPC_POLLING_TRACE(
- "add_poll_object: Same polling island. pi: %p (%s, %s)",
- (void *)pi_new, poll_obj_string(item_type),
- poll_obj_string(bag_type));
- }
- } else if (item->pi == NULL) {
- /* GPR_ASSERT(bag->pi != NULL) */
- /* Make pi_new point to the latest pi */
- pi_new = polling_island_lock(bag->pi);
-
- if (item_type == POLL_OBJ_FD) {
- grpc_fd *fd = FD_FROM_PO(item);
- polling_island_add_fds_locked(pi_new, &fd, 1, true, &error);
- }
-
- gpr_mu_unlock(&pi_new->mu);
- GRPC_POLLING_TRACE(
- "add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, "
- "bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- } else if (bag->pi == NULL) {
- /* GPR_ASSERT(item->pi != NULL) */
- /* Make pi_new point to the latest pi */
- pi_new = polling_island_lock(item->pi);
- gpr_mu_unlock(&pi_new->mu);
- GRPC_POLLING_TRACE(
- "add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, "
- "bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- } else {
- pi_new = polling_island_merge(item->pi, bag->pi, &error);
- GRPC_POLLING_TRACE(
- "add_poll_obj: polling islands merged. pi_new: %p (item(%s): %p, "
- "bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- }
-
- /* At this point, pi_new is the polling island that both item->pi and bag->pi
- MUST be pointing to */
-
- if (item->pi != pi_new) {
- PI_ADD_REF(pi_new, poll_obj_string(item_type));
- if (item->pi != NULL) {
- PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type));
- }
- item->pi = pi_new;
- }
-
- if (bag->pi != pi_new) {
- PI_ADD_REF(pi_new, poll_obj_string(bag_type));
- if (bag->pi != NULL) {
- PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type));
- }
- bag->pi = pi_new;
- }
-
- gpr_mu_unlock(&item->mu);
- gpr_mu_unlock(&bag->mu);
-
- GRPC_LOG_IF_ERROR("add_poll_object", error);
- GPR_TIMER_END("add_poll_object", 0);
-}
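The four cases enumerated at the top of add_poll_object() are a small decision table over two optional island pointers. A condensed, runnable sketch of just that decision (toy island type; island_merge here arbitrarily keeps the first island, whereas the real merge drains the smaller island into the larger one):

#include <stdlib.h>

typedef struct island { int id; } island;

static island *island_create(void) { return calloc(1, sizeof(island)); }
static island *island_merge(island *a, island *b) { (void)b; return a; }

/* Decide which island both 'item' and 'bag' must end up sharing. */
static island *decide(island *item_pi, island *bag_pi) {
  if (item_pi == bag_pi) {
    /* Case 1: already equal and non-NULL -> nothing to do.
       Case 2: both NULL -> create a fresh island for both. */
    return item_pi != NULL ? item_pi : island_create();
  }
  if (item_pi == NULL) return bag_pi;   /* Case 3a: adopt bag's island  */
  if (bag_pi == NULL) return item_pi;   /* Case 3b: adopt item's island */
  return island_merge(item_pi, bag_pi); /* Case 4: merge the two        */
}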
-
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po,
- POLL_OBJ_FD);
-}
-
-/*******************************************************************************
- * Pollset-set Definitions
- */
-
-static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
- gpr_mu_init(&pss->po.mu);
- pss->po.pi = NULL;
-#ifndef NDEBUG
- pss->po.obj_type = POLL_OBJ_POLLSET_SET;
-#endif
- return pss;
-}
-
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss) {
- gpr_mu_destroy(&pss->po.mu);
-
- if (pss->po.pi != NULL) {
- PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy");
- }
-
- gpr_free(pss);
-}
-
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po,
- POLL_OBJ_FD);
-}
-
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po,
- POLL_OBJ_POLLSET);
-}
-
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po,
- POLL_OBJ_POLLSET_SET);
-}
-
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- /* Nothing to do */
-}
-
-/*******************************************************************************
- * Event engine binding
- */
-
-static void shutdown_engine(void) {
- fd_global_shutdown();
- pollset_global_shutdown();
- polling_island_global_shutdown();
-}
-
-static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
-};
-
-/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
- * Create a dummy epoll_fd to make sure epoll support is available */
-static bool is_epoll_available() {
- int fd = epoll_create1(EPOLL_CLOEXEC);
- if (fd < 0) {
- gpr_log(
- GPR_ERROR,
- "epoll_create1 failed with error: %d. Not using epoll polling engine",
- errno);
- return false;
- }
- close(fd);
- return true;
-}
-
-/* This is mainly for testing purposes. Checks to see if environment variable
- * GRPC_MAX_POLLERS_PER_PI is set and if so, assigns that value to
- * g_max_pollers_per_pi (any negative value is considered INT_MAX) */
-static void set_max_pollers_per_island() {
- char *s = gpr_getenv("GRPC_MAX_POLLERS_PER_PI");
- if (s) {
- g_max_pollers_per_pi = (int)strtol(s, NULL, 10);
- if (g_max_pollers_per_pi < 0) {
- g_max_pollers_per_pi = INT_MAX;
- }
- } else {
- g_max_pollers_per_pi = INT_MAX;
- }
-
- gpr_log(GPR_INFO, "Max number of pollers per polling island: %d",
- g_max_pollers_per_pi);
-}
-
-const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
- bool explicitly_requested) {
- if (!explicitly_requested) {
- return NULL;
- }
-
- /* If use of signals is disabled, we cannot use the epoll engine */
- if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
- return NULL;
- }
-
- if (!grpc_has_wakeup_fd()) {
- return NULL;
- }
-
- if (!is_epoll_available()) {
- return NULL;
- }
-
- if (!is_grpc_wakeup_signal_initialized) {
- grpc_use_signal(SIGRTMIN + 6);
- }
-
- set_max_pollers_per_island();
-
- fd_global_init();
-
- if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
- return NULL;
- }
-
- if (!GRPC_LOG_IF_ERROR("polling_island_global_init",
- polling_island_global_init())) {
- return NULL;
- }
-
- return &vtable;
-}
-
-#else /* defined(GRPC_LINUX_EPOLL) */
-#if defined(GRPC_POSIX_SOCKET)
-#include "src/core/lib/iomgr/ev_posix.h"
-/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
- * NULL */
-const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
- bool explicitly_requested) {
- return NULL;
-}
-#endif /* defined(GRPC_POSIX_SOCKET) */
-#endif /* !defined(GRPC_LINUX_EPOLL) */
diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h
deleted file mode 100644
index 1d6af5f52c..0000000000
--- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H
-#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H
-
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/port.h"
-
-const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
- bool explicitly_requested);
-
-#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H */
diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
deleted file mode 100644
index 07c8eadf4f..0000000000
--- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
+++ /dev/null
@@ -1,1182 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-/* This polling engine is only relevant on linux kernels supporting epoll() */
-#ifdef GRPC_LINUX_EPOLL
-
-#include "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h"
-
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-#include <poll.h>
-#include <pthread.h>
-#include <string.h>
-#include <sys/epoll.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/tls.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/lockfree_event.h"
-#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
-
-/* TODO: sreek - Move this to init.c and initialize this like other tracers. */
-#define GRPC_POLLING_TRACE(fmt, ...) \
- if (GRPC_TRACER_ON(grpc_polling_trace)) { \
- gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
- }
-
- /* The alarm system needs to be able to wake up 'some poller' sometimes
- * (specifically when a new alarm needs to be triggered earlier than the next
- * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
- * case occurs. */
-
-struct epoll_set;
-
-#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
-
-/*******************************************************************************
- * Fd Declarations
- */
-struct grpc_fd {
- gpr_mu mu;
- struct epoll_set *eps;
-
- int fd;
-
- /* The fd is either closed or we relinquished control of it. In either case,
- this indicates that the 'fd' on this structure is no longer valid */
- bool orphaned;
-
- gpr_atm read_closure;
- gpr_atm write_closure;
-
- struct grpc_fd *freelist_next;
- grpc_closure *on_done_closure;
-
- grpc_iomgr_object iomgr_object;
-};
-
-static void fd_global_init(void);
-static void fd_global_shutdown(void);
-
-/*******************************************************************************
- * epoll set Declarations
- */
-
-#ifndef NDEBUG
-
-#define EPS_ADD_REF(p, r) eps_add_ref_dbg((p), (r), __FILE__, __LINE__)
-#define EPS_UNREF(exec_ctx, p, r) \
- eps_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-
-#else
-
-#define EPS_ADD_REF(p, r) eps_add_ref((p))
-#define EPS_UNREF(exec_ctx, p, r) eps_unref((exec_ctx), (p))
-
-#endif
-
-typedef struct epoll_set {
- /* Mutex poller should acquire to poll this. This enforces that only one
- * poller can be polling on epoll_set at any time */
- gpr_mu mu;
-
- /* Ref count. Use EPS_ADD_REF() and EPS_UNREF() macros to increment/decrement
- the refcount. Once the ref count becomes zero, this structure is destroyed
- which means we should ensure that there is never a scenario where a
- EPS_ADD_REF() is racing with a EPS_UNREF() that just made the ref_count
- zero. */
- gpr_atm ref_count;
-
- /* Number of threads currently polling on this epoll set */
- gpr_atm poller_count;
-
- /* Is the epoll set shutdown */
- gpr_atm is_shutdown;
-
- /* The fd of the underlying epoll set */
- int epoll_fd;
-} epoll_set;
-
-/*******************************************************************************
- * Pollset Declarations
- */
-struct grpc_pollset_worker {
- gpr_cv kick_cv;
-
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
-};
-
-struct grpc_pollset {
- gpr_mu mu;
- struct epoll_set *eps;
-
- grpc_pollset_worker root_worker;
- bool kicked_without_pollers;
-
- bool shutting_down; /* Is the pollset shutting down ? */
- bool finish_shutdown_called; /* Has finish_shutdown_locked() been called? */
- grpc_closure *shutdown_done; /* Called after shutdown is complete */
-};
-
-/*******************************************************************************
- * Pollset-set Declarations
- */
-struct grpc_pollset_set {
- char unused;
-};
-
-/*****************************************************************************
- * Dedicated polling threads and pollsets - Declarations
- */
-
-size_t g_num_eps = 1;
-struct epoll_set **g_epoll_sets = NULL;
-gpr_atm g_next_eps;
-size_t g_num_threads_per_eps = 1;
-gpr_thd_id *g_poller_threads = NULL;
-
-/* Used as read-notifier pollsets for fds. We won't be using read notifier
- * pollsets with this polling engine. So it does not matter what pollset we
- * return */
-grpc_pollset g_read_notifier;
-
-static void add_fd_to_eps(grpc_fd *fd);
-static bool init_epoll_sets();
-static void shutdown_epoll_sets();
-static void poller_thread_loop(void *arg);
-static void start_poller_threads();
-static void shutdown_poller_threads();
-
-/*******************************************************************************
- * Common helpers
- */
-
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
- if (error == GRPC_ERROR_NONE) return true;
- if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
- }
- *composite = grpc_error_add_child(*composite, error);
- return false;
-}
-
-/*******************************************************************************
- * epoll set Definitions
- */
-
- /* The wakeup fd that is used to wake up all threads in an epoll_set,
- informing them that the epoll set is shut down. This wakeup fd is
- initialized to be readable and MUST NOT be consumed, i.e. the threads that
- woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
-static grpc_wakeup_fd epoll_set_wakeup_fd;
-
-/* The epoll set being polled right now.
- See comments in workqueue_maybe_wakeup for why this is tracked. */
-static __thread epoll_set *g_current_thread_epoll_set;
-
-/* Forward declaration */
-static void epoll_set_delete(epoll_set *eps);
-
-#ifdef GRPC_TSAN
-/* Currently TSAN may incorrectly flag data races between epoll_ctl and
- epoll_wait for any grpc_fd structs that are added to the epoll set via
- epoll_ctl and are returned (within a very short window) via epoll_wait().
-
- To work-around this race, we establish a happens-before relation between
- the code just-before epoll_ctl() and the code after epoll_wait() by using
- this atomic */
-gpr_atm g_epoll_sync;
-#endif /* defined(GRPC_TSAN) */
-
-static void eps_add_ref(epoll_set *eps);
-static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps);
-
-#ifndef NDEBUG
-static void eps_add_ref_dbg(epoll_set *eps, const char *reason,
- const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
- gpr_log(GPR_DEBUG, "Add ref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- eps, old_cnt, old_cnt + 1, reason, file, line);
- }
- eps_add_ref(eps);
-}
-
-static void eps_unref_dbg(grpc_exec_ctx *exec_ctx, epoll_set *eps,
- const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
- gpr_log(GPR_DEBUG, "Unref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- eps, old_cnt, (old_cnt - 1), reason, file, line);
- }
- eps_unref(exec_ctx, eps);
-}
-#endif
-
-static void eps_add_ref(epoll_set *eps) {
- gpr_atm_no_barrier_fetch_add(&eps->ref_count, 1);
-}
-
-static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps) {
- /* If ref count went to zero, delete the epoll set. This deletion is
- not done under a lock since once the ref count goes to zero, we are
- guaranteed that no one else holds a reference to the epoll set (and
- that there is no racing eps_add_ref() call either).*/
- if (1 == gpr_atm_full_fetch_add(&eps->ref_count, -1)) {
- epoll_set_delete(eps);
- }
-}
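eps_unref() relies on the fetch-and-add returning the value before the decrement: seeing 1 means this caller just dropped the last reference and may free without any lock. The same idiom with C11 atomics (a standalone analogue, not the gpr_atm API):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct { atomic_long refs; } counted;

static void counted_ref(counted *c) {
  atomic_fetch_add_explicit(&c->refs, 1, memory_order_relaxed);
}

static void counted_unref(counted *c) {
  /* fetch_add returns the previous value: 1 means we just dropped the
     last reference, so no other thread can still be holding one. */
  if (atomic_fetch_add_explicit(&c->refs, -1, memory_order_acq_rel) == 1) {
    free(c);
  }
}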
-
-static void epoll_set_add_fd_locked(epoll_set *eps, grpc_fd *fd,
- grpc_error **error) {
- int err;
- struct epoll_event ev;
- char *err_msg;
- const char *err_desc = "epoll_set_add_fd_locked";
-
-#ifdef GRPC_TSAN
- /* See the definition of g_epoll_sync for more context */
- gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0);
-#endif /* defined(GRPC_TSAN) */
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
- ev.data.ptr = fd;
- err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
- if (err < 0 && errno != EEXIST) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)",
- eps->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-}
-
-static void epoll_set_add_wakeup_fd_locked(epoll_set *eps,
- grpc_wakeup_fd *wakeup_fd,
- grpc_error **error) {
- struct epoll_event ev;
- int err;
- char *err_msg;
- const char *err_desc = "epoll_set_add_wakeup_fd";
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
- ev.data.ptr = wakeup_fd;
- err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_ADD,
- GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
- if (err < 0 && errno != EEXIST) {
- gpr_asprintf(&err_msg,
- "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
- "error: %d (%s)",
- eps->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno,
- strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-}
-
-static void epoll_set_remove_fd(epoll_set *eps, grpc_fd *fd, bool is_fd_closed,
- grpc_error **error) {
- int err;
- char *err_msg;
- const char *err_desc = "epoll_set_remove_fd";
-
- /* If fd is already closed, then it would have automatically been removed
- from the epoll set */
- if (!is_fd_closed) {
- err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
- eps->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
- }
-}
-
-/* Might return NULL in case of an error */
-static epoll_set *epoll_set_create(grpc_error **error) {
- epoll_set *eps = NULL;
- const char *err_desc = "epoll_set_create";
-
- *error = GRPC_ERROR_NONE;
-
- eps = gpr_malloc(sizeof(*eps));
- eps->epoll_fd = -1;
-
- gpr_mu_init(&eps->mu);
-
- gpr_atm_rel_store(&eps->ref_count, 0);
- gpr_atm_rel_store(&eps->poller_count, 0);
-
- gpr_atm_rel_store(&eps->is_shutdown, false);
-
- eps->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
-
- if (eps->epoll_fd < 0) {
- append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc);
- goto done;
- }
-
-done:
- if (*error != GRPC_ERROR_NONE) {
- epoll_set_delete(eps);
- eps = NULL;
- }
- return eps;
-}
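epoll_set_create() follows the usual create-or-clean-up shape: allocate, initialize, and funnel every failure through one exit point that undoes partial work and reports NULL. Stripped of the grpc_error plumbing, the shape is roughly this (sketch; poll_group is a hypothetical stand-in):

#include <stdlib.h>
#include <sys/epoll.h>

typedef struct { int epfd; } poll_group;

/* Allocate and initialize; on any failure release partial state and
   return NULL so callers get a single success/failure signal. */
static poll_group *poll_group_create(void) {
  poll_group *g = malloc(sizeof(*g));
  if (g == NULL) return NULL;
  g->epfd = epoll_create1(EPOLL_CLOEXEC);
  if (g->epfd < 0) {
    free(g);
    return NULL;
  }
  return g;
}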
-
-static void epoll_set_delete(epoll_set *eps) {
- if (eps->epoll_fd >= 0) {
- close(eps->epoll_fd);
- }
-
- gpr_mu_destroy(&eps->mu);
-
- gpr_free(eps);
-}
-
-static grpc_error *epoll_set_global_init() {
- grpc_error *error = GRPC_ERROR_NONE;
-
- error = grpc_wakeup_fd_init(&epoll_set_wakeup_fd);
- if (error == GRPC_ERROR_NONE) {
- error = grpc_wakeup_fd_wakeup(&epoll_set_wakeup_fd);
- }
-
- return error;
-}
-
-static void epoll_set_global_shutdown() {
- grpc_wakeup_fd_destroy(&epoll_set_wakeup_fd);
-}
-
-/*******************************************************************************
- * Fd Definitions
- */
-
-/* We need to keep a freelist not because of any concerns of malloc performance
- * but instead so that implementations with multiple threads in (for example)
- * epoll_wait deal with the race between pollset removal and incoming poll
- * notifications.
- *
- * The problem is that the poller ultimately holds a reference to this
- * object, so it is very difficult to know when it is safe to free it, at least
- * without some expensive synchronization.
- *
- * If we keep the object freelisted, in the worst case losing this race just
- * becomes a spurious read notification on a reused fd.
- */
-
-static grpc_fd *fd_freelist = NULL;
-static gpr_mu fd_freelist_mu;
-
-static grpc_fd *get_fd_from_freelist() {
- grpc_fd *new_fd = NULL;
-
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist != NULL) {
- new_fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- }
- gpr_mu_unlock(&fd_freelist_mu);
- return new_fd;
-}
-
-static void add_fd_to_freelist(grpc_fd *fd) {
- gpr_mu_lock(&fd_freelist_mu);
- fd->freelist_next = fd_freelist;
- fd_freelist = fd;
- grpc_iomgr_unregister_object(&fd->iomgr_object);
-
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
-
- gpr_mu_unlock(&fd_freelist_mu);
-}
-
-static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
-
-static void fd_global_shutdown(void) {
- gpr_mu_lock(&fd_freelist_mu);
- gpr_mu_unlock(&fd_freelist_mu);
- while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- gpr_mu_destroy(&fd->mu);
- gpr_free(fd);
- }
- gpr_mu_destroy(&fd_freelist_mu);
-}
-
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = get_fd_from_freelist();
- if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&new_fd->mu);
- }
-
- /* Note: It is not really necessary to acquire the new_fd->mu lock here. If
- * is a newly created fd (or an fd we got from the freelist), no one else
- * would be holding a lock to it anyway. */
- gpr_mu_lock(&new_fd->mu);
- new_fd->eps = NULL;
-
- new_fd->fd = fd;
- new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
-
- new_fd->freelist_next = NULL;
- new_fd->on_done_closure = NULL;
-
- gpr_mu_unlock(&new_fd->mu);
-
- char *fd_name;
- gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
- grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
- gpr_free(fd_name);
-
- /* Associate the fd with one of the eps */
- add_fd_to_eps(new_fd);
- return new_fd;
-}
-
-static int fd_wrapped_fd(grpc_fd *fd) {
- int ret_fd = -1;
- gpr_mu_lock(&fd->mu);
- if (!fd->orphaned) {
- ret_fd = fd->fd;
- }
- gpr_mu_unlock(&fd->mu);
-
- return ret_fd;
-}
-
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
- bool is_fd_closed = already_closed;
- grpc_error *error = GRPC_ERROR_NONE;
- epoll_set *unref_eps = NULL;
-
- gpr_mu_lock(&fd->mu);
- fd->on_done_closure = on_done;
-
- /* If release_fd is not NULL, we should be relinquishing control of the file
- descriptor fd->fd (but we still own the grpc_fd structure). */
- if (release_fd != NULL) {
- *release_fd = fd->fd;
- } else if (!is_fd_closed) {
- close(fd->fd);
- is_fd_closed = true;
- }
-
- fd->orphaned = true;
-
- /* Remove the fd from the epoll set */
- if (fd->eps != NULL) {
- epoll_set_remove_fd(fd->eps, fd, is_fd_closed, &error);
- unref_eps = fd->eps;
- fd->eps = NULL;
- }
-
- GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
-
- gpr_mu_unlock(&fd->mu);
-
- /* We are done with this fd. Release it (i.e add back to freelist) */
- add_fd_to_freelist(fd);
-
- if (unref_eps != NULL) {
- /* Unref stale epoll set here, outside the fd lock above.
- The epoll set owns a workqueue which owns an fd, and unreffing
- inside the lock can cause an eventual lock loop that makes TSAN very
- unhappy. */
- EPS_UNREF(exec_ctx, unref_eps, "fd_orphan");
- }
- GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
- GRPC_ERROR_UNREF(error);
-}
-
-/* This polling engine doesn't really need the read notifier functionality. So
- * it just returns a dummy read notifier pollset */
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
- return &g_read_notifier;
-}
-
-static bool fd_is_shutdown(grpc_fd *fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
-}
-
-/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
- shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
- }
- GRPC_ERROR_UNREF(why);
-}
-
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
-}
-
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
-}
-
-/*******************************************************************************
- * Pollset Definitions
- */
-/* TODO: sreek - Not needed anymore */
-GPR_TLS_DECL(g_current_thread_pollset);
-GPR_TLS_DECL(g_current_thread_worker);
-
-static void pollset_worker_init(grpc_pollset_worker *worker) {
- worker->next = worker->prev = NULL;
- gpr_cv_init(&worker->kick_cv);
-}
-
-/* Global state management */
-static grpc_error *pollset_global_init(void) {
- gpr_tls_init(&g_current_thread_pollset);
- gpr_tls_init(&g_current_thread_worker);
- return GRPC_ERROR_NONE;
-}
-
-static void pollset_global_shutdown(void) {
- gpr_tls_destroy(&g_current_thread_pollset);
- gpr_tls_destroy(&g_current_thread_worker);
-}
-
-static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
- gpr_cv_signal(&worker->kick_cv);
- return GRPC_ERROR_NONE;
-}
-
-/* Return 1 if the pollset has active threads in pollset_work (pollset must
- * be locked) */
-static int pollset_has_workers(grpc_pollset *p) {
- return p->root_worker.next != &p->root_worker;
-}
-
-static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev->next = worker->next;
- worker->next->prev = worker->prev;
-}
-
-static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
- if (pollset_has_workers(p)) {
- grpc_pollset_worker *w = p->root_worker.next;
- remove_worker(p, w);
- return w;
- } else {
- return NULL;
- }
-}
-
-static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->next = &p->root_worker;
- worker->prev = worker->next->prev;
- worker->prev->next = worker->next->prev = worker;
-}
-
-static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev = &p->root_worker;
- worker->next = worker->prev->next;
- worker->prev->next = worker->next->prev = worker;
-}
-
-/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
- GPR_TIMER_BEGIN("pollset_kick", 0);
- grpc_error *error = GRPC_ERROR_NONE;
- const char *err_desc = "Kick Failure";
- grpc_pollset_worker *worker = specific_worker;
- if (worker != NULL) {
- if (worker == GRPC_POLLSET_KICK_BROADCAST) {
- if (pollset_has_workers(p)) {
- GPR_TIMER_BEGIN("pollset_kick.broadcast", 0);
- for (worker = p->root_worker.next; worker != &p->root_worker;
- worker = worker->next) {
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- GPR_TIMER_END("pollset_kick.broadcast", 0);
- } else {
- p->kicked_without_pollers = true;
- }
- } else {
- GPR_TIMER_MARK("kicked_specifically", 0);
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) {
- /* Since worker == NULL, it means that we can kick "any" worker on this
- pollset 'p'. If 'p' happens to be the same pollset this thread is
- currently polling (i.e in pollset_work() function), then there is no need
- to kick any other worker since the current thread can just absorb the
- kick. This is the reason why we enter this case only when
- g_current_thread_pollset is != p */
-
- GPR_TIMER_MARK("kick_anonymous", 0);
- worker = pop_front_worker(p);
- if (worker != NULL) {
- GPR_TIMER_MARK("finally_kick", 0);
- push_back_worker(p, worker);
- append_error(&error, pollset_worker_kick(worker), err_desc);
- } else {
- GPR_TIMER_MARK("kicked_no_pollers", 0);
- p->kicked_without_pollers = true;
- }
- }
-
- GPR_TIMER_END("pollset_kick", 0);
- GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error));
- return error;
-}
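In this engine a kick is nothing more than a condition-variable signal: pollset_work() parks the worker on kick_cv and pollset_worker_kick() wakes it. The parked-worker pattern in isolation (pthreads; the real code waits with a deadline via gpr_cv_wait rather than indefinitely):

#include <pthread.h>
#include <stdbool.h>

typedef struct {
  pthread_mutex_t mu;
  pthread_cond_t kick_cv;
  bool kicked;
} parked_worker;

/* Worker side: park until kicked. */
static void worker_park(parked_worker *w) {
  pthread_mutex_lock(&w->mu);
  while (!w->kicked) pthread_cond_wait(&w->kick_cv, &w->mu);
  w->kicked = false;
  pthread_mutex_unlock(&w->mu);
}

/* Kicker side: wake one parked worker. */
static void worker_kick(parked_worker *w) {
  pthread_mutex_lock(&w->mu);
  w->kicked = true;
  pthread_cond_signal(&w->kick_cv);
  pthread_mutex_unlock(&w->mu);
}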
-
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- gpr_mu_init(&pollset->mu);
- *mu = &pollset->mu;
- pollset->eps = NULL;
-
- pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
- pollset->kicked_without_pollers = false;
-
- pollset->shutting_down = false;
- pollset->finish_shutdown_called = false;
- pollset->shutdown_done = NULL;
-}
-
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
-}
-
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
-}
-
-static void pollset_release_epoll_set(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
- char *reason) {
- if (ps->eps != NULL) {
- EPS_UNREF(exec_ctx, ps->eps, reason);
- }
- ps->eps = NULL;
-}
-
-static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- /* The pollset cannot have any workers if we are at this stage */
- GPR_ASSERT(!pollset_has_workers(pollset));
-
- pollset->finish_shutdown_called = true;
-
- /* Release the ref and set pollset->eps to NULL */
- pollset_release_epoll_set(exec_ctx, pollset, "ps_shutdown");
- GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
-}
-
-/* pollset->mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
- GPR_TIMER_BEGIN("pollset_shutdown", 0);
- GPR_ASSERT(!pollset->shutting_down);
- pollset->shutting_down = true;
- pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
-
- /* If the pollset has any workers, we cannot call finish_shutdown_locked()
- because it would release the underlying epoll set. In such a case, we
- let the last worker call finish_shutdown_locked() from pollset_work() */
- if (!pollset_has_workers(pollset)) {
- GPR_ASSERT(!pollset->finish_shutdown_called);
- GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
- }
- GPR_TIMER_END("pollset_shutdown", 0);
-}
-
-/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
- * than destroying the mutexes, there is nothing special that needs to be done
- * here */
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- GPR_ASSERT(!pollset_has_workers(pollset));
- gpr_mu_destroy(&pollset->mu);
-}
-
-/* Blocking call */
-static void acquire_epoll_lease(epoll_set *eps) {
- if (g_num_threads_per_eps > 1) {
- gpr_mu_lock(&eps->mu);
- }
-}
-
-static void release_epoll_lease(epoll_set *eps) {
- if (g_num_threads_per_eps > 1) {
- gpr_mu_unlock(&eps->mu);
- }
-}
-
-#define GRPC_EPOLL_MAX_EVENTS 100
-static void do_epoll_wait(grpc_exec_ctx *exec_ctx, int epoll_fd, epoll_set *eps,
- grpc_error **error) {
- struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
- int ep_rv;
- char *err_msg;
- const char *err_desc = "do_epoll_wait";
-
- int timeout_ms = -1;
-
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- acquire_epoll_lease(eps);
- ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms);
- release_epoll_lease(eps);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
-
- if (ep_rv < 0) {
- gpr_asprintf(&err_msg,
- "epoll_wait() epoll fd: %d failed with error: %d (%s)",
- epoll_fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- }
-
-#ifdef GRPC_TSAN
- /* See the definition of g_epoll_sync for more details */
- gpr_atm_acq_load(&g_epoll_sync);
-#endif /* defined(GRPC_TSAN) */
-
- for (int i = 0; i < ep_rv; ++i) {
- void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &epoll_set_wakeup_fd) {
- gpr_atm_rel_store(&eps->is_shutdown, 1);
- gpr_log(GPR_INFO, "pollset poller: shutdown set");
- } else {
- grpc_fd *fd = data_ptr;
- int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
- int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
- int write_ev = ep_ev[i].events & EPOLLOUT;
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
- }
- }
-}
-
-static void epoll_set_work(grpc_exec_ctx *exec_ctx, epoll_set *eps,
- grpc_error **error) {
- int epoll_fd = -1;
- GPR_TIMER_BEGIN("epoll_set_work", 0);
-
- /* Since epoll_fd is immutable, it is safe to read it without a lock on the
- epoll set. */
- epoll_fd = eps->epoll_fd;
-
- gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1);
- g_current_thread_epoll_set = eps;
-
- do_epoll_wait(exec_ctx, epoll_fd, eps, error);
-
- g_current_thread_epoll_set = NULL;
- gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1);
-
- GPR_TIMER_END("epoll_set_work", 0);
-}
-
-/* pollset->mu lock must be held by the caller before calling this.
- The function pollset_work() may temporarily release the lock (pollset->mu)
- during the course of its execution but it will always re-acquire the lock and
- ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
- GPR_TIMER_BEGIN("pollset_work", 0);
- grpc_error *error = GRPC_ERROR_NONE;
-
- grpc_pollset_worker worker;
- pollset_worker_init(&worker);
-
- if (worker_hdl) *worker_hdl = &worker;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
-
- if (pollset->kicked_without_pollers) {
- /* If the pollset was kicked without pollers, pretend that the current
- worker got the kick and skip polling. A kick indicates that there is some
- work that needs attention like an event on the completion queue or an
- alarm */
- GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
- pollset->kicked_without_pollers = 0;
- } else if (!pollset->shutting_down) {
- push_front_worker(pollset, &worker);
-
- gpr_cv_wait(&worker.kick_cv, &pollset->mu,
- gpr_convert_clock_type(deadline, GPR_CLOCK_REALTIME));
- /* pollset->mu locked here */
-
- remove_worker(pollset, &worker);
- }
-
- /* If we are the last worker on the pollset (i.e pollset_has_workers() is
- false at this point) and the pollset is shutting down, we may have to
- finish the shutdown process by calling finish_shutdown_locked().
- See pollset_shutdown() for more details.
-
- Note: Continuing to access pollset here is safe; it is the caller's
- responsibility to not destroy a pollset when it has outstanding calls to
- pollset_work() */
- if (pollset->shutting_down && !pollset_has_workers(pollset) &&
- !pollset->finish_shutdown_called) {
- GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
-
- gpr_mu_unlock(&pollset->mu);
- grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->mu);
- }
-
- if (worker_hdl) *worker_hdl = NULL;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)0);
-
- GPR_TIMER_END("pollset_work", 0);
-
- GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
- return error;
-}
-
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-/*******************************************************************************
- * Pollset-set Definitions
- */
-grpc_pollset_set g_dummy_pollset_set;
-static grpc_pollset_set *pollset_set_create(void) {
- return &g_dummy_pollset_set;
-}
-
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- /* Nothing to do */
-}
-
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- /* Nothing to do */
-}
-
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- /* Nothing to do */
-}
-
-/*******************************************************************************
- * Event engine binding
- */
-
-static void shutdown_engine(void) {
- shutdown_poller_threads();
- shutdown_epoll_sets();
- fd_global_shutdown();
- pollset_global_shutdown();
- epoll_set_global_shutdown();
- gpr_log(GPR_INFO, "ev-epoll-threadpool engine shutdown complete");
-}
-
-static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
-};
-
-/*****************************************************************************
- * Dedicated polling threads and pollsets - Definitions
- */
-static void add_fd_to_eps(grpc_fd *fd) {
- GPR_ASSERT(fd->eps == NULL);
- GPR_TIMER_BEGIN("add_fd_to_eps", 0);
-
- grpc_error *error = GRPC_ERROR_NONE;
- size_t idx = (size_t)gpr_atm_no_barrier_fetch_add(&g_next_eps, 1) % g_num_eps;
- epoll_set *eps = g_epoll_sets[idx];
-
- gpr_mu_lock(&fd->mu);
-
- if (fd->orphaned) {
- gpr_mu_unlock(&fd->mu);
- GPR_TIMER_END("add_fd_to_eps", 0);
- return; /* Early out */
- }
-
- epoll_set_add_fd_locked(eps, fd, &error);
- EPS_ADD_REF(eps, "fd");
- fd->eps = eps;
-
- GRPC_POLLING_TRACE("add_fd_to_eps (fd: %d, eps idx = %" PRIdPTR ")", fd->fd,
- idx);
- gpr_mu_unlock(&fd->mu);
-
- GRPC_LOG_IF_ERROR("add_fd_to_eps", error);
- GPR_TIMER_END("add_fd_to_eps", 0);
-}
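The eps a new fd lands in comes from a lock-free round-robin counter (g_next_eps modulo g_num_eps). The same distribution idiom standalone (C11 atomics; relaxed ordering suffices because strict fairness is not required):

#include <stdatomic.h>
#include <stddef.h>

/* Pick the next bucket in round-robin order without taking any lock. */
static size_t next_bucket(atomic_size_t *counter, size_t num_buckets) {
  return atomic_fetch_add_explicit(counter, 1, memory_order_relaxed) %
         num_buckets;
}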
-
-static bool init_epoll_sets() {
- grpc_error *error = GRPC_ERROR_NONE;
- bool is_success = true;
-
- g_epoll_sets = (epoll_set **)gpr_malloc(g_num_eps * sizeof(epoll_set *));
-
- for (size_t i = 0; i < g_num_eps; i++) {
- g_epoll_sets[i] = epoll_set_create(&error);
- if (g_epoll_sets[i] == NULL) {
- gpr_log(GPR_ERROR, "Error in creating a epoll set");
- g_num_eps = i; /* Helps cleanup */
- shutdown_epoll_sets();
- is_success = false;
- goto done;
- }
-
- EPS_ADD_REF(g_epoll_sets[i], "init_epoll_sets");
- }
-
- gpr_atm_no_barrier_store(&g_next_eps, 0);
- gpr_mu *mu;
- pollset_init(&g_read_notifier, &mu);
-
-done:
- GRPC_LOG_IF_ERROR("init_epoll_sets", error);
- return is_success;
-}
-
-static void shutdown_epoll_sets() {
- if (!g_epoll_sets) {
- return;
- }
-
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- for (size_t i = 0; i < g_num_eps; i++) {
- EPS_UNREF(&exec_ctx, g_epoll_sets[i], "shutdown_epoll_sets");
- }
- grpc_exec_ctx_flush(&exec_ctx);
-
- gpr_free(g_epoll_sets);
- g_epoll_sets = NULL;
- pollset_destroy(&exec_ctx, &g_read_notifier);
- grpc_exec_ctx_finish(&exec_ctx);
-}
-
-static void poller_thread_loop(void *arg) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_error *error = GRPC_ERROR_NONE;
- epoll_set *eps = (epoll_set *)arg;
-
- while (!gpr_atm_acq_load(&eps->is_shutdown)) {
- epoll_set_work(&exec_ctx, eps, &error);
- grpc_exec_ctx_flush(&exec_ctx);
- }
-
- grpc_exec_ctx_finish(&exec_ctx);
- GRPC_LOG_IF_ERROR("poller_thread_loop", error);
-}
-
-/* g_epoll_sets MUST be initialized before calling this */
-static void start_poller_threads() {
- GPR_ASSERT(g_epoll_sets);
-
- gpr_log(GPR_INFO, "Starting poller threads");
-
- size_t num_threads = g_num_eps * g_num_threads_per_eps;
- g_poller_threads = (gpr_thd_id *)malloc(num_threads * sizeof(gpr_thd_id));
- gpr_thd_options options = gpr_thd_options_default();
- gpr_thd_options_set_joinable(&options);
-
- for (size_t i = 0; i < num_threads; i++) {
- gpr_thd_new(&g_poller_threads[i], poller_thread_loop,
- (void *)g_epoll_sets[i % g_num_eps], &options);
- }
-}
-
-static void shutdown_poller_threads() {
- GPR_ASSERT(g_poller_threads);
- GPR_ASSERT(g_epoll_sets);
- grpc_error *error = GRPC_ERROR_NONE;
-
- gpr_log(GPR_INFO, "Shutting down pollers");
-
- epoll_set *eps = NULL;
- size_t num_threads = g_num_eps * g_num_threads_per_eps;
- for (size_t i = 0; i < num_threads; i++) {
- eps = g_epoll_sets[i];
- epoll_set_add_wakeup_fd_locked(eps, &epoll_set_wakeup_fd, &error);
- }
-
- for (size_t i = 0; i < g_num_eps; i++) {
- gpr_thd_join(g_poller_threads[i]);
- }
-
- GRPC_LOG_IF_ERROR("shutdown_poller_threads", error);
- gpr_free(g_poller_threads);
- g_poller_threads = NULL;
-}
-
-/****************************************************************************/
-
-/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
- * Create a dummy epoll_fd to make sure epoll support is available */
-static bool is_epoll_available() {
- int fd = epoll_create1(EPOLL_CLOEXEC);
- if (fd < 0) {
- gpr_log(
- GPR_ERROR,
- "epoll_create1 failed with error: %d. Not using epoll polling engine",
- fd);
- return false;
- }
- close(fd);
- return true;
-}
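
The probe above guards against the glibc-has-it-but-kernel-doesn't case by actually exercising the syscall once. Extracted as a self-contained sketch (same logic, standalone includes):

#include <stdbool.h>
#include <sys/epoll.h>
#include <unistd.h>

/* glibc may ship the epoll_create1 wrapper even when the running kernel
   rejects the syscall, so try it once and close the probe fd. */
static bool kernel_supports_epoll(void) {
  int fd = epoll_create1(EPOLL_CLOEXEC);
  if (fd < 0) return false;
  close(fd);
  return true;
}
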
-
-const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
- bool requested_explicitly) {
- if (!requested_explicitly) return NULL;
-
- if (!grpc_has_wakeup_fd()) {
- return NULL;
- }
-
- if (!is_epoll_available()) {
- return NULL;
- }
-
- fd_global_init();
-
- if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
- return NULL;
- }
-
- if (!GRPC_LOG_IF_ERROR("epoll_set_global_init", epoll_set_global_init())) {
- return NULL;
- }
-
- if (!init_epoll_sets()) {
- return NULL;
- }
-
- /* TODO (sreek): May not be a good idea to start threads here (especially if
- * this engine doesn't get picked). Consider introducing an engine_init
- * function in the vtable */
- start_poller_threads();
- return &vtable;
-}
-
-#else /* defined(GRPC_LINUX_EPOLL) */
-#if defined(GRPC_POSIX_SOCKET)
-#include "src/core/lib/iomgr/ev_posix.h"
-/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
- * NULL */
-const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
- bool requested_explicitly) {
- return NULL;
-}
-#endif /* defined(GRPC_POSIX_SOCKET) */
-#endif /* !defined(GRPC_LINUX_EPOLL) */
diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c
index 770d1fd0a9..8eb4de44d9 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.c
+++ b/src/core/lib/iomgr/ev_epollex_linux.c
@@ -37,6 +37,7 @@
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
@@ -49,7 +50,7 @@
#include "src/core/lib/support/spinlock.h"
/*******************************************************************************
- * Pollset-set sibling link
+ * Polling object
*/
typedef enum {
@@ -141,7 +142,7 @@ static grpc_error *pollable_materialize(pollable *p);
*/
struct grpc_fd {
- pollable pollable;
+ pollable pollable_obj;
int fd;
/* refst format:
bit 0 : 1=Active / 0=Orphaned
@@ -192,15 +193,15 @@ struct grpc_pollset_worker {
pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
gpr_cv cv;
grpc_pollset *pollset;
- pollable *pollable;
+ pollable *pollable_obj;
};
#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5
struct grpc_pollset {
- pollable pollable;
- pollable *current_pollable;
+ pollable pollable_obj;
+ pollable *current_pollable_obj;
int kick_alls_pending;
bool kicked_without_poller;
grpc_closure *shutdown_closure;
@@ -278,10 +279,10 @@ static void ref_by(grpc_fd *fd, int n) {
}
static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_fd *fd = arg;
+ grpc_fd *fd = (grpc_fd *)arg;
/* Add the fd to the freelist */
grpc_iomgr_unregister_object(&fd->iomgr_object);
- pollable_destroy(&fd->pollable);
+ pollable_destroy(&fd->pollable_obj);
gpr_mu_destroy(&fd->orphaned_mu);
gpr_mu_lock(&fd_freelist_mu);
fd->freelist_next = fd_freelist;
@@ -339,10 +340,10 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
- pollable_init(&new_fd->pollable, PO_FD);
+ pollable_init(&new_fd->pollable_obj, PO_FD);
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
@@ -384,7 +385,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
bool is_fd_closed = already_closed;
grpc_error *error = GRPC_ERROR_NONE;
- gpr_mu_lock(&fd->pollable.po.mu);
+ gpr_mu_lock(&fd->pollable_obj.po.mu);
gpr_mu_lock(&fd->orphaned_mu);
fd->on_done_closure = on_done;
@@ -410,7 +411,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->orphaned_mu);
- gpr_mu_unlock(&fd->pollable.po.mu);
+ gpr_mu_unlock(&fd->pollable_obj.po.mu);
UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
GRPC_ERROR_UNREF(error);
@@ -476,8 +477,9 @@ static grpc_error *pollable_materialize(pollable *p) {
close(new_epfd);
return err;
}
- struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
- .data.ptr = (void *)(1 | (intptr_t)&p->wakeup)};
+ struct epoll_event ev;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.data.ptr = (void *)(1 | (intptr_t)&p->wakeup);
if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) {
err = GRPC_OS_ERROR(errno, "epoll_ctl");
close(new_epfd);
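
The hunks around this point replace designated initializers for struct epoll_event with plain assignments (designated initializers are a C99 feature rejected by some compilers this file must build under). Note also the value stored in ev.data.ptr: the wakeup-fd pointer is tagged by setting its low alignment bit, which is how the poll loop later tells wakeup events apart from fd events. A minimal sketch of that tag/untag scheme; the helper names are illustrative:

#include <stdint.h>

/* Heap pointers are at least 2-byte aligned, so bit 0 is free to carry a
   one-bit type tag. */
static void *tag_wakeup(void *p) { return (void *)(1 | (intptr_t)p); }
static int is_wakeup(void *data_ptr) { return ((intptr_t)data_ptr & 1) != 0; }
static void *untag(void *data_ptr) {
  return (void *)(~(intptr_t)1 & (intptr_t)data_ptr);
}

The cast in the pollset_process_events hunk further down ((grpc_wakeup_fd *)((~(intptr_t)1) & (intptr_t)data_ptr)) is exactly this untag step.
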
@@ -506,9 +508,9 @@ static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
gpr_mu_unlock(&fd->orphaned_mu);
return GRPC_ERROR_NONE;
}
- struct epoll_event ev_fd = {
- .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE),
- .data.ptr = fd};
+ struct epoll_event ev_fd;
+ ev_fd.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
+ ev_fd.data.ptr = fd;
if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
switch (errno) {
case EEXIST:
@@ -555,31 +557,35 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_unused) {
grpc_error *error = GRPC_ERROR_NONE;
- grpc_pollset *pollset = arg;
- gpr_mu_lock(&pollset->pollable.po.mu);
+ grpc_pollset *pollset = (grpc_pollset *)arg;
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_lock(&worker->pollable->po.mu);
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ if (worker->pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_lock(&worker->pollable_obj->po.mu);
}
if (worker->initialized_cv && worker != pollset->root_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_cv %p (pollable %p vs %p)",
- pollset, worker, &pollset->pollable, worker->pollable);
+ pollset, worker, &pollset->pollable_obj,
+ worker->pollable_obj);
}
worker->kicked = true;
gpr_cv_signal(&worker->cv);
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p kickall_via_wakeup %p (pollable %p vs %p)",
- pollset, worker, &pollset->pollable, worker->pollable);
+ pollset, worker, &pollset->pollable_obj,
+ worker->pollable_obj);
}
- append_error(&error, grpc_wakeup_fd_wakeup(&worker->pollable->wakeup),
+ append_error(&error,
+ grpc_wakeup_fd_wakeup(&worker->pollable_obj->wakeup),
"pollset_shutdown");
}
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker->pollable->po.mu);
+ if (worker->pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&worker->pollable_obj->po.mu);
}
worker = worker->links[PWL_POLLSET].next;
@@ -587,7 +593,7 @@ static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
}
pollset->kick_alls_pending--;
pollset_maybe_finish_shutdown(exec_ctx, pollset);
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
GRPC_LOG_IF_ERROR("kick_all", error);
}
@@ -661,26 +667,27 @@ static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p,
}
/* p->po.mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *pollset,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
- pollable *p = pollset->current_pollable;
- if (p != &pollset->pollable) {
+ pollable *p = pollset->current_pollable_obj;
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ if (p != &pollset->pollable_obj) {
gpr_mu_lock(&p->po.mu);
}
grpc_error *error = pollset_kick_inner(pollset, p, specific_worker);
- if (p != &pollset->pollable) {
+ if (p != &pollset->pollable_obj) {
gpr_mu_unlock(&p->po.mu);
}
return error;
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- pollable_init(&pollset->pollable, PO_POLLSET);
- pollset->current_pollable = &g_empty_pollable;
+ pollable_init(&pollset->pollable_obj, PO_POLLSET);
+ pollset->current_pollable_obj = &g_empty_pollable;
pollset->kicked_without_poller = false;
pollset->shutdown_closure = NULL;
pollset->root_worker = NULL;
- *mu = &pollset->pollable.po.mu;
+ *mu = &pollset->pollable_obj.po.mu;
}
/* Convert a timespec to milliseconds:
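
pollset_kick now receives the grpc_exec_ctx so GRPC_STATS_INC_POLLSET_KICK can charge the kick to the calling context; the same signature change recurs across every engine in this diff. Schematically (types simplified and illustrative, not the real stats machinery):

/* Per-context counters are plain fields: no atomics needed, since each
   context is single-threaded; totals are flushed in bulk later. */
struct my_exec_ctx { unsigned long pollset_kick_count; };

static int kick(struct my_exec_ctx *exec_ctx /*, pollset, specific_worker */) {
  exec_ctx->pollset_kick_count++;
  /* ... perform the actual wakeup as before ... */
  return 0;
}
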
@@ -702,7 +709,10 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
}
static const gpr_timespec round_up = {
- .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
+ 0, /* tv_sec */
+ GPR_NS_PER_MS - 1, /* tv_nsec */
+ GPR_TIMESPAN /* clock_type */
+ };
timeout = gpr_time_sub(deadline, now);
int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
return millis >= 1 ? millis : 1;
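
The rewritten round_up constant uses positional initialization (again, for compilers without designated-initializer support) but encodes the same value: one nanosecond short of a millisecond, added before truncation so a poll never returns ahead of its deadline. The conversion, assembled into a standalone sketch that assumes only the gpr time helpers already used in this file (the infinite-deadline fast paths of the real function are omitted):

#include <grpc/support/time.h>

static int deadline_to_millis(gpr_timespec deadline, gpr_timespec now) {
  static const gpr_timespec round_up = {
      0,                 /* tv_sec */
      GPR_NS_PER_MS - 1, /* tv_nsec */
      GPR_TIMESPAN       /* clock_type */
  };
  gpr_timespec timeout = gpr_time_sub(deadline, now);
  int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
  return millis >= 1 ? millis : 1; /* clamp: never hand epoll a 0 timeout */
}
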
@@ -728,8 +738,8 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "fd_become_pollable";
- if (append_error(&error, pollable_materialize(&fd->pollable), err_desc)) {
- append_error(&error, pollable_add_fd(&fd->pollable, fd), err_desc);
+ if (append_error(&error, pollable_materialize(&fd->pollable_obj), err_desc)) {
+ append_error(&error, pollable_add_fd(&fd->pollable_obj, fd), err_desc);
}
return error;
}
@@ -744,7 +754,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) {
- return p != &g_empty_pollable && p != &pollset->pollable;
+ return p != &g_empty_pollable && p != &pollset->pollable_obj;
}
static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
@@ -761,8 +771,9 @@ static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
- append_error(&error, grpc_wakeup_fd_consume_wakeup(
- (void *)((~(intptr_t)1) & (intptr_t)data_ptr)),
+ append_error(&error,
+ grpc_wakeup_fd_consume_wakeup(
+ (grpc_wakeup_fd *)((~(intptr_t)1) & (intptr_t)data_ptr)),
err_desc);
} else {
grpc_fd *fd = (grpc_fd *)data_ptr;
@@ -789,9 +800,9 @@ static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- pollable_destroy(&pollset->pollable);
- if (pollset_is_pollable_fd(pollset, pollset->current_pollable)) {
- UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable, 2,
+ pollable_destroy(&pollset->pollable_obj);
+ if (pollset_is_pollable_fd(pollset, pollset->current_pollable_obj)) {
+ UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable_obj, 2,
"pollset_pollable");
}
GRPC_LOG_IF_ERROR("pollset_process_events",
@@ -814,6 +825,7 @@ static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
int r;
do {
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
r = epoll_wait(p->epfd, pollset->events, MAX_EPOLL_EVENTS, timeout);
} while (r < 0 && errno == EINTR);
if (timeout != 0) {
@@ -880,68 +892,69 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
worker->initialized_cv = false;
worker->kicked = false;
worker->pollset = pollset;
- worker->pollable = pollset->current_pollable;
+ worker->pollable_obj = pollset->current_pollable_obj;
- if (pollset_is_pollable_fd(pollset, worker->pollable)) {
- REF_BY((grpc_fd *)worker->pollable, 2, "one_poll");
+ if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) {
+ REF_BY((grpc_fd *)worker->pollable_obj, 2, "one_poll");
}
worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
- if (!worker_insert(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
+ if (!worker_insert(&worker->pollable_obj->root_worker, PWL_POLLABLE,
+ worker)) {
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ if (worker->pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
}
if (GRPC_TRACER_ON(grpc_polling_trace) &&
- worker->pollable->root_worker != worker) {
+ worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
- worker->pollable, worker,
+ worker->pollable_obj, worker,
poll_deadline_to_millis_timeout(deadline, *now));
}
- while (do_poll && worker->pollable->root_worker != worker) {
- if (gpr_cv_wait(&worker->cv, &worker->pollable->po.mu, deadline)) {
+ while (do_poll && worker->pollable_obj->root_worker != worker) {
+ if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu, deadline)) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
- worker->pollable, worker);
+ worker->pollable_obj, worker);
}
do_poll = false;
} else if (worker->kicked) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, worker->pollable,
- worker);
+ gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset,
+ worker->pollable_obj, worker);
}
do_poll = false;
} else if (GRPC_TRACER_ON(grpc_polling_trace) &&
- worker->pollable->root_worker != worker) {
+ worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
- worker->pollable, worker);
+ worker->pollable_obj, worker);
}
}
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker->pollable->po.mu);
- gpr_mu_lock(&pollset->pollable.po.mu);
- gpr_mu_lock(&worker->pollable->po.mu);
+ if (worker->pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&worker->pollable_obj->po.mu);
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
+ gpr_mu_lock(&worker->pollable_obj->po.mu);
}
*now = gpr_now(now->clock_type);
}
return do_poll && pollset->shutdown_closure == NULL &&
- pollset->current_pollable == worker->pollable;
+ pollset->current_pollable_obj == worker->pollable_obj;
}
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl) {
if (NEW_ROOT ==
- worker_remove(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
- gpr_cv_signal(&worker->pollable->root_worker->cv);
+ worker_remove(&worker->pollable_obj->root_worker, PWL_POLLABLE, worker)) {
+ gpr_cv_signal(&worker->pollable_obj->root_worker->cv);
}
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
- if (pollset_is_pollable_fd(pollset, worker->pollable)) {
- UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable, 2, "one_poll");
+ if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) {
+ UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable_obj, 2, "one_poll");
}
if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
pollset_maybe_finish_shutdown(exec_ctx, pollset);
@@ -969,48 +982,48 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->kicked_without_poller = false;
return GRPC_ERROR_NONE;
}
- if (pollset->current_pollable != &pollset->pollable) {
- gpr_mu_lock(&pollset->current_pollable->po.mu);
+ if (pollset->current_pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_lock(&pollset->current_pollable_obj->po.mu);
}
if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!pollset->shutdown_closure);
- append_error(&error, pollable_materialize(worker.pollable), err_desc);
- if (worker.pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker.pollable->po.mu);
+ append_error(&error, pollable_materialize(worker.pollable_obj), err_desc);
+ if (worker.pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&worker.pollable_obj->po.mu);
}
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
if (pollset->event_cursor == pollset->event_count) {
- append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable,
+ append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable_obj,
now, deadline),
err_desc);
}
append_error(&error, pollset_process_events(exec_ctx, pollset, false),
err_desc);
- gpr_mu_lock(&pollset->pollable.po.mu);
- if (worker.pollable != &pollset->pollable) {
- gpr_mu_lock(&worker.pollable->po.mu);
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
+ if (worker.pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_lock(&worker.pollable_obj->po.mu);
}
gpr_tls_set(&g_current_thread_pollset, 0);
gpr_tls_set(&g_current_thread_worker, 0);
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
end_worker(exec_ctx, pollset, &worker, worker_hdl);
- if (worker.pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker.pollable->po.mu);
+ if (worker.pollable_obj != &pollset->pollable_obj) {
+ gpr_mu_unlock(&worker.pollable_obj->po.mu);
}
if (grpc_exec_ctx_has_work(exec_ctx)) {
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->pollable.po.mu);
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
}
return error;
}
static void unref_fd_no_longer_poller(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_fd *fd = arg;
+ grpc_fd *fd = (grpc_fd *)arg;
UNREF_BY(exec_ctx, fd, 2, "pollset_pollable");
}
@@ -1020,7 +1033,7 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
bool fd_locked) {
static const char *err_desc = "pollset_add_fd";
grpc_error *error = GRPC_ERROR_NONE;
- if (pollset->current_pollable == &g_empty_pollable) {
+ if (pollset->current_pollable_obj == &g_empty_pollable) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from empty to fd", pollset,
@@ -1028,19 +1041,19 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
}
/* empty pollable --> single fd pollable */
pollset_kick_all(exec_ctx, pollset);
- pollset->current_pollable = &fd->pollable;
- if (!fd_locked) gpr_mu_lock(&fd->pollable.po.mu);
+ pollset->current_pollable_obj = &fd->pollable_obj;
+ if (!fd_locked) gpr_mu_lock(&fd->pollable_obj.po.mu);
append_error(&error, fd_become_pollable_locked(fd), err_desc);
- if (!fd_locked) gpr_mu_unlock(&fd->pollable.po.mu);
+ if (!fd_locked) gpr_mu_unlock(&fd->pollable_obj.po.mu);
REF_BY(fd, 2, "pollset_pollable");
- } else if (pollset->current_pollable == &pollset->pollable) {
+ } else if (pollset->current_pollable_obj == &pollset->pollable_obj) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p add fd %p; already multipolling", pollset, fd);
}
- append_error(&error, pollable_add_fd(pollset->current_pollable, fd),
+ append_error(&error, pollable_add_fd(pollset->current_pollable_obj, fd),
err_desc);
- } else if (pollset->current_pollable != &fd->pollable) {
- grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable;
+ } else if (pollset->current_pollable_obj != &fd->pollable_obj) {
+ grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable_obj;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p; transition pollable from fd %p to multipoller",
@@ -1052,11 +1065,11 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read");
grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write");
pollset_kick_all(exec_ctx, pollset);
- pollset->current_pollable = &pollset->pollable;
- if (append_error(&error, pollable_materialize(&pollset->pollable),
+ pollset->current_pollable_obj = &pollset->pollable_obj;
+ if (append_error(&error, pollable_materialize(&pollset->pollable_obj),
err_desc)) {
- pollable_add_fd(&pollset->pollable, had_fd);
- pollable_add_fd(&pollset->pollable, fd);
+ pollable_add_fd(&pollset->pollable_obj, had_fd);
+ pollable_add_fd(&pollset->pollable_obj, fd);
}
GRPC_CLOSURE_SCHED(exec_ctx,
GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd,
@@ -1068,9 +1081,9 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
- gpr_mu_lock(&pollset->pollable.po.mu);
+ gpr_mu_lock(&pollset->pollable_obj.po.mu);
grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd, false);
- gpr_mu_unlock(&pollset->pollable.po.mu);
+ gpr_mu_unlock(&pollset->pollable_obj.po.mu);
GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}
@@ -1079,7 +1092,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = gpr_zalloc(sizeof(*pss));
+ grpc_pollset_set *pss = (grpc_pollset_set *)gpr_zalloc(sizeof(*pss));
po_init(&pss->po, PO_POLLSET_SET);
return pss;
}
@@ -1092,7 +1105,7 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
grpc_fd *fd) {
- po_join(exec_ctx, &pss->po, &fd->pollable.po);
+ po_join(exec_ctx, &pss->po, &fd->pollable_obj.po);
}
static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
@@ -1100,7 +1113,7 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pss, grpc_pollset *ps) {
- po_join(exec_ctx, &pss->po, &ps->pollable.po);
+ po_join(exec_ctx, &pss->po, &ps->pollable_obj.po);
}
static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
@@ -1241,7 +1254,7 @@ static void pg_broadcast(grpc_exec_ctx *exec_ctx, polling_group *from,
static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po,
size_t initial_po_count) {
/* assumes all polling objects in initial_po are locked */
- polling_group *pg = gpr_malloc(sizeof(*pg));
+ polling_group *pg = (polling_group *)gpr_malloc(sizeof(*pg));
po_init(&pg->po, PO_POLLING_GROUP);
gpr_ref_init(&pg->refs, (int)initial_po_count);
for (size_t i = 0; i < initial_po_count; i++) {
@@ -1351,7 +1364,7 @@ static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a,
gpr_mu_lock(&po->mu);
if (unref_count == unref_cap) {
unref_cap = GPR_MAX(8, 3 * unref_cap / 2);
- unref = gpr_realloc(unref, unref_cap * sizeof(*unref));
+ unref = (polling_group **)gpr_realloc(unref, unref_cap * sizeof(*unref));
}
unref[unref_count++] = po->group;
po->group = pg_ref(a);
@@ -1383,34 +1396,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_epollex_linux(
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.c
index 070d75e42a..4d8bdf1401 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.c
+++ b/src/core/lib/iomgr/ev_epollsig_linux.c
@@ -39,6 +39,7 @@
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
@@ -362,7 +363,8 @@ static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
if (pi->fd_cnt == pi->fd_capacity) {
pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
- pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
+ pi->fds =
+ (grpc_fd **)gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
}
pi->fds[pi->fd_cnt++] = fds[i];
@@ -465,7 +467,7 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
- pi = gpr_malloc(sizeof(*pi));
+ pi = (polling_island *)gpr_malloc(sizeof(*pi));
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
@@ -809,7 +811,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
gpr_mu_init(&new_fd->po.mu);
}
@@ -1019,10 +1021,11 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
}
/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
grpc_error *error = GRPC_ERROR_NONE;
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
const char *err_desc = "Kick Failure";
grpc_pollset_worker *worker = specific_worker;
if (worker != NULL) {
@@ -1130,7 +1133,8 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
- grpc_pollset *ps, char *reason) {
+ grpc_pollset *ps,
+ const char *reason) {
if (ps->po.pi != NULL) {
PI_UNREF(exec_ctx, ps->po.pi, reason);
}
@@ -1156,7 +1160,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = true;
pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
/* If the pollset has any workers, we cannot call finish_shutdown_locked()
because it would release the underlying polling island. In such a case, we
@@ -1236,6 +1240,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
g_current_thread_polling_island = pi;
GRPC_SCHEDULING_START_BLOCKING_REGION;
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
ep_rv =
epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
GRPC_SCHEDULING_END_BLOCKING_REGION;
@@ -1271,7 +1276,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
to the function pollset_work_and_unlock() will pick up the correct
epoll_fd */
} else {
- grpc_fd *fd = data_ptr;
+ grpc_fd *fd = (grpc_fd *)data_ptr;
int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
@@ -1567,7 +1572,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
+ grpc_pollset_set *pss = (grpc_pollset_set *)gpr_malloc(sizeof(*pss));
gpr_mu_init(&pss->po.mu);
pss->po.pi = NULL;
#ifndef NDEBUG
@@ -1645,8 +1650,8 @@ void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
}
bool grpc_are_polling_islands_equal(void *p, void *q) {
- polling_island *p1 = p;
- polling_island *p2 = q;
+ polling_island *p1 = (polling_island *)p;
+ polling_island *p2 = (polling_island *)q;
/* Note: polling_island_lock_pair() may change p1 and p2 to point to the
latest polling islands in their respective linked lists */
@@ -1667,34 +1672,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
@@ -1728,9 +1733,7 @@ const grpc_event_engine_vtable *grpc_init_epollsig_linux(
}
if (!is_grpc_wakeup_signal_initialized) {
- /* TODO(ctiller): when other epoll engines are ready, remove the true || to
- * force this to be explicitly chosen if needed */
- if (true || explicit_request) {
+ if (explicit_request) {
grpc_use_signal(SIGRTMIN + 6);
} else {
return NULL;
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 9472a8e520..e170702dca 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -36,6 +36,7 @@
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
@@ -208,7 +209,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
/* As per pollset_kick, with an extended set of flags (defined above)
-- mostly for fd_posix's use. */
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
+static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) GRPC_MUST_USE_RESULT;
@@ -326,7 +327,7 @@ static void unref_by(grpc_fd *fd, int n) {
}
static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *r = gpr_malloc(sizeof(*r));
+ grpc_fd *r = (grpc_fd *)gpr_malloc(sizeof(*r));
gpr_mu_init(&r->mu);
gpr_atm_rel_store(&r->refst, 1);
r->shutdown = 0;
@@ -364,36 +365,39 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
return notifier;
}
-static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) {
+static grpc_error *pollset_kick_locked(grpc_exec_ctx *exec_ctx,
+ grpc_fd_watcher *watcher) {
gpr_mu_lock(&watcher->pollset->mu);
GPR_ASSERT(watcher->worker);
- grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker,
- GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
+ grpc_error *err =
+ pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker,
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(&watcher->pollset->mu);
return err;
}
-static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
+static void maybe_wake_one_watcher_locked(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
- pollset_kick_locked(fd->inactive_watcher_root.next);
+ pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next);
} else if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
+ pollset_kick_locked(exec_ctx, fd->read_watcher);
} else if (fd->write_watcher) {
- pollset_kick_locked(fd->write_watcher);
+ pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
-static void wake_all_watchers_locked(grpc_fd *fd) {
+static void wake_all_watchers_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_fd_watcher *watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
- pollset_kick_locked(watcher);
+ pollset_kick_locked(exec_ctx, watcher);
}
if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
+ pollset_kick_locked(exec_ctx, fd->read_watcher);
}
if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
- pollset_kick_locked(fd->write_watcher);
+ pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
@@ -434,7 +438,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
if (!has_watchers(fd)) {
close_fd_locked(exec_ctx, fd);
} else {
- wake_all_watchers_locked(fd);
+ wake_all_watchers_locked(exec_ctx, fd);
}
gpr_mu_unlock(&fd->mu);
UNREF_BY(fd, 2, reason); /* drop the reference */
@@ -478,7 +482,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd));
- maybe_wake_one_watcher_locked(fd);
+ maybe_wake_one_watcher_locked(exec_ctx, fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
gpr_log(GPR_ERROR,
@@ -647,7 +651,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
}
}
if (kick) {
- maybe_wake_one_watcher_locked(fd);
+ maybe_wake_one_watcher_locked(exec_ctx, fd);
}
if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
close_fd_locked(exec_ctx, fd);
@@ -711,11 +715,12 @@ static void kick_append_error(grpc_error **composite, grpc_error *error) {
*composite = grpc_error_add_child(*composite, error);
}
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
+static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) {
GPR_TIMER_BEGIN("pollset_kick_ext", 0);
grpc_error *error = GRPC_ERROR_NONE;
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
/* pollset->mu already held */
if (specific_worker != NULL) {
@@ -781,9 +786,9 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p,
return error;
}
-static grpc_error *pollset_kick(grpc_pollset *p,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
- return pollset_kick_ext(p, specific_worker, 0);
+ return pollset_kick_ext(exec_ctx, p, specific_worker, 0);
}
/* global state management */
@@ -841,12 +846,12 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (pollset->fd_count == pollset->fd_capacity) {
pollset->fd_capacity =
GPR_MAX(pollset->fd_capacity + 8, pollset->fd_count * 3 / 2);
- pollset->fds =
- gpr_realloc(pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity);
+ pollset->fds = (grpc_fd **)gpr_realloc(
+ pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity);
}
pollset->fds[pollset->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
- pollset_kick(pollset, NULL);
+ pollset_kick(exec_ctx, pollset, NULL);
exit:
gpr_mu_unlock(&pollset->mu);
}
@@ -894,7 +899,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
worker.wakeup_fd = pollset->local_wakeup_cache;
pollset->local_wakeup_cache = worker.wakeup_fd->next;
} else {
- worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
+ worker.wakeup_fd =
+ (grpc_cached_wakeup_fd *)gpr_malloc(sizeof(*worker.wakeup_fd));
error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
@@ -949,8 +955,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
const size_t pfd_size = sizeof(*pfds) * (pollset->fd_count + 2);
const size_t watch_size = sizeof(*watchers) * (pollset->fd_count + 2);
void *buf = gpr_malloc(pfd_size + watch_size);
- pfds = buf;
- watchers = (void *)((char *)buf + pfd_size);
+ pfds = (struct pollfd *)buf;
+ watchers = (grpc_fd_watcher *)(void *)((char *)buf + pfd_size);
}
fd_count = 0;
@@ -983,9 +989,14 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
even going into the blocking annotation if possible */
GRPC_SCHEDULING_START_BLOCKING_REGION;
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
+ }
+
if (r < 0) {
if (errno != EINTR) {
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
@@ -1006,6 +1017,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset);
+ }
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}
@@ -1013,6 +1027,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
+ pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
+ (pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
+ }
fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
@@ -1068,7 +1087,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* check shutdown conditions */
if (pollset->shutting_down) {
if (pollset_has_workers(pollset)) {
- pollset_kick(pollset, NULL);
+ pollset_kick(exec_ctx, pollset, NULL);
} else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
@@ -1097,7 +1116,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset_has_workers(pollset)) {
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
}
@@ -1129,7 +1148,8 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pollset_set = gpr_zalloc(sizeof(*pollset_set));
+ grpc_pollset_set *pollset_set =
+ (grpc_pollset_set *)gpr_zalloc(sizeof(*pollset_set));
gpr_mu_init(&pollset_set->mu);
return pollset_set;
}
@@ -1172,9 +1192,9 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
pollset_set->pollset_capacity =
GPR_MAX(8, 2 * pollset_set->pollset_capacity);
- pollset_set->pollsets =
- gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
- sizeof(*pollset_set->pollsets));
+ pollset_set->pollsets = (grpc_pollset **)gpr_realloc(
+ pollset_set->pollsets,
+ pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets));
}
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
@@ -1223,9 +1243,9 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&bag->mu);
if (bag->pollset_set_count == bag->pollset_set_capacity) {
bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
- bag->pollset_sets =
- gpr_realloc(bag->pollset_sets,
- bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
+ bag->pollset_sets = (grpc_pollset_set **)gpr_realloc(
+ bag->pollset_sets,
+ bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
}
bag->pollset_sets[bag->pollset_set_count++] = item;
for (i = 0, j = 0; i < bag->fd_count; i++) {
@@ -1262,7 +1282,7 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
- pollset_set->fds = gpr_realloc(
+ pollset_set->fds = (grpc_fd **)gpr_realloc(
pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
}
GRPC_FD_REF(fd, "pollset_set");
@@ -1316,11 +1336,12 @@ static void cache_insert_locked(poll_args *args) {
}
static void init_result(poll_args *pargs) {
- pargs->result = gpr_malloc(sizeof(poll_result));
+ pargs->result = (poll_result *)gpr_malloc(sizeof(poll_result));
gpr_ref_init(&pargs->result->refcount, 1);
pargs->result->watchers = NULL;
pargs->result->watchcount = 0;
- pargs->result->fds = gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
+ pargs->result->fds =
+ (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
memcpy(pargs->result->fds, pargs->fds, sizeof(struct pollfd) * pargs->nfds);
pargs->result->nfds = pargs->nfds;
pargs->result->retval = 0;
@@ -1359,7 +1380,7 @@ static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) {
return pargs;
}
- poll_args *pargs = gpr_malloc(sizeof(struct poll_args));
+ poll_args *pargs = (poll_args *)gpr_malloc(sizeof(struct poll_args));
gpr_cv_init(&pargs->trigger);
pargs->fds = fds;
pargs->nfds = count;
@@ -1406,7 +1427,8 @@ static void cache_poller_locked(poll_args *args) {
poll_args **old_active_pollers = poll_cache.active_pollers;
poll_cache.size = poll_cache.size * 2;
poll_cache.count = 0;
- poll_cache.active_pollers = gpr_malloc(sizeof(void *) * poll_cache.size);
+ poll_cache.active_pollers =
+ (poll_args **)gpr_malloc(sizeof(void *) * poll_cache.size);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}
@@ -1511,17 +1533,17 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
nfds_t nsockfds = 0;
poll_result *result = NULL;
gpr_mu_lock(&g_cvfds.mu);
- pollcv = gpr_malloc(sizeof(cv_node));
+ pollcv = (cv_node *)gpr_malloc(sizeof(cv_node));
pollcv->next = NULL;
gpr_cv pollcv_cv;
gpr_cv_init(&pollcv_cv);
pollcv->cv = &pollcv_cv;
- cv_node *fd_cvs = gpr_malloc(nfds * sizeof(cv_node));
+ cv_node *fd_cvs = (cv_node *)gpr_malloc(nfds * sizeof(cv_node));
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
- idx = FD_TO_IDX(fds[i].fd);
+ idx = GRPC_FD_TO_IDX(fds[i].fd);
fd_cvs[i].cv = &pollcv_cv;
fd_cvs[i].prev = NULL;
fd_cvs[i].next = g_cvfds.cvfds[idx].cvs;
@@ -1548,7 +1570,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
res = 0;
if (!skip_poll && nsockfds > 0) {
- struct pollfd *pollfds = gpr_malloc(sizeof(struct pollfd) * nsockfds);
+ struct pollfd *pollfds =
+ (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * nsockfds);
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd >= 0) {
@@ -1583,8 +1606,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
- remove_cvn(&g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs, &(fd_cvs[i]));
- if (g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].is_set) {
+ remove_cvn(&g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].cvs, &(fd_cvs[i]));
+ if (g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].is_set) {
fds[i].revents = POLLIN;
if (res >= 0) res++;
}
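
GRPC_FD_TO_IDX is the namespaced rename of FD_TO_IDX: condition-variable-backed file descriptors are handed out as negative integers so they can never collide with real kernel fds, and the macro maps them back to slots in the g_cvfds table. The encoding, as a sketch (macro names here are illustrative; the real ones live alongside the cv wakeup-fd code):

/* fake fd -1 <-> index 0, -2 <-> index 1, ...: negative by construction */
#define IDX_TO_FAKE_FD(idx) (-((int)(idx)) - 1)
#define FAKE_FD_TO_IDX(fd) (-(fd)-1)

Callers recognize a fake fd simply by its sign, which is why the loops above test fds[i].fd < 0 before applying the macro.
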
@@ -1611,7 +1634,8 @@ static void global_cv_fd_table_init() {
gpr_cv_init(&g_cvfds.shutdown_cv);
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
- g_cvfds.cvfds = gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
+ g_cvfds.cvfds =
+ (fd_node *)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.free_fds = NULL;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
@@ -1628,7 +1652,7 @@ static void global_cv_fd_table_init() {
poll_cache.size = 32;
poll_cache.count = 0;
poll_cache.free_pollers = NULL;
- poll_cache.active_pollers = gpr_malloc(sizeof(void *) * 32);
+ poll_cache.active_pollers = (poll_args **)gpr_malloc(sizeof(void *) * 32);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}
@@ -1668,34 +1692,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request) {
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 91f8cd5482..4d3ae2228e 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -31,8 +31,6 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
-#include "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h"
-#include "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h"
#include "src/core/lib/iomgr/ev_epollex_linux.h"
#include "src/core/lib/iomgr/ev_epollsig_linux.h"
#include "src/core/lib/iomgr/ev_poll_posix.h"
@@ -64,10 +62,8 @@ typedef struct {
} event_engine_factory;
static const event_engine_factory g_factories[] = {
- {"epollsig", grpc_init_epollsig_linux},
{"epoll1", grpc_init_epoll1_linux},
- {"epoll-threadpool", grpc_init_epoll_thread_pool_linux},
- {"epoll-limited", grpc_init_epoll_limited_pollers_linux},
+ {"epollsig", grpc_init_epollsig_linux},
{"poll", grpc_init_poll_posix},
{"poll-cv", grpc_init_poll_cv_posix},
{"epollex", grpc_init_epollex_linux},
@@ -80,10 +76,10 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
- s = gpr_malloc(len + 1);
+ s = (char *)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
- *ss = gpr_realloc(*ss, sizeof(char **) * np);
+ *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}
@@ -214,9 +210,9 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return g_event_engine->pollset_work(exec_ctx, pollset, worker, now, deadline);
}
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
- return g_event_engine->pollset_kick(pollset, specific_worker);
+ return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker);
}
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 1108e46ef8..1ff2ff1413 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -54,7 +54,7 @@ typedef struct grpc_event_engine_vtable {
grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker, gpr_timespec now,
gpr_timespec deadline);
- grpc_error *(*pollset_kick)(grpc_pollset *pollset,
+ grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd);
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index a0d2a965d5..c89792c8c4 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -19,6 +19,7 @@
#ifndef GRPC_CORE_LIB_IOMGR_EXEC_CTX_H
#define GRPC_CORE_LIB_IOMGR_EXEC_CTX_H
+#include <grpc/support/cpu.h>
#include "src/core/lib/iomgr/closure.h"
/* #define GRPC_EXECUTION_CONTEXT_SANITIZER 1 */
@@ -62,6 +63,7 @@ struct grpc_exec_ctx {
/** last active combiner in the active combiner list */
grpc_combiner *last_combiner;
uintptr_t flags;
+ unsigned starting_cpu;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
};
@@ -69,7 +71,10 @@ struct grpc_exec_ctx {
/* initializer for grpc_exec_ctx:
prefer to use GRPC_EXEC_CTX_INIT whenever possible */
#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \
- { GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, finish_check_arg, finish_check }
+ { \
+ GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, gpr_cpu_current_cpu(), \
+ finish_check_arg, finish_check \
+ }
/* initialize an execution context at the top level of an API call into grpc
(this is safe to use elsewhere, though possibly not as efficient) */
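
With starting_cpu added to grpc_exec_ctx, the initializer macro now calls gpr_cpu_current_cpu() at the moment the context is created, and the brace-initializer order tracks the new field order. Usage is unchanged; for a context created with flags 0 and no finish-check argument, the macro expands along these lines:

grpc_exec_ctx exec_ctx =
    GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
/* == { GRPC_CLOSURE_LIST_INIT, NULL, NULL, 0,
        gpr_cpu_current_cpu(), NULL, grpc_never_ready_to_finish } */
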
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index 7621a7fe75..2439f15a8a 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -28,18 +28,18 @@
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/spinlock.h"
-#define MAX_DEPTH 2
-
typedef struct {
gpr_mu mu;
gpr_cv cv;
grpc_closure_list elems;
- size_t depth;
bool shutdown;
+ bool queued_long_job;
gpr_thd_id id;
+ grpc_closure_list local_elems;
} thread_state;
static thread_state *g_thread_state;
@@ -49,25 +49,40 @@ static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
GPR_TLS_DECL(g_this_thread_state);
+static grpc_tracer_flag executor_trace =
+ GRPC_TRACER_INITIALIZER(false, "executor");
+
static void executor_thread(void *arg);
-static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
- size_t n = 0;
+static void run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+ int n = 0; // number of closures executed
- grpc_closure *c = list.head;
- while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error_data.error;
+ while (!grpc_closure_list_empty(*list)) {
+ grpc_closure *c = list->head;
+ grpc_closure_list_init(list);
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
+ if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
- c->scheduled = false;
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
+ c->file_created, c->line_created);
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
#endif
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- c = next;
- n++;
+ }
+#ifndef NDEBUG
+ c->scheduled = false;
+#endif
+ n++;
+ c->cb(exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
+ c = next;
+ grpc_exec_ctx_flush(exec_ctx);
+ }
}
- return n;
+ GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, n);
}
bool grpc_executor_is_threaded() {
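
run_closures now takes the list by pointer and loops until it stays empty: each closure may append follow-up work (and the grpc_exec_ctx_flush after every callback can do the same), so each batch is detached, executed, and the outer loop re-checks. A condensed sketch of that drain shape, with illustrative types:

typedef struct closure {
  struct closure *next;
  void (*cb)(void *arg);
  void *arg;
} closure;

static void drain(closure **head) {
  while (*head != NULL) {
    closure *c = *head;
    *head = NULL;                /* detach the current batch */
    while (c != NULL) {
      closure *next = c->next;   /* save first: cb may free c */
      c->cb(c->arg);
      c = next;                  /* new work lands back on *head */
    }
  }
}
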
@@ -81,7 +96,8 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
gpr_atm_no_barrier_store(&g_cur_threads, 1);
gpr_tls_init(&g_this_thread_state);
- g_thread_state = gpr_zalloc(sizeof(thread_state) * g_max_threads);
+ g_thread_state =
+ (thread_state *)gpr_zalloc(sizeof(thread_state) * g_max_threads);
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_init(&g_thread_state[i].mu);
gpr_cv_init(&g_thread_state[i].cv);
@@ -111,7 +127,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_destroy(&g_thread_state[i].mu);
gpr_cv_destroy(&g_thread_state[i].cv);
- run_closures(exec_ctx, g_thread_state[i].elems);
+ run_closures(exec_ctx, &g_thread_state[i].elems);
}
gpr_free(g_thread_state);
gpr_tls_destroy(&g_this_thread_state);
@@ -119,6 +135,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
}
void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
+ grpc_register_tracer(&executor_trace);
gpr_atm_no_barrier_store(&g_cur_threads, 0);
grpc_executor_set_threading(exec_ctx, true);
}
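
In the executor_push hunk below, callers that are not themselves executor threads pick a target thread by hashing the exec_ctx pointer over the live thread count (GPR_HASH_POINTER). The idea, sketched with an illustrative hash (the real macro likewise shifts the pointer before taking the modulus, and for the same reason):

#include <stddef.h>
#include <stdint.h>

static size_t hash_pointer(const void *p, size_t buckets) {
  uintptr_t v = (uintptr_t)p;
  v >>= 4; /* drop alignment bits so nearby allocations spread out */
  return (size_t)(v % buckets);
}
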
@@ -128,68 +145,166 @@ void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
}
static void executor_thread(void *arg) {
- thread_state *ts = arg;
+ thread_state *ts = (thread_state *)arg;
gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
- size_t subtract_depth = 0;
+ GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(&exec_ctx);
+
+ bool used = false;
for (;;) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step", (int)(ts - g_thread_state));
+ }
gpr_mu_lock(&ts->mu);
- ts->depth -= subtract_depth;
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
+ ts->queued_long_job = false;
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
if (ts->shutdown) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
+ (int)(ts - g_thread_state));
+ }
gpr_mu_unlock(&ts->mu);
break;
}
- grpc_closure_list exec = ts->elems;
+ if (!used) {
+ GRPC_STATS_INC_EXECUTOR_THREADS_USED(&exec_ctx);
+ used = true;
+ }
+ GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
+ GPR_ASSERT(grpc_closure_list_empty(ts->local_elems));
+ ts->local_elems = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
+ }
- subtract_depth = run_closures(&exec_ctx, exec);
- grpc_exec_ctx_flush(&exec_ctx);
+ run_closures(&exec_ctx, &ts->local_elems);
}
grpc_exec_ctx_finish(&exec_ctx);
}
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
- if (cur_thread_count == 0) {
- grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
- return;
- }
- thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
- if (ts == NULL) {
- ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
- }
- gpr_mu_lock(&ts->mu);
- if (grpc_closure_list_empty(ts->elems)) {
- gpr_cv_signal(&ts->cv);
+ grpc_error *error, bool is_short) {
+ bool retry_push;
+ if (is_short) {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
+ } else {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
}
- grpc_closure_list_append(&ts->elems, closure, error);
- ts->depth++;
- bool try_new_thread = ts->depth > MAX_DEPTH &&
- cur_thread_count < g_max_threads && !ts->shutdown;
- gpr_mu_unlock(&ts->mu);
- if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
- cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
- if (cur_thread_count < g_max_threads) {
- gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
-
- gpr_thd_options opt = gpr_thd_options_default();
- gpr_thd_options_set_joinable(&opt);
- gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
- &g_thread_state[cur_thread_count], &opt);
+ do {
+ retry_push = false;
+ size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count == 0) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
+ closure, closure->file_created, closure->line_created);
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
+#endif
+ }
+ grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+ return;
}
- gpr_spinlock_unlock(&g_adding_thread_lock);
- }
+ thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
+ if (ts == NULL) {
+ ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
+ } else {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
+ if (is_short) {
+ grpc_closure_list_append(&ts->local_elems, closure, error);
+ return;
+ }
+ }
+ thread_state *orig_ts = ts;
+
+ bool try_new_thread;
+ for (;;) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(
+ GPR_DEBUG,
+ "EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
+ closure, is_short ? "short" : "long", closure->file_created,
+ closure->line_created, (int)(ts - g_thread_state));
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
+ closure, is_short ? "short" : "long",
+ (int)(ts - g_thread_state));
+#endif
+ }
+ gpr_mu_lock(&ts->mu);
+ if (ts->queued_long_job) {
+        // If there's a long job queued, we never queue anything else to
+        // this thread's queue, since long jobs can take 'infinite' time
+        // and we must guarantee that short jobs are not starved.
+        // Instead, spin through the other threads' queues and try again.
+ gpr_mu_unlock(&ts->mu);
+ size_t idx = (size_t)(ts - g_thread_state);
+ ts = &g_thread_state[(idx + 1) % cur_thread_count];
+ if (ts == orig_ts) {
+ retry_push = true;
+ try_new_thread = true;
+ break;
+ }
+ continue;
+ }
+ if (grpc_closure_list_empty(ts->elems)) {
+ GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
+ gpr_cv_signal(&ts->cv);
+ }
+ grpc_closure_list_append(&ts->elems, closure, error);
+ try_new_thread = ts->elems.head != closure &&
+ cur_thread_count < g_max_threads && !ts->shutdown;
+ if (!is_short) ts->queued_long_job = true;
+ gpr_mu_unlock(&ts->mu);
+ break;
+ }
+ if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
+ cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count < g_max_threads) {
+ gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
+
+ gpr_thd_options opt = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&opt);
+ gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
+ &g_thread_state[cur_thread_count], &opt);
+ }
+ gpr_spinlock_unlock(&g_adding_thread_lock);
+ }
+ if (retry_push) {
+ GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
+ }
+ } while (retry_push);
+}
+
+static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ executor_push(exec_ctx, closure, error, true);
+}
+
+static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ executor_push(exec_ctx, closure, error, false);
}
-static const grpc_closure_scheduler_vtable executor_vtable = {
- executor_push, executor_push, "executor"};
-static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
-grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
+static const grpc_closure_scheduler_vtable executor_vtable_short = {
+ executor_push_short, executor_push_short, "executor"};
+static grpc_closure_scheduler executor_scheduler_short = {
+ &executor_vtable_short};
+
+static const grpc_closure_scheduler_vtable executor_vtable_long = {
+ executor_push_long, executor_push_long, "executor"};
+static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};
+
+grpc_closure_scheduler *grpc_executor_scheduler(
+ grpc_executor_job_length length) {
+ return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
+ : &executor_scheduler_long;
+}
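For context on the new API: the executor now exposes one scheduler per expected job length instead of the old global. A minimal sketch of scheduling a long-running closure, assuming the closure macros of this tree; my_long_cb and schedule_long_work are hypothetical names:

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/executor.h"

/* Hypothetical long-running callback; long jobs get a dedicated queue slot
   so they cannot starve short jobs (see queued_long_job above). */
static void my_long_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* ... potentially unbounded work ... */
}

static void schedule_long_work(grpc_exec_ctx *exec_ctx, void *arg) {
  grpc_closure *c = GRPC_CLOSURE_CREATE(
      my_long_cb, arg, grpc_executor_scheduler(GRPC_EXECUTOR_LONG));
  GRPC_CLOSURE_SCHED(exec_ctx, c, GRPC_ERROR_NONE);
}

Short jobs go through grpc_executor_scheduler(GRPC_EXECUTOR_SHORT) instead, as the resolve_address hunks further down do.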
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index c3382a0a12..0412c02790 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -21,6 +21,11 @@
#include "src/core/lib/iomgr/closure.h"
+typedef enum {
+ GRPC_EXECUTOR_SHORT,
+ GRPC_EXECUTOR_LONG
+} grpc_executor_job_length;
+
/** Initialize the global executor.
*
* This mechanism is meant to outsource work (grpc_closure instances) to a
@@ -28,7 +33,7 @@
* non-blocking solution available. */
void grpc_executor_init(grpc_exec_ctx *exec_ctx);
-extern grpc_closure_scheduler *grpc_executor_scheduler;
+grpc_closure_scheduler *grpc_executor_scheduler(grpc_executor_job_length);
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx);
diff --git a/src/core/lib/iomgr/iocp_windows.c b/src/core/lib/iomgr/iocp_windows.c
index e343f8a794..c082179c0b 100644
--- a/src/core/lib/iomgr/iocp_windows.c
+++ b/src/core/lib/iomgr/iocp_windows.c
@@ -27,6 +27,7 @@
#include <grpc/support/log_windows.h>
#include <grpc/support/thd.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/socket_windows.h"
@@ -65,6 +66,7 @@ grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
LPOVERLAPPED overlapped;
grpc_winsocket *socket;
grpc_winsocket_callback_info *info;
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
success = GetQueuedCompletionStatus(
g_iocp, &bytes, &completion_key, &overlapped,
deadline_to_millis_timeout(deadline, gpr_now(deadline.clock_type)));
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c
index 3d19953eeb..f63f190155 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.c
@@ -50,7 +50,7 @@ void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
grpc_executor_init(exec_ctx);
grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
g_root_object.next = g_root_object.prev = &g_root_object;
- g_root_object.name = "root";
+ g_root_object.name = (char *)"root";
grpc_network_status_init();
grpc_iomgr_platform_init();
}
@@ -164,13 +164,7 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) {
bool grpc_iomgr_abort_on_leaks(void) {
char *env = gpr_getenv("GRPC_ABORT_ON_LEAKS");
- if (env == NULL) return false;
- static const char *truthy[] = {"yes", "Yes", "YES", "true",
- "True", "TRUE", "1"};
- bool should_we = false;
- for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {
- if (0 == strcmp(env, truthy[i])) should_we = true;
- }
+ bool should_we = gpr_is_true(env);
gpr_free(env);
return should_we;
}
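The inlined truthy-string loop is folded into a shared gpr_is_true helper. A sketch of the contract being relied on here, reusing the list the hunk removes (the real helper is defined elsewhere in this change and may accept slightly different spellings):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Sketch of gpr_is_true's assumed behavior: NULL and unrecognized strings
   are false; the listed spellings are true. */
static bool is_true_sketch(const char *s) {
  static const char *truthy[] = {"yes", "Yes", "YES", "true",
                                 "True", "TRUE", "1"};
  if (s == NULL) return false;
  for (size_t i = 0; i < sizeof(truthy) / sizeof(truthy[0]); i++) {
    if (strcmp(s, truthy[i]) == 0) return true;
  }
  return false;
}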
diff --git a/src/core/lib/iomgr/is_epollexclusive_available.c b/src/core/lib/iomgr/is_epollexclusive_available.c
index e8a7d4d52c..d08844c0df 100644
--- a/src/core/lib/iomgr/is_epollexclusive_available.c
+++ b/src/core/lib/iomgr/is_epollexclusive_available.c
@@ -57,12 +57,12 @@ bool grpc_is_epollexclusive_available(void) {
close(fd);
return false;
}
- struct epoll_event ev = {
- /* choose events that should cause an error on
- EPOLLEXCLUSIVE enabled kernels - specifically the combination of
- EPOLLONESHOT and EPOLLEXCLUSIVE */
- .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT),
- .data.ptr = NULL};
+ struct epoll_event ev;
+ /* choose events that should cause an error on
+ EPOLLEXCLUSIVE enabled kernels - specifically the combination of
+ EPOLLONESHOT and EPOLLEXCLUSIVE */
+ ev.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT);
+ ev.data.ptr = NULL;
if (epoll_ctl(fd, EPOLL_CTL_ADD, evfd, &ev) != 0) {
if (errno != EINVAL) {
if (!logged_why_not) {
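The hunk above swaps a C99 designated initializer for plain member assignments, presumably so the file also compiles as C++ and under compilers without designated-initializer support. A minimal sketch of the two forms, under that assumption:

#include <sys/epoll.h>

static void ev_sketch(void) {
  /* C99 designated initializer -- rejected by C++ compilers:
     struct epoll_event ev = {.events = EPOLLIN, .data.ptr = NULL}; */

  /* Portable equivalent, matching the change above: */
  struct epoll_event ev;
  ev.events = EPOLLIN;
  ev.data.ptr = NULL;
  (void)ev;
}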
diff --git a/src/core/lib/iomgr/load_file.c b/src/core/lib/iomgr/load_file.c
index ba77a52afc..0b4d41ea4b 100644
--- a/src/core/lib/iomgr/load_file.c
+++ b/src/core/lib/iomgr/load_file.c
@@ -47,7 +47,8 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
/* Converting to size_t on the assumption that it will not fail */
contents_size = (size_t)ftell(file);
fseek(file, 0, SEEK_SET);
- contents = gpr_malloc(contents_size + (add_null_terminator ? 1 : 0));
+ contents = (unsigned char *)gpr_malloc(contents_size +
+ (add_null_terminator ? 1 : 0));
bytes_read = fread(contents, 1, contents_size, file);
if (bytes_read < contents_size) {
error = GRPC_OS_ERROR(errno, "fread");
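Many hunks here and below add explicit casts on gpr_malloc/gpr_realloc/gpr_zalloc results. The cast is redundant in C but required in C++, where void * does not convert implicitly to other pointer types; a sketch of the pattern with a hypothetical type:

#include <grpc/support/alloc.h>

typedef struct widget { int x; } widget; /* hypothetical type */

static void alloc_sketch(void) {
  widget *w_c = gpr_malloc(sizeof(*w_c));                 /* valid C only */
  widget *w_both = (widget *)gpr_malloc(sizeof(*w_both)); /* valid C and C++ */
  gpr_free(w_c);
  gpr_free(w_both);
}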
diff --git a/src/core/lib/iomgr/polling_entity.c b/src/core/lib/iomgr/polling_entity.c
index 74d8794af5..8591a5518e 100644
--- a/src/core/lib/iomgr/polling_entity.c
+++ b/src/core/lib/iomgr/polling_entity.c
@@ -25,7 +25,7 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set *pollset_set) {
grpc_polling_entity pollent;
pollent.pollent.pollset_set = pollset_set;
- pollent.tag = POPS_POLLSET_SET;
+ pollent.tag = GRPC_POLLS_POLLSET_SET;
return pollent;
}
@@ -33,12 +33,12 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset(
grpc_pollset *pollset) {
grpc_polling_entity pollent;
pollent.pollent.pollset = pollset;
- pollent.tag = POPS_POLLSET;
+ pollent.tag = GRPC_POLLS_POLLSET;
return pollent;
}
grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
- if (pollent->tag == POPS_POLLSET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET) {
return pollent->pollent.pollset;
}
return NULL;
@@ -46,23 +46,23 @@ grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
grpc_pollset_set *grpc_polling_entity_pollset_set(
grpc_polling_entity *pollent) {
- if (pollent->tag == POPS_POLLSET_SET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
return pollent->pollent.pollset_set;
}
return NULL;
}
bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent) {
- return pollent->tag == POPS_NONE;
+ return pollent->tag == GRPC_POLLS_NONE;
}
void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_pollset_set *pss_dst) {
- if (pollent->tag == POPS_POLLSET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
- } else if (pollent->tag == POPS_POLLSET_SET) {
+ } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != NULL);
grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst,
pollent->pollent.pollset_set);
@@ -75,10 +75,10 @@ void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_pollset_set *pss_dst) {
- if (pollent->tag == POPS_POLLSET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
- } else if (pollent->tag == POPS_POLLSET_SET) {
+ } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != NULL);
grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst,
pollent->pollent.pollset_set);
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
index 971fd88b42..a161e1fea6 100644
--- a/src/core/lib/iomgr/polling_entity.h
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -22,6 +22,12 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
+typedef enum grpc_pollset_tag {
+ GRPC_POLLS_NONE,
+ GRPC_POLLS_POLLSET,
+ GRPC_POLLS_POLLSET_SET
+} grpc_pollset_tag;
+
/* A grpc_polling_entity is a pollset-or-pollset_set container. It allows
* functions that accept a pollset XOR a pollset_set to do so through an
* abstract interface. No ownership is taken. */
@@ -31,7 +37,7 @@ typedef struct grpc_polling_entity {
grpc_pollset *pollset;
grpc_pollset_set *pollset_set;
} pollent;
- enum pops_tag { POPS_NONE, POPS_POLLSET, POPS_POLLSET_SET } tag;
+ grpc_pollset_tag tag;
} grpc_polling_entity;
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
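With the tag enum moved out of the struct and given a GRPC_POLLS_ prefix, it is now addressable by name outside this header. A minimal sketch of the invariants, using the accessors from the .c hunks above; pss is a hypothetical grpc_pollset_set *:

#include <grpc/support/log.h>
#include "src/core/lib/iomgr/polling_entity.h"

static void tag_sketch(grpc_pollset_set *pss) {
  grpc_polling_entity pollent =
      grpc_polling_entity_create_from_pollset_set(pss);
  GPR_ASSERT(pollent.tag == GRPC_POLLS_POLLSET_SET);
  GPR_ASSERT(grpc_polling_entity_pollset(&pollent) == NULL);    /* wrong kind */
  GPR_ASSERT(grpc_polling_entity_pollset_set(&pollent) == pss); /* right kind */
}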
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index a609a3877a..a0f6b3a9d3 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -76,7 +76,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* Break one polling thread out of polling work for this pollset.
If specific_worker is non-NULL, then kick that worker. */
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker)
GRPC_MUST_USE_RESULT;
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
index a79fe89d3e..2651325e25 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -145,7 +145,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
GRPC_UV_ASSERT_SAME_THREAD();
uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index ea017a6054..eb295d3eeb 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -98,7 +98,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
pollset->shutting_down = 1;
- grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
@@ -181,7 +181,7 @@ done:
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_pollset_kick(grpc_pollset *p,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
@@ -209,7 +209,7 @@ grpc_error *grpc_pollset_kick(grpc_pollset *p,
specific_worker =
pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
if (specific_worker != NULL) {
- grpc_pollset_kick(p, specific_worker);
+ grpc_pollset_kick(exec_ctx, p, specific_worker);
} else if (p->is_iocp_worker) {
grpc_iocp_kick();
} else {
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index 35dedc23de..60cfeebd47 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -85,7 +85,7 @@ static grpc_error *blocking_resolve_address_impl(
if (s != 0) {
/* Retry if well-known service name is recognized */
- char *svc[][2] = {{"http", "80"}, {"https", "443"}};
+ const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
if (strcmp(port, svc[i][0]) == 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
@@ -112,13 +112,14 @@ static grpc_error *blocking_resolve_address_impl(
}
/* Success path: set addrs non-NULL, fill it in */
- *addresses = gpr_malloc(sizeof(grpc_resolved_addresses));
+ *addresses =
+ (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addresses)->naddrs = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
- (*addresses)->addrs =
- gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ (*addresses)->addrs = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address) * (*addresses)->naddrs);
i = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
@@ -153,7 +154,7 @@ typedef struct {
* grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
grpc_error *error) {
- request *r = rp;
+ request *r = (request *)rp;
GRPC_CLOSURE_SCHED(
exec_ctx, r->on_done,
grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
@@ -174,9 +175,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
- request *r = gpr_malloc(sizeof(request));
+ request *r = (request *)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler);
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index 45cfd7248d..0cb0029f4e 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -159,7 +159,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_resolved_addresses **addresses) {
request *r = gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler);
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index a31d9eef93..4d69986fbc 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -22,6 +22,7 @@
#include <stdint.h>
#include <string.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
@@ -241,7 +242,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota, bool destructive);
static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) {
- grpc_resource_quota *resource_quota = rq;
+ grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
resource_quota->step_scheduled = false;
do {
if (rq_alloc(exec_ctx, resource_quota)) goto done;
@@ -380,12 +381,12 @@ typedef struct {
} ru_slice_refcount;
static void ru_slice_ref(void *p) {
- ru_slice_refcount *rc = p;
+ ru_slice_refcount *rc = (ru_slice_refcount *)p;
gpr_ref(&rc->refs);
}
static void ru_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- ru_slice_refcount *rc = p;
+ ru_slice_refcount *rc = (ru_slice_refcount *)p;
if (gpr_unref(&rc->refs)) {
grpc_resource_user_free(exec_ctx, rc->resource_user, rc->size);
gpr_free(rc);
@@ -398,7 +399,8 @@ static const grpc_slice_refcount_vtable ru_slice_vtable = {
static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
size_t size) {
- ru_slice_refcount *rc = gpr_malloc(sizeof(ru_slice_refcount) + size);
+ ru_slice_refcount *rc =
+ (ru_slice_refcount *)gpr_malloc(sizeof(ru_slice_refcount) + size);
rc->base.vtable = &ru_slice_vtable;
rc->base.sub_refcount = &rc->base;
gpr_ref_init(&rc->refs, 1);
@@ -417,7 +419,7 @@ static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
*/
static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION)) {
rq_step_sched(exec_ctx, resource_user->resource_quota);
@@ -427,7 +429,7 @@ static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
@@ -454,7 +456,7 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -469,7 +471,7 @@ static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -485,7 +487,7 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
}
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
GRPC_ERROR_CANCELLED);
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
@@ -497,7 +499,7 @@ static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
}
static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, (grpc_rulist)i);
@@ -518,7 +520,8 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_resource_user_slice_allocator *slice_allocator = arg;
+ grpc_resource_user_slice_allocator *slice_allocator =
+ (grpc_resource_user_slice_allocator *)arg;
if (error == GRPC_ERROR_NONE) {
for (size_t i = 0; i < slice_allocator->count; i++) {
grpc_slice_buffer_add_indexed(
@@ -541,7 +544,7 @@ typedef struct {
} rq_resize_args;
static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
- rq_resize_args *a = args;
+ rq_resize_args *a = (rq_resize_args *)args;
int64_t delta = a->size - a->resource_quota->size;
a->resource_quota->size += delta;
a->resource_quota->free_pool += delta;
@@ -553,7 +556,7 @@ static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
grpc_error *error) {
- grpc_resource_quota *resource_quota = rq;
+ grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
resource_quota->reclaiming = false;
rq_step_sched(exec_ctx, resource_quota);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@@ -565,7 +568,8 @@ static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
/* Public API */
grpc_resource_quota *grpc_resource_quota_create(const char *name) {
- grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
+ grpc_resource_quota *resource_quota =
+ (grpc_resource_quota *)gpr_malloc(sizeof(*resource_quota));
gpr_ref_init(&resource_quota->refs, 1);
resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
@@ -629,7 +633,7 @@ double grpc_resource_quota_get_memory_pressure(
void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
size_t size) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- rq_resize_args *a = gpr_malloc(sizeof(*a));
+ rq_resize_args *a = (rq_resize_args *)gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
a->size = (int64_t)size;
gpr_atm_no_barrier_store(&resource_quota->last_size,
@@ -653,7 +657,7 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
return grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
} else {
gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
}
@@ -663,12 +667,12 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
}
static void *rq_copy(void *rq) {
- grpc_resource_quota_ref(rq);
+ grpc_resource_quota_ref((grpc_resource_quota *)rq);
return rq;
}
static void rq_destroy(grpc_exec_ctx *exec_ctx, void *rq) {
- grpc_resource_quota_unref_internal(exec_ctx, rq);
+ grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota *)rq);
}
static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
@@ -684,7 +688,8 @@ const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
grpc_resource_user *grpc_resource_user_create(
grpc_resource_quota *resource_quota, const char *name) {
- grpc_resource_user *resource_user = gpr_malloc(sizeof(*resource_user));
+ grpc_resource_user *resource_user =
+ (grpc_resource_user *)gpr_malloc(sizeof(*resource_user));
resource_user->resource_quota =
grpc_resource_quota_ref_internal(resource_quota);
GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate,
diff --git a/src/core/lib/iomgr/socket_factory_posix.c b/src/core/lib/iomgr/socket_factory_posix.c
index 0f82dea570..8e907703ae 100644
--- a/src/core/lib/iomgr/socket_factory_posix.c
+++ b/src/core/lib/iomgr/socket_factory_posix.c
@@ -69,11 +69,11 @@ void grpc_socket_factory_unref(grpc_socket_factory *factory) {
}
static void *socket_factory_arg_copy(void *p) {
- return grpc_socket_factory_ref(p);
+ return grpc_socket_factory_ref((grpc_socket_factory *)p);
}
static void socket_factory_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_socket_factory_unref(p);
+ grpc_socket_factory_unref((grpc_socket_factory *)p);
}
static int socket_factory_cmp(void *a, void *b) {
@@ -85,8 +85,8 @@ static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp};
grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_FACTORY, factory,
- &socket_factory_arg_vtable);
+ return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_FACTORY,
+ factory, &socket_factory_arg_vtable);
}
#endif
diff --git a/src/core/lib/iomgr/socket_mutator.c b/src/core/lib/iomgr/socket_mutator.c
index 5d6c2c400e..b0435d5a07 100644
--- a/src/core/lib/iomgr/socket_mutator.c
+++ b/src/core/lib/iomgr/socket_mutator.c
@@ -60,11 +60,11 @@ void grpc_socket_mutator_unref(grpc_socket_mutator *mutator) {
}
static void *socket_mutator_arg_copy(void *p) {
- return grpc_socket_mutator_ref(p);
+ return grpc_socket_mutator_ref((grpc_socket_mutator *)p);
}
static void socket_mutator_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_socket_mutator_unref(p);
+ grpc_socket_mutator_unref((grpc_socket_mutator *)p);
}
static int socket_mutator_cmp(void *a, void *b) {
@@ -76,6 +76,6 @@ static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_MUTATOR, mutator,
- &socket_mutator_arg_vtable);
+ return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_MUTATOR,
+ mutator, &socket_mutator_arg_vtable);
}
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index a25fba4527..39dbb506e2 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -80,7 +80,8 @@ static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd,
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_SOCKET_MUTATOR)) {
GPR_ASSERT(channel_args->args[i].type == GRPC_ARG_POINTER);
- grpc_socket_mutator *mutator = channel_args->args[i].value.pointer.p;
+ grpc_socket_mutator *mutator =
+ (grpc_socket_mutator *)channel_args->args[i].value.pointer.p;
err = grpc_set_socket_with_mutator(fd, mutator);
if (err != GRPC_ERROR_NONE) goto error;
}
@@ -98,7 +99,7 @@ done:
static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
int done;
- async_connect *ac = acp;
+ async_connect *ac = (async_connect *)acp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
@@ -126,7 +127,7 @@ grpc_endpoint *grpc_tcp_client_create_from_fd(
}
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
- async_connect *ac = acp;
+ async_connect *ac = (async_connect *)acp;
int so_error = 0;
socklen_t so_error_size;
int err;
@@ -304,7 +305,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
- ac = gpr_malloc(sizeof(async_connect));
+ ac = (async_connect *)gpr_malloc(sizeof(async_connect));
ac->closure = closure;
ac->ep = ep;
ac->fd = fdobj;
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 2f543fd8a9..7e271294fd 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -40,8 +40,10 @@
#include <grpc/support/useful.h>
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@@ -66,7 +68,6 @@ typedef struct {
grpc_fd *em_fd;
int fd;
bool finished_edge;
- msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */
double target_length;
double bytes_read_this_round;
gpr_refcount refcount;
@@ -90,8 +91,8 @@ typedef struct {
grpc_closure *release_fd_cb;
int *release_fd;
- grpc_closure read_closure;
- grpc_closure write_closure;
+ grpc_closure read_done_closure;
+ grpc_closure write_done_closure;
char *peer_string;
@@ -99,6 +100,148 @@ typedef struct {
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
+typedef struct backup_poller {
+ gpr_mu *pollset_mu;
+ grpc_closure run_poller;
+} backup_poller;
+
+#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset *)((b) + 1))
+
+static gpr_atm g_uncovered_notifications_pending;
+static gpr_atm g_backup_poller; /* backup_poller* */
+
+static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
+ void *arg /* grpc_tcp */,
+ grpc_error *error);
+
+static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
+ grpc_error *error_ignored) {
+ backup_poller *p = (backup_poller *)bp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
+ }
+ grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p));
+ gpr_free(p);
+}
+
+static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
+ grpc_error *error_ignored) {
+ backup_poller *p = (backup_poller *)bp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
+ }
+ gpr_mu_lock(p->pollset_mu);
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec deadline =
+ gpr_time_add(now, gpr_time_from_seconds(10, GPR_TIMESPAN));
+ GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
+ GRPC_LOG_IF_ERROR("backup_poller:pollset_work",
+ grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL,
+ now, deadline));
+ gpr_mu_unlock(p->pollset_mu);
+  /* The last "uncovered" notification is the ref that keeps us polling; if
+   * we reach it, try a CAS to release it. */
+ if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
+ gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
+ gpr_mu_lock(p->pollset_mu);
+ bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
+ }
+ gpr_mu_unlock(p->pollset_mu);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
+ }
+ grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p),
+ GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
+ grpc_schedule_on_exec_ctx));
+ } else {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE);
+ }
+}
+
+static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller);
+ gpr_atm old_count =
+ gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, (int)old_count,
+ (int)old_count - 1);
+ }
+ GPR_ASSERT(old_count != 1);
+}
+
+static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ backup_poller *p;
+ gpr_atm old_count =
+ gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", (int)old_count,
+ 2 + (int)old_count);
+ }
+ if (old_count == 0) {
+ GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
+ p = (backup_poller *)gpr_malloc(sizeof(*p) + grpc_pollset_size());
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
+ }
+ grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
+ gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
+ grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
+ GRPC_ERROR_NONE);
+ } else {
+ while ((p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller)) == NULL) {
+ // spin waiting for backup poller
+ }
+ }
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
+ }
+ grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd);
+ if (old_count != 0) {
+ drop_uncovered(exec_ctx, tcp);
+ }
+}
+
+static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
+ }
+ GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
+ grpc_schedule_on_exec_ctx);
+ grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
+}
+
+static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
+ }
+ cover_self(exec_ctx, tcp);
+ GRPC_CLOSURE_INIT(&tcp->write_done_closure,
+ tcp_drop_uncovered_then_handle_write, tcp,
+ grpc_schedule_on_exec_ctx);
+ grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure);
+}
+
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
+ }
+ drop_uncovered(exec_ctx, (grpc_tcp *)arg);
+ tcp_handle_write(exec_ctx, arg, error);
+}
+
static void add_to_estimate(grpc_tcp *tcp, size_t bytes) {
tcp->bytes_read_this_round += (double)bytes;
}
@@ -214,6 +357,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
grpc_closure *cb = tcp->read_cb;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
@@ -239,7 +383,6 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
size_t i;
GPR_ASSERT(!tcp->finished_edge);
- GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
GPR_TIMER_BEGIN("tcp_continue_read", 0);
@@ -251,13 +394,17 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_iov = iov;
- msg.msg_iovlen = tcp->iov_size;
+ msg.msg_iovlen = (msg_iovlen_type)tcp->incoming_buffer->count;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
+ GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, tcp->incoming_buffer->length);
+ GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, tcp->incoming_buffer->count);
+
GPR_TIMER_BEGIN("recvmsg", 0);
do {
+ GRPC_STATS_INC_SYSCALL_READ(exec_ctx);
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
GPR_TIMER_END("recvmsg", read_bytes >= 0);
@@ -268,7 +415,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (errno == EAGAIN) {
finish_estimate(tcp);
/* We've consumed the edge, request a new one */
- grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
+ notify_on_read(exec_ctx, tcp);
} else {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
tcp->incoming_buffer);
@@ -285,6 +432,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
+ GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, read_bytes);
add_to_estimate(tcp, (size_t)read_bytes);
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
if ((size_t)read_bytes < tcp->incoming_buffer->length) {
@@ -303,7 +451,11 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
grpc_error *error) {
- grpc_tcp *tcp = tcpp;
+ grpc_tcp *tcp = (grpc_tcp *)tcpp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
+ grpc_error_string(error));
+ }
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
@@ -319,9 +471,15 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
+ }
grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
target_read_size, 1, tcp->incoming_buffer);
} else {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
+ }
tcp_do_read(exec_ctx, tcp);
}
}
@@ -330,6 +488,9 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
+ }
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
@@ -353,15 +514,16 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = false;
- grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
+ notify_on_read(exec_ctx, tcp);
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE);
}
}
/* returns true if done, false if pending; if returning true, *error is set */
#define MAX_WRITE_IOVEC 1000
-static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) {
+static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
+ grpc_error **error) {
struct msghdr msg;
struct iovec iov[MAX_WRITE_IOVEC];
msg_iovlen_type iov_size;
@@ -400,9 +562,13 @@ static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
+ GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, sending_length);
+ GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_size);
+
GPR_TIMER_BEGIN("sendmsg", 1);
do {
/* TODO(klempner): Cork if this is a partial write */
+ GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx);
sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS);
} while (sent_length < 0 && errno == EINTR);
GPR_TIMER_END("sendmsg", 0);
@@ -459,11 +625,11 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
return;
}
- if (!tcp_flush(tcp, &error)) {
+ if (!tcp_flush(exec_ctx, tcp, &error)) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ notify_on_write(exec_ctx, tcp);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
@@ -510,13 +676,13 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->outgoing_slice_idx = 0;
tcp->outgoing_byte_idx = 0;
- if (!tcp_flush(tcp, &error)) {
+ if (!tcp_flush(exec_ctx, tcp, &error)) {
TCP_REF(tcp, "write");
tcp->write_cb = cb;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ notify_on_write(exec_ctx, tcp);
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
@@ -593,7 +759,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
}
}
}
@@ -617,16 +783,11 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
tcp->min_read_chunk_size = tcp_min_read_chunk_size;
tcp->max_read_chunk_size = tcp_max_read_chunk_size;
tcp->bytes_read_this_round = 0;
- tcp->iov_size = 1;
tcp->finished_edge = true;
/* paired with unref in grpc_tcp_destroy */
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
- GRPC_CLOSURE_INIT(&tcp->read_closure, tcp_handle_read, tcp,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&tcp->write_closure, tcp_handle_write, tcp,
- grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(
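A note on the counter protocol behind the backup poller added above: cover_self adds 2 (one unit for the endpoint, one standing unit that keeps the poller alive) and the first coverer also creates the poller; drop_uncovered releases one unit but must never release the poller's own last unit; run_poller shuts down only when its unit is the last one. A toy single-threaded model of those transitions (the real code uses gpr_atm CAS loops and a shared pollset; the *_model names are hypothetical):

#include <assert.h>

static int pending = 0;      /* models g_uncovered_notifications_pending */
static int poller_alive = 0; /* models g_backup_poller != NULL */

static void cover_self_model(void) {
  int old = pending;
  pending += 2;                     /* my unit + (first time) poller's unit */
  if (old == 0) {
    poller_alive = 1;               /* first coverer creates the poller */
  } else {
    pending -= 1;                   /* poller already holds its unit: net +1 */
  }
}

static void drop_uncovered_model(void) {
  assert(pending != 1);             /* never drop the poller's own last unit */
  pending -= 1;
}

static void run_poller_model(void) {
  if (pending == 1) {               /* only the poller's unit remains */
    pending = 0;
    poller_alive = 0;               /* CAS succeeds: shut the poller down */
  }
}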
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 0fc5c0fd86..06612d639c 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -74,7 +74,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
grpc_tcp_server **server) {
gpr_once_init(&check_init, init);
- grpc_tcp_server *s = gpr_zalloc(sizeof(grpc_tcp_server));
+ grpc_tcp_server *s = (grpc_tcp_server *)gpr_zalloc(sizeof(grpc_tcp_server));
s->so_reuseport = has_so_reuseport;
s->expand_wildcard_addrs = false;
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
@@ -138,7 +138,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
grpc_error *error) {
- grpc_tcp_server *s = server;
+ grpc_tcp_server *s = (grpc_tcp_server *)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -197,13 +197,13 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
- grpc_tcp_listener *sp = arg;
-
+ grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
+ grpc_pollset *read_notifier_pollset;
if (err != GRPC_ERROR_NONE) {
goto error;
}
- grpc_pollset *read_notifier_pollset =
+ read_notifier_pollset =
sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
&sp->server->next_pollset_to_assign, 1) %
sp->server->pollset_count];
@@ -251,7 +251,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);
// Create acceptor.
- grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
+ grpc_tcp_server_acceptor *acceptor =
+ (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor));
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = sp->fd_index;
@@ -365,7 +366,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
listener->server->nports++;
grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
- sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = listener->next;
listener->next = sp;
/* sp (the new listener) is a sibling of 'listener' (the original
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_common.c b/src/core/lib/iomgr/tcp_server_utils_posix_common.c
index ad535bc43e..a828bee074 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_common.c
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_common.c
@@ -93,7 +93,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
gpr_mu_lock(&s->mu);
s->nports++;
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index b92b8fb8b8..ac392f87fe 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -44,6 +44,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_timespec deadline, grpc_closure *closure,
gpr_timespec now);
+/* Initialize *timer without setting it. The timer can later be passed to
+   the regular grpc_timer_init() or grpc_timer_cancel() calls. */
+void grpc_timer_init_unset(grpc_timer *timer);
+
/* Note that there is no timer destroy function. This is because the
timer is a one-time occurrence with a guarantee that the callback will
be called exactly once, either at expiration or cancellation. Thus, all
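A minimal sketch of the new entry point's intended use, assuming a hypothetical owner struct that may or may not arm its timer before teardown:

#include "src/core/lib/iomgr/timer.h"

typedef struct pending_op { /* hypothetical owner */
  grpc_timer deadline_timer;
} pending_op;

static void op_create(pending_op *op) {
  /* Mark the timer as never set, so a later cancel is a safe no-op. */
  grpc_timer_init_unset(&op->deadline_timer);
}

static void op_destroy(grpc_exec_ctx *exec_ctx, pending_op *op) {
  /* Safe whether or not grpc_timer_init() was ever called afterwards. */
  grpc_timer_cancel(exec_ctx, &op->deadline_timer);
}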
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index 12efce241f..e9a7236c8c 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -95,9 +95,7 @@ struct shared_mutables {
gpr_mu mu;
} GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);
-static struct shared_mutables g_shared_mutables = {
- .checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, .initialized = false,
-};
+static struct shared_mutables g_shared_mutables;
static gpr_clock_type g_clock_type;
static gpr_timespec g_start_time;
@@ -155,6 +153,7 @@ void grpc_timer_list_init(gpr_timespec now) {
uint32_t i;
g_shared_mutables.initialized = true;
+ g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mu_init(&g_shared_mutables.mu);
g_clock_type = now.clock_type;
g_start_time = now;
@@ -234,6 +233,8 @@ static void note_deadline_change(timer_shard *shard) {
}
}
+void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; }
+
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_timespec deadline, grpc_closure *closure,
gpr_timespec now) {
diff --git a/src/core/lib/iomgr/timer_heap.c b/src/core/lib/iomgr/timer_heap.c
index a70e3942b2..2648d5da5d 100644
--- a/src/core/lib/iomgr/timer_heap.c
+++ b/src/core/lib/iomgr/timer_heap.c
@@ -74,8 +74,8 @@ static void maybe_shrink(grpc_timer_heap *heap) {
if (heap->timer_count >= 8 &&
heap->timer_count <= heap->timer_capacity / SHRINK_FULLNESS_FACTOR / 2) {
heap->timer_capacity = heap->timer_count * SHRINK_FULLNESS_FACTOR;
- heap->timers =
- gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+ heap->timers = (grpc_timer **)gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
}
@@ -99,8 +99,8 @@ int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) {
if (heap->timer_count == heap->timer_capacity) {
heap->timer_capacity =
GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2);
- heap->timers =
- gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+ heap->timers = (grpc_timer **)gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
timer->heap_index = heap->timer_count;
adjust_upwards(heap->timers, heap->timer_count, timer);
diff --git a/src/core/lib/iomgr/timer_manager.c b/src/core/lib/iomgr/timer_manager.c
index 631f7935d9..04ca44563d 100644
--- a/src/core/lib/iomgr/timer_manager.c
+++ b/src/core/lib/iomgr/timer_manager.c
@@ -83,7 +83,7 @@ static void start_timer_thread_and_unlock(void) {
}
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
- completed_thread *ct = gpr_malloc(sizeof(*ct));
+ completed_thread *ct = (completed_thread *)gpr_malloc(sizeof(*ct));
// The call to gpr_thd_new() has to be under the same lock used by
// gc_completed_threads(), particularly due to ct->t, which is written here
// (internally by gpr_thd_new) and read there. Otherwise it's possible for ct
@@ -276,7 +276,7 @@ static void timer_thread(void *completed_thread_ptr) {
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
timer_main_loop(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
- timer_thread_cleanup(completed_thread_ptr);
+ timer_thread_cleanup((completed_thread *)completed_thread_ptr);
}
static void start_threads(void) {
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
index 70f49bcbe8..adced41f53 100644
--- a/src/core/lib/iomgr/timer_uv.c
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -77,6 +77,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
uv_unref((uv_handle_t *)uv_timer);
}
+void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = 0; }
+
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
GRPC_UV_ASSERT_SAME_THREAD();
if (timer->pending) {
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 88fa34cb7a..00b2e68bb5 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -118,14 +118,14 @@ static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) {
const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY);
if (arg) {
GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
- return arg->value.pointer.p;
+ return (grpc_socket_factory *)arg->value.pointer.p;
}
}
return NULL;
}
grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) {
- grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
+ grpc_udp_server *s = (grpc_udp_server *)gpr_malloc(sizeof(grpc_udp_server));
gpr_mu_init(&s->mu);
s->socket_factory = get_socket_factory(args);
if (s->socket_factory) {
@@ -176,7 +176,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
grpc_error *error) {
- grpc_udp_server *s = server;
+ grpc_udp_server *s = (grpc_udp_server *)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -237,7 +237,8 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
if (s->active_ports) {
for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
- struct shutdown_fd_args *args = gpr_malloc(sizeof(*args));
+ struct shutdown_fd_args *args =
+ (struct shutdown_fd_args *)gpr_malloc(sizeof(*args));
args->fd = sp->emfd;
args->server_mu = &s->mu;
GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
@@ -331,7 +332,7 @@ error:
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_udp_listener *sp = arg;
+ grpc_udp_listener *sp = (grpc_udp_listener *)arg;
gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
@@ -354,7 +355,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_udp_listener *sp = arg;
+ grpc_udp_listener *sp = (grpc_udp_listener *)arg;
gpr_mu_lock(&(sp->server->mu));
if (error != GRPC_ERROR_NONE) {
@@ -393,7 +394,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
gpr_free(addr_str);
gpr_mu_lock(&s->mu);
s->nports++;
- sp = gpr_malloc(sizeof(grpc_udp_listener));
+ sp = (grpc_udp_listener *)gpr_malloc(sizeof(grpc_udp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@@ -444,7 +445,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
(socklen_t *)&sockname_temp.len)) {
port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
- allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ allocated_addr = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
diff --git a/src/core/lib/iomgr/unix_sockets_posix.c b/src/core/lib/iomgr/unix_sockets_posix.c
index 0c8627c8c6..35f898f13a 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.c
+++ b/src/core/lib/iomgr/unix_sockets_posix.c
@@ -49,9 +49,11 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name,
gpr_free(err_msg);
return err;
}
- *addrs = gpr_malloc(sizeof(grpc_resolved_addresses));
+ *addrs =
+ (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addrs)->naddrs = 1;
- (*addrs)->addrs = gpr_malloc(sizeof(grpc_resolved_address));
+ (*addrs)->addrs =
+ (grpc_resolved_address *)gpr_malloc(sizeof(grpc_resolved_address));
un = (struct sockaddr_un *)(*addrs)->addrs->addr;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, name);
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.c b/src/core/lib/iomgr/wakeup_fd_cv.c
index 075a0b6426..268e0175dd 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.c
+++ b/src/core/lib/iomgr/wakeup_fd_cv.c
@@ -42,7 +42,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
if (!g_cvfds.free_fds) {
newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
- g_cvfds.cvfds = gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
+ g_cvfds.cvfds =
+ (fd_node*)gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
for (i = g_cvfds.size; i < newsize; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = NULL;
@@ -56,7 +57,7 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
g_cvfds.free_fds = g_cvfds.free_fds->next_free;
g_cvfds.cvfds[idx].cvs = NULL;
g_cvfds.cvfds[idx].is_set = 0;
- fd_info->read_fd = IDX_TO_FD(idx);
+ fd_info->read_fd = GRPC_IDX_TO_FD(idx);
fd_info->write_fd = -1;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
@@ -65,8 +66,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
cv_node* cvn;
gpr_mu_lock(&g_cvfds.mu);
- g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 1;
- cvn = g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs;
+ g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 1;
+ cvn = g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs;
while (cvn) {
gpr_cv_signal(cvn->cv);
cvn = cvn->next;
@@ -77,7 +78,7 @@ static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
static grpc_error* cv_fd_consume(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
- g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 0;
+ g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 0;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
@@ -88,9 +89,9 @@ static void cv_fd_destroy(grpc_wakeup_fd* fd_info) {
}
gpr_mu_lock(&g_cvfds.mu);
// Assert that there are no active pollers
- GPR_ASSERT(!g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs);
- g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
- g_cvfds.free_fds = &g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)];
+ GPR_ASSERT(!g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs);
+ g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
+ g_cvfds.free_fds = &g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)];
gpr_mu_unlock(&g_cvfds.mu);
}
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.h b/src/core/lib/iomgr/wakeup_fd_cv.h
index 46e84f5843..dc170ad5b4 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.h
+++ b/src/core/lib/iomgr/wakeup_fd_cv.h
@@ -37,8 +37,8 @@
#include "src/core/lib/iomgr/ev_posix.h"
-#define FD_TO_IDX(fd) (-(fd)-1)
-#define IDX_TO_FD(idx) (-(idx)-1)
+#define GRPC_FD_TO_IDX(fd) (-(fd)-1)
+#define GRPC_IDX_TO_FD(idx) (-(idx)-1)
typedef struct cv_node {
gpr_cv* cv;
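
The rename only adds a GRPC_ prefix (presumably to keep these very generic macro names from colliding with other headers); the mapping itself is unchanged. It folds table indices into negative fd values, which can never collide with real file descriptors, and it is its own inverse:

    #include <assert.h>

    #define GRPC_FD_TO_IDX(fd) (-(fd)-1)
    #define GRPC_IDX_TO_FD(idx) (-(idx)-1)

    int main(void) {
      assert(GRPC_IDX_TO_FD(0) == -1); /* indices 0,1,2,... -> fds -1,-2,-3,... */
      assert(GRPC_IDX_TO_FD(5) == -6);
      assert(GRPC_FD_TO_IDX(GRPC_IDX_TO_FD(7)) == 7); /* involution */
      return 0;
    }
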
diff --git a/src/core/lib/json/json.c b/src/core/lib/json/json.c
index 25eee05532..4ad51f662a 100644
--- a/src/core/lib/json/json.c
+++ b/src/core/lib/json/json.c
@@ -23,7 +23,7 @@
#include "src/core/lib/json/json.h"
grpc_json* grpc_json_create(grpc_json_type type) {
- grpc_json* json = gpr_zalloc(sizeof(*json));
+ grpc_json* json = (grpc_json*)gpr_zalloc(sizeof(*json));
json->type = type;
return json;
diff --git a/src/core/lib/json/json_string.c b/src/core/lib/json/json_string.c
index 65b5f0f482..3178d2d2b4 100644
--- a/src/core/lib/json/json_string.c
+++ b/src/core/lib/json/json_string.c
@@ -63,19 +63,19 @@ typedef struct {
* bytes at a time (or multiples thereof).
*/
static void json_writer_output_check(void *userdata, size_t needed) {
- json_writer_userdata *state = userdata;
+ json_writer_userdata *state = (json_writer_userdata *)userdata;
if (state->free_space >= needed) return;
needed -= state->free_space;
/* Round up by 256 bytes. */
needed = (needed + 0xff) & ~0xffU;
- state->output = gpr_realloc(state->output, state->allocated + needed);
+ state->output = (char *)gpr_realloc(state->output, state->allocated + needed);
state->free_space += needed;
state->allocated += needed;
}
/* These are needed by the writer's implementation. */
static void json_writer_output_char(void *userdata, char c) {
- json_writer_userdata *state = userdata;
+ json_writer_userdata *state = (json_writer_userdata *)userdata;
json_writer_output_check(userdata, 1);
state->output[state->string_len++] = c;
state->free_space--;
@@ -83,7 +83,7 @@ static void json_writer_output_char(void *userdata, char c) {
static void json_writer_output_string_with_len(void *userdata, const char *str,
size_t len) {
- json_writer_userdata *state = userdata;
+ json_writer_userdata *state = (json_writer_userdata *)userdata;
json_writer_output_check(userdata, len);
memcpy(state->output + state->string_len, str, len);
state->string_len += len;
@@ -99,7 +99,7 @@ static void json_writer_output_string(void *userdata, const char *str) {
* the end of the current string, and advance our output pointer.
*/
static void json_reader_string_clear(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
if (state->string) {
GPR_ASSERT(state->string_ptr < state->input);
*state->string_ptr++ = 0;
@@ -108,7 +108,7 @@ static void json_reader_string_clear(void *userdata) {
}
static void json_reader_string_add_char(void *userdata, uint32_t c) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
GPR_ASSERT(state->string_ptr < state->input);
GPR_ASSERT(c <= 0xff);
*state->string_ptr++ = (uint8_t)c;
@@ -149,7 +149,7 @@ static void json_reader_string_add_utf32(void *userdata, uint32_t c) {
*/
static uint32_t json_reader_read_char(void *userdata) {
uint32_t r;
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
@@ -168,7 +168,7 @@ static uint32_t json_reader_read_char(void *userdata) {
* our tree-in-progress inside our opaque structure.
*/
static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = grpc_json_create(type);
json->parent = state->current_container;
@@ -194,7 +194,7 @@ static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
}
static void json_reader_container_begins(void *userdata, grpc_json_type type) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *container;
GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT);
@@ -215,7 +215,7 @@ static void json_reader_container_begins(void *userdata, grpc_json_type type) {
*/
static grpc_json_type json_reader_container_ends(void *userdata) {
grpc_json_type container_type = GRPC_JSON_TOP_LEVEL;
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
GPR_ASSERT(state->current_container);
@@ -236,18 +236,18 @@ static grpc_json_type json_reader_container_ends(void *userdata) {
* We'll keep it as a string, and leave it to the caller to evaluate it.
*/
static void json_reader_set_key(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
state->key = state->string;
}
static void json_reader_set_string(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = json_create_and_link(userdata, GRPC_JSON_STRING);
json->value = (char *)state->string;
}
static int json_reader_set_number(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
json->value = (char *)state->string;
return 1;
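
json_writer_output_check grows the buffer in 256-byte steps: adding 0xff and then clearing the low eight bits rounds the request up to the next multiple of 256. A standalone check of the arithmetic (this sketch widens the mask to size_t before complementing; the ~0xffU in the code above is an unsigned-int mask, harmless at these buffer sizes but it would truncate a hypothetical >4 GiB request):

    #include <assert.h>
    #include <stddef.h>

    static size_t round_up_256(size_t needed) {
      return (needed + 0xff) & ~(size_t)0xff; /* round up to multiple of 256 */
    }

    int main(void) {
      assert(round_up_256(1) == 256);
      assert(round_up_256(256) == 256); /* exact multiples pass through */
      assert(round_up_256(257) == 512);
      return 0;
    }
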
diff --git a/src/core/lib/profiling/timers.h b/src/core/lib/profiling/timers.h
index 58e6659e6d..7f02b4bf84 100644
--- a/src/core/lib/profiling/timers.h
+++ b/src/core/lib/profiling/timers.h
@@ -37,7 +37,8 @@ void gpr_timers_set_log_filename(const char *filename);
void gpr_timer_set_enabled(int enabled);
-#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
+#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER) + \
+ defined(GRPC_CUSTOM_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \
do { \
@@ -56,6 +57,12 @@ void gpr_timer_set_enabled(int enabled);
#if defined(GRPC_STAP_PROFILER) && defined(GRPC_BASIC_PROFILER)
#error "GRPC_STAP_PROFILER and GRPC_BASIC_PROFILER are mutually exclusive."
#endif
+#if defined(GRPC_STAP_PROFILER) && defined(GRPC_CUSTOM_PROFILER)
+#error "GRPC_STAP_PROFILER and GRPC_CUSTOM_PROFILER are mutually exclusive."
+#endif
+#if defined(GRPC_CUSTOM_PROFILER) && defined(GRPC_BASIC_PROFILER)
+#error "GRPC_CUSTOM_PROFILER and GRPC_BASIC_PROFILER are mutually exclusive."
+#endif
/* Generic profiling interface. */
#define GPR_TIMER_MARK(tag, important) \
@@ -80,22 +87,25 @@ void gpr_timer_set_enabled(int enabled);
#ifdef __cplusplus
}
-#if (defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
+#if (defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER) + \
+ defined(GRPC_CUSTOM_PROFILER))
namespace grpc {
class ProfileScope {
public:
- ProfileScope(const char *desc, bool important) : desc_(desc) {
- GPR_TIMER_BEGIN(desc_, important ? 1 : 0);
+ ProfileScope(const char *desc, bool important, const char *file, int line)
+ : desc_(desc) {
+ gpr_timer_begin(desc_, important ? 1 : 0, file, line);
}
- ~ProfileScope() { GPR_TIMER_END(desc_, 0); }
+ ~ProfileScope() { gpr_timer_end(desc_, 0, "n/a", 0); }
private:
const char *const desc_;
};
-}
+} // namespace grpc
-#define GPR_TIMER_SCOPE(tag, important) \
- ::grpc::ProfileScope _profile_scope_##__LINE__((tag), (important))
+#define GPR_TIMER_SCOPE(tag, important) \
+ ::grpc::ProfileScope _profile_scope_##__LINE__((tag), (important), __FILE__, \
+ __LINE__)
#else
#define GPR_TIMER_SCOPE(tag, important) \
do { \
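
GRPC_CUSTOM_PROFILER joins the STAP and BASIC backends as a third, mutually exclusive profiler selection, and ProfileScope now calls gpr_timer_begin/gpr_timer_end directly so the macro expansion site's __FILE__/__LINE__ reach the backend. A sketch of what a custom backend might supply; the signatures are inferred from the ProfileScope calls above and may not match the full hook set:

    #include <stdio.h>

    void gpr_timer_begin(const char *tag, int important, const char *file,
                         int line) {
      fprintf(stderr, "BEGIN %s (%s:%d)%s\n", tag, file, line,
              important ? " [important]" : "");
    }

    void gpr_timer_end(const char *tag, int important, const char *file,
                       int line) {
      (void)important;
      fprintf(stderr, "END   %s (%s:%d)\n", tag, file, line);
    }
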
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index a2a8e289ee..691d66df69 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -79,7 +79,8 @@ static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx,
detector->is_done = 1;
GRPC_LOG_IF_ERROR(
"Pollset kick",
- grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent), NULL));
+ grpc_pollset_kick(exec_ctx,
+ grpc_polling_entity_pollset(&detector->pollent), NULL));
gpr_mu_unlock(g_polling_mu);
}
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index 531a88434f..dd7dd44e79 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -39,6 +39,8 @@
/* We can have a per-call credentials. */
typedef struct {
+ grpc_call_stack *owning_call;
+ grpc_call_combiner *call_combiner;
grpc_call_credentials *creds;
bool have_host;
bool have_method;
@@ -49,17 +51,12 @@ typedef struct {
pollset_set so that work can progress when this call wants work to progress
*/
grpc_polling_entity *pollent;
- gpr_atm security_context_set;
- gpr_mu security_context_mu;
grpc_credentials_mdelem_array md_array;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
- grpc_closure closure;
- // Either 0 (no cancellation and no async operation in flight),
- // a grpc_closure* (if the lowest bit is 0),
- // or a grpc_error* (if the lowest bit is 1).
- gpr_atm cancellation_state;
- grpc_closure cancel_closure;
+ grpc_closure async_result_closure;
+ grpc_closure check_call_host_cancel_closure;
+ grpc_closure get_request_metadata_cancel_closure;
} call_data;
/* We can have a per-channel credentials. */
@@ -68,43 +65,6 @@ typedef struct {
grpc_auth_context *auth_context;
} channel_data;
-static void decode_cancel_state(gpr_atm cancel_state, grpc_closure **func,
- grpc_error **error) {
- // If the lowest bit is 1, the value is a grpc_error*.
- // Otherwise, if non-zero, the value is a grpc_closure*.
- if (cancel_state & 1) {
- *error = (grpc_error *)(cancel_state & ~(gpr_atm)1);
- } else if (cancel_state != 0) {
- *func = (grpc_closure *)cancel_state;
- }
-}
-
-static gpr_atm encode_cancel_state_error(grpc_error *error) {
- // Set the lowest bit to 1 to indicate that it's an error.
- return (gpr_atm)1 | (gpr_atm)error;
-}
-
-// Returns an error if the call has been cancelled. Otherwise, sets the
-// cancellation function to be called upon cancellation.
-static grpc_error *set_cancel_func(grpc_call_element *elem,
- grpc_iomgr_cb_func func) {
- call_data *calld = (call_data *)elem->call_data;
- // Decode original state.
- gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *original_error = GRPC_ERROR_NONE;
- grpc_closure *original_func = NULL;
- decode_cancel_state(original_state, &original_func, &original_error);
- // If error is set, return it.
- if (original_error != GRPC_ERROR_NONE) return GRPC_ERROR_REF(original_error);
- // Otherwise, store func.
- GRPC_CLOSURE_INIT(&calld->cancel_closure, func, elem,
- grpc_schedule_on_exec_ctx);
- GPR_ASSERT(((gpr_atm)&calld->cancel_closure & (gpr_atm)1) == 0);
- gpr_atm_rel_store(&calld->cancellation_state,
- (gpr_atm)&calld->cancel_closure);
- return GRPC_ERROR_NONE;
-}
-
static void reset_auth_metadata_context(
grpc_auth_metadata_context *auth_md_context) {
if (auth_md_context->service_url != NULL) {
@@ -153,7 +113,8 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
} else {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAUTHENTICATED);
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error);
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+ calld->call_combiner);
}
}
@@ -191,8 +152,12 @@ static void cancel_get_request_metadata(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- grpc_call_credentials_cancel_get_request_metadata(
- exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
+ if (error != GRPC_ERROR_NONE) {
+ grpc_call_credentials_cancel_get_request_metadata(
+ exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+ "cancel_get_request_metadata");
}
static void send_security_metadata(grpc_exec_ctx *exec_ctx,
@@ -223,7 +188,8 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Incompatible credentials set on channel and call."),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED));
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED),
+ calld->call_combiner);
return;
}
} else {
@@ -234,22 +200,25 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
build_auth_metadata_context(&chand->security_connector->base,
chand->auth_context, calld);
- grpc_error *cancel_error = set_cancel_func(elem, cancel_get_request_metadata);
- if (cancel_error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- cancel_error);
- return;
- }
GPR_ASSERT(calld->pollent != NULL);
- GRPC_CLOSURE_INIT(&calld->closure, on_credentials_metadata, batch,
- grpc_schedule_on_exec_ctx);
+
+ GRPC_CLOSURE_INIT(&calld->async_result_closure, on_credentials_metadata,
+ batch, grpc_schedule_on_exec_ctx);
grpc_error *error = GRPC_ERROR_NONE;
if (grpc_call_credentials_get_request_metadata(
exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
- &calld->md_array, &calld->closure, &error)) {
+ &calld->md_array, &calld->async_result_closure, &error)) {
// Synchronous return; invoke on_credentials_metadata() directly.
on_credentials_metadata(exec_ctx, batch, error);
GRPC_ERROR_UNREF(error);
+ } else {
+ // Async return; register cancellation closure with call combiner.
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata");
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure,
+ cancel_get_request_metadata, elem,
+ grpc_schedule_on_exec_ctx));
}
}
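
This is the new cancellation shape used throughout the filter: on the synchronous path no cleanup is registered, while on the async path the filter refs the call stack and hands the call combiner a closure to run on cancellation. The callbacks above treat GRPC_ERROR_NONE as "no longer needed": they skip the cancel action but still drop their ref. A toy single-threaded model of those semantics (the real grpc_call_combiner is thread-safe and schedules closures on the exec_ctx):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct {
      void (*cb)(void *arg, int cancelled);
      void *arg;
    } closure;

    typedef struct {
      closure *notify_on_cancel;
      int cancelled;
    } mini_call_combiner;

    /* Register (or replace) the cancellation closure. A superseded closure
       fires with cancelled == 0 so it can drop its ref; if the call is
       already cancelled, the new closure fires immediately. */
    static void set_notify_on_cancel(mini_call_combiner *cc, closure *c) {
      closure *old = cc->notify_on_cancel;
      cc->notify_on_cancel = c;
      if (old != NULL) old->cb(old->arg, 0);
      if (cc->cancelled && c != NULL) {
        cc->notify_on_cancel = NULL;
        c->cb(c->arg, 1);
      }
    }

    static void cancel(mini_call_combiner *cc) {
      closure *c = cc->notify_on_cancel;
      cc->cancelled = 1;
      cc->notify_on_cancel = NULL;
      if (c != NULL) c->cb(c->arg, 1);
    }

    static void on_cancel(void *arg, int cancelled) {
      printf("%s: cancelled=%d\n", (const char *)arg, cancelled);
    }

    int main(void) {
      mini_call_combiner cc = {NULL, 0};
      closure c = {on_cancel, (void *)"get_request_metadata"};
      set_notify_on_cancel(&cc, &c);
      cancel(&cc); /* closure runs with a real cancellation */
      return 0;
    }
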
@@ -258,7 +227,6 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
grpc_call_element *elem = batch->handler_private.extra_arg;
call_data *calld = elem->call_data;
-
if (error == GRPC_ERROR_NONE) {
send_security_metadata(exec_ctx, elem, batch);
} else {
@@ -271,7 +239,8 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, batch,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg),
GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAUTHENTICATED));
+ GRPC_STATUS_UNAUTHENTICATED),
+ calld->call_combiner);
gpr_free(error_msg);
}
}
@@ -281,9 +250,12 @@ static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
- grpc_channel_security_connector_cancel_check_call_host(
- exec_ctx, chand->security_connector, &calld->closure,
- GRPC_ERROR_REF(error));
+ if (error != GRPC_ERROR_NONE) {
+ grpc_channel_security_connector_cancel_check_call_host(
+ exec_ctx, chand->security_connector, &calld->async_result_closure,
+ GRPC_ERROR_REF(error));
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_check_call_host");
}
static void auth_start_transport_stream_op_batch(
@@ -295,52 +267,19 @@ static void auth_start_transport_stream_op_batch(
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- if (batch->cancel_stream) {
- while (true) {
- // Decode the original cancellation state.
- gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *cancel_error = GRPC_ERROR_NONE;
- grpc_closure *func = NULL;
- decode_cancel_state(original_state, &func, &cancel_error);
- // If we had already set a cancellation error, there's nothing
- // more to do.
- if (cancel_error != GRPC_ERROR_NONE) break;
- // If there's a cancel func, call it.
- // Note that even if the cancel func has been changed by some
- // other thread between when we decoded it and now, it will just
- // be a no-op.
- cancel_error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
- if (func != NULL) {
- GRPC_CLOSURE_SCHED(exec_ctx, func, GRPC_ERROR_REF(cancel_error));
- }
- // Encode the new error into cancellation state.
- if (gpr_atm_full_cas(&calld->cancellation_state, original_state,
- encode_cancel_state_error(cancel_error))) {
- break; // Success.
- }
- // The cas failed, so try again.
- }
- } else {
- /* double checked lock over security context to ensure it's set once */
- if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
- gpr_mu_lock(&calld->security_context_mu);
- if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
- GPR_ASSERT(batch->payload->context != NULL);
- if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
- batch->payload->context[GRPC_CONTEXT_SECURITY].value =
- grpc_client_security_context_create();
- batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
- grpc_client_security_context_destroy;
- }
- grpc_client_security_context *sec_ctx =
- batch->payload->context[GRPC_CONTEXT_SECURITY].value;
- GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
- sec_ctx->auth_context =
- GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
- gpr_atm_rel_store(&calld->security_context_set, 1);
- }
- gpr_mu_unlock(&calld->security_context_mu);
+ if (!batch->cancel_stream) {
+ GPR_ASSERT(batch->payload->context != NULL);
+ if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
+ batch->payload->context[GRPC_CONTEXT_SECURITY].value =
+ grpc_client_security_context_create();
+ batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
+ grpc_client_security_context_destroy;
}
+ grpc_client_security_context *sec_ctx =
+ batch->payload->context[GRPC_CONTEXT_SECURITY].value;
+ GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
+ sec_ctx->auth_context =
+ GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
}
if (batch->send_initial_metadata) {
@@ -365,26 +304,27 @@ static void auth_start_transport_stream_op_batch(
}
}
if (calld->have_host) {
- grpc_error *cancel_error = set_cancel_func(elem, cancel_check_call_host);
- if (cancel_error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- cancel_error);
+ batch->handler_private.extra_arg = elem;
+ GRPC_CLOSURE_INIT(&calld->async_result_closure, on_host_checked, batch,
+ grpc_schedule_on_exec_ctx);
+ char *call_host = grpc_slice_to_c_string(calld->host);
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (grpc_channel_security_connector_check_call_host(
+ exec_ctx, chand->security_connector, call_host,
+ chand->auth_context, &calld->async_result_closure, &error)) {
+ // Synchronous return; invoke on_host_checked() directly.
+ on_host_checked(exec_ctx, batch, error);
+ GRPC_ERROR_UNREF(error);
} else {
- char *call_host = grpc_slice_to_c_string(calld->host);
- batch->handler_private.extra_arg = elem;
- grpc_error *error = GRPC_ERROR_NONE;
- if (grpc_channel_security_connector_check_call_host(
- exec_ctx, chand->security_connector, call_host,
- chand->auth_context,
- GRPC_CLOSURE_INIT(&calld->closure, on_host_checked, batch,
- grpc_schedule_on_exec_ctx),
- &error)) {
- // Synchronous return; invoke on_host_checked() directly.
- on_host_checked(exec_ctx, batch, error);
- GRPC_ERROR_UNREF(error);
- }
- gpr_free(call_host);
+ // Async return; register cancellation closure with call combiner.
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host");
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure,
+ cancel_check_call_host, elem,
+ grpc_schedule_on_exec_ctx));
}
+ gpr_free(call_host);
GPR_TIMER_END("auth_start_transport_stream_op_batch", 0);
return; /* early exit */
}
@@ -400,8 +340,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
- memset(calld, 0, sizeof(*calld));
- gpr_mu_init(&calld->security_context_mu);
+ calld->owning_call = args->call_stack;
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
@@ -426,12 +366,6 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_unref_internal(exec_ctx, calld->method);
}
reset_auth_metadata_context(&calld->auth_md_context);
- gpr_mu_destroy(&calld->security_context_mu);
- gpr_atm cancel_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *cancel_error = GRPC_ERROR_NONE;
- grpc_closure *cancel_func = NULL;
- decode_cancel_state(cancel_state, &cancel_func, &cancel_error);
- GRPC_ERROR_UNREF(cancel_error);
}
/* Constructor for channel_data */
@@ -490,6 +424,5 @@ const grpc_channel_filter grpc_client_auth_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client-auth"};
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index 5e41b94ff8..ae5633b82c 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -34,7 +34,7 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
-#include "src/core/tsi/transport_security_interface.h"
+#include "src/core/tsi/transport_security_grpc.h"
#define STAGING_BUFFER_SIZE 8192
@@ -42,6 +42,7 @@ typedef struct {
grpc_endpoint base;
grpc_endpoint *wrapped_ep;
struct tsi_frame_protector *protector;
+ struct tsi_zero_copy_grpc_protector *zero_copy_protector;
gpr_mu protector_mu;
/* saved upper level callbacks and user_data. */
grpc_closure *read_cb;
@@ -67,6 +68,7 @@ static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
secure_endpoint *ep = secure_ep;
grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
tsi_frame_protector_destroy(ep->protector);
+ tsi_zero_copy_grpc_protector_destroy(exec_ctx, ep->zero_copy_protector);
grpc_slice_buffer_destroy_internal(exec_ctx, &ep->leftover_bytes);
grpc_slice_unref_internal(exec_ctx, ep->read_staging_buffer);
grpc_slice_unref_internal(exec_ctx, ep->write_staging_buffer);
@@ -159,51 +161,58 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
return;
}
- /* TODO(yangg) check error, maybe bail out early */
- for (i = 0; i < ep->source_buffer.count; i++) {
- grpc_slice encrypted = ep->source_buffer.slices[i];
- uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted);
- size_t message_size = GRPC_SLICE_LENGTH(encrypted);
-
- while (message_size > 0 || keep_looping) {
- size_t unprotected_buffer_size_written = (size_t)(end - cur);
- size_t processed_message_size = message_size;
- gpr_mu_lock(&ep->protector_mu);
- result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
- &processed_message_size, cur,
- &unprotected_buffer_size_written);
- gpr_mu_unlock(&ep->protector_mu);
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Decryption error: %s",
- tsi_result_to_string(result));
- break;
- }
- message_bytes += processed_message_size;
- message_size -= processed_message_size;
- cur += unprotected_buffer_size_written;
-
- if (cur == end) {
- flush_read_staging_buffer(ep, &cur, &end);
- /* Force to enter the loop again to extract buffered bytes in protector.
- The bytes could be buffered because of running out of staging_buffer.
- If this happens at the end of all slices, doing another unprotect
- avoids leaving data in the protector. */
- keep_looping = 1;
- } else if (unprotected_buffer_size_written > 0) {
- keep_looping = 1;
- } else {
- keep_looping = 0;
+ if (ep->zero_copy_protector != NULL) {
+ // Use zero-copy grpc protector to unprotect.
+ result = tsi_zero_copy_grpc_protector_unprotect(
+ exec_ctx, ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer);
+ } else {
+ // Use frame protector to unprotect.
+ /* TODO(yangg) check error, maybe bail out early */
+ for (i = 0; i < ep->source_buffer.count; i++) {
+ grpc_slice encrypted = ep->source_buffer.slices[i];
+ uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted);
+ size_t message_size = GRPC_SLICE_LENGTH(encrypted);
+
+ while (message_size > 0 || keep_looping) {
+ size_t unprotected_buffer_size_written = (size_t)(end - cur);
+ size_t processed_message_size = message_size;
+ gpr_mu_lock(&ep->protector_mu);
+ result = tsi_frame_protector_unprotect(
+ ep->protector, message_bytes, &processed_message_size, cur,
+ &unprotected_buffer_size_written);
+ gpr_mu_unlock(&ep->protector_mu);
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Decryption error: %s",
+ tsi_result_to_string(result));
+ break;
+ }
+ message_bytes += processed_message_size;
+ message_size -= processed_message_size;
+ cur += unprotected_buffer_size_written;
+
+ if (cur == end) {
+ flush_read_staging_buffer(ep, &cur, &end);
+          /* Force another pass through the loop to extract bytes buffered in
+             the protector. Bytes can remain buffered when staging_buffer runs
+             out; if this happens at the end of all slices, doing another
+             unprotect avoids leaving data in the protector. */
+ keep_looping = 1;
+ } else if (unprotected_buffer_size_written > 0) {
+ keep_looping = 1;
+ } else {
+ keep_looping = 0;
+ }
}
+ if (result != TSI_OK) break;
}
- if (result != TSI_OK) break;
- }
- if (cur != GRPC_SLICE_START_PTR(ep->read_staging_buffer)) {
- grpc_slice_buffer_add(
- ep->read_buffer,
- grpc_slice_split_head(
- &ep->read_staging_buffer,
- (size_t)(cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer))));
+ if (cur != GRPC_SLICE_START_PTR(ep->read_staging_buffer)) {
+ grpc_slice_buffer_add(
+ ep->read_buffer,
+ grpc_slice_split_head(
+ &ep->read_staging_buffer,
+ (size_t)(cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer))));
+ }
}
/* TODO(yangg) experiment with moving this block after read_cb to see if it
@@ -270,54 +279,62 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
}
}
- for (i = 0; i < slices->count; i++) {
- grpc_slice plain = slices->slices[i];
- uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain);
- size_t message_size = GRPC_SLICE_LENGTH(plain);
- while (message_size > 0) {
- size_t protected_buffer_size_to_send = (size_t)(end - cur);
- size_t processed_message_size = message_size;
- gpr_mu_lock(&ep->protector_mu);
- result = tsi_frame_protector_protect(ep->protector, message_bytes,
- &processed_message_size, cur,
- &protected_buffer_size_to_send);
- gpr_mu_unlock(&ep->protector_mu);
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Encryption error: %s",
- tsi_result_to_string(result));
- break;
- }
- message_bytes += processed_message_size;
- message_size -= processed_message_size;
- cur += protected_buffer_size_to_send;
-
- if (cur == end) {
- flush_write_staging_buffer(ep, &cur, &end);
+ if (ep->zero_copy_protector != NULL) {
+ // Use zero-copy grpc protector to protect.
+ result = tsi_zero_copy_grpc_protector_protect(
+ exec_ctx, ep->zero_copy_protector, slices, &ep->output_buffer);
+ } else {
+ // Use frame protector to protect.
+ for (i = 0; i < slices->count; i++) {
+ grpc_slice plain = slices->slices[i];
+ uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain);
+ size_t message_size = GRPC_SLICE_LENGTH(plain);
+ while (message_size > 0) {
+ size_t protected_buffer_size_to_send = (size_t)(end - cur);
+ size_t processed_message_size = message_size;
+ gpr_mu_lock(&ep->protector_mu);
+ result = tsi_frame_protector_protect(ep->protector, message_bytes,
+ &processed_message_size, cur,
+ &protected_buffer_size_to_send);
+ gpr_mu_unlock(&ep->protector_mu);
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Encryption error: %s",
+ tsi_result_to_string(result));
+ break;
+ }
+ message_bytes += processed_message_size;
+ message_size -= processed_message_size;
+ cur += protected_buffer_size_to_send;
+
+ if (cur == end) {
+ flush_write_staging_buffer(ep, &cur, &end);
+ }
}
- }
- if (result != TSI_OK) break;
- }
- if (result == TSI_OK) {
- size_t still_pending_size;
- do {
- size_t protected_buffer_size_to_send = (size_t)(end - cur);
- gpr_mu_lock(&ep->protector_mu);
- result = tsi_frame_protector_protect_flush(ep->protector, cur,
- &protected_buffer_size_to_send,
- &still_pending_size);
- gpr_mu_unlock(&ep->protector_mu);
if (result != TSI_OK) break;
- cur += protected_buffer_size_to_send;
- if (cur == end) {
- flush_write_staging_buffer(ep, &cur, &end);
+ }
+ if (result == TSI_OK) {
+ size_t still_pending_size;
+ do {
+ size_t protected_buffer_size_to_send = (size_t)(end - cur);
+ gpr_mu_lock(&ep->protector_mu);
+ result = tsi_frame_protector_protect_flush(
+ ep->protector, cur, &protected_buffer_size_to_send,
+ &still_pending_size);
+ gpr_mu_unlock(&ep->protector_mu);
+ if (result != TSI_OK) break;
+ cur += protected_buffer_size_to_send;
+ if (cur == end) {
+ flush_write_staging_buffer(ep, &cur, &end);
+ }
+ } while (still_pending_size > 0);
+ if (cur != GRPC_SLICE_START_PTR(ep->write_staging_buffer)) {
+ grpc_slice_buffer_add(
+ &ep->output_buffer,
+ grpc_slice_split_head(
+ &ep->write_staging_buffer,
+ (size_t)(cur -
+ GRPC_SLICE_START_PTR(ep->write_staging_buffer))));
}
- } while (still_pending_size > 0);
- if (cur != GRPC_SLICE_START_PTR(ep->write_staging_buffer)) {
- grpc_slice_buffer_add(
- &ep->output_buffer,
- grpc_slice_split_head(
- &ep->write_staging_buffer,
- (size_t)(cur - GRPC_SLICE_START_PTR(ep->write_staging_buffer))));
}
}
@@ -389,13 +406,16 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_get_fd};
grpc_endpoint *grpc_secure_endpoint_create(
- struct tsi_frame_protector *protector, grpc_endpoint *transport,
- grpc_slice *leftover_slices, size_t leftover_nslices) {
+ struct tsi_frame_protector *protector,
+ struct tsi_zero_copy_grpc_protector *zero_copy_protector,
+ grpc_endpoint *transport, grpc_slice *leftover_slices,
+ size_t leftover_nslices) {
size_t i;
secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint));
ep->base.vtable = &vtable;
ep->wrapped_ep = transport;
ep->protector = protector;
+ ep->zero_copy_protector = zero_copy_protector;
grpc_slice_buffer_init(&ep->leftover_bytes);
for (i = 0; i < leftover_nslices; i++) {
grpc_slice_buffer_add(&ep->leftover_bytes,
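
Both the read and write paths now share the same dispatch: when a zero-copy protector is present it transforms the whole slice buffer in one call and the staging-buffer machinery is bypassed entirely; otherwise the original per-slice copy loop runs. A compact sketch of that dispatch with stub types (the real calls are the tsi_* functions above):

    #include <stdio.h>

    typedef struct { const char *name; } protector;

    static void protect(const protector *zero_copy, const protector *frame) {
      if (zero_copy != NULL) {
        /* One call over the whole slice buffer; no staging copies. */
        printf("zero-copy path: %s\n", zero_copy->name);
      } else {
        /* Byte-by-byte through the 8192-byte staging buffer. */
        printf("frame path: %s\n", frame->name);
      }
    }

    int main(void) {
      protector zc = {"tsi_zero_copy_grpc_protector_protect"};
      protector fp = {"tsi_frame_protector_protect"};
      protect(&zc, &fp);
      protect(NULL, &fp);
      return 0;
    }
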
diff --git a/src/core/lib/security/transport/secure_endpoint.h b/src/core/lib/security/transport/secure_endpoint.h
index 1c5555f3df..3323a6ff42 100644
--- a/src/core/lib/security/transport/secure_endpoint.h
+++ b/src/core/lib/security/transport/secure_endpoint.h
@@ -23,12 +23,17 @@
#include "src/core/lib/iomgr/endpoint.h"
struct tsi_frame_protector;
+struct tsi_zero_copy_grpc_protector;
extern grpc_tracer_flag grpc_trace_secure_endpoint;
-/* Takes ownership of protector and to_wrap, and refs leftover_slices. */
+/* Takes ownership of protector, zero_copy_protector, and to_wrap, and refs
+ * leftover_slices. If zero_copy_protector is not NULL, protector will never be
+ * used. */
grpc_endpoint *grpc_secure_endpoint_create(
- struct tsi_frame_protector *protector, grpc_endpoint *to_wrap,
- grpc_slice *leftover_slices, size_t leftover_nslices);
+ struct tsi_frame_protector *protector,
+ struct tsi_zero_copy_grpc_protector *zero_copy_protector,
+ grpc_endpoint *to_wrap, grpc_slice *leftover_slices,
+ size_t leftover_nslices);
#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H */
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index a7568b995f..2a9e939d40 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -455,14 +455,14 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create(
typedef struct {
grpc_channel_security_connector base;
- tsi_ssl_client_handshaker_factory *handshaker_factory;
+ tsi_ssl_client_handshaker_factory *client_handshaker_factory;
char *target_name;
char *overridden_target_name;
} grpc_ssl_channel_security_connector;
typedef struct {
grpc_server_security_connector base;
- tsi_ssl_server_handshaker_factory *handshaker_factory;
+ tsi_ssl_server_handshaker_factory *server_handshaker_factory;
} grpc_ssl_server_security_connector;
static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
@@ -470,9 +470,8 @@ static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
- if (c->handshaker_factory != NULL) {
- tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
- }
+ tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory);
+ c->client_handshaker_factory = NULL;
if (c->target_name != NULL) gpr_free(c->target_name);
if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name);
gpr_free(sc);
@@ -482,9 +481,8 @@ static void ssl_server_destroy(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
- if (c->handshaker_factory != NULL) {
- tsi_ssl_server_handshaker_factory_destroy(c->handshaker_factory);
- }
+ tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory);
+ c->server_handshaker_factory = NULL;
gpr_free(sc);
}
@@ -496,7 +494,7 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
- c->handshaker_factory,
+ c->client_handshaker_factory,
c->overridden_target_name != NULL ? c->overridden_target_name
: c->target_name,
&tsi_hs);
@@ -521,7 +519,7 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
- c->handshaker_factory, &tsi_hs);
+ c->server_handshaker_factory, &tsi_hs);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
@@ -852,7 +850,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
result = tsi_create_ssl_client_handshaker_factory(
has_key_cert_pair ? &config->pem_key_cert_pair : NULL, pem_root_certs,
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
- &c->handshaker_factory);
+ &c->client_handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
@@ -897,7 +895,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
config->pem_root_certs, get_tsi_client_certificate_request_type(
config->client_certificate_request),
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
- &c->handshaker_factory);
+ &c->server_handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index fc9c9f980f..3d19605617 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -32,6 +32,7 @@
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/tsi_error.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/tsi/transport_security_grpc.h"
#define GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE 256
@@ -127,24 +128,36 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
}
-static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- security_handshaker *h = arg;
- gpr_mu_lock(&h->mu);
+static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
+ security_handshaker *h, grpc_error *error) {
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
- goto done;
+ return;
}
- // Create frame protector.
- tsi_frame_protector *protector;
- tsi_result result = tsi_handshaker_result_create_frame_protector(
- h->handshaker_result, NULL, &protector);
- if (result != TSI_OK) {
+ // Create zero-copy frame protector, if implemented.
+ tsi_zero_copy_grpc_protector *zero_copy_protector = NULL;
+ tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector(
+ exec_ctx, h->handshaker_result, NULL, &zero_copy_protector);
+ if (result != TSI_OK && result != TSI_UNIMPLEMENTED) {
error = grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Frame protector creation failed"),
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Zero-copy frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
- goto done;
+ return;
+ }
+ // Create frame protector if zero-copy frame protector is NULL.
+ tsi_frame_protector *protector = NULL;
+ if (zero_copy_protector == NULL) {
+ result = tsi_handshaker_result_create_frame_protector(h->handshaker_result,
+ NULL, &protector);
+ if (result != TSI_OK) {
+ error = grpc_set_tsi_error_result(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Frame protector creation failed"),
+ result);
+ security_handshake_failed_locked(exec_ctx, h, error);
+ return;
+ }
}
// Get unused bytes.
const unsigned char *unused_bytes = NULL;
@@ -155,12 +168,12 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
if (unused_bytes_size > 0) {
grpc_slice slice =
grpc_slice_from_copied_buffer((char *)unused_bytes, unused_bytes_size);
- h->args->endpoint =
- grpc_secure_endpoint_create(protector, h->args->endpoint, &slice, 1);
+ h->args->endpoint = grpc_secure_endpoint_create(
+ protector, zero_copy_protector, h->args->endpoint, &slice, 1);
grpc_slice_unref_internal(exec_ctx, slice);
} else {
- h->args->endpoint =
- grpc_secure_endpoint_create(protector, h->args->endpoint, NULL, 0);
+ h->args->endpoint = grpc_secure_endpoint_create(
+ protector, zero_copy_protector, h->args->endpoint, NULL, 0);
}
tsi_handshaker_result_destroy(h->handshaker_result);
h->handshaker_result = NULL;
@@ -177,7 +190,13 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
h->shutdown = true;
-done:
+}
+
+static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ security_handshaker *h = (security_handshaker *)arg;
+ gpr_mu_lock(&h->mu);
+ on_peer_checked_inner(exec_ctx, h, error);
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
}
@@ -239,7 +258,7 @@ static grpc_error *on_handshake_next_done_locked(
static void on_handshake_next_done_grpc_wrapper(
tsi_result result, void *user_data, const unsigned char *bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) {
- security_handshaker *h = user_data;
+ security_handshaker *h = (security_handshaker *)user_data;
// This callback will be invoked by TSI in a non-grpc thread, so it's
// safe to create our own exec_ctx here.
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -281,7 +300,7 @@ static grpc_error *do_handshaker_next_locked(
static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- security_handshaker *h = arg;
+ security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@@ -298,7 +317,8 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
bytes_received_size += GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]);
}
if (bytes_received_size > h->handshake_buffer_size) {
- h->handshake_buffer = gpr_realloc(h->handshake_buffer, bytes_received_size);
+ h->handshake_buffer =
+ (uint8_t *)gpr_realloc(h->handshake_buffer, bytes_received_size);
h->handshake_buffer_size = bytes_received_size;
}
size_t offset = 0;
@@ -323,7 +343,7 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- security_handshaker *h = arg;
+ security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@@ -400,14 +420,15 @@ static const grpc_handshaker_vtable security_handshaker_vtable = {
static grpc_handshaker *security_handshaker_create(
grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
grpc_security_connector *connector) {
- security_handshaker *h = gpr_zalloc(sizeof(security_handshaker));
+ security_handshaker *h =
+ (security_handshaker *)gpr_zalloc(sizeof(security_handshaker));
grpc_handshaker_init(&security_handshaker_vtable, &h->base);
h->handshaker = handshaker;
h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
gpr_mu_init(&h->mu);
gpr_ref_init(&h->refs, 1);
h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
- h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
+ h->handshake_buffer = (uint8_t *)gpr_malloc(h->handshake_buffer_size);
GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
on_handshake_data_sent_to_peer, h,
grpc_schedule_on_exec_ctx);
@@ -450,7 +471,7 @@ static const grpc_handshaker_vtable fail_handshaker_vtable = {
fail_handshaker_do_handshake};
static grpc_handshaker *fail_handshaker_create() {
- grpc_handshaker *h = gpr_malloc(sizeof(*h));
+ grpc_handshaker *h = (grpc_handshaker *)gpr_malloc(sizeof(*h));
grpc_handshaker_init(&fail_handshaker_vtable, h);
return h;
}
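
The handshaker tries the zero-copy protector first and treats TSI_UNIMPLEMENTED as a soft failure that merely selects the frame-protector fallback; any other result fails the handshake. A reduced model of that decision (the enum here is a cut-down stand-in for the real tsi_result):

    #include <stdio.h>

    typedef enum { TSI_OK, TSI_UNIMPLEMENTED, TSI_INTERNAL_ERROR } tsi_result;

    static const char *pick_protector(tsi_result zero_copy_create) {
      if (zero_copy_create == TSI_OK) return "zero-copy protector";
      if (zero_copy_create == TSI_UNIMPLEMENTED) return "frame protector";
      return "handshake failed"; /* hard error: abort the handshake */
    }

    int main(void) {
      printf("%s\n", pick_protector(TSI_OK));
      printf("%s\n", pick_protector(TSI_UNIMPLEMENTED));
      printf("%s\n", pick_protector(TSI_INTERNAL_ERROR));
      return 0;
    }
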
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index 9bf3f0ca0f..7f523c0883 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -26,7 +26,15 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
+typedef enum {
+ STATE_INIT = 0,
+ STATE_DONE,
+ STATE_CANCELLED,
+} async_state;
+
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
+ grpc_call_stack *owning_call;
grpc_transport_stream_op_batch *recv_initial_metadata_batch;
grpc_closure *original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
@@ -34,6 +42,8 @@ typedef struct call_data {
const grpc_metadata *consumed_md;
size_t num_consumed_md;
grpc_auth_context *auth_context;
+ grpc_closure cancel_closure;
+ gpr_atm state; // async_state
} call_data;
typedef struct channel_data {
@@ -78,54 +88,94 @@ static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
return GRPC_FILTERED_MDELEM(md);
}
-/* called from application code */
-static void on_md_processing_done(
- void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
- const grpc_metadata *response_md, size_t num_response_md,
- grpc_status_code status, const char *error_details) {
- grpc_call_element *elem = user_data;
+static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const grpc_metadata *consumed_md,
+ size_t num_consumed_md,
+ const grpc_metadata *response_md,
+ size_t num_response_md,
+ grpc_error *error) {
call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
/* TODO(jboeuf): Implement support for response_md. */
if (response_md != NULL && num_response_md > 0) {
gpr_log(GPR_INFO,
"response_md in auth metadata processing not supported for now. "
"Ignoring...");
}
- grpc_error *error = GRPC_ERROR_NONE;
- if (status == GRPC_STATUS_OK) {
+ if (error == GRPC_ERROR_NONE) {
calld->consumed_md = consumed_md;
calld->num_consumed_md = num_consumed_md;
error = grpc_metadata_batch_filter(
- &exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
+ exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
remove_consumed_md, elem, "Response metadata filtering error");
- } else {
- if (error_details == NULL) {
- error_details = "Authentication metadata processing failed.";
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, calld->original_recv_initial_metadata_ready,
+ error);
+}
+
+// Called from application code.
+static void on_md_processing_done(
+ void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
+ const grpc_metadata *response_md, size_t num_response_md,
+ grpc_status_code status, const char *error_details) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ // If the call was not cancelled while we were in flight, process the result.
+ if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
+ (gpr_atm)STATE_DONE)) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (status != GRPC_STATUS_OK) {
+ if (error_details == NULL) {
+ error_details = "Authentication metadata processing failed.";
+ }
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
+ GRPC_ERROR_INT_GRPC_STATUS, status);
}
- error =
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
- GRPC_ERROR_INT_GRPC_STATUS, status);
+ on_md_processing_done_inner(&exec_ctx, elem, consumed_md, num_consumed_md,
+ response_md, num_response_md, error);
}
+ // Clean up.
for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
}
grpc_metadata_array_destroy(&calld->md);
- GRPC_CLOSURE_SCHED(&exec_ctx, calld->original_recv_initial_metadata_ready,
- error);
+ GRPC_CALL_STACK_UNREF(&exec_ctx, calld->owning_call, "server_auth_metadata");
grpc_exec_ctx_finish(&exec_ctx);
}
+static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = elem->call_data;
+ // If the result was not already processed, invoke the callback now.
+ if (error != GRPC_ERROR_NONE &&
+ gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
+ (gpr_atm)STATE_CANCELLED)) {
+ on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0,
+ GRPC_ERROR_REF(error));
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_call");
+}
+
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *elem = arg;
+ grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
if (error == GRPC_ERROR_NONE) {
if (chand->creds != NULL && chand->creds->processor.process != NULL) {
+ // We're calling out to the application, so we need to make sure
+ // to drop the call combiner early if we get cancelled.
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_call");
+ GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
+ &calld->cancel_closure);
+ GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata");
calld->md = metadata_batch_to_md_array(
batch->payload->recv_initial_metadata.recv_initial_metadata);
chand->creds->processor.process(
@@ -159,6 +209,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
+ calld->call_combiner = args->call_combiner;
+ calld->owning_call = args->call_stack;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
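
The STATE_INIT/STATE_DONE/STATE_CANCELLED machine lets the application's on_md_processing_done callback and the combiner's cancel_call closure race safely: whichever compare-and-swaps STATE_INIT first processes the result, and the loser does nothing. The same idea with C11 atomics standing in for gpr_atm_full_cas:

    #include <stdatomic.h>
    #include <stdio.h>

    typedef enum { STATE_INIT, STATE_DONE, STATE_CANCELLED } async_state;

    static int try_transition(atomic_int *state, async_state to) {
      int expected = STATE_INIT;
      return atomic_compare_exchange_strong(state, &expected, (int)to);
    }

    int main(void) {
      atomic_int state = STATE_INIT;
      if (try_transition(&state, STATE_DONE))
        printf("md-processing callback wins; processes the result\n");
      if (!try_transition(&state, STATE_CANCELLED))
        printf("cancellation lost the race; skips result processing\n");
      return 0;
    }
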
@@ -218,6 +270,5 @@ const grpc_channel_filter grpc_server_auth_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server-auth"};
diff --git a/src/core/lib/slice/b64.c b/src/core/lib/slice/b64.c
index d02f303bdb..50264719a4 100644
--- a/src/core/lib/slice/b64.c
+++ b/src/core/lib/slice/b64.c
@@ -58,7 +58,7 @@ char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
int multiline) {
size_t result_projected_size =
grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
- char *result = gpr_malloc(result_projected_size);
+ char *result = (char *)gpr_malloc(result_projected_size);
grpc_base64_encode_core(result, vdata, data_size, url_safe, multiline);
return result;
}
@@ -75,7 +75,7 @@ size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,
void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size,
int url_safe, int multiline) {
- const unsigned char *data = vdata;
+ const unsigned char *data = (const unsigned char *)vdata;
const char *base64_chars =
url_safe ? base64_url_safe_chars : base64_url_unsafe_chars;
const size_t result_projected_size =
diff --git a/src/core/lib/slice/slice.c b/src/core/lib/slice/slice.c
index 8a8087805c..0764eda052 100644
--- a/src/core/lib/slice/slice.c
+++ b/src/core/lib/slice/slice.c
@@ -27,7 +27,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
char *grpc_slice_to_c_string(grpc_slice slice) {
- char *out = gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
+ char *out = (char *)gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
memcpy(out, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));
out[GRPC_SLICE_LENGTH(slice)] = 0;
return out;
@@ -105,12 +105,12 @@ typedef struct new_slice_refcount {
} new_slice_refcount;
static void new_slice_ref(void *p) {
- new_slice_refcount *r = p;
+ new_slice_refcount *r = (new_slice_refcount *)p;
gpr_ref(&r->refs);
}
static void new_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- new_slice_refcount *r = p;
+ new_slice_refcount *r = (new_slice_refcount *)p;
if (gpr_unref(&r->refs)) {
r->user_destroy(r->user_data);
gpr_free(r);
@@ -125,7 +125,8 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
void (*destroy)(void *),
void *user_data) {
grpc_slice slice;
- new_slice_refcount *rc = gpr_malloc(sizeof(new_slice_refcount));
+ new_slice_refcount *rc =
+ (new_slice_refcount *)gpr_malloc(sizeof(new_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.vtable = &new_slice_vtable;
rc->rc.sub_refcount = &rc->rc;
@@ -133,7 +134,7 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
rc->user_data = user_data;
slice.refcount = &rc->rc;
- slice.data.refcounted.bytes = p;
+ slice.data.refcounted.bytes = (uint8_t *)p;
slice.data.refcounted.length = len;
return slice;
}
@@ -154,12 +155,12 @@ typedef struct new_with_len_slice_refcount {
} new_with_len_slice_refcount;
static void new_with_len_ref(void *p) {
- new_with_len_slice_refcount *r = p;
+ new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
gpr_ref(&r->refs);
}
static void new_with_len_unref(grpc_exec_ctx *exec_ctx, void *p) {
- new_with_len_slice_refcount *r = p;
+ new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
if (gpr_unref(&r->refs)) {
r->user_destroy(r->user_data, r->user_length);
gpr_free(r);
@@ -173,8 +174,8 @@ static const grpc_slice_refcount_vtable new_with_len_vtable = {
grpc_slice grpc_slice_new_with_len(void *p, size_t len,
void (*destroy)(void *, size_t)) {
grpc_slice slice;
- new_with_len_slice_refcount *rc =
- gpr_malloc(sizeof(new_with_len_slice_refcount));
+ new_with_len_slice_refcount *rc = (new_with_len_slice_refcount *)gpr_malloc(
+ sizeof(new_with_len_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.vtable = &new_with_len_vtable;
rc->rc.sub_refcount = &rc->rc;
@@ -183,7 +184,7 @@ grpc_slice grpc_slice_new_with_len(void *p, size_t len,
rc->user_length = len;
slice.refcount = &rc->rc;
- slice.data.refcounted.bytes = p;
+ slice.data.refcounted.bytes = (uint8_t *)p;
slice.data.refcounted.length = len;
return slice;
}
@@ -205,12 +206,12 @@ typedef struct {
} malloc_refcount;
static void malloc_ref(void *p) {
- malloc_refcount *r = p;
+ malloc_refcount *r = (malloc_refcount *)p;
gpr_ref(&r->refs);
}
static void malloc_unref(grpc_exec_ctx *exec_ctx, void *p) {
- malloc_refcount *r = p;
+ malloc_refcount *r = (malloc_refcount *)p;
if (gpr_unref(&r->refs)) {
gpr_free(r);
}
@@ -232,7 +233,8 @@ grpc_slice grpc_slice_malloc_large(size_t length) {
refcount is a malloc_refcount
bytes is an array of bytes of the requested length
Both parts are placed in the same allocation returned from gpr_malloc */
- malloc_refcount *rc = gpr_malloc(sizeof(malloc_refcount) + length);
+ malloc_refcount *rc =
+ (malloc_refcount *)gpr_malloc(sizeof(malloc_refcount) + length);
/* Initial refcount on rc is 1 - and it's up to the caller to release
this reference. */
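
grpc_slice_malloc_large packs the refcount header and the payload into one gpr_malloc block, so a slice costs a single allocation and the bytes sit immediately after the header. The layout in miniature:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      int refs; /* stands in for the gpr_refcount machinery */
    } malloc_refcount;

    int main(void) {
      size_t length = 32;
      /* One allocation: header first, then the byte payload. */
      malloc_refcount *rc =
          (malloc_refcount *)malloc(sizeof(malloc_refcount) + length);
      rc->refs = 1; /* caller owns the initial reference */
      unsigned char *bytes = (unsigned char *)(rc + 1);
      bytes[0] = 42; /* payload begins just past the header */
      printf("header at %p, payload at %p\n", (void *)rc, (void *)bytes);
      free(rc);
      return 0;
    }
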
@@ -451,7 +453,7 @@ int grpc_slice_rchr(grpc_slice s, char c) {
int grpc_slice_chr(grpc_slice s, char c) {
const char *b = (const char *)GRPC_SLICE_START_PTR(s);
- const char *p = memchr(b, c, GRPC_SLICE_LENGTH(s));
+ const char *p = (const char *)memchr(b, c, GRPC_SLICE_LENGTH(s));
return p == NULL ? -1 : (int)(p - b);
}
diff --git a/src/core/lib/slice/slice_buffer.c b/src/core/lib/slice/slice_buffer.c
index a54a997a0d..63ffc0b00d 100644
--- a/src/core/lib/slice/slice_buffer.c
+++ b/src/core/lib/slice/slice_buffer.c
@@ -45,11 +45,12 @@ static void maybe_embiggen(grpc_slice_buffer *sb) {
sb->capacity = GROW(sb->capacity);
GPR_ASSERT(sb->capacity > slice_count);
if (sb->base_slices == sb->inlined) {
- sb->base_slices = gpr_malloc(sb->capacity * sizeof(grpc_slice));
+ sb->base_slices =
+ (grpc_slice *)gpr_malloc(sb->capacity * sizeof(grpc_slice));
memcpy(sb->base_slices, sb->inlined, slice_count * sizeof(grpc_slice));
} else {
- sb->base_slices =
- gpr_realloc(sb->base_slices, sb->capacity * sizeof(grpc_slice));
+ sb->base_slices = (grpc_slice *)gpr_realloc(
+ sb->base_slices, sb->capacity * sizeof(grpc_slice));
}
sb->slices = sb->base_slices + slice_offset;
@@ -291,7 +292,7 @@ void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer *src, size_t n,
void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer *src, size_t n,
void *dst) {
- char *dstp = dst;
+ char *dstp = (char *)dst;
GPR_ASSERT(src->length >= n);
while (n > 0) {
diff --git a/src/core/lib/slice/slice_hash_table.c b/src/core/lib/slice/slice_hash_table.c
index 1866ed25ac..6c2c9c201c 100644
--- a/src/core/lib/slice/slice_hash_table.c
+++ b/src/core/lib/slice/slice_hash_table.c
@@ -60,14 +60,15 @@ grpc_slice_hash_table* grpc_slice_hash_table_create(
size_t num_entries, grpc_slice_hash_table_entry* entries,
void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value),
int (*value_cmp)(void* a, void* b)) {
- grpc_slice_hash_table* table = gpr_zalloc(sizeof(*table));
+ grpc_slice_hash_table* table =
+ (grpc_slice_hash_table*)gpr_zalloc(sizeof(*table));
gpr_ref_init(&table->refs, 1);
table->destroy_value = destroy_value;
table->value_cmp = value_cmp;
// Keep load factor low to improve performance of lookups.
table->size = num_entries * 2;
const size_t entry_size = sizeof(grpc_slice_hash_table_entry) * table->size;
- table->entries = gpr_zalloc(entry_size);
+ table->entries = (grpc_slice_hash_table_entry*)gpr_zalloc(entry_size);
for (size_t i = 0; i < num_entries; ++i) {
grpc_slice_hash_table_entry* entry = &entries[i];
grpc_slice_hash_table_add(table, entry->key, entry->value);
diff --git a/src/core/lib/slice/slice_intern.c b/src/core/lib/slice/slice_intern.c
index a6d22c1e1f..ec71b3ca1d 100644
--- a/src/core/lib/slice/slice_intern.c
+++ b/src/core/lib/slice/slice_intern.c
@@ -69,7 +69,7 @@ static uint32_t max_static_metadata_hash_probe;
static uint32_t static_metadata_hash_values[GRPC_STATIC_MDSTR_COUNT];
static void interned_slice_ref(void *p) {
- interned_slice_refcount *s = p;
+ interned_slice_refcount *s = (interned_slice_refcount *)p;
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) > 0);
}
@@ -90,7 +90,7 @@ static void interned_slice_destroy(interned_slice_refcount *s) {
}
static void interned_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- interned_slice_refcount *s = p;
+ interned_slice_refcount *s = (interned_slice_refcount *)p;
if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
interned_slice_destroy(s);
}
@@ -129,7 +129,8 @@ static void grow_shard(slice_shard *shard) {
GPR_TIMER_BEGIN("grow_strtab", 0);
- strtab = gpr_zalloc(sizeof(interned_slice_refcount *) * capacity);
+ strtab = (interned_slice_refcount **)gpr_zalloc(
+ sizeof(interned_slice_refcount *) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (s = shard->strs[i]; s; s = next) {
@@ -242,7 +243,8 @@ grpc_slice grpc_slice_intern(grpc_slice slice) {
/* not found: create a new string */
/* string data goes after the internal_string header */
- s = gpr_malloc(sizeof(*s) + GRPC_SLICE_LENGTH(slice));
+ s = (interned_slice_refcount *)gpr_malloc(sizeof(*s) +
+ GRPC_SLICE_LENGTH(slice));
gpr_atm_rel_store(&s->refcnt, 1);
s->length = GRPC_SLICE_LENGTH(slice);
s->hash = hash;
@@ -280,7 +282,8 @@ void grpc_slice_intern_init(void) {
gpr_mu_init(&shard->mu);
shard->count = 0;
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->strs = gpr_zalloc(sizeof(*shard->strs) * shard->capacity);
+ shard->strs = (interned_slice_refcount **)gpr_zalloc(sizeof(*shard->strs) *
+ shard->capacity);
}
for (size_t i = 0; i < GPR_ARRAY_SIZE(static_metadata_hash); i++) {
static_metadata_hash[i].hash = 0;
diff --git a/src/core/lib/support/block_annotate.h b/src/core/lib/support/block_annotate.h
index 0a2cb45018..8e3ef7df65 100644
--- a/src/core/lib/support/block_annotate.h
+++ b/src/core/lib/support/block_annotate.h
@@ -19,15 +19,37 @@
#ifndef GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H
#define GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void gpr_thd_start_blocking_region(void);
+void gpr_thd_end_blocking_region(void);
+
+#ifdef __cplusplus
+}
+#endif
+
/* These annotations identify the beginning and end of regions where
the code may block for reasons other than synchronization functions.
These include poll, epoll, and getaddrinfo. */
+#ifdef GRPC_SCHEDULING_MARK_BLOCKING_REGION
+#define GRPC_SCHEDULING_START_BLOCKING_REGION \
+ do { \
+ gpr_thd_start_blocking_region(); \
+ } while (0)
+#define GRPC_SCHEDULING_END_BLOCKING_REGION \
+ do { \
+ gpr_thd_end_blocking_region(); \
+ } while (0)
+#else
#define GRPC_SCHEDULING_START_BLOCKING_REGION \
do { \
} while (0)
#define GRPC_SCHEDULING_END_BLOCKING_REGION \
do { \
} while (0)
+#endif
#endif /* GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H */
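
With GRPC_SCHEDULING_MARK_BLOCKING_REGION defined, the two macros above expand
to the new gpr_thd_* hooks; otherwise they remain no-ops. A sketch of the
intended call-site usage around a blocking syscall (the poll() call here is
illustrative, not taken from this patch):

    GRPC_SCHEDULING_START_BLOCKING_REGION;
    int r = poll(fds, nfds, timeout_ms); /* may block: marked for the scheduler */
    GRPC_SCHEDULING_END_BLOCKING_REGION;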
diff --git a/src/core/lib/support/log_linux.c b/src/core/lib/support/log_linux.c
index 61d2346427..7755018693 100644
--- a/src/core/lib/support/log_linux.c
+++ b/src/core/lib/support/log_linux.c
@@ -57,7 +57,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
}
void gpr_default_log(gpr_log_func_args *args) {
- char *final_slash;
+ const char *final_slash;
char *prefix;
const char *display_file;
char time_buffer[64];
diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c
index b65009754a..6b172df82f 100644
--- a/src/core/lib/support/string.c
+++ b/src/core/lib/support/string.c
@@ -276,7 +276,7 @@ static void add_string_to_split(const char *beg, const char *end, char ***strs,
void gpr_string_split(const char *input, const char *sep, char ***strs,
size_t *nstrs) {
- char *next;
+ const char *next;
*strs = NULL;
*nstrs = 0;
size_t capstrs = 0;
@@ -298,3 +298,17 @@ void *gpr_memrchr(const void *s, int c, size_t n) {
}
return NULL;
}
+
+bool gpr_is_true(const char *s) {
+ size_t i;
+ if (s == NULL) {
+ return false;
+ }
+ static const char *truthy[] = {"yes", "true", "1"};
+ for (i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {
+ if (0 == gpr_stricmp(s, truthy[i])) {
+ return true;
+ }
+ }
+ return false;
+}
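
gpr_is_true gives call sites a single NULL-safe way to read boolean-ish
configuration strings. A hypothetical use, assuming the gpr_getenv helper from
src/core/lib/support/env.h and an illustrative variable name:

    char *value = gpr_getenv("GRPC_SOME_SWITCH"); /* hypothetical env var */
    bool enabled = gpr_is_true(value);            /* false when value is NULL */
    gpr_free(value);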
diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h
index e11df8439d..5a56fa3a0a 100644
--- a/src/core/lib/support/string.h
+++ b/src/core/lib/support/string.h
@@ -19,6 +19,7 @@
#ifndef GRPC_CORE_LIB_SUPPORT_STRING_H
#define GRPC_CORE_LIB_SUPPORT_STRING_H
+#include <stdbool.h>
#include <stddef.h>
#include <grpc/support/port_platform.h>
@@ -106,6 +107,8 @@ int gpr_stricmp(const char *a, const char *b);
void *gpr_memrchr(const void *s, int c, size_t n);
+/** Returns true if s case-insensitively equals "true", "yes" or "1". */
+bool gpr_is_true(const char *s);
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/surface/alarm.c b/src/core/lib/surface/alarm.c
index 7d60b1de17..7712f560b9 100644
--- a/src/core/lib/surface/alarm.c
+++ b/src/core/lib/surface/alarm.c
@@ -44,7 +44,9 @@ static void alarm_ref(grpc_alarm *alarm) { gpr_ref(&alarm->refs); }
static void alarm_unref(grpc_alarm *alarm) {
if (gpr_unref(&alarm->refs)) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GRPC_CQ_INTERNAL_UNREF(&exec_ctx, alarm->cq, "alarm");
+ if (alarm->cq != NULL) {
+ GRPC_CQ_INTERNAL_UNREF(&exec_ctx, alarm->cq, "alarm");
+ }
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(alarm);
}
@@ -78,12 +80,12 @@ static void alarm_unref_dbg(grpc_alarm *alarm, const char *reason,
static void alarm_end_completion(grpc_exec_ctx *exec_ctx, void *arg,
grpc_cq_completion *c) {
- grpc_alarm *alarm = arg;
+ grpc_alarm *alarm = (grpc_alarm *)arg;
GRPC_ALARM_UNREF(alarm, "dequeue-end-op");
}
static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_alarm *alarm = arg;
+ grpc_alarm *alarm = (grpc_alarm *)arg;
  /* We are queuing an op on the completion queue. This means the alarm's structure
cannot be destroyed until the op is dequeued. Adding an extra ref
@@ -93,12 +95,8 @@ static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
(void *)alarm, &alarm->completion);
}
-grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
- void *tag) {
- grpc_alarm *alarm = gpr_malloc(sizeof(grpc_alarm));
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-
- gpr_ref_init(&alarm->refs, 1);
+grpc_alarm *grpc_alarm_create(void *reserved) {
+ grpc_alarm *alarm = (grpc_alarm *)gpr_malloc(sizeof(grpc_alarm));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) {
@@ -106,27 +104,36 @@ grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
}
#endif
+ gpr_ref_init(&alarm->refs, 1);
+ grpc_timer_init_unset(&alarm->alarm);
+ alarm->cq = NULL;
+ GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm,
+ grpc_schedule_on_exec_ctx);
+ return alarm;
+}
+
+void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq,
+ gpr_timespec deadline, void *tag, void *reserved) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
GRPC_CQ_INTERNAL_REF(cq, "alarm");
alarm->cq = cq;
alarm->tag = tag;
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
- GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm,
- grpc_schedule_on_exec_ctx);
grpc_timer_init(&exec_ctx, &alarm->alarm,
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
&alarm->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_exec_ctx_finish(&exec_ctx);
- return alarm;
}
-void grpc_alarm_cancel(grpc_alarm *alarm) {
+void grpc_alarm_cancel(grpc_alarm *alarm, void *reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_timer_cancel(&exec_ctx, &alarm->alarm);
grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_alarm_destroy(grpc_alarm *alarm) {
- grpc_alarm_cancel(alarm);
+void grpc_alarm_destroy(grpc_alarm *alarm, void *reserved) {
+ grpc_alarm_cancel(alarm, reserved);
GRPC_ALARM_UNREF(alarm, "alarm_destroy");
}
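
The patch splits alarm construction from arming: grpc_alarm_create now only
allocates and initializes, while grpc_alarm_set binds the completion queue,
deadline and tag. A minimal sketch of the new calling sequence, using only the
signatures introduced above (cq, deadline and tag assumed in scope):

    grpc_alarm *alarm = grpc_alarm_create(NULL /* reserved */);
    grpc_alarm_set(alarm, cq, deadline, tag, NULL /* reserved */);
    /* ... later: cancel (a no-op if already fired), then drop the creator's ref */
    grpc_alarm_destroy(alarm, NULL /* reserved */);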
diff --git a/src/core/lib/surface/byte_buffer.c b/src/core/lib/surface/byte_buffer.c
index 0bc990d487..7ed550ef87 100644
--- a/src/core/lib/surface/byte_buffer.c
+++ b/src/core/lib/surface/byte_buffer.c
@@ -32,7 +32,8 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_slice *slices, size_t nslices,
grpc_compression_algorithm compression) {
size_t i;
- grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
+ grpc_byte_buffer *bb =
+ (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer));
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = compression;
grpc_slice_buffer_init(&bb->data.raw.slice_buffer);
@@ -45,7 +46,8 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
grpc_byte_buffer_reader *reader) {
- grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
+ grpc_byte_buffer *bb =
+ (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer));
grpc_slice slice;
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = GRPC_COMPRESS_NONE;
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 00ec9c7c9a..03f47553a1 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -32,6 +32,7 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/compression/algorithm_metadata.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -121,6 +122,7 @@ typedef struct batch_control {
bool is_closure;
} notify_tag;
} completion_data;
+ grpc_closure start_batch;
grpc_closure finish_batch;
gpr_refcount steps_to_complete;
@@ -144,15 +146,19 @@ typedef struct {
grpc_call *sibling_prev;
} child_call;
+#define RECV_NONE ((gpr_atm)0)
+#define RECV_INITIAL_METADATA_FIRST ((gpr_atm)1)
+
struct grpc_call {
gpr_refcount ext_ref;
gpr_arena *arena;
+ grpc_call_combiner call_combiner;
grpc_completion_queue *cq;
grpc_polling_entity pollent;
grpc_channel *channel;
gpr_timespec start_time;
/* parent_call* */ gpr_atm parent_call_atm;
- child_call *child_call;
+ child_call *child;
/* client or server call */
bool is_client;
@@ -170,9 +176,6 @@ struct grpc_call {
gpr_atm any_ops_sent_atm;
gpr_atm received_final_op_atm;
- /* have we received initial metadata */
- bool has_initial_md_been_received;
-
batch_control *active_batches[MAX_CONCURRENT_BATCHES];
grpc_transport_stream_op_batch_payload stream_op_payload;
@@ -183,6 +186,11 @@ struct grpc_call {
Element 0 is initial metadata, element 1 is trailing metadata. */
grpc_metadata_array *buffered_metadata[2];
+ grpc_metadata compression_md;
+
+ // A char* indicating the peer name.
+ gpr_atm peer_string;
+
/* Packed received call statuses from various sources */
gpr_atm status[STATUS_SOURCE_COUNT];
@@ -192,8 +200,12 @@ struct grpc_call {
/* Compression algorithm for *incoming* data */
grpc_compression_algorithm incoming_compression_algorithm;
+ /* Stream compression algorithm for *incoming* data */
+ grpc_stream_compression_algorithm incoming_stream_compression_algorithm;
/* Supported encodings (compression algorithms), a bitset */
uint32_t encodings_accepted_by_peer;
+ /* Supported stream encodings (stream compression algorithms), a bitset */
+ uint32_t stream_encodings_accepted_by_peer;
/* Contexts for various subsystems (security, tracing, ...). */
grpc_call_context_element context[GRPC_CONTEXT_COUNT];
@@ -226,7 +238,23 @@ struct grpc_call {
} server;
} final_op;
- void *saved_receiving_stream_ready_bctlp;
+ /* recv_state can contain one of the following values:
+     RECV_NONE                   : neither initial metadata nor messages received
+ RECV_INITIAL_METADATA_FIRST : received initial metadata first
+ a batch_control* : received messages first
+
+ +------1------RECV_NONE------3-----+
+ | |
+ | |
+ v v
+ RECV_INITIAL_METADATA_FIRST receiving_stream_ready_bctlp
+ | ^ | ^
+ | | | |
+ +-----2-----+ +-----4-----+
+
+ For 1, 4: See receiving_initial_metadata_ready() function
+ For 2, 3: See receiving_stream_ready() function */
+ gpr_atm recv_state;
};
grpc_tracer_flag grpc_call_error_trace =
@@ -241,8 +269,9 @@ grpc_tracer_flag grpc_compression_trace =
#define CALL_FROM_TOP_ELEM(top_elem) \
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
-static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *op);
+static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op_batch *op,
+ grpc_closure *start_batch_closure);
static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_status_code status,
const char *description);
@@ -264,11 +293,11 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl);
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
grpc_error *error, bool has_cancelled);
-static void add_init_error(grpc_error **composite, grpc_error *new) {
- if (new == GRPC_ERROR_NONE) return;
+static void add_init_error(grpc_error **composite, grpc_error *new_err) {
+ if (new_err == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE)
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Call creation failed");
- *composite = grpc_error_add_child(*composite, new);
+ *composite = grpc_error_add_child(*composite, new_err);
}
void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
@@ -278,7 +307,7 @@ void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
static parent_call *get_or_create_parent_call(grpc_call *call) {
parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
if (p == NULL) {
- p = gpr_arena_alloc(call->arena, sizeof(*p));
+ p = (parent_call *)gpr_arena_alloc(call->arena, sizeof(*p));
gpr_mu_init(&p->child_list_mu);
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) {
gpr_mu_destroy(&p->child_list_mu);
@@ -301,12 +330,14 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
grpc_channel_get_channel_stack(args->channel);
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
- gpr_arena *arena =
- gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel));
- call = gpr_arena_alloc(arena,
- sizeof(grpc_call) + channel_stack->call_stack_size);
+ size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
+ GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size);
+ gpr_arena *arena = gpr_arena_create(initial_size);
+ call = (grpc_call *)gpr_arena_alloc(
+ arena, sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
call->arena = arena;
+ grpc_call_combiner_init(&call->call_combiner);
*out_call = call;
call->channel = args->channel;
call->cq = args->cq;
@@ -314,6 +345,11 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
call->is_client = args->server_transport_data == NULL;
+ if (call->is_client) {
+ GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx);
+ } else {
+ GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx);
+ }
call->stream_op_payload.context = call->context;
grpc_slice path = grpc_empty_slice();
if (call->is_client) {
@@ -342,24 +378,24 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
bool immediately_cancel = false;
- if (args->parent_call != NULL) {
- child_call *cc = call->child_call =
- gpr_arena_alloc(arena, sizeof(child_call));
- call->child_call->parent = args->parent_call;
+ if (args->parent != NULL) {
+ child_call *cc = call->child =
+ (child_call *)gpr_arena_alloc(arena, sizeof(child_call));
+ call->child->parent = args->parent;
- GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
+ GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
- GPR_ASSERT(!args->parent_call->is_client);
+ GPR_ASSERT(!args->parent->is_client);
- parent_call *pc = get_or_create_parent_call(args->parent_call);
+ parent_call *pc = get_or_create_parent_call(args->parent);
gpr_mu_lock(&pc->child_list_mu);
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
gpr_convert_clock_type(send_deadline,
- args->parent_call->send_deadline.clock_type),
- args->parent_call->send_deadline);
+ args->parent->send_deadline.clock_type),
+ args->parent->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
@@ -371,9 +407,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
"Census tracing propagation requested "
"without Census context propagation"));
}
- grpc_call_context_set(
- call, GRPC_CONTEXT_TRACING,
- args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
+ grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
+ args->parent->context[GRPC_CONTEXT_TRACING].value,
+ NULL);
} else if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT) {
add_init_error(&error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Census context propagation requested "
@@ -381,7 +417,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
}
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
- if (gpr_atm_acq_load(&args->parent_call->received_final_op_atm)) {
+ if (gpr_atm_acq_load(&args->parent->received_final_op_atm)) {
immediately_cancel = true;
}
}
@@ -391,9 +427,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
cc->sibling_next = cc->sibling_prev = call;
} else {
cc->sibling_next = pc->first_child;
- cc->sibling_prev = pc->first_child->child_call->sibling_prev;
- cc->sibling_next->child_call->sibling_prev =
- cc->sibling_prev->child_call->sibling_next = call;
+ cc->sibling_prev = pc->first_child->child->sibling_prev;
+ cc->sibling_next->child->sibling_prev =
+ cc->sibling_prev->child->sibling_next = call;
}
gpr_mu_unlock(&pc->child_list_mu);
@@ -410,7 +446,8 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
.path = path,
.start_time = call->start_time,
.deadline = send_deadline,
- .arena = call->arena};
+ .arena = call->arena,
+ .call_combiner = &call->call_combiner};
add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1,
destroy_call, call, &call_args));
if (error != GRPC_ERROR_NONE) {
@@ -475,8 +512,10 @@ void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
static void release_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
- grpc_call *c = call;
+ grpc_call *c = (grpc_call *)call;
grpc_channel *channel = c->channel;
+ grpc_call_combiner_destroy(&c->call_combiner);
+ gpr_free((char *)c->peer_string);
grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
}
@@ -486,7 +525,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
size_t i;
int ii;
- grpc_call *c = call;
+ grpc_call *c = (grpc_call *)call;
GPR_TIMER_BEGIN("destroy_call", 0);
for (i = 0; i < 2; i++) {
grpc_metadata_batch_destroy(
@@ -511,7 +550,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind");
}
- get_final_status(call, set_status_value_directly, &c->final_info.final_status,
+ get_final_status(c, set_status_value_directly, &c->final_info.final_status,
NULL);
c->final_info.stats.latency =
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
@@ -532,7 +571,7 @@ void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); }
void grpc_call_unref(grpc_call *c) {
if (!gpr_unref(&c->ext_ref)) return;
- child_call *cc = c->child_call;
+ child_call *cc = c->child;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_unref", 0);
@@ -547,8 +586,8 @@ void grpc_call_unref(grpc_call *c) {
pc->first_child = NULL;
}
}
- cc->sibling_prev->child_call->sibling_next = cc->sibling_next;
- cc->sibling_next->child_call->sibling_prev = cc->sibling_prev;
+ cc->sibling_prev->child->sibling_next = cc->sibling_next;
+ cc->sibling_next->child->sibling_prev = cc->sibling_prev;
gpr_mu_unlock(&pc->child_list_mu);
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, cc->parent, "child");
}
@@ -560,6 +599,12 @@ void grpc_call_unref(grpc_call *c) {
if (cancel) {
cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE,
GRPC_ERROR_CANCELLED);
+ } else {
+ // Unset the call combiner cancellation closure. This has the
+ // effect of scheduling the previously set cancellation closure, if
+ // any, so that it can release any internal references it may be
+ // holding to the call stack.
+ grpc_call_combiner_set_notify_on_cancel(&exec_ctx, &c->call_combiner, NULL);
}
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
grpc_exec_ctx_finish(&exec_ctx);
@@ -576,30 +621,37 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
return GRPC_CALL_OK;
}
-static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *op) {
- grpc_call_element *elem;
-
- GPR_TIMER_BEGIN("execute_op", 0);
- elem = CALL_ELEM_FROM_CALL(call, 0);
- elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
- GPR_TIMER_END("execute_op", 0);
+// This is called via the call combiner to start sending a batch down
+// the filter stack.
+static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *ignored) {
+ grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
+ grpc_call *call = (grpc_call *)batch->handler_private.extra_arg;
+ GPR_TIMER_BEGIN("execute_batch", 0);
+ grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+ GPR_TIMER_END("execute_batch", 0);
+}
+
+// start_batch_closure points to a caller-allocated closure to be used
+// for entering the call combiner.
+static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op_batch *batch,
+ grpc_closure *start_batch_closure) {
+ batch->handler_private.extra_arg = call;
+ GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner, start_batch_closure,
+ GRPC_ERROR_NONE, "executing batch");
}
char *grpc_call_get_peer(grpc_call *call) {
- grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- char *result;
- GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
- result = elem->filter->get_peer(&exec_ctx, elem);
- if (result == NULL) {
- result = grpc_channel_get_target(call->channel);
- }
- if (result == NULL) {
- result = gpr_strdup("unknown");
- }
- grpc_exec_ctx_finish(&exec_ctx);
- return result;
+ char *peer_string = (char *)gpr_atm_acq_load(&call->peer_string);
+ if (peer_string != NULL) return gpr_strdup(peer_string);
+ peer_string = grpc_channel_get_target(call->channel);
+ if (peer_string != NULL) return peer_string;
+ return gpr_strdup("unknown");
}
grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
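
Every batch now enters the filter stack through the call combiner: the caller
supplies a closure, execute_batch schedules it with GRPC_CALL_COMBINER_START,
and the matching GRPC_CALL_COMBINER_STOP is issued from the batch's completion
callbacks (finish_batch and the recv-ready callbacks below). The discipline,
reduced to a sketch built from the macros used in this patch:

    /* enter: the closure runs while holding the call combiner */
    GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner,
                             start_batch_closure, GRPC_ERROR_NONE,
                             "executing batch");
    /* exit: each completion callback yields the combiner exactly once */
    GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete");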
@@ -626,20 +678,41 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
return GRPC_CALL_OK;
}
-static void done_termination(grpc_exec_ctx *exec_ctx, void *call,
+typedef struct {
+ grpc_call *call;
+ grpc_closure start_batch;
+ grpc_closure finish_batch;
+} cancel_state;
+
+// The on_complete callback used when sending a cancel_stream batch down
+// the filter stack. Yields the call combiner when the batch is done.
+static void done_termination(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "termination");
+ cancel_state *state = (cancel_state *)arg;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner,
+ "on_complete for cancel_stream op");
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination");
+ gpr_free(state);
}
static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_error *error) {
GRPC_CALL_INTERNAL_REF(c, "termination");
+ // Inform the call combiner of the cancellation, so that it can cancel
+ // any in-flight asynchronous actions that may be holding the call
+ // combiner. This ensures that the cancel_stream batch can be sent
+ // down the filter stack in a timely manner.
+ grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error));
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
- grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(
- GRPC_CLOSURE_CREATE(done_termination, c, grpc_schedule_on_exec_ctx));
+ cancel_state *state = (cancel_state *)gpr_malloc(sizeof(*state));
+ state->call = c;
+ GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state,
+ grpc_schedule_on_exec_ctx);
+ grpc_transport_stream_op_batch *op =
+ grpc_make_transport_stream_op(&state->finish_batch);
op->cancel_stream = true;
op->payload->cancel_stream.cancel_error = error;
- execute_op(exec_ctx, c, op);
+ execute_batch(exec_ctx, c, op, &state->start_batch);
}
static grpc_error *error_from_status(grpc_status_code status,
@@ -752,6 +825,12 @@ static void set_incoming_compression_algorithm(
call->incoming_compression_algorithm = algo;
}
+static void set_incoming_stream_compression_algorithm(
+ grpc_call *call, grpc_stream_compression_algorithm algo) {
+ GPR_ASSERT(algo < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
+ call->incoming_stream_compression_algorithm = algo;
+}
+
grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
grpc_call *call) {
grpc_compression_algorithm algorithm;
@@ -765,6 +844,13 @@ static grpc_compression_algorithm compression_algorithm_for_level_locked(
call->encodings_accepted_by_peer);
}
+static grpc_stream_compression_algorithm
+stream_compression_algorithm_for_level_locked(
+ grpc_call *call, grpc_stream_compression_level level) {
+ return grpc_stream_compression_algorithm_for_level(
+ level, call->stream_encodings_accepted_by_peer);
+}
+
uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) {
uint32_t flags;
flags = call->test_only_last_message_flags;
@@ -819,12 +905,70 @@ static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
(void *)(((uintptr_t)call->encodings_accepted_by_peer) + 1));
}
+static void set_stream_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
+ grpc_call *call,
+ grpc_mdelem mdel) {
+ size_t i;
+ grpc_stream_compression_algorithm algorithm;
+ grpc_slice_buffer accept_encoding_parts;
+ grpc_slice accept_encoding_slice;
+ void *accepted_user_data;
+
+ accepted_user_data =
+ grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer);
+ if (accepted_user_data != NULL) {
+ call->stream_encodings_accepted_by_peer =
+ (uint32_t)(((uintptr_t)accepted_user_data) - 1);
+ return;
+ }
+
+ accept_encoding_slice = GRPC_MDVALUE(mdel);
+ grpc_slice_buffer_init(&accept_encoding_parts);
+ grpc_slice_split(accept_encoding_slice, ",", &accept_encoding_parts);
+
+ /* Always support no compression */
+ GPR_BITSET(&call->stream_encodings_accepted_by_peer,
+ GRPC_STREAM_COMPRESS_NONE);
+ for (i = 0; i < accept_encoding_parts.count; i++) {
+ grpc_slice accept_encoding_entry_slice = accept_encoding_parts.slices[i];
+ if (grpc_stream_compression_algorithm_parse(accept_encoding_entry_slice,
+ &algorithm)) {
+ GPR_BITSET(&call->stream_encodings_accepted_by_peer, algorithm);
+ } else {
+ char *accept_encoding_entry_str =
+ grpc_slice_to_c_string(accept_encoding_entry_slice);
+ gpr_log(GPR_ERROR,
+ "Invalid entry in accept encoding metadata: '%s'. Ignoring.",
+ accept_encoding_entry_str);
+ gpr_free(accept_encoding_entry_str);
+ }
+ }
+
+ grpc_slice_buffer_destroy_internal(exec_ctx, &accept_encoding_parts);
+
+ grpc_mdelem_set_user_data(
+ mdel, destroy_encodings_accepted_by_peer,
+ (void *)(((uintptr_t)call->stream_encodings_accepted_by_peer) + 1));
+}
+
uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
uint32_t encodings_accepted_by_peer;
encodings_accepted_by_peer = call->encodings_accepted_by_peer;
return encodings_accepted_by_peer;
}
+uint32_t grpc_call_test_only_get_stream_encodings_accepted_by_peer(
+ grpc_call *call) {
+ uint32_t stream_encodings_accepted_by_peer;
+ stream_encodings_accepted_by_peer = call->stream_encodings_accepted_by_peer;
+ return stream_encodings_accepted_by_peer;
+}
+
+grpc_stream_compression_algorithm
+grpc_call_test_only_get_incoming_stream_encodings(grpc_call *call) {
+ return call->incoming_stream_compression_algorithm;
+}
+
static grpc_linked_mdelem *linked_from_md(const grpc_metadata *md) {
return (grpc_linked_mdelem *)&md->internal_data;
}
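
As with the pre-existing grpc-accept-encoding handling, the parsed stream
encoding bitset is cached on the mdelem as user data, stored as bitset + 1 so
that a cached empty bitset stays distinguishable from NULL ("never computed").
The store/load pair from the code above, condensed (bitset is an illustrative
local name):

    /* store: offset by 1 so a zero bitset does not read back as NULL */
    grpc_mdelem_set_user_data(mdel, destroy_encodings_accepted_by_peer,
                              (void *)(((uintptr_t)bitset) + 1));
    /* load: NULL means "not cached yet"; anything else is bitset + 1 */
    void *cached =
        grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer);
    if (cached != NULL) bitset = (uint32_t)(((uintptr_t)cached) - 1);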
@@ -936,6 +1080,22 @@ static grpc_compression_algorithm decode_compression(grpc_mdelem md) {
return algorithm;
}
+static grpc_stream_compression_algorithm decode_stream_compression(
+ grpc_mdelem md) {
+ grpc_stream_compression_algorithm algorithm =
+ grpc_stream_compression_algorithm_from_slice(GRPC_MDVALUE(md));
+ if (algorithm == GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) {
+ char *md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ gpr_log(GPR_ERROR,
+ "Invalid incoming stream compression algorithm: '%s'. Interpreting "
+ "incoming data as uncompressed.",
+ md_c_str);
+ gpr_free(md_c_str);
+ return GRPC_STREAM_COMPRESS_NONE;
+ }
+ return algorithm;
+}
+
static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
int is_trailing) {
if (b->list.count == 0) return;
@@ -946,8 +1106,8 @@ static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
if (dest->count + b->list.count > dest->capacity) {
dest->capacity =
GPR_MAX(dest->capacity + b->list.count, dest->capacity * 3 / 2);
- dest->metadata =
- gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
+ dest->metadata = (grpc_metadata *)gpr_realloc(
+ dest->metadata, sizeof(grpc_metadata) * dest->capacity);
}
for (grpc_linked_mdelem *l = b->list.head; l != NULL; l = l->next) {
mdusr = &dest->metadata[dest->count++];
@@ -960,7 +1120,19 @@ static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_metadata_batch *b) {
- if (b->idx.named.grpc_encoding != NULL) {
+ if (b->idx.named.content_encoding != NULL) {
+ if (b->idx.named.grpc_encoding != NULL) {
+ gpr_log(GPR_ERROR,
+              "Received both content-encoding and grpc-encoding headers. "
+ "Ignoring grpc-encoding.");
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding);
+ }
+ GPR_TIMER_BEGIN("incoming_stream_compression_algorithm", 0);
+ set_incoming_stream_compression_algorithm(
+ call, decode_stream_compression(b->idx.named.content_encoding->md));
+ GPR_TIMER_END("incoming_stream_compression_algorithm", 0);
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_encoding);
+ } else if (b->idx.named.grpc_encoding != NULL) {
GPR_TIMER_BEGIN("incoming_compression_algorithm", 0);
set_incoming_compression_algorithm(
call, decode_compression(b->idx.named.grpc_encoding->md));
@@ -974,12 +1146,19 @@ static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_accept_encoding);
GPR_TIMER_END("encodings_accepted_by_peer", 0);
}
+ if (b->idx.named.accept_encoding != NULL) {
+ GPR_TIMER_BEGIN("stream_encodings_accepted_by_peer", 0);
+ set_stream_encodings_accepted_by_peer(exec_ctx, call,
+ b->idx.named.accept_encoding->md);
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.accept_encoding);
+ GPR_TIMER_END("stream_encodings_accepted_by_peer", 0);
+ }
publish_app_metadata(call, b, false);
}
static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args,
grpc_metadata_batch *b) {
- grpc_call *call = args;
+ grpc_call *call = (grpc_call *)args;
if (b->idx.named.grpc_status != NULL) {
uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
grpc_error *error =
@@ -1063,7 +1242,8 @@ static batch_control *allocate_batch_control(grpc_call *call,
int slot = batch_slot_for_op(ops[0].op);
batch_control **pslot = &call->active_batches[slot];
if (*pslot == NULL) {
- *pslot = gpr_arena_alloc(call->arena, sizeof(batch_control));
+ *pslot =
+ (batch_control *)gpr_arena_alloc(call->arena, sizeof(batch_control));
}
batch_control *bctl = *pslot;
if (bctl->call != NULL) {
@@ -1077,7 +1257,7 @@ static batch_control *allocate_batch_control(grpc_call *call,
static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_cq_completion *storage) {
- batch_control *bctl = user_data;
+ batch_control *bctl = (batch_control *)user_data;
grpc_call *call = bctl->call;
bctl->call = NULL;
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
@@ -1137,7 +1317,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
child = pc->first_child;
if (child != NULL) {
do {
- next_child_call = child->child_call->sibling_next;
+ next_child_call = child->child->sibling_next;
if (child->cancellation_is_inherited) {
GRPC_CALL_INTERNAL_REF(child, "propagate_cancel");
cancel_with_error(exec_ctx, child, STATUS_FROM_API_OVERRIDE,
@@ -1166,7 +1346,8 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */
bctl->call = NULL;
- GRPC_CLOSURE_RUN(exec_ctx, bctl->completion_data.notify_tag.tag, error);
+ GRPC_CLOSURE_RUN(
+ exec_ctx, (grpc_closure *)bctl->completion_data.notify_tag.tag, error);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@@ -1220,7 +1401,7 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
grpc_byte_stream *bs = call->receiving_stream;
bool release_error = false;
@@ -1279,7 +1460,7 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx,
static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
if (error != GRPC_ERROR_NONE) {
if (call->receiving_stream != NULL) {
@@ -1290,19 +1471,73 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE,
GRPC_ERROR_REF(error));
}
- if (call->has_initial_md_been_received || error != GRPC_ERROR_NONE ||
- call->receiving_stream == NULL) {
- process_data_after_md(exec_ctx, bctlp);
- } else {
- call->saved_receiving_stream_ready_bctlp = bctlp;
+  /* If recv_state is RECV_NONE, we save the batch_control object with a
+   * release CAS and never touch it again on this path. The matching
+   * acquire load is in receiving_initial_metadata_ready(). */
+ if (error != GRPC_ERROR_NONE || call->receiving_stream == NULL ||
+ !gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) {
+ process_data_after_md(exec_ctx, bctl);
}
}
+// The recv_message_ready callback used when sending a batch containing
+// a recv_message op down the filter stack. Yields the call combiner
+// before processing the received message.
+static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *bctlp,
+ grpc_error *error) {
+ batch_control *bctl = (batch_control *)bctlp;
+ grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready");
+ receiving_stream_ready(exec_ctx, bctlp, error);
+}
+
static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
- /* validate call->incoming_compression_algorithm */
- if (call->incoming_compression_algorithm != GRPC_COMPRESS_NONE) {
+ /* validate compression algorithms */
+ if (call->incoming_stream_compression_algorithm !=
+ GRPC_STREAM_COMPRESS_NONE) {
+ const grpc_stream_compression_algorithm algo =
+ call->incoming_stream_compression_algorithm;
+ char *error_msg = NULL;
+ const grpc_compression_options compression_options =
+ grpc_channel_compression_options(call->channel);
+ if (algo >= GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) {
+ gpr_asprintf(&error_msg,
+ "Invalid stream compression algorithm value '%d'.", algo);
+ gpr_log(GPR_ERROR, "%s", error_msg);
+ cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE,
+ GRPC_STATUS_UNIMPLEMENTED, error_msg);
+ } else if (grpc_compression_options_is_stream_compression_algorithm_enabled(
+ &compression_options, algo) == 0) {
+ /* check if algorithm is supported by current channel config */
+ const char *algo_name = NULL;
+ grpc_stream_compression_algorithm_name(algo, &algo_name);
+ gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.",
+ algo_name);
+ gpr_log(GPR_ERROR, "%s", error_msg);
+ cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE,
+ GRPC_STATUS_UNIMPLEMENTED, error_msg);
+ }
+ gpr_free(error_msg);
+
+ GPR_ASSERT(call->stream_encodings_accepted_by_peer != 0);
+ if (!GPR_BITGET(call->stream_encodings_accepted_by_peer,
+ call->incoming_stream_compression_algorithm)) {
+ if (GRPC_TRACER_ON(grpc_compression_trace)) {
+ const char *algo_name = NULL;
+ grpc_stream_compression_algorithm_name(
+ call->incoming_stream_compression_algorithm, &algo_name);
+ gpr_log(
+ GPR_ERROR,
+ "Stream compression algorithm (content-encoding = '%s') not "
+ "present in the bitset of accepted encodings (accept-encodings: "
+ "'0x%x')",
+ algo_name, call->stream_encodings_accepted_by_peer);
+ }
+ }
+ } else if (call->incoming_compression_algorithm != GRPC_COMPRESS_NONE) {
const grpc_compression_algorithm algo =
call->incoming_compression_algorithm;
char *error_msg = NULL;
@@ -1318,7 +1553,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
algo_name);
@@ -1329,22 +1564,20 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
call->incoming_compression_algorithm = algo;
}
gpr_free(error_msg);
- }
- /* make sure the received grpc-encoding is amongst the ones listed in
- * grpc-accept-encoding */
- GPR_ASSERT(call->encodings_accepted_by_peer != 0);
- if (!GPR_BITGET(call->encodings_accepted_by_peer,
- call->incoming_compression_algorithm)) {
- if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name = NULL;
- grpc_compression_algorithm_name(call->incoming_compression_algorithm,
- &algo_name);
- gpr_log(GPR_ERROR,
- "Compression algorithm (grpc-encoding = '%s') not present in "
- "the bitset of accepted encodings (grpc-accept-encodings: "
- "'0x%x')",
- algo_name, call->encodings_accepted_by_peer);
+ GPR_ASSERT(call->encodings_accepted_by_peer != 0);
+ if (!GPR_BITGET(call->encodings_accepted_by_peer,
+ call->incoming_compression_algorithm)) {
+ if (GRPC_TRACER_ON(grpc_compression_trace)) {
+ const char *algo_name = NULL;
+ grpc_compression_algorithm_name(call->incoming_compression_algorithm,
+ &algo_name);
+ gpr_log(GPR_ERROR,
+ "Compression algorithm (grpc-encoding = '%s') not present in "
+ "the bitset of accepted encodings (grpc-accept-encodings: "
+ "'0x%x')",
+ algo_name, call->encodings_accepted_by_peer);
+ }
}
}
}
@@ -1362,9 +1595,12 @@ static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *bctlp, grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner,
+ "recv_initial_metadata_ready");
+
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
@@ -1384,12 +1620,31 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
}
}
- call->has_initial_md_been_received = true;
- if (call->saved_receiving_stream_ready_bctlp != NULL) {
- grpc_closure *saved_rsr_closure = GRPC_CLOSURE_CREATE(
- receiving_stream_ready, call->saved_receiving_stream_ready_bctlp,
- grpc_schedule_on_exec_ctx);
- call->saved_receiving_stream_ready_bctlp = NULL;
+ grpc_closure *saved_rsr_closure = NULL;
+ while (true) {
+ gpr_atm rsr_bctlp = gpr_atm_acq_load(&call->recv_state);
+ /* Should only receive initial metadata once */
+    GPR_ASSERT(rsr_bctlp != RECV_INITIAL_METADATA_FIRST);
+    if (rsr_bctlp == RECV_NONE) {
+      /* Neither initial metadata nor messages have been received yet, so
+       * initial metadata arrives first. A no-barrier CAS suffices here:
+       * when initial metadata wins the race, this function never accesses
+       * the batch_control object that receiving_stream_ready() would have
+       * saved. */
+ if (gpr_atm_no_barrier_cas(&call->recv_state, RECV_NONE,
+ RECV_INITIAL_METADATA_FIRST)) {
+ break;
+ }
+ } else {
+ /* Already received messages */
+ saved_rsr_closure = GRPC_CLOSURE_CREATE(receiving_stream_ready,
+ (batch_control *)rsr_bctlp,
+ grpc_schedule_on_exec_ctx);
+ /* No need to modify recv_state */
+ break;
+ }
+ }
+ if (saved_rsr_closure != NULL) {
GRPC_CLOSURE_RUN(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error));
}
@@ -1398,8 +1653,9 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
-
+ batch_control *bctl = (batch_control *)bctlp;
+ grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete");
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
finish_batch_step(exec_ctx, bctl);
}
@@ -1418,9 +1674,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
batch_control *bctl;
int num_completion_callbacks_needed = 1;
grpc_call_error error = GRPC_CALL_OK;
-
- // sent_initial_metadata guards against variable reuse.
- grpc_metadata compression_md;
+ grpc_transport_stream_op_batch *stream_op;
+ grpc_transport_stream_op_batch_payload *stream_op_payload;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
@@ -1428,11 +1683,12 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (nops == 0) {
if (!is_notify_tag_closure) {
GPR_ASSERT(grpc_cq_begin_op(call->cq, notify_tag));
- grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
- free_no_op_completion, NULL,
- gpr_malloc(sizeof(grpc_cq_completion)));
+ grpc_cq_end_op(
+ exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
+ free_no_op_completion, NULL,
+ (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, notify_tag, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)notify_tag, GRPC_ERROR_NONE);
}
error = GRPC_CALL_OK;
goto done;
@@ -1446,9 +1702,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
bctl->completion_data.notify_tag.is_closure =
(uint8_t)(is_notify_tag_closure != 0);
- grpc_transport_stream_op_batch *stream_op = &bctl->op;
- grpc_transport_stream_op_batch_payload *stream_op_payload =
- &call->stream_op_payload;
+ stream_op = &bctl->op;
+ stream_op_payload = &call->stream_op_payload;
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
@@ -1458,7 +1713,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error;
}
switch (op->op) {
- case GRPC_OP_SEND_INITIAL_METADATA:
+ case GRPC_OP_SEND_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (!are_initial_metadata_flags_valid(op->flags, call->is_client)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1469,31 +1724,60 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error;
}
/* process compression level */
- memset(&compression_md, 0, sizeof(compression_md));
+ memset(&call->compression_md, 0, sizeof(call->compression_md));
size_t additional_metadata_count = 0;
- grpc_compression_level effective_compression_level;
+ grpc_compression_level effective_compression_level =
+ GRPC_COMPRESS_LEVEL_NONE;
+ grpc_stream_compression_level effective_stream_compression_level =
+ GRPC_STREAM_COMPRESS_LEVEL_NONE;
bool level_set = false;
- if (op->data.send_initial_metadata.maybe_compression_level.is_set) {
+ bool stream_compression = false;
+ if (op->data.send_initial_metadata.maybe_stream_compression_level
+ .is_set) {
+ effective_stream_compression_level =
+ op->data.send_initial_metadata.maybe_stream_compression_level
+ .level;
+ level_set = true;
+ stream_compression = true;
+ } else if (op->data.send_initial_metadata.maybe_compression_level
+ .is_set) {
effective_compression_level =
op->data.send_initial_metadata.maybe_compression_level.level;
level_set = true;
} else {
const grpc_compression_options copts =
grpc_channel_compression_options(call->channel);
- level_set = copts.default_level.is_set;
- if (level_set) {
+ if (copts.default_stream_compression_level.is_set) {
+ level_set = true;
+ effective_stream_compression_level =
+ copts.default_stream_compression_level.level;
+ stream_compression = true;
+ } else if (copts.default_level.is_set) {
+ level_set = true;
effective_compression_level = copts.default_level.level;
}
}
if (level_set && !call->is_client) {
- const grpc_compression_algorithm calgo =
- compression_algorithm_for_level_locked(
- call, effective_compression_level);
- // the following will be picked up by the compress filter and used as
- // the call's compression algorithm.
- compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
- compression_md.value = grpc_compression_algorithm_slice(calgo);
- additional_metadata_count++;
+ if (stream_compression) {
+ const grpc_stream_compression_algorithm calgo =
+ stream_compression_algorithm_for_level_locked(
+ call, effective_stream_compression_level);
+ call->compression_md.key =
+ GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST;
+ call->compression_md.value =
+ grpc_stream_compression_algorithm_slice(calgo);
+ } else {
+ const grpc_compression_algorithm calgo =
+ compression_algorithm_for_level_locked(
+ call, effective_compression_level);
+ /* the following will be picked up by the compress filter and used
+ * as the call's compression algorithm. */
+ call->compression_md.key =
+ GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
+ call->compression_md.value =
+ grpc_compression_algorithm_slice(calgo);
+ additional_metadata_count++;
+ }
}
if (op->data.send_initial_metadata.count + additional_metadata_count >
@@ -1506,7 +1790,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (!prepare_application_metadata(
exec_ctx, call, (int)op->data.send_initial_metadata.count,
op->data.send_initial_metadata.metadata, 0, call->is_client,
- &compression_md, (int)additional_metadata_count)) {
+ &call->compression_md, (int)additional_metadata_count)) {
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
@@ -1518,8 +1802,13 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->send_initial_metadata.send_initial_metadata_flags =
op->flags;
+ if (call->is_client) {
+ stream_op_payload->send_initial_metadata.peer_string =
+ &call->peer_string;
+ }
break;
- case GRPC_OP_SEND_MESSAGE:
+ }
+ case GRPC_OP_SEND_MESSAGE: {
if (!are_write_flags_valid(op->flags)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
goto done_with_error;
@@ -1548,7 +1837,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_message.send_message =
&call->sending_stream.base;
break;
- case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
+ }
+ case GRPC_OP_SEND_CLOSE_FROM_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1567,7 +1857,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
- case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ }
+ case GRPC_OP_SEND_STATUS_FROM_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1629,7 +1920,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
- case GRPC_OP_RECV_INITIAL_METADATA:
+ }
+ case GRPC_OP_RECV_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1650,9 +1942,14 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->recv_initial_metadata.recv_initial_metadata_ready =
&call->receiving_initial_metadata_ready;
+ if (!call->is_client) {
+ stream_op_payload->recv_initial_metadata.peer_string =
+ &call->peer_string;
+ }
num_completion_callbacks_needed++;
break;
- case GRPC_OP_RECV_MESSAGE:
+ }
+ case GRPC_OP_RECV_MESSAGE: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1666,13 +1963,15 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->recv_message = true;
call->receiving_buffer = op->data.recv_message.recv_message;
stream_op_payload->recv_message.recv_message = &call->receiving_stream;
- GRPC_CLOSURE_INIT(&call->receiving_stream_ready, receiving_stream_ready,
- bctl, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&call->receiving_stream_ready,
+ receiving_stream_ready_in_call_combiner, bctl,
+ grpc_schedule_on_exec_ctx);
stream_op_payload->recv_message.recv_message_ready =
&call->receiving_stream_ready;
num_completion_callbacks_needed++;
break;
- case GRPC_OP_RECV_STATUS_ON_CLIENT:
+ }
+ case GRPC_OP_RECV_STATUS_ON_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1699,7 +1998,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
- case GRPC_OP_RECV_CLOSE_ON_SERVER:
+ }
+ case GRPC_OP_RECV_CLOSE_ON_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1723,6 +2023,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
+ }
}
}
@@ -1737,7 +2038,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->on_complete = &bctl->finish_batch;
gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
- execute_op(exec_ctx, call, stream_op);
+ execute_batch(exec_ctx, call, stream_op, &bctl->start_batch);
done:
GPR_TIMER_END("grpc_call_start_batch", 0);
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 185bfccb77..c680139cf6 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -19,6 +19,10 @@
#ifndef GRPC_CORE_LIB_SURFACE_CALL_H
#define GRPC_CORE_LIB_SURFACE_CALL_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/context.h"
#include "src/core/lib/surface/api_trace.h"
@@ -26,10 +30,6 @@
#include <grpc/grpc.h>
#include <grpc/impl/codegen/compression_types.h>
-#ifdef __cplusplus
-extern "C" {
-#endif
-
typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *user_data);
@@ -37,7 +37,7 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
typedef struct grpc_call_create_args {
grpc_channel *channel;
- grpc_call *parent_call;
+ grpc_call *parent;
uint32_t propagation_mask;
grpc_completion_queue *cq;
@@ -89,7 +89,7 @@ grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
/* Given the top call_element, get the call object. */
grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element);
-void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity,
grpc_call *call, const grpc_op *ops, size_t nops,
void *tag);
diff --git a/src/core/lib/surface/call_log_batch.c b/src/core/lib/surface/call_log_batch.c
index 4443aba58a..4a1c265817 100644
--- a/src/core/lib/surface/call_log_batch.c
+++ b/src/core/lib/surface/call_log_batch.c
@@ -103,7 +103,7 @@ char *grpc_op_string(const grpc_op *op) {
return out;
}
-void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity,
grpc_call *call, const grpc_op *ops, size_t nops,
void *tag) {
char *tmp;
diff --git a/src/core/lib/surface/call_test_only.h b/src/core/lib/surface/call_test_only.h
index 2f1b80bfd7..a5a01b3679 100644
--- a/src/core/lib/surface/call_test_only.h
+++ b/src/core/lib/surface/call_test_only.h
@@ -42,6 +42,18 @@ uint32_t grpc_call_test_only_get_message_flags(grpc_call *call);
* To be indexed by grpc_compression_algorithm enum values. */
uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call);
+/** Returns a bitset for the stream encodings (stream compression algorithms)
+ * supported by \a call's peer.
+ *
+ * To be indexed by grpc_stream_compression_algorithm enum values. */
+uint32_t grpc_call_test_only_get_stream_encodings_accepted_by_peer(
+ grpc_call *call);
+
+/** Returns the incoming stream compression algorithm (content-encoding header)
+ * received by a call. */
+grpc_stream_compression_algorithm
+grpc_call_test_only_get_incoming_stream_encodings(grpc_call *call);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 5780a18ce8..48962e5e45 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -27,6 +27,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
@@ -77,6 +78,11 @@ grpc_channel *grpc_channel_create_with_builder(
grpc_channel_args *args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
grpc_channel *channel;
+ if (channel_stack_type == GRPC_SERVER_CHANNEL) {
+ GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx);
+ } else {
+ GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx);
+ }
grpc_error *error = grpc_channel_stack_builder_finish(
exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL,
(void **)&channel);
@@ -142,6 +148,16 @@ grpc_channel *grpc_channel_create_with_builder(
GRPC_COMPRESS_LEVEL_NONE,
GRPC_COMPRESS_LEVEL_COUNT - 1});
} else if (0 == strcmp(args->args[i].key,
+ GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) {
+ channel->compression_options.default_stream_compression_level.is_set =
+ true;
+ channel->compression_options.default_stream_compression_level.level =
+ (grpc_stream_compression_level)grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){GRPC_STREAM_COMPRESS_LEVEL_NONE,
+ GRPC_STREAM_COMPRESS_LEVEL_NONE,
+ GRPC_STREAM_COMPRESS_LEVEL_COUNT - 1});
+ } else if (0 == strcmp(args->args[i].key,
GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
channel->compression_options.default_algorithm.is_set = true;
channel->compression_options.default_algorithm.algorithm =
@@ -149,12 +165,31 @@ grpc_channel *grpc_channel_create_with_builder(
&args->args[i],
(grpc_integer_options){GRPC_COMPRESS_NONE, GRPC_COMPRESS_NONE,
GRPC_COMPRESS_ALGORITHMS_COUNT - 1});
+ } else if (0 == strcmp(args->args[i].key,
+ GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
+ channel->compression_options.default_stream_compression_algorithm.is_set =
+ true;
+ channel->compression_options.default_stream_compression_algorithm
+ .algorithm =
+ (grpc_stream_compression_algorithm)grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){
+ GRPC_STREAM_COMPRESS_NONE, GRPC_STREAM_COMPRESS_NONE,
+ GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT - 1});
} else if (0 ==
strcmp(args->args[i].key,
GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
channel->compression_options.enabled_algorithms_bitset =
(uint32_t)args->args[i].value.integer |
0x1; /* always support no compression */
+ } else if (0 ==
+ strcmp(
+ args->args[i].key,
+ GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
+ channel->compression_options
+ .enabled_stream_compression_algorithms_bitset =
+ (uint32_t)args->args[i].value.integer |
+ 0x1; /* always support no compression */
}
}
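
A hedged sketch of how the new channel args reach this parsing loop; the target address and the use of grpc_insecure_channel_create are illustrative assumptions, not part of this patch:

grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
arg.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
arg.value.integer = 1; /* assumed: first algorithm after GRPC_STREAM_COMPRESS_NONE */
grpc_channel_args channel_args = {1, &arg};
grpc_channel *ch =
    grpc_insecure_channel_create("localhost:50051", &channel_args, NULL);
/* grpc_channel_arg_get_integer above clamps the integer to
   [GRPC_STREAM_COMPRESS_NONE, GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT - 1]. */

Note that the enabled-algorithms bitsets are OR'd with 0x1 so that "no compression" can never be disabled.
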
@@ -247,7 +282,7 @@ static grpc_call *grpc_channel_create_call_internal(
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = channel;
- args.parent_call = parent_call;
+ args.parent = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
args.pollset_set_alternative = pollset_set_alternative;
@@ -298,7 +333,7 @@ grpc_call *grpc_channel_create_pollset_set_call(
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
const char *host, void *reserved) {
- registered_call *rc = gpr_malloc(sizeof(registered_call));
+ registered_call *rc = (registered_call *)gpr_malloc(sizeof(registered_call));
GRPC_API_TRACE(
"grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
4, (channel, method, host, reserved));
@@ -325,7 +360,7 @@ grpc_call *grpc_channel_create_registered_call(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *completion_queue, void *registered_call_handle,
gpr_timespec deadline, void *reserved) {
- registered_call *rc = registered_call_handle;
+ registered_call *rc = (registered_call *)registered_call_handle;
GRPC_API_TRACE(
"grpc_channel_create_registered_call("
"channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
@@ -363,7 +398,7 @@ void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_channel *channel = arg;
+ grpc_channel *channel = (grpc_channel *)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
while (channel->registered_calls) {
registered_call *rc = channel->registered_calls;
diff --git a/src/core/lib/surface/channel_init.c b/src/core/lib/surface/channel_init.c
index a1391ffe56..33f444b89e 100644
--- a/src/core/lib/surface/channel_init.c
+++ b/src/core/lib/surface/channel_init.c
@@ -53,9 +53,9 @@ void grpc_channel_init_register_stage(grpc_channel_stack_type type,
GPR_ASSERT(!g_finalized);
if (g_slots[type].cap_slots == g_slots[type].num_slots) {
g_slots[type].cap_slots = GPR_MAX(8, 3 * g_slots[type].cap_slots / 2);
- g_slots[type].slots =
- gpr_realloc(g_slots[type].slots,
- g_slots[type].cap_slots * sizeof(*g_slots[type].slots));
+ g_slots[type].slots = (stage_slot *)gpr_realloc(
+ g_slots[type].slots,
+ g_slots[type].cap_slots * sizeof(*g_slots[type].slots));
}
stage_slot *s = &g_slots[type].slots[g_slots[type].num_slots++];
s->insertion_order = g_slots[type].num_slots;
@@ -65,8 +65,8 @@ void grpc_channel_init_register_stage(grpc_channel_stack_type type,
}
static int compare_slots(const void *a, const void *b) {
- const stage_slot *sa = a;
- const stage_slot *sb = b;
+ const stage_slot *sa = (const stage_slot *)a;
+ const stage_slot *sb = (const stage_slot *)b;
int c = GPR_ICMP(sa->priority, sb->priority);
if (c != 0) return c;
@@ -85,7 +85,7 @@ void grpc_channel_init_finalize(void) {
void grpc_channel_init_shutdown(void) {
for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
gpr_free(g_slots[i].slots);
- g_slots[i].slots = (void *)(uintptr_t)0xdeadbeef;
+ g_slots[i].slots = (stage_slot *)(void *)(uintptr_t)0xdeadbeef;
}
}
diff --git a/src/core/lib/surface/channel_ping.c b/src/core/lib/surface/channel_ping.c
index e85b308850..f45b568958 100644
--- a/src/core/lib/surface/channel_ping.c
+++ b/src/core/lib/surface/channel_ping.c
@@ -39,7 +39,7 @@ static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg,
}
static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- ping_result *pr = arg;
+ ping_result *pr = (ping_result *)arg;
grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy,
pr, &pr->completion_storage);
}
@@ -49,7 +49,7 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
GRPC_API_TRACE("grpc_channel_ping(channel=%p, cq=%p, tag=%p, reserved=%p)", 4,
(channel, cq, tag, reserved));
grpc_transport_op *op = grpc_make_transport_op(NULL);
- ping_result *pr = gpr_malloc(sizeof(*pr));
+ ping_result *pr = (ping_result *)gpr_malloc(sizeof(*pr));
grpc_channel_element *top_elem =
grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index c20cfbc740..fed66e3a20 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -26,6 +26,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
@@ -54,7 +55,7 @@ typedef struct {
bool can_listen;
size_t (*size)(void);
void (*init)(grpc_pollset *pollset, gpr_mu **mu);
- grpc_error *(*kick)(grpc_pollset *pollset,
+ grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker, gpr_timespec now,
@@ -130,7 +131,8 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
}
static grpc_error *non_polling_poller_kick(
- grpc_pollset *pollset, grpc_pollset_worker *specific_worker) {
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *specific_worker) {
non_polling_poller *p = (non_polling_poller *)pollset;
if (specific_worker == NULL) specific_worker = (grpc_pollset_worker *)p->root;
if (specific_worker != NULL) {
@@ -327,25 +329,12 @@ static void cq_destroy_pluck(void *data);
/* Completion queue vtables based on the completion-type */
static const cq_vtable g_cq_vtable[] = {
/* GRPC_CQ_NEXT */
- {.data_size = sizeof(cq_next_data),
- .cq_completion_type = GRPC_CQ_NEXT,
- .init = cq_init_next,
- .shutdown = cq_shutdown_next,
- .destroy = cq_destroy_next,
- .begin_op = cq_begin_op_for_next,
- .end_op = cq_end_op_for_next,
- .next = cq_next,
- .pluck = NULL},
+ {GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next,
+ cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next, NULL},
/* GRPC_CQ_PLUCK */
- {.data_size = sizeof(cq_pluck_data),
- .cq_completion_type = GRPC_CQ_PLUCK,
- .init = cq_init_pluck,
- .shutdown = cq_shutdown_pluck,
- .destroy = cq_destroy_pluck,
- .begin_op = cq_begin_op_for_pluck,
- .end_op = cq_end_op_for_pluck,
- .next = NULL,
- .pluck = cq_pluck},
+ {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck,
+ cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, NULL,
+ cq_pluck},
};
#define DATA_FROM_CQ(cq) ((void *)(cq + 1))
@@ -420,8 +409,13 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
const cq_poller_vtable *poller_vtable =
&g_poller_vtable_by_poller_type[polling_type];
- cq = gpr_zalloc(sizeof(grpc_completion_queue) + vtable->data_size +
- poller_vtable->size());
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_STATS_INC_CQS_CREATED(&exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ cq = (grpc_completion_queue *)gpr_zalloc(sizeof(grpc_completion_queue) +
+ vtable->data_size +
+ poller_vtable->size());
cq->vtable = vtable;
cq->poller_vtable = poller_vtable;
@@ -441,7 +435,7 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
}
static void cq_init_next(void *ptr) {
- cq_next_data *cqd = ptr;
+ cq_next_data *cqd = (cq_next_data *)ptr;
/* Initial count is dropped by grpc_completion_queue_shutdown */
gpr_atm_no_barrier_store(&cqd->pending_events, 1);
cqd->shutdown_called = false;
@@ -450,13 +444,13 @@ static void cq_init_next(void *ptr) {
}
static void cq_destroy_next(void *ptr) {
- cq_next_data *cqd = ptr;
+ cq_next_data *cqd = (cq_next_data *)ptr;
GPR_ASSERT(cq_event_queue_num_items(&cqd->queue) == 0);
cq_event_queue_destroy(&cqd->queue);
}
static void cq_init_pluck(void *ptr) {
- cq_pluck_data *cqd = ptr;
+ cq_pluck_data *cqd = (cq_pluck_data *)ptr;
/* Initial count is dropped by grpc_completion_queue_shutdown */
gpr_atm_no_barrier_store(&cqd->pending_events, 1);
cqd->completed_tail = &cqd->completed_head;
@@ -468,7 +462,7 @@ static void cq_init_pluck(void *ptr) {
}
static void cq_destroy_pluck(void *ptr) {
- cq_pluck_data *cqd = ptr;
+ cq_pluck_data *cqd = (cq_pluck_data *)ptr;
GPR_ASSERT(cqd->completed_head.next == (uintptr_t)&cqd->completed_head);
}
@@ -501,7 +495,7 @@ void grpc_cq_internal_ref(grpc_completion_queue *cq) {
static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_completion_queue *cq = arg;
+ grpc_completion_queue *cq = (grpc_completion_queue *)arg;
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "pollset_destroy");
}
@@ -559,13 +553,13 @@ static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {}
* true if the increment was successful; false if the counter is zero */
static bool atm_inc_if_nonzero(gpr_atm *counter) {
while (true) {
- gpr_atm count = gpr_atm_no_barrier_load(counter);
+ gpr_atm count = gpr_atm_acq_load(counter);
    /* If zero, we are done. If not, we must do a CAS (instead of an atomic
* increment) to maintain the contract: do not increment the counter if it
* is zero. */
if (count == 0) {
return false;
- } else if (gpr_atm_no_barrier_cas(counter, count, count + 1)) {
+ } else if (gpr_atm_full_cas(counter, count, count + 1)) {
break;
}
}
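
The acquire load and full CAS above pair with the release half of the gpr_atm_full_fetch_add(&cqd->pending_events, -1) in cq_shutdown_next/cq_shutdown_pluck below. A self-contained sketch of the same inc-if-nonzero pattern, with hypothetical names:

static bool ref_if_alive(gpr_atm *refs) {
  for (;;) {
    gpr_atm n = gpr_atm_acq_load(refs); /* observes the shutdown release */
    if (n == 0) return false;           /* already at zero: never resurrect */
    /* full CAS: increment only if the count is still n (and thus nonzero) */
    if (gpr_atm_full_cas(refs, n, n + 1)) return true;
  }
}
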
@@ -574,12 +568,12 @@ static bool atm_inc_if_nonzero(gpr_atm *counter) {
}
static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
@@ -588,9 +582,9 @@ bool grpc_cq_begin_op(grpc_completion_queue *cq, void *tag) {
gpr_mu_lock(cq->mu);
if (cq->outstanding_tag_count == cq->outstanding_tag_capacity) {
cq->outstanding_tag_capacity = GPR_MAX(4, 2 * cq->outstanding_tag_capacity);
- cq->outstanding_tags =
- gpr_realloc(cq->outstanding_tags, sizeof(*cq->outstanding_tags) *
- cq->outstanding_tag_capacity);
+ cq->outstanding_tags = (void **)gpr_realloc(
+ cq->outstanding_tags,
+ sizeof(*cq->outstanding_tags) * cq->outstanding_tag_capacity);
}
cq->outstanding_tags[cq->outstanding_tag_count++] = tag;
gpr_mu_unlock(cq->mu);
@@ -624,7 +618,7 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
}
}
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
storage->tag = tag;
@@ -637,15 +631,19 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
/* Add the completion to the queue */
bool is_first = cq_event_queue_push(&cqd->queue, storage);
gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
- bool will_definitely_shutdown =
- gpr_atm_no_barrier_load(&cqd->pending_events) == 1;
+
+ /* Since we do not hold the cq lock here, it is important to do an 'acquire'
+     load here (instead of a 'no_barrier' load) to match the release store
+ (done via gpr_atm_full_fetch_add(pending_events, -1)) in cq_shutdown_next
+ */
+ bool will_definitely_shutdown = gpr_atm_acq_load(&cqd->pending_events) == 1;
if (!will_definitely_shutdown) {
/* Only kick if this is the first item queued */
if (is_first) {
gpr_mu_lock(cq->mu);
grpc_error *kick_error =
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
+ cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
if (kick_error != GRPC_ERROR_NONE) {
@@ -685,7 +683,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
void *done_arg,
grpc_cq_completion *storage),
void *done_arg, grpc_cq_completion *storage) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
GPR_TIMER_BEGIN("cq_end_op_for_pluck", 0);
@@ -731,7 +729,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
}
grpc_error *kick_error =
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker);
+ cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), pluck_worker);
gpr_mu_unlock(cq->mu);
@@ -766,9 +764,9 @@ typedef struct {
} cq_is_finished_arg;
static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
- cq_is_finished_arg *a = arg;
+ cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -819,7 +817,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
void *reserved) {
grpc_event ret;
gpr_timespec now;
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -882,7 +880,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
}
}
- if (gpr_atm_no_barrier_load(&cqd->pending_events) == 0) {
+ if (gpr_atm_acq_load(&cqd->pending_events) == 0) {
/* Before returning, check if the queue has any items left over (since
gpr_mpscq_pop() can sometimes return NULL even if the queue is not
       empty). If so, keep retrying but do not return GRPC_QUEUE_SHUTDOWN */
@@ -928,9 +926,9 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
}
if (cq_event_queue_num_items(&cqd->queue) > 0 &&
- gpr_atm_no_barrier_load(&cqd->pending_events) > 0) {
+ gpr_atm_acq_load(&cqd->pending_events) > 0) {
gpr_mu_lock(cq->mu);
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
+ cq->poller_vtable->kick(&exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
}
@@ -952,7 +950,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
this function */
static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0);
@@ -963,7 +961,7 @@ static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
   * We call cq_finish_shutdown_next() below, which would call pollset shutdown.
@@ -976,10 +974,12 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
if (cqd->shutdown_called) {
gpr_mu_unlock(cq->mu);
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
- GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
return;
}
cqd->shutdown_called = true;
+  /* Doing a full_fetch_add (i.e. acq/release) here to match
+   * cq_begin_op_for_next and cq_end_op_for_next, which read/write on this
+   * counter without necessarily holding a lock on cq */
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
cq_finish_shutdown_next(exec_ctx, cq);
}
@@ -994,7 +994,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
static int add_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
return 0;
}
@@ -1006,7 +1006,7 @@ static int add_plucker(grpc_completion_queue *cq, void *tag,
static void del_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
for (int i = 0; i < cqd->num_pluckers; i++) {
if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) {
cqd->num_pluckers--;
@@ -1018,9 +1018,9 @@ static void del_plucker(grpc_completion_queue *cq, void *tag,
}
static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
- cq_is_finished_arg *a = arg;
+ cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -1057,7 +1057,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -1181,7 +1181,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown));
@@ -1195,7 +1195,7 @@ static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
* merging them is a bit tricky and probably not worth it */
static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
   * We call cq_finish_shutdown_pluck() below, which would call pollset shutdown.
@@ -1208,7 +1208,6 @@ static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
if (cqd->shutdown_called) {
gpr_mu_unlock(cq->mu);
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down (pluck cq)");
- GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
return;
}
cqd->shutdown_called = true;
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index d199ac060e..b089da2c54 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -28,12 +28,15 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/handshaker_registry.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/http/parser.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/resource_quota.h"
+#include "src/core/lib/iomgr/timer_manager.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/alarm_internal.h"
@@ -118,6 +121,7 @@ void grpc_init(void) {
gpr_mu_lock(&g_init_mu);
if (++g_initializations == 1) {
gpr_time_init();
+ grpc_stats_init();
grpc_slice_intern_init();
grpc_mdctx_global_init();
grpc_channel_init_init();
@@ -127,6 +131,7 @@ void grpc_init(void) {
grpc_register_tracer(&grpc_trace_channel_stack_builder);
grpc_register_tracer(&grpc_http1_trace);
grpc_register_tracer(&grpc_cq_pluck_trace); // default on
+ grpc_register_tracer(&grpc_call_combiner_trace);
grpc_register_tracer(&grpc_combiner_trace);
grpc_register_tracer(&grpc_server_channel_trace);
grpc_register_tracer(&grpc_bdp_estimator_trace);
@@ -175,17 +180,20 @@ void grpc_shutdown(void) {
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
- grpc_iomgr_shutdown(&exec_ctx);
- gpr_timers_global_destroy();
- grpc_tracer_shutdown();
+ grpc_executor_shutdown(&exec_ctx);
+ grpc_timer_manager_set_threading(false); // shutdown timer_manager thread
for (i = g_number_of_plugins; i >= 0; i--) {
if (g_all_of_the_plugins[i].destroy != NULL) {
g_all_of_the_plugins[i].destroy();
}
}
+ grpc_iomgr_shutdown(&exec_ctx);
+ gpr_timers_global_destroy();
+ grpc_tracer_shutdown();
grpc_mdctx_global_shutdown(&exec_ctx);
grpc_handshaker_factory_registry_shutdown(&exec_ctx);
grpc_slice_intern_shutdown();
+ grpc_stats_shutdown();
}
gpr_mu_unlock(&g_init_mu);
grpc_exec_ctx_finish(&exec_ctx);
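
For context, a sketch of the nested init/shutdown contract that the g_initializations counter enforces; the reordering above only changes what the final matching grpc_shutdown tears down first (executor and timer threads before plugins, iomgr after):

grpc_init();
grpc_init();     /* nested init: only bumps the refcount */
grpc_shutdown(); /* still initialized */
grpc_shutdown(); /* count hits zero: executor, timers, plugins, then iomgr */
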
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index a0791080a9..6286f9159d 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -40,6 +40,7 @@ namespace grpc_core {
namespace {
struct CallData {
+ grpc_call_combiner *call_combiner;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
grpc_core::atomic<bool> filled_metadata;
@@ -52,14 +53,14 @@ struct ChannelData {
static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *mdb) {
- CallData *calld = static_cast<CallData *>(elem->call_data);
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
bool expected = false;
if (!calld->filled_metadata.compare_exchange_strong(
expected, true, grpc_core::memory_order_relaxed,
grpc_core::memory_order_relaxed)) {
return;
}
- ChannelData *chand = static_cast<ChannelData *>(elem->channel_data);
+ ChannelData *chand = reinterpret_cast<ChannelData *>(elem->channel_data);
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_slices(
@@ -79,6 +80,7 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void lame_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
if (op->recv_initial_metadata) {
fill_metadata(exec_ctx, elem,
op->payload->recv_initial_metadata.recv_initial_metadata);
@@ -87,12 +89,8 @@ static void lame_start_transport_stream_op_batch(
op->payload->recv_trailing_metadata.recv_trailing_metadata);
}
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, op,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
-}
-
-static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return NULL;
+ exec_ctx, op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"),
+ calld->call_combiner);
}
static void lame_get_channel_info(grpc_exec_ctx *exec_ctx,
@@ -122,6 +120,8 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
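
This is the pattern the commit applies across filters: grpc_transport_stream_op_batch_finish_with_failure now takes the call combiner, so each filter captures it from the element args at init time. A sketch, with calld/batch assumed to be the usual filter-local names:

/* in init_call_elem: */
calld->call_combiner = args->call_combiner;
/* on a failure path: */
grpc_transport_stream_op_batch_finish_with_failure(
    exec_ctx, batch, GRPC_ERROR_CREATE_FROM_STATIC_STRING("some error"),
    calld->call_combiner);
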
@@ -156,7 +156,6 @@ extern "C" const grpc_channel_filter grpc_lame_filter = {
sizeof(grpc_core::ChannelData),
grpc_core::init_channel_elem,
grpc_core::destroy_channel_elem,
- grpc_core::lame_get_peer,
grpc_core::lame_get_channel_info,
"lame-client",
};
@@ -176,7 +175,7 @@ grpc_channel *grpc_lame_client_channel_create(const char *target,
"error_message=%s)",
3, (target, (int)error_code, error_message));
GPR_ASSERT(elem->filter == &grpc_lame_filter);
- auto chand = static_cast<grpc_core::ChannelData *>(elem->channel_data);
+ auto chand = reinterpret_cast<grpc_core::ChannelData *>(elem->channel_data);
chand->error_code = error_code;
chand->error_message = error_message;
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 66dcc299aa..1d0fd472d0 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -29,6 +29,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -75,7 +76,7 @@ typedef struct requested_call {
grpc_call_details *details;
} batch;
struct {
- registered_method *registered_method;
+ registered_method *method;
gpr_timespec *deadline;
grpc_byte_buffer **optional_payload;
} registered;
@@ -144,7 +145,7 @@ struct call_data {
uint32_t recv_initial_metadata_flags;
grpc_metadata_array initial_metadata;
- request_matcher *request_matcher;
+ request_matcher *matcher;
grpc_byte_buffer *payload;
grpc_closure got_initial_metadata;
@@ -170,7 +171,7 @@ struct registered_method {
grpc_server_register_method_payload_handling payload_handling;
uint32_t flags;
/* one request matcher per method */
- request_matcher request_matcher;
+ request_matcher matcher;
registered_method *next;
};
@@ -250,7 +251,8 @@ static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
count++;
}
cb->num_channels = count;
- cb->channels = gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
+ cb->channels =
+ (grpc_channel **)gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
count = 0;
for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
cb->channels[count++] = c->channel;
@@ -265,14 +267,15 @@ struct shutdown_cleanup_args {
static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- struct shutdown_cleanup_args *a = arg;
+ struct shutdown_cleanup_args *a = (struct shutdown_cleanup_args *)arg;
grpc_slice_unref_internal(exec_ctx, a->slice);
gpr_free(a);
}
static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
bool send_goaway, grpc_error *send_disconnect) {
- struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc));
+ struct shutdown_cleanup_args *sc =
+ (struct shutdown_cleanup_args *)gpr_malloc(sizeof(*sc));
GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc,
grpc_schedule_on_exec_ctx);
grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
@@ -314,8 +317,8 @@ static void request_matcher_init(request_matcher *rm, size_t entries,
grpc_server *server) {
memset(rm, 0, sizeof(*rm));
rm->server = server;
- rm->requests_per_cq =
- gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count);
+ rm->requests_per_cq = (gpr_stack_lockfree **)gpr_malloc(
+ sizeof(*rm->requests_per_cq) * server->cq_count);
for (size_t i = 0; i < server->cq_count; i++) {
rm->requests_per_cq[i] = gpr_stack_lockfree_create(entries);
}
@@ -331,7 +334,7 @@ static void request_matcher_destroy(request_matcher *rm) {
static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem,
grpc_error *error) {
- grpc_call_unref(grpc_call_from_top_element(elem));
+ grpc_call_unref(grpc_call_from_top_element((grpc_call_element *)elem));
}
static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
@@ -384,7 +387,7 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
if (server->started) {
- request_matcher_destroy(&rm->request_matcher);
+ request_matcher_destroy(&rm->matcher);
}
gpr_free(rm->method);
gpr_free(rm->host);
@@ -426,7 +429,7 @@ static void orphan_channel(channel_data *chand) {
static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
grpc_error *error) {
- channel_data *chand = cd;
+ channel_data *chand = (channel_data *)cd;
grpc_server *server = chand->server;
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
server_unref(exec_ctx, server);
@@ -459,7 +462,7 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
grpc_cq_completion *c) {
- requested_call *rc = req;
+ requested_call *rc = (requested_call *)req;
grpc_server *server = rc->server;
if (rc >= server->requested_calls_per_cq[rc->cq_idx] &&
@@ -505,7 +508,7 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
server_ref(chand->server);
grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE,
done_request_event, rc, &rc->completion);
@@ -513,10 +516,10 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *call_elem = arg;
- call_data *calld = call_elem->call_data;
- channel_data *chand = call_elem->channel_data;
- request_matcher *rm = calld->request_matcher;
+ grpc_call_element *call_elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)call_elem->call_data;
+ channel_data *chand = (channel_data *)call_elem->channel_data;
+ request_matcher *rm = calld->matcher;
grpc_server *server = rm->server;
if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
@@ -538,6 +541,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
if (request_id == -1) {
continue;
} else {
+ GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
gpr_mu_lock(&calld->mu_state);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
@@ -548,6 +552,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
}
  /* no cq found to take the request: queue it on the slow list */
+ GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
gpr_mu_lock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
calld->state = PENDING;
@@ -566,7 +571,7 @@ static void finish_start_new_rpc(
grpc_exec_ctx *exec_ctx, grpc_server *server, grpc_call_element *elem,
request_matcher *rm,
grpc_server_register_method_payload_handling payload_handling) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
gpr_mu_lock(&calld->mu_state);
@@ -578,7 +583,7 @@ static void finish_start_new_rpc(
return;
}
- calld->request_matcher = rm;
+ calld->matcher = rm;
switch (payload_handling) {
case GRPC_SRM_PAYLOAD_NONE:
@@ -599,8 +604,8 @@ static void finish_start_new_rpc(
}
static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_server *server = chand->server;
uint32_t i;
uint32_t hash;
@@ -624,7 +629,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
- &rm->server_registered_method->request_matcher,
+ &rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@@ -642,7 +647,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
- &rm->server_registered_method->request_matcher,
+ &rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@@ -663,7 +668,7 @@ static int num_listeners(grpc_server *server) {
static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server,
grpc_cq_completion *completion) {
- server_unref(exec_ctx, server);
+ server_unref(exec_ctx, (grpc_server *)server);
}
static int num_channels(grpc_server *server) {
@@ -686,9 +691,9 @@ static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &server->unregistered_request_matcher);
for (registered_method *rm = server->registered_methods; rm;
rm = rm->next) {
- request_matcher_kill_requests(exec_ctx, server, &rm->request_matcher,
+ request_matcher_kill_requests(exec_ctx, server, &rm->matcher,
GRPC_ERROR_REF(error));
- request_matcher_zombify_all_pending_calls(exec_ctx, &rm->request_matcher);
+ request_matcher_zombify_all_pending_calls(exec_ctx, &rm->matcher);
}
}
GRPC_ERROR_UNREF(error);
@@ -732,8 +737,8 @@ static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
- grpc_call_element *elem = ptr;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)ptr;
+ call_data *calld = (call_data *)elem->call_data;
gpr_timespec op_deadline;
if (error == GRPC_ERROR_NONE) {
@@ -771,7 +776,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags == NULL);
@@ -789,15 +794,14 @@ static void server_mutate_op(grpc_call_element *elem,
static void server_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
- grpc_call_element *elem = ptr;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)ptr;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
start_new_rpc(exec_ctx, elem);
} else {
@@ -823,7 +827,7 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
grpc_transport *transport,
const void *transport_server_data) {
- channel_data *chand = cd;
+ channel_data *chand = (channel_data *)cd;
/* create a call */
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
@@ -839,7 +843,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
GRPC_ERROR_UNREF(error);
return;
}
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
@@ -853,7 +857,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
grpc_error *error) {
- channel_data *chand = cd;
+ channel_data *chand = (channel_data *)cd;
grpc_server *server = chand->server;
if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
grpc_transport_op *op = grpc_make_transport_op(NULL);
@@ -874,8 +878,8 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
memset(calld, 0, sizeof(call_data));
calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
calld->call = grpc_call_from_top_element(elem);
@@ -892,8 +896,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
GPR_ASSERT(calld->state != PENDING);
@@ -914,7 +918,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(args->is_first);
GPR_ASSERT(!args->is_last);
chand->server = NULL;
@@ -931,7 +935,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
size_t i;
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->registered_methods) {
for (i = 0; i < chand->registered_method_slots; i++) {
grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].method);
@@ -962,7 +966,6 @@ const grpc_channel_filter grpc_server_top_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server",
};
@@ -978,8 +981,8 @@ static void register_completion_queue(grpc_server *server,
GRPC_CQ_INTERNAL_REF(cq, "server");
n = server->cq_count++;
- server->cqs = gpr_realloc(server->cqs,
- server->cq_count * sizeof(grpc_completion_queue *));
+ server->cqs = (grpc_completion_queue **)gpr_realloc(
+ server->cqs, server->cq_count * sizeof(grpc_completion_queue *));
server->cqs[n] = cq;
}
@@ -1004,7 +1007,7 @@ void grpc_server_register_completion_queue(grpc_server *server,
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
- grpc_server *server = gpr_zalloc(sizeof(grpc_server));
+ grpc_server *server = (grpc_server *)gpr_zalloc(sizeof(grpc_server));
gpr_mu_init(&server->mu_global);
gpr_mu_init(&server->mu_call);
@@ -1055,7 +1058,7 @@ void *grpc_server_register_method(
flags);
return NULL;
}
- m = gpr_zalloc(sizeof(registered_method));
+ m = (registered_method *)gpr_zalloc(sizeof(registered_method));
m->method = gpr_strdup(method);
m->host = gpr_strdup(host);
m->next = server->registered_methods;
@@ -1067,7 +1070,7 @@ void *grpc_server_register_method(
static void start_listeners(grpc_exec_ctx *exec_ctx, void *s,
grpc_error *error) {
- grpc_server *server = s;
+ grpc_server *server = (grpc_server *)s;
for (listener *l = server->listeners; l; l = l->next) {
l->start(exec_ctx, server, l->arg, server->pollsets, server->pollset_count);
}
@@ -1088,11 +1091,12 @@ void grpc_server_start(grpc_server *server) {
server->started = true;
server->pollset_count = 0;
- server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
- server->request_freelist_per_cq =
- gpr_malloc(sizeof(*server->request_freelist_per_cq) * server->cq_count);
- server->requested_calls_per_cq =
- gpr_malloc(sizeof(*server->requested_calls_per_cq) * server->cq_count);
+ server->pollsets =
+ (grpc_pollset **)gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
+ server->request_freelist_per_cq = (gpr_stack_lockfree **)gpr_malloc(
+ sizeof(*server->request_freelist_per_cq) * server->cq_count);
+ server->requested_calls_per_cq = (requested_call **)gpr_malloc(
+ sizeof(*server->requested_calls_per_cq) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
if (grpc_cq_can_listen(server->cqs[i])) {
server->pollsets[server->pollset_count++] =
@@ -1103,22 +1107,24 @@ void grpc_server_start(grpc_server *server) {
for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
}
- server->requested_calls_per_cq[i] =
- gpr_malloc((size_t)server->max_requested_calls_per_cq *
- sizeof(*server->requested_calls_per_cq[i]));
+ server->requested_calls_per_cq[i] = (requested_call *)gpr_malloc(
+ (size_t)server->max_requested_calls_per_cq *
+ sizeof(*server->requested_calls_per_cq[i]));
}
request_matcher_init(&server->unregistered_request_matcher,
(size_t)server->max_requested_calls_per_cq, server);
for (registered_method *rm = server->registered_methods; rm; rm = rm->next) {
- request_matcher_init(&rm->request_matcher,
+ request_matcher_init(&rm->matcher,
(size_t)server->max_requested_calls_per_cq, server);
}
server_ref(server);
server->starting = true;
- GRPC_CLOSURE_SCHED(&exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server,
- grpc_executor_scheduler),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ &exec_ctx,
+ GRPC_CLOSURE_CREATE(start_listeners, server,
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)),
+ GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -1173,7 +1179,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
if (num_registered_methods > 0) {
slots = 2 * num_registered_methods;
alloc = sizeof(channel_registered_method) * slots;
- chand->registered_methods = gpr_zalloc(alloc);
+ chand->registered_methods = (channel_registered_method *)gpr_zalloc(alloc);
for (rm = s->registered_methods; rm; rm = rm->next) {
grpc_slice host;
bool has_host;
@@ -1234,7 +1240,7 @@ void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg,
static void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s,
grpc_error *error) {
- grpc_server *server = s;
+ grpc_server *server = (grpc_server *)s;
gpr_mu_lock(&server->mu_global);
server->listeners_destroyed++;
maybe_finish_shutdown(exec_ctx, server);
@@ -1261,14 +1267,15 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* stay locked, and gather up some stuff to do */
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
if (server->shutdown_published) {
- grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
- NULL, gpr_malloc(sizeof(grpc_cq_completion)));
+ grpc_cq_end_op(
+ &exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL,
+ (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
gpr_mu_unlock(&server->mu_global);
goto done;
}
- server->shutdown_tags =
- gpr_realloc(server->shutdown_tags,
- sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
+ server->shutdown_tags = (shutdown_tag *)gpr_realloc(
+ server->shutdown_tags,
+ sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
sdt = &server->shutdown_tags[server->num_shutdown_tags++];
sdt->tag = tag;
sdt->cq = cq;
@@ -1351,7 +1358,7 @@ void grpc_server_add_listener(
grpc_pollset **pollsets, size_t pollset_count),
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
grpc_closure *on_done)) {
- listener *l = gpr_malloc(sizeof(listener));
+ listener *l = (listener *)gpr_malloc(sizeof(listener));
l->arg = arg;
l->start = start;
l->destroy = destroy;
@@ -1384,7 +1391,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
rm = &server->unregistered_request_matcher;
break;
case REGISTERED_CALL:
- rm = &rc->data.registered.registered_method->request_matcher;
+ rm = &rc->data.registered.method->matcher;
break;
}
server->requested_calls_per_cq[cq_idx][request_id] = *rc;
@@ -1428,7 +1435,8 @@ grpc_call_error grpc_server_request_call(
grpc_completion_queue *cq_for_notification, void *tag) {
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- requested_call *rc = gpr_malloc(sizeof(*rc));
+ requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
+ GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
@@ -1473,8 +1481,9 @@ grpc_call_error grpc_server_request_registered_call(
grpc_completion_queue *cq_for_notification, void *tag) {
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- requested_call *rc = gpr_malloc(sizeof(*rc));
- registered_method *rm = rmp;
+ requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
+ registered_method *rm = (registered_method *)rmp;
+ GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_registered_call("
"server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
@@ -1511,7 +1520,7 @@ grpc_call_error grpc_server_request_registered_call(
rc->tag = tag;
rc->cq_bound_to_call = cq_bound_to_call;
rc->call = call;
- rc->data.registered.registered_method = rm;
+ rc->data.registered.method = rm;
rc->data.registered.deadline = deadline;
rc->initial_metadata = initial_metadata;
rc->data.registered.optional_payload = optional_payload;
diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c
index 96c16105e7..fd6ea4daa9 100644
--- a/src/core/lib/surface/version.c
+++ b/src/core/lib/surface/version.c
@@ -21,6 +21,6 @@
#include <grpc/grpc.h>
-const char *grpc_version_string(void) { return "4.0.0-dev"; }
+const char *grpc_version_string(void) { return "5.0.0-dev"; }
const char *grpc_g_stands_for(void) { return "gambit"; }
diff --git a/src/core/lib/transport/byte_stream.c b/src/core/lib/transport/byte_stream.c
index fb03a10315..08f61629a9 100644
--- a/src/core/lib/transport/byte_stream.c
+++ b/src/core/lib/transport/byte_stream.c
@@ -85,6 +85,7 @@ static void slice_buffer_stream_shutdown(grpc_exec_ctx *exec_ctx,
static void slice_buffer_stream_destroy(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *byte_stream) {
grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, stream->backing_buffer);
GRPC_ERROR_UNREF(stream->shutdown_error);
}
diff --git a/src/core/lib/transport/byte_stream.h b/src/core/lib/transport/byte_stream.h
index 1e1e8310b8..be2a35213e 100644
--- a/src/core/lib/transport/byte_stream.h
+++ b/src/core/lib/transport/byte_stream.h
@@ -81,7 +81,9 @@ void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
// grpc_slice_buffer_stream
//
-// A grpc_byte_stream that wraps a slice buffer.
+// A grpc_byte_stream that wraps a slice buffer. The stream takes
+// ownership of the slices in the buffer, and on destruction will
+// reset the contents of the buffer.
typedef struct grpc_slice_buffer_stream {
grpc_byte_stream base;
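
A hedged sketch of the new ownership rule, assuming a live grpc_exec_ctx *exec_ctx in scope: after init the stream owns the slices, and destroy now resets the caller's backing buffer instead of leaving the slices referenced:

grpc_slice_buffer buf;
grpc_slice_buffer_init(&buf);
grpc_slice_buffer_add(&buf, grpc_slice_from_copied_string("hello"));
grpc_slice_buffer_stream stream;
grpc_slice_buffer_stream_init(&stream, &buf, 0 /* flags */);
/* ... consume via grpc_byte_stream_next / grpc_byte_stream_pull ... */
grpc_byte_stream_destroy(exec_ctx, &stream.base);
/* buf.count is now 0: destroy unreffed the slices via
   grpc_slice_buffer_reset_and_unref_internal above */
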
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index 73a9178ae2..f328a6cdbb 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -148,7 +148,8 @@ bool grpc_connectivity_state_notify_on_state_change(
GRPC_CLOSURE_SCHED(exec_ctx, notify,
GRPC_ERROR_REF(tracker->current_error));
} else {
- grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
+ grpc_connectivity_state_watcher *w =
+ (grpc_connectivity_state_watcher *)gpr_malloc(sizeof(*w));
w->current = current;
w->notify = notify;
w->next = tracker->watchers;
diff --git a/src/core/lib/transport/metadata.c b/src/core/lib/transport/metadata.c
index 2fea366072..188b485625 100644
--- a/src/core/lib/transport/metadata.c
+++ b/src/core/lib/transport/metadata.c
@@ -117,7 +117,8 @@ void grpc_mdctx_global_init(void) {
shard->count = 0;
gpr_atm_no_barrier_store(&shard->free_estimate, 0);
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->elems = gpr_zalloc(sizeof(*shard->elems) * shard->capacity);
+ shard->elems = (interned_metadata **)gpr_zalloc(sizeof(*shard->elems) *
+ shard->capacity);
}
}
@@ -204,7 +205,8 @@ static void grow_mdtab(mdtab_shard *shard) {
GPR_TIMER_BEGIN("grow_mdtab", 0);
- mdtab = gpr_zalloc(sizeof(interned_metadata *) * capacity);
+ mdtab =
+ (interned_metadata **)gpr_zalloc(sizeof(interned_metadata *) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (md = shard->elems[i]; md; md = next) {
@@ -243,7 +245,8 @@ grpc_mdelem grpc_mdelem_create(
GRPC_MDELEM_STORAGE_EXTERNAL);
}
- allocated_metadata *allocated = gpr_malloc(sizeof(*allocated));
+ allocated_metadata *allocated =
+ (allocated_metadata *)gpr_malloc(sizeof(*allocated));
allocated->key = grpc_slice_ref_internal(key);
allocated->value = grpc_slice_ref_internal(value);
gpr_atm_rel_store(&allocated->refcnt, 1);
@@ -292,7 +295,7 @@ grpc_mdelem grpc_mdelem_create(
}
/* not found: create a new pair */
- md = gpr_malloc(sizeof(interned_metadata));
+ md = (interned_metadata *)gpr_malloc(sizeof(interned_metadata));
gpr_atm_rel_store(&md->refcnt, 1);
md->key = grpc_slice_ref_internal(key);
md->value = grpc_slice_ref_internal(value);
diff --git a/src/core/lib/transport/metadata_batch.c b/src/core/lib/transport/metadata_batch.c
index 8f24b8527c..54388bdcda 100644
--- a/src/core/lib/transport/metadata_batch.c
+++ b/src/core/lib/transport/metadata_batch.c
@@ -105,6 +105,7 @@ static grpc_error *maybe_link_callout(grpc_metadata_batch *batch,
return GRPC_ERROR_NONE;
}
if (batch->idx.array[idx] == NULL) {
+ if (grpc_static_callout_is_default[idx]) ++batch->list.default_count;
batch->idx.array[idx] = storage;
return GRPC_ERROR_NONE;
}
@@ -120,6 +121,7 @@ static void maybe_unlink_callout(grpc_metadata_batch *batch,
if (idx == GRPC_BATCH_CALLOUTS_COUNT) {
return;
}
+ if (grpc_static_callout_is_default[idx]) --batch->list.default_count;
GPR_ASSERT(batch->idx.array[idx] != NULL);
batch->idx.array[idx] = NULL;
}
@@ -231,32 +233,32 @@ void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx,
void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
grpc_linked_mdelem *storage,
grpc_slice value) {
- grpc_mdelem old = storage->md;
- grpc_mdelem new = grpc_mdelem_from_slices(
- exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old)), value);
- storage->md = new;
- GRPC_MDELEM_UNREF(exec_ctx, old);
+ grpc_mdelem old_mdelem = storage->md;
+ grpc_mdelem new_mdelem = grpc_mdelem_from_slices(
+ exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value);
+ storage->md = new_mdelem;
+ GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
}
grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx,
grpc_metadata_batch *batch,
grpc_linked_mdelem *storage,
- grpc_mdelem new) {
+ grpc_mdelem new_mdelem) {
assert_valid_callouts(exec_ctx, batch);
grpc_error *error = GRPC_ERROR_NONE;
- grpc_mdelem old = storage->md;
- if (!grpc_slice_eq(GRPC_MDKEY(new), GRPC_MDKEY(old))) {
+ grpc_mdelem old_mdelem = storage->md;
+ if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) {
maybe_unlink_callout(batch, storage);
- storage->md = new;
+ storage->md = new_mdelem;
error = maybe_link_callout(batch, storage);
if (error != GRPC_ERROR_NONE) {
unlink_storage(&batch->list, storage);
GRPC_MDELEM_UNREF(exec_ctx, storage->md);
}
} else {
- storage->md = new;
+ storage->md = new_mdelem;
}
- GRPC_MDELEM_UNREF(exec_ctx, old);
+ GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
assert_valid_callouts(exec_ctx, batch);
return error;
}
@@ -300,12 +302,12 @@ grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx,
grpc_error *error = GRPC_ERROR_NONE;
while (l) {
grpc_linked_mdelem *next = l->next;
- grpc_filtered_mdelem new = func(exec_ctx, user_data, l->md);
- add_error(&error, new.error, composite_error_string);
- if (GRPC_MDISNULL(new.md)) {
+ grpc_filtered_mdelem new_mdelem = func(exec_ctx, user_data, l->md);
+ add_error(&error, new_mdelem.error, composite_error_string);
+ if (GRPC_MDISNULL(new_mdelem.md)) {
grpc_metadata_batch_remove(exec_ctx, batch, l);
- } else if (new.md.payload != l->md.payload) {
- grpc_metadata_batch_substitute(exec_ctx, batch, l, new.md);
+ } else if (new_mdelem.md.payload != l->md.payload) {
+ grpc_metadata_batch_substitute(exec_ctx, batch, l, new_mdelem.md);
}
l = next;
}
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 1b11a3e252..57d298c75c 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -41,6 +41,7 @@ typedef struct grpc_linked_mdelem {
typedef struct grpc_mdelem_list {
size_t count;
+ size_t default_count; // Number of default keys.
grpc_linked_mdelem *head;
grpc_linked_mdelem *tail;
} grpc_mdelem_list;
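
A hedged reading of the new field: maybe_link_callout/maybe_unlink_callout above adjust it whenever a linked callout's key is flagged in grpc_static_callout_is_default, so an assumed (not stated in the patch) invariant is:

/* default_count counts a subset of the linked elements */
GPR_ASSERT(batch->list.default_count <= batch->list.count);
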
diff --git a/src/core/lib/transport/service_config.c b/src/core/lib/transport/service_config.c
index 0379d0010d..070a13a2b4 100644
--- a/src/core/lib/transport/service_config.c
+++ b/src/core/lib/transport/service_config.c
@@ -59,7 +59,8 @@ struct grpc_service_config {
};
grpc_service_config* grpc_service_config_create(const char* json_string) {
- grpc_service_config* service_config = gpr_malloc(sizeof(*service_config));
+ grpc_service_config* service_config =
+ (grpc_service_config*)gpr_malloc(sizeof(*service_config));
service_config->json_string = gpr_strdup(json_string);
service_config->json_tree =
grpc_json_parse_string(service_config->json_string);
@@ -198,7 +199,8 @@ grpc_slice_hash_table* grpc_service_config_create_method_config_table(
num_entries += count_names_in_method_config_json(method);
}
// Populate method config table entries.
- entries = gpr_malloc(num_entries * sizeof(grpc_slice_hash_table_entry));
+ entries = (grpc_slice_hash_table_entry*)gpr_malloc(
+ num_entries * sizeof(grpc_slice_hash_table_entry));
size_t idx = 0;
for (grpc_json* method = field->child; method != NULL;
method = method->next) {
@@ -230,7 +232,7 @@ void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
char* path_str = grpc_slice_to_c_string(path);
const char* sep = strrchr(path_str, '/') + 1;
const size_t len = (size_t)(sep - path_str);
- char* buf = gpr_malloc(len + 2); // '*' and NUL
+ char* buf = (char*)gpr_malloc(len + 2); // '*' and NUL
memcpy(buf, path_str, len);
buf[len] = '*';
buf[len + 1] = '\0';
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 2388f19f81..472cf888ea 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -40,65 +40,68 @@ static uint8_t g_bytes[] = {
114, 45, 115, 116, 97, 116, 115, 45, 98, 105, 110, 103, 114, 112, 99,
45, 116, 97, 103, 115, 45, 98, 105, 110, 103, 114, 112, 99, 45, 116,
114, 97, 99, 101, 45, 98, 105, 110, 99, 111, 110, 116, 101, 110, 116,
- 45, 116, 121, 112, 101, 103, 114, 112, 99, 45, 105, 110, 116, 101, 114,
- 110, 97, 108, 45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114, 101,
- 113, 117, 101, 115, 116, 117, 115, 101, 114, 45, 97, 103, 101, 110, 116,
- 104, 111, 115, 116, 108, 98, 45, 116, 111, 107, 101, 110, 103, 114, 112,
- 99, 45, 116, 105, 109, 101, 111, 117, 116, 103, 114, 112, 99, 46, 119,
- 97, 105, 116, 95, 102, 111, 114, 95, 114, 101, 97, 100, 121, 103, 114,
- 112, 99, 46, 116, 105, 109, 101, 111, 117, 116, 103, 114, 112, 99, 46,
- 109, 97, 120, 95, 114, 101, 113, 117, 101, 115, 116, 95, 109, 101, 115,
- 115, 97, 103, 101, 95, 98, 121, 116, 101, 115, 103, 114, 112, 99, 46,
- 109, 97, 120, 95, 114, 101, 115, 112, 111, 110, 115, 101, 95, 109, 101,
- 115, 115, 97, 103, 101, 95, 98, 121, 116, 101, 115, 47, 103, 114, 112,
- 99, 46, 108, 98, 46, 118, 49, 46, 76, 111, 97, 100, 66, 97, 108,
- 97, 110, 99, 101, 114, 47, 66, 97, 108, 97, 110, 99, 101, 76, 111,
- 97, 100, 48, 49, 50, 105, 100, 101, 110, 116, 105, 116, 121, 103, 122,
- 105, 112, 100, 101, 102, 108, 97, 116, 101, 116, 114, 97, 105, 108, 101,
- 114, 115, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 103,
- 114, 112, 99, 80, 79, 83, 84, 50, 48, 48, 52, 48, 52, 104, 116,
- 116, 112, 104, 116, 116, 112, 115, 103, 114, 112, 99, 71, 69, 84, 80,
- 85, 84, 47, 47, 105, 110, 100, 101, 120, 46, 104, 116, 109, 108, 50,
- 48, 52, 50, 48, 54, 51, 48, 52, 52, 48, 48, 53, 48, 48, 97,
- 99, 99, 101, 112, 116, 45, 99, 104, 97, 114, 115, 101, 116, 97, 99,
- 99, 101, 112, 116, 45, 101, 110, 99, 111, 100, 105, 110, 103, 103, 122,
- 105, 112, 44, 32, 100, 101, 102, 108, 97, 116, 101, 97, 99, 99, 101,
- 112, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 97, 99, 99, 101,
- 112, 116, 45, 114, 97, 110, 103, 101, 115, 97, 99, 99, 101, 112, 116,
- 97, 99, 99, 101, 115, 115, 45, 99, 111, 110, 116, 114, 111, 108, 45,
- 97, 108, 108, 111, 119, 45, 111, 114, 105, 103, 105, 110, 97, 103, 101,
- 97, 108, 108, 111, 119, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116,
- 105, 111, 110, 99, 97, 99, 104, 101, 45, 99, 111, 110, 116, 114, 111,
- 108, 99, 111, 110, 116, 101, 110, 116, 45, 100, 105, 115, 112, 111, 115,
- 105, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, 45, 101, 110,
- 99, 111, 100, 105, 110, 103, 99, 111, 110, 116, 101, 110, 116, 45, 108,
- 97, 110, 103, 117, 97, 103, 101, 99, 111, 110, 116, 101, 110, 116, 45,
- 108, 101, 110, 103, 116, 104, 99, 111, 110, 116, 101, 110, 116, 45, 108,
- 111, 99, 97, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, 45,
- 114, 97, 110, 103, 101, 99, 111, 111, 107, 105, 101, 100, 97, 116, 101,
- 101, 116, 97, 103, 101, 120, 112, 101, 99, 116, 101, 120, 112, 105, 114,
- 101, 115, 102, 114, 111, 109, 105, 102, 45, 109, 97, 116, 99, 104, 105,
- 102, 45, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, 110, 99,
- 101, 105, 102, 45, 110, 111, 110, 101, 45, 109, 97, 116, 99, 104, 105,
- 102, 45, 114, 97, 110, 103, 101, 105, 102, 45, 117, 110, 109, 111, 100,
- 105, 102, 105, 101, 100, 45, 115, 105, 110, 99, 101, 108, 97, 115, 116,
- 45, 109, 111, 100, 105, 102, 105, 101, 100, 108, 98, 45, 99, 111, 115,
- 116, 45, 98, 105, 110, 108, 105, 110, 107, 108, 111, 99, 97, 116, 105,
- 111, 110, 109, 97, 120, 45, 102, 111, 114, 119, 97, 114, 100, 115, 112,
- 114, 111, 120, 121, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97,
- 116, 101, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, 111, 114, 105,
- 122, 97, 116, 105, 111, 110, 114, 97, 110, 103, 101, 114, 101, 102, 101,
- 114, 101, 114, 114, 101, 102, 114, 101, 115, 104, 114, 101, 116, 114, 121,
- 45, 97, 102, 116, 101, 114, 115, 101, 114, 118, 101, 114, 115, 101, 116,
- 45, 99, 111, 111, 107, 105, 101, 115, 116, 114, 105, 99, 116, 45, 116,
- 114, 97, 110, 115, 112, 111, 114, 116, 45, 115, 101, 99, 117, 114, 105,
- 116, 121, 116, 114, 97, 110, 115, 102, 101, 114, 45, 101, 110, 99, 111,
- 100, 105, 110, 103, 118, 97, 114, 121, 118, 105, 97, 119, 119, 119, 45,
- 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 105, 100, 101,
- 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, 105, 100,
- 101, 110, 116, 105, 116, 121, 44, 103, 122, 105, 112, 100, 101, 102, 108,
- 97, 116, 101, 44, 103, 122, 105, 112, 105, 100, 101, 110, 116, 105, 116,
- 121, 44, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112};
+ 45, 116, 121, 112, 101, 99, 111, 110, 116, 101, 110, 116, 45, 101, 110,
+ 99, 111, 100, 105, 110, 103, 97, 99, 99, 101, 112, 116, 45, 101, 110,
+ 99, 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 105, 110, 116, 101,
+ 114, 110, 97, 108, 45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114,
+ 101, 113, 117, 101, 115, 116, 103, 114, 112, 99, 45, 105, 110, 116, 101,
+ 114, 110, 97, 108, 45, 115, 116, 114, 101, 97, 109, 45, 101, 110, 99,
+ 111, 100, 105, 110, 103, 45, 114, 101, 113, 117, 101, 115, 116, 117, 115,
+ 101, 114, 45, 97, 103, 101, 110, 116, 104, 111, 115, 116, 108, 98, 45,
+ 116, 111, 107, 101, 110, 103, 114, 112, 99, 45, 116, 105, 109, 101, 111,
+ 117, 116, 103, 114, 112, 99, 46, 119, 97, 105, 116, 95, 102, 111, 114,
+ 95, 114, 101, 97, 100, 121, 103, 114, 112, 99, 46, 116, 105, 109, 101,
+ 111, 117, 116, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 113,
+ 117, 101, 115, 116, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98, 121,
+ 116, 101, 115, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 115,
+ 112, 111, 110, 115, 101, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98,
+ 121, 116, 101, 115, 47, 103, 114, 112, 99, 46, 108, 98, 46, 118, 49,
+ 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99, 101, 114, 47, 66,
+ 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 48, 49, 50, 105, 100,
+ 101, 110, 116, 105, 116, 121, 103, 122, 105, 112, 100, 101, 102, 108, 97,
+ 116, 101, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, 105,
+ 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 80, 79, 83, 84,
+ 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104, 116, 116, 112, 115,
+ 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47, 47, 105, 110, 100,
+ 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50, 48, 54, 51, 48,
+ 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101, 112, 116, 45, 99,
+ 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44, 32, 100, 101, 102,
+ 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, 103,
+ 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, 103,
+ 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, 45,
+ 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, 111,
+ 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, 117,
+ 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, 104,
+ 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, 110,
+ 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, 111,
+ 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 99,
+ 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99, 111,
+ 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 99,
+ 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111, 111,
+ 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112, 101,
+ 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105, 102,
+ 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102, 105,
+ 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110, 101,
+ 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101, 105,
+ 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105,
+ 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105, 101,
+ 100, 108, 98, 45, 99, 111, 115, 116, 45, 98, 105, 110, 108, 105, 110,
+ 107, 108, 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, 111,
+ 114, 119, 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, 116,
+ 104, 101, 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, 45,
+ 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, 97,
+ 110, 103, 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, 101,
+ 115, 104, 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, 101,
+ 114, 118, 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, 115,
+ 116, 114, 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, 116,
+ 45, 115, 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, 102,
+ 101, 114, 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, 121,
+ 118, 105, 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, 105,
+ 99, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101,
+ 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 103,
+ 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112,
+ 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116,
+ 101, 44, 103, 122, 105, 112};
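(Editor's note: the `+` byte runs above are just the ASCII codes of the static metadata strings concatenated into one blob; the newly inserted bytes splice "content-encoding", "accept-encoding", "grpc-internal-encoding-request" and "grpc-internal-stream-encoding-request" into the middle of it. Each entry of grpc_static_slice_table below is then an (offset, length) view into that shared buffer, which is why every offset after the insertion point shifts. A minimal standalone sketch of that layout, using hypothetical names (my_bytes, my_slice) rather than the real grpc_slice machinery:

#include <stdio.h>

/* Hypothetical miniature of the generated layout: all strings share one
 * backing buffer; a "slice" is just an (offset, length) view into it. */
static const char my_bytes[] = ":path:method:status"; /* concatenated blob */

typedef struct {
  size_t offset;
  size_t length;
} my_slice;

static const my_slice my_slice_table[] = {
    {0, 5},  /* ":path"   */
    {5, 7},  /* ":method" */
    {12, 7}, /* ":status" */
};

int main(void) {
  for (size_t i = 0; i < sizeof(my_slice_table) / sizeof(my_slice_table[0]);
       i++) {
    const my_slice *s = &my_slice_table[i];
    /* %.*s prints exactly s->length bytes starting at the slice's offset. */
    printf("slice %zu = \"%.*s\"\n", i, (int)s->length, my_bytes + s->offset);
  }
  return 0;
}

Inserting a string into the blob never breaks earlier slices, only the generated offsets after it, which matches the shape of this diff.)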
static void static_ref(void *unused) {}
static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {}
@@ -209,227 +212,129 @@ grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
};
const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
- {.refcount = &grpc_static_metadata_refcounts[0],
- .data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[3],
- .data.refcounted = {g_bytes + 19, 10}},
- {.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[5],
- .data.refcounted = {g_bytes + 36, 2}},
- {.refcount = &grpc_static_metadata_refcounts[6],
- .data.refcounted = {g_bytes + 38, 12}},
- {.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[8],
- .data.refcounted = {g_bytes + 61, 16}},
- {.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[11],
- .data.refcounted = {g_bytes + 110, 21}},
- {.refcount = &grpc_static_metadata_refcounts[12],
- .data.refcounted = {g_bytes + 131, 13}},
- {.refcount = &grpc_static_metadata_refcounts[13],
- .data.refcounted = {g_bytes + 144, 14}},
- {.refcount = &grpc_static_metadata_refcounts[14],
- .data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 30}},
- {.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 200, 10}},
- {.refcount = &grpc_static_metadata_refcounts[17],
- .data.refcounted = {g_bytes + 210, 4}},
- {.refcount = &grpc_static_metadata_refcounts[18],
- .data.refcounted = {g_bytes + 214, 8}},
- {.refcount = &grpc_static_metadata_refcounts[19],
- .data.refcounted = {g_bytes + 222, 12}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}},
- {.refcount = &grpc_static_metadata_refcounts[21],
- .data.refcounted = {g_bytes + 234, 19}},
- {.refcount = &grpc_static_metadata_refcounts[22],
- .data.refcounted = {g_bytes + 253, 12}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 265, 30}},
- {.refcount = &grpc_static_metadata_refcounts[24],
- .data.refcounted = {g_bytes + 295, 31}},
- {.refcount = &grpc_static_metadata_refcounts[25],
- .data.refcounted = {g_bytes + 326, 36}},
- {.refcount = &grpc_static_metadata_refcounts[26],
- .data.refcounted = {g_bytes + 362, 1}},
- {.refcount = &grpc_static_metadata_refcounts[27],
- .data.refcounted = {g_bytes + 363, 1}},
- {.refcount = &grpc_static_metadata_refcounts[28],
- .data.refcounted = {g_bytes + 364, 1}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 365, 8}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 373, 4}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 377, 7}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 384, 8}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 392, 16}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 408, 4}},
- {.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 412, 3}},
- {.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 415, 3}},
- {.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 418, 4}},
- {.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 422, 5}},
- {.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 427, 4}},
- {.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 431, 3}},
- {.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 434, 3}},
- {.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 437, 1}},
- {.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 438, 11}},
- {.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 449, 3}},
- {.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 452, 3}},
- {.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 455, 3}},
- {.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 458, 3}},
- {.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 461, 3}},
- {.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 464, 14}},
- {.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 478, 15}},
- {.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 493, 13}},
- {.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 506, 15}},
- {.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 521, 13}},
- {.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 534, 6}},
- {.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 540, 27}},
- {.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 567, 3}},
- {.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 570, 5}},
- {.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 575, 13}},
- {.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 588, 13}},
- {.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 601, 19}},
- {.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 620, 16}},
- {.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 636, 16}},
- {.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 652, 14}},
- {.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 666, 16}},
- {.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 682, 13}},
- {.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 695, 6}},
- {.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 701, 4}},
- {.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 705, 4}},
- {.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 709, 6}},
- {.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 715, 7}},
- {.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 722, 4}},
- {.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 726, 8}},
- {.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 734, 17}},
- {.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 751, 13}},
- {.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 764, 8}},
- {.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 772, 19}},
- {.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 791, 13}},
- {.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 804, 11}},
- {.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 815, 4}},
- {.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 819, 8}},
- {.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 827, 12}},
- {.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 839, 18}},
- {.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 857, 19}},
- {.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 876, 5}},
- {.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 881, 7}},
- {.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 888, 7}},
- {.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 895, 11}},
- {.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 906, 6}},
- {.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 912, 10}},
- {.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 922, 25}},
- {.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 947, 17}},
- {.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 964, 4}},
- {.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 968, 3}},
- {.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 971, 16}},
- {.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 987, 16}},
- {.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 1003, 13}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1016, 12}},
- {.refcount = &grpc_static_metadata_refcounts[98],
- .data.refcounted = {g_bytes + 1028, 21}},
+ {&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
+ {&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
+ {&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
+ {&grpc_static_metadata_refcounts[6], {{g_bytes + 38, 12}}},
+ {&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[8], {{g_bytes + 61, 16}}},
+ {&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[11], {{g_bytes + 110, 21}}},
+ {&grpc_static_metadata_refcounts[12], {{g_bytes + 131, 13}}},
+ {&grpc_static_metadata_refcounts[13], {{g_bytes + 144, 14}}},
+ {&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
+ {&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[17], {{g_bytes + 201, 30}}},
+ {&grpc_static_metadata_refcounts[18], {{g_bytes + 231, 37}}},
+ {&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
+ {&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
+ {&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
+ {&grpc_static_metadata_refcounts[22], {{g_bytes + 290, 12}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}},
+ {&grpc_static_metadata_refcounts[24], {{g_bytes + 302, 19}}},
+ {&grpc_static_metadata_refcounts[25], {{g_bytes + 321, 12}}},
+ {&grpc_static_metadata_refcounts[26], {{g_bytes + 333, 30}}},
+ {&grpc_static_metadata_refcounts[27], {{g_bytes + 363, 31}}},
+ {&grpc_static_metadata_refcounts[28], {{g_bytes + 394, 36}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}},
+ {&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}},
+ {&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}},
+ {&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 495, 4}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}},
+ {&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}},
+ {&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}},
+ {&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}},
+ {&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}},
+ {&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}},
+ {&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}},
+ {&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}},
+ {&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}},
+ {&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}},
+ {&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}},
+ {&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}},
+ {&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}},
+ {&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}},
+ {&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}},
+ {&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}},
+ {&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}},
+ {&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}},
+ {&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}},
+ {&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}},
+ {&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}},
+ {&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}},
+ {&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}},
+ {&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}},
+ {&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}},
+ {&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}},
+ {&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}},
+ {&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}},
+ {&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}},
+ {&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}},
+ {&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}},
+ {&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}},
+ {&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}},
+ {&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}},
+ {&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}},
+ {&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}},
+ {&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}},
+ {&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}},
+ {&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}},
+ {&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}},
+ {&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}},
+ {&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}},
+ {&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}},
+ {&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1053, 12}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}},
};
uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8};
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4};
static const int8_t elems_r[] = {
- 10, 8, -3, 0, 9, 21, -77, 22, 0, 10, -7, 0, 0, 0,
- 14, 0, 13, 12, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, -50, -51, 16, -53, -54, -55, -56,
- -56, -57, -58, -59, 0, 37, 36, 35, 34, 33, 32, 31, 30, 29,
- 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
- 14, 13, 12, 11, 10, 13, 12, 11, 10, 9, 8, 7, 0};
+ 11, 9, -3, 0, 10, 27, -74, 28, 0, 14, -7, 0, 0, 0, 18, 8, -2,
+ 0, 0, 13, 12, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, -50, 0, -33, -55, -56, -57, -58, -57, 0, 40, 39, 38, 37, 36, 35, 34,
+ 33, 32, 31, 30, 29, 28, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 22,
+ 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 12, 11, 0};
static uint32_t elems_phash(uint32_t i) {
- i -= 42;
- uint32_t x = i % 97;
- uint32_t y = i / 97;
+ i -= 45;
+ uint32_t x = i % 98;
+ uint32_t y = i / 98;
uint32_t h = x;
if (y < GPR_ARRAY_SIZE(elems_r)) {
uint32_t delta = (uint32_t)elems_r[y];
@@ -439,30 +344,31 @@ static uint32_t elems_phash(uint32_t i) {
}
static const uint16_t elem_keys[] = {
- 1019, 1020, 1021, 242, 243, 244, 245, 246, 139, 140, 42, 43,
- 433, 434, 435, 920, 921, 922, 719, 720, 1406, 527, 721, 1604,
- 1703, 1802, 4871, 4970, 5001, 5168, 5267, 5366, 5465, 1419, 5564, 5663,
- 5762, 5861, 5960, 6059, 6158, 6257, 6356, 6455, 6554, 6653, 6752, 6851,
- 6950, 7049, 7148, 7247, 7346, 7445, 7544, 7643, 7742, 7841, 7940, 8039,
- 8138, 8237, 8336, 8435, 8534, 8633, 1085, 1086, 1087, 1088, 8732, 8831,
- 8930, 9029, 9128, 9227, 9326, 0, 317, 0, 0, 0, 0, 0,
+ 1032, 1033, 1034, 247, 248, 249, 250, 251, 1623, 143, 144, 45,
+ 46, 440, 441, 442, 1523, 1632, 1633, 932, 933, 934, 729, 730,
+ 1423, 1532, 1533, 535, 731, 1923, 2023, 2123, 5223, 5523, 5623, 5723,
+ 5823, 1436, 1653, 5923, 6023, 6123, 6223, 6323, 6423, 6523, 6623, 6723,
+ 6823, 6923, 7023, 7123, 7223, 5423, 7323, 7423, 7523, 7623, 7723, 7823,
+ 7923, 8023, 8123, 8223, 1096, 1097, 1098, 1099, 8323, 8423, 8523, 8623,
+ 8723, 8823, 8923, 9023, 9123, 9223, 9323, 323, 9423, 9523, 1697, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 133, 233, 234, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 137, 238, 239, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0};
+ 0, 0, 0, 0, 0};
static const uint8_t elem_idxs[] = {
- 74, 77, 75, 19, 20, 21, 22, 23, 15, 16, 17, 18, 11, 12, 13,
- 3, 4, 5, 0, 1, 41, 6, 2, 70, 48, 55, 24, 25, 26, 27,
- 28, 29, 30, 7, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42,
- 43, 44, 45, 46, 47, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59,
- 60, 61, 62, 63, 64, 65, 76, 78, 79, 80, 66, 67, 68, 69, 71,
- 72, 73, 255, 14, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 8, 9, 10};
+ 76, 79, 77, 19, 20, 21, 22, 23, 25, 15, 16, 17, 18, 11,
+ 12, 13, 38, 83, 84, 3, 4, 5, 0, 1, 43, 36, 37, 6,
+ 2, 72, 50, 57, 24, 28, 29, 30, 31, 7, 26, 32, 33, 34,
+ 35, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 27, 51, 52,
+ 53, 54, 55, 56, 58, 59, 60, 61, 78, 80, 81, 82, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 73, 14, 74, 75, 85, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 8, 9, 10};
grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
if (a == -1 || b == -1) return GRPC_MDNULL;
- uint32_t k = (uint32_t)(a * 99 + b);
+ uint32_t k = (uint32_t)(a * 100 + b);
uint32_t h = elems_phash(k);
return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k &&
elem_idxs[h] != 255
@@ -472,330 +378,205 @@ grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
}
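(Editor's note: grpc_static_mdelem_for_static_strings packs the two string indices into one key, now a * 100 + b because GRPC_STATIC_MDSTR_COUNT grew to 100, then resolves it through the displacement-based perfect hash above: elems_phash subtracts the smallest key, splits the remainder into (x, y), and adds a per-row displacement from elems_r; elem_keys confirms the hit and elem_idxs == 255 marks an empty bucket. A self-contained miniature of the same scheme, with invented tables sized for four keys, purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Tiny displacement-based perfect hash mirroring elems_phash: split the key
 * into (x, y); y selects a displacement that is added to x. The tables are
 * invented for this example only. */
static const int8_t r[] = {0, 0, 2};

static uint32_t phash(uint32_t i) {
  uint32_t x = i % 4;
  uint32_t y = i / 4;
  uint32_t h = x;
  if (y < ARRAY_SIZE(r)) h += (uint32_t)r[y];
  return h;
}

/* keys[h] confirms the hash actually hit this key; idxs[h] == 255 marks an
 * empty bucket, exactly like elem_idxs above. */
static const uint16_t keys[] = {0, 5, 6, 9, 10};
static const uint8_t idxs[] = {255, 0, 1, 2, 3};

static int lookup(uint32_t a, uint32_t b) {
  uint32_t k = a * 4 + b; /* pack the index pair, like a * 100 + b above */
  uint32_t h = phash(k);
  if (h < ARRAY_SIZE(keys) && keys[h] == k && idxs[h] != 255)
    return idxs[h];
  return -1; /* no precomputed element for this pair */
}

int main(void) {
  printf("(1,1)->%d (1,2)->%d (2,1)->%d (2,2)->%d (0,0)->%d\n",
         lookup(1, 1), lookup(1, 2), lookup(2, 1), lookup(2, 2),
         lookup(0, 0));
  return 0;
}

The generator rebuilds elems_r, elem_keys and elem_idxs whenever the static tables change, which is why all three churn in this diff even though the lookup code itself barely moves.)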
grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
- {{.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[26],
- .data.refcounted = {g_bytes + 362, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[27],
- .data.refcounted = {g_bytes + 363, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[28],
- .data.refcounted = {g_bytes + 364, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 365, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 373, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 377, 7}}},
- {{.refcount = &grpc_static_metadata_refcounts[5],
- .data.refcounted = {g_bytes + 36, 2}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 384, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[14],
- .data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 392, 16}}},
- {{.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 408, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 412, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 415, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 418, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 422, 5}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 427, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[3],
- .data.refcounted = {g_bytes + 19, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 431, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 434, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[0],
- .data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 437, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[0],
- .data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 438, 11}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 449, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 452, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 455, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 458, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 461, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 464, 14}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 478, 15}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 478, 15}},
- {.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 493, 13}}},
- {{.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 506, 15}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 521, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 534, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 540, 27}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 567, 3}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 570, 5}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 575, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 588, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 601, 19}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 620, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 636, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 652, 14}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 666, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 682, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[14],
- .data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 695, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 701, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 705, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 709, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 715, 7}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 722, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[17],
- .data.refcounted = {g_bytes + 210, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 726, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 734, 17}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 751, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 764, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 772, 19}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 791, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[18],
- .data.refcounted = {g_bytes + 214, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 804, 11}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 815, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 819, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 827, 12}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 839, 18}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 857, 19}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 876, 5}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 881, 7}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 888, 7}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 895, 11}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 906, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 912, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 922, 25}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 947, 17}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 200, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 964, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 968, 3}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 971, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 365, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 377, 7}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 987, 16}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 373, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 1003, 13}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1016, 12}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[98],
- .data.refcounted = {g_bytes + 1028, 21}}},
+ {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}}},
+ {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}}},
+ {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}}},
+ {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}},
+ {{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
+ {&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}}},
+ {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}}},
+ {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}}},
+ {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}}},
+ {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}}},
+ {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 495, 4}}}},
+ {{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}}},
+ {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}}},
+ {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}}},
+ {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}}},
+ {{&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}}},
+ {{&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1053, 12}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}},
+};
+bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {
+ true, // :path
+ true, // :method
+ true, // :status
+ true, // :authority
+ true, // :scheme
+ true, // te
+ true, // grpc-message
+ true, // grpc-status
+ true, // grpc-payload-bin
+ true, // grpc-encoding
+ true, // grpc-accept-encoding
+ true, // grpc-server-stats-bin
+ true, // grpc-tags-bin
+ true, // grpc-trace-bin
+ true, // content-type
+ true, // content-encoding
+ true, // accept-encoding
+ true, // grpc-internal-encoding-request
+ true, // grpc-internal-stream-encoding-request
+ true, // user-agent
+ true, // host
+ true, // lb-token
};
-const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 74, 75, 76,
- 77, 78, 79, 80};
+
+const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78,
+ 79, 80, 81, 82};
+
+const uint8_t grpc_static_accept_stream_encoding_metadata[4] = {0, 83, 84, 85};
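(Editor's note: the two arrays above appear to be indexed by the bitset of accepted compression algorithms, yielding the offset of a prebuilt accept-encoding element in grpc_static_mdelem_table; that reading of the indexing, and the algorithm bit positions below, are assumptions made for illustration, not confirmed by this diff. A sketch under those assumptions:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions; the real enum is grpc_compression_algorithm but the
 * exact values are not shown in this diff. */
enum { ALGO_IDENTITY = 0, ALGO_DEFLATE = 1, ALGO_GZIP = 2 };

/* Indexed by the 3-bit set of accepted algorithms; each entry names the
 * static mdelem whose value lists exactly those algorithms. The numbers
 * mirror the generated table above but are placeholders here. */
static const uint8_t accept_encoding_elem[8] = {0, 76, 77, 78, 79, 80, 81, 82};

int main(void) {
  uint32_t accepted = (1u << ALGO_IDENTITY) | (1u << ALGO_GZIP); /* 0b101 */
  uint8_t elem = accept_encoding_elem[accepted & 7u];
  /* In gRPC proper this index would select an entry of
   * grpc_static_mdelem_table; here we only print it. */
  printf("bitset %u -> static mdelem index %u\n", accepted, elem);
  return 0;
}

The new 4-entry grpc_static_accept_stream_encoding_metadata array plays the same role for the stream-encoding case, which only distinguishes identity and gzip, hence 2 bits and 4 entries.)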
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index baa86de142..f03a9d23b1 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -29,7 +29,7 @@
#include "src/core/lib/transport/metadata.h"
-#define GRPC_STATIC_MDSTR_COUNT 99
+#define GRPC_STATIC_MDSTR_COUNT 100
extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
/* ":path" */
#define GRPC_MDSTR_PATH (grpc_static_slice_table[0])
@@ -61,178 +61,181 @@ extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
#define GRPC_MDSTR_GRPC_TRACE_BIN (grpc_static_slice_table[13])
/* "content-type" */
#define GRPC_MDSTR_CONTENT_TYPE (grpc_static_slice_table[14])
+/* "content-encoding" */
+#define GRPC_MDSTR_CONTENT_ENCODING (grpc_static_slice_table[15])
+/* "accept-encoding" */
+#define GRPC_MDSTR_ACCEPT_ENCODING (grpc_static_slice_table[16])
/* "grpc-internal-encoding-request" */
-#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (grpc_static_slice_table[15])
+#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (grpc_static_slice_table[17])
+/* "grpc-internal-stream-encoding-request" */
+#define GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST \
+ (grpc_static_slice_table[18])
/* "user-agent" */
-#define GRPC_MDSTR_USER_AGENT (grpc_static_slice_table[16])
+#define GRPC_MDSTR_USER_AGENT (grpc_static_slice_table[19])
/* "host" */
-#define GRPC_MDSTR_HOST (grpc_static_slice_table[17])
+#define GRPC_MDSTR_HOST (grpc_static_slice_table[20])
/* "lb-token" */
-#define GRPC_MDSTR_LB_TOKEN (grpc_static_slice_table[18])
+#define GRPC_MDSTR_LB_TOKEN (grpc_static_slice_table[21])
/* "grpc-timeout" */
-#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[19])
+#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[22])
/* "" */
-#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[20])
+#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[23])
/* "grpc.wait_for_ready" */
-#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[21])
+#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[24])
/* "grpc.timeout" */
-#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[22])
+#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[25])
/* "grpc.max_request_message_bytes" */
#define GRPC_MDSTR_GRPC_DOT_MAX_REQUEST_MESSAGE_BYTES \
- (grpc_static_slice_table[23])
+ (grpc_static_slice_table[26])
/* "grpc.max_response_message_bytes" */
#define GRPC_MDSTR_GRPC_DOT_MAX_RESPONSE_MESSAGE_BYTES \
- (grpc_static_slice_table[24])
+ (grpc_static_slice_table[27])
/* "/grpc.lb.v1.LoadBalancer/BalanceLoad" */
#define GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD \
- (grpc_static_slice_table[25])
+ (grpc_static_slice_table[28])
/* "0" */
-#define GRPC_MDSTR_0 (grpc_static_slice_table[26])
+#define GRPC_MDSTR_0 (grpc_static_slice_table[29])
/* "1" */
-#define GRPC_MDSTR_1 (grpc_static_slice_table[27])
+#define GRPC_MDSTR_1 (grpc_static_slice_table[30])
/* "2" */
-#define GRPC_MDSTR_2 (grpc_static_slice_table[28])
+#define GRPC_MDSTR_2 (grpc_static_slice_table[31])
/* "identity" */
-#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[29])
+#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[32])
/* "gzip" */
-#define GRPC_MDSTR_GZIP (grpc_static_slice_table[30])
+#define GRPC_MDSTR_GZIP (grpc_static_slice_table[33])
/* "deflate" */
-#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[31])
+#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[34])
/* "trailers" */
-#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[32])
+#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[35])
/* "application/grpc" */
-#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[33])
+#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[36])
/* "POST" */
-#define GRPC_MDSTR_POST (grpc_static_slice_table[34])
+#define GRPC_MDSTR_POST (grpc_static_slice_table[37])
/* "200" */
-#define GRPC_MDSTR_200 (grpc_static_slice_table[35])
+#define GRPC_MDSTR_200 (grpc_static_slice_table[38])
/* "404" */
-#define GRPC_MDSTR_404 (grpc_static_slice_table[36])
+#define GRPC_MDSTR_404 (grpc_static_slice_table[39])
/* "http" */
-#define GRPC_MDSTR_HTTP (grpc_static_slice_table[37])
+#define GRPC_MDSTR_HTTP (grpc_static_slice_table[40])
/* "https" */
-#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[38])
+#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[41])
/* "grpc" */
-#define GRPC_MDSTR_GRPC (grpc_static_slice_table[39])
+#define GRPC_MDSTR_GRPC (grpc_static_slice_table[42])
/* "GET" */
-#define GRPC_MDSTR_GET (grpc_static_slice_table[40])
+#define GRPC_MDSTR_GET (grpc_static_slice_table[43])
/* "PUT" */
-#define GRPC_MDSTR_PUT (grpc_static_slice_table[41])
+#define GRPC_MDSTR_PUT (grpc_static_slice_table[44])
/* "/" */
-#define GRPC_MDSTR_SLASH (grpc_static_slice_table[42])
+#define GRPC_MDSTR_SLASH (grpc_static_slice_table[45])
/* "/index.html" */
-#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[43])
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[46])
/* "204" */
-#define GRPC_MDSTR_204 (grpc_static_slice_table[44])
+#define GRPC_MDSTR_204 (grpc_static_slice_table[47])
/* "206" */
-#define GRPC_MDSTR_206 (grpc_static_slice_table[45])
+#define GRPC_MDSTR_206 (grpc_static_slice_table[48])
/* "304" */
-#define GRPC_MDSTR_304 (grpc_static_slice_table[46])
+#define GRPC_MDSTR_304 (grpc_static_slice_table[49])
/* "400" */
-#define GRPC_MDSTR_400 (grpc_static_slice_table[47])
+#define GRPC_MDSTR_400 (grpc_static_slice_table[50])
/* "500" */
-#define GRPC_MDSTR_500 (grpc_static_slice_table[48])
+#define GRPC_MDSTR_500 (grpc_static_slice_table[51])
/* "accept-charset" */
-#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[49])
-/* "accept-encoding" */
-#define GRPC_MDSTR_ACCEPT_ENCODING (grpc_static_slice_table[50])
+#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[52])
/* "gzip, deflate" */
-#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[51])
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[53])
/* "accept-language" */
-#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[52])
+#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[54])
/* "accept-ranges" */
-#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[53])
+#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[55])
/* "accept" */
-#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[54])
+#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[56])
/* "access-control-allow-origin" */
-#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[55])
+#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[57])
/* "age" */
-#define GRPC_MDSTR_AGE (grpc_static_slice_table[56])
+#define GRPC_MDSTR_AGE (grpc_static_slice_table[58])
/* "allow" */
-#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[57])
+#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[59])
/* "authorization" */
-#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[58])
+#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[60])
/* "cache-control" */
-#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[59])
+#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[61])
/* "content-disposition" */
-#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[60])
-/* "content-encoding" */
-#define GRPC_MDSTR_CONTENT_ENCODING (grpc_static_slice_table[61])
+#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[62])
/* "content-language" */
-#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[62])
+#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[63])
/* "content-length" */
-#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[63])
+#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[64])
/* "content-location" */
-#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[64])
+#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[65])
/* "content-range" */
-#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[65])
+#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[66])
/* "cookie" */
-#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[66])
+#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[67])
/* "date" */
-#define GRPC_MDSTR_DATE (grpc_static_slice_table[67])
+#define GRPC_MDSTR_DATE (grpc_static_slice_table[68])
/* "etag" */
-#define GRPC_MDSTR_ETAG (grpc_static_slice_table[68])
+#define GRPC_MDSTR_ETAG (grpc_static_slice_table[69])
/* "expect" */
-#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[69])
+#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[70])
/* "expires" */
-#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[70])
+#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[71])
/* "from" */
-#define GRPC_MDSTR_FROM (grpc_static_slice_table[71])
+#define GRPC_MDSTR_FROM (grpc_static_slice_table[72])
/* "if-match" */
-#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[72])
+#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[73])
/* "if-modified-since" */
-#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[73])
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[74])
/* "if-none-match" */
-#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[74])
+#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[75])
/* "if-range" */
-#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[75])
+#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[76])
/* "if-unmodified-since" */
-#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[76])
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[77])
/* "last-modified" */
-#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[77])
+#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[78])
/* "lb-cost-bin" */
-#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[78])
+#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[79])
/* "link" */
-#define GRPC_MDSTR_LINK (grpc_static_slice_table[79])
+#define GRPC_MDSTR_LINK (grpc_static_slice_table[80])
/* "location" */
-#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[80])
+#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[81])
/* "max-forwards" */
-#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[81])
+#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[82])
/* "proxy-authenticate" */
-#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[82])
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[83])
/* "proxy-authorization" */
-#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[83])
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[84])
/* "range" */
-#define GRPC_MDSTR_RANGE (grpc_static_slice_table[84])
+#define GRPC_MDSTR_RANGE (grpc_static_slice_table[85])
/* "referer" */
-#define GRPC_MDSTR_REFERER (grpc_static_slice_table[85])
+#define GRPC_MDSTR_REFERER (grpc_static_slice_table[86])
/* "refresh" */
-#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[86])
+#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[87])
/* "retry-after" */
-#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[87])
+#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[88])
/* "server" */
-#define GRPC_MDSTR_SERVER (grpc_static_slice_table[88])
+#define GRPC_MDSTR_SERVER (grpc_static_slice_table[89])
/* "set-cookie" */
-#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[89])
+#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[90])
/* "strict-transport-security" */
-#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[90])
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[91])
/* "transfer-encoding" */
-#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[91])
+#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[92])
/* "vary" */
-#define GRPC_MDSTR_VARY (grpc_static_slice_table[92])
+#define GRPC_MDSTR_VARY (grpc_static_slice_table[93])
/* "via" */
-#define GRPC_MDSTR_VIA (grpc_static_slice_table[93])
+#define GRPC_MDSTR_VIA (grpc_static_slice_table[94])
/* "www-authenticate" */
-#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[94])
+#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[95])
/* "identity,deflate" */
-#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[95])
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[96])
/* "identity,gzip" */
-#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[96])
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[97])
/* "deflate,gzip" */
-#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[97])
+#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[98])
/* "identity,deflate,gzip" */
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (grpc_static_slice_table[98])
+ (grpc_static_slice_table[99])
extern const grpc_slice_refcount_vtable grpc_static_metadata_vtable;
extern grpc_slice_refcount
@@ -244,7 +247,7 @@ extern grpc_slice_refcount
#define GRPC_STATIC_METADATA_INDEX(static_slice) \
((int)((static_slice).refcount - grpc_static_metadata_refcounts))
-#define GRPC_STATIC_MDELEM_COUNT 81
+#define GRPC_STATIC_MDELEM_COUNT 86
extern grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
/* "grpc-status": "0" */
@@ -355,141 +358,156 @@ extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
/* "content-disposition": "" */
#define GRPC_MDELEM_CONTENT_DISPOSITION_EMPTY \
(GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[35], GRPC_MDELEM_STORAGE_STATIC))
+/* "content-encoding": "identity" */
+#define GRPC_MDELEM_CONTENT_ENCODING_IDENTITY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[36], GRPC_MDELEM_STORAGE_STATIC))
+/* "content-encoding": "gzip" */
+#define GRPC_MDELEM_CONTENT_ENCODING_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[37], GRPC_MDELEM_STORAGE_STATIC))
/* "content-encoding": "" */
#define GRPC_MDELEM_CONTENT_ENCODING_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[36], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[38], GRPC_MDELEM_STORAGE_STATIC))
/* "content-language": "" */
#define GRPC_MDELEM_CONTENT_LANGUAGE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[37], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[39], GRPC_MDELEM_STORAGE_STATIC))
/* "content-length": "" */
#define GRPC_MDELEM_CONTENT_LENGTH_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[38], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[40], GRPC_MDELEM_STORAGE_STATIC))
/* "content-location": "" */
#define GRPC_MDELEM_CONTENT_LOCATION_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[39], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[41], GRPC_MDELEM_STORAGE_STATIC))
/* "content-range": "" */
#define GRPC_MDELEM_CONTENT_RANGE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[40], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[42], GRPC_MDELEM_STORAGE_STATIC))
/* "content-type": "" */
#define GRPC_MDELEM_CONTENT_TYPE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[41], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[43], GRPC_MDELEM_STORAGE_STATIC))
/* "cookie": "" */
#define GRPC_MDELEM_COOKIE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[42], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[44], GRPC_MDELEM_STORAGE_STATIC))
/* "date": "" */
#define GRPC_MDELEM_DATE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[43], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[45], GRPC_MDELEM_STORAGE_STATIC))
/* "etag": "" */
#define GRPC_MDELEM_ETAG_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[44], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[46], GRPC_MDELEM_STORAGE_STATIC))
/* "expect": "" */
#define GRPC_MDELEM_EXPECT_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[45], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[47], GRPC_MDELEM_STORAGE_STATIC))
/* "expires": "" */
#define GRPC_MDELEM_EXPIRES_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[46], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[48], GRPC_MDELEM_STORAGE_STATIC))
/* "from": "" */
#define GRPC_MDELEM_FROM_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[47], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[49], GRPC_MDELEM_STORAGE_STATIC))
/* "host": "" */
#define GRPC_MDELEM_HOST_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[48], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[50], GRPC_MDELEM_STORAGE_STATIC))
/* "if-match": "" */
#define GRPC_MDELEM_IF_MATCH_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[49], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[51], GRPC_MDELEM_STORAGE_STATIC))
/* "if-modified-since": "" */
#define GRPC_MDELEM_IF_MODIFIED_SINCE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[50], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[52], GRPC_MDELEM_STORAGE_STATIC))
/* "if-none-match": "" */
#define GRPC_MDELEM_IF_NONE_MATCH_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[51], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[53], GRPC_MDELEM_STORAGE_STATIC))
/* "if-range": "" */
#define GRPC_MDELEM_IF_RANGE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[52], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[54], GRPC_MDELEM_STORAGE_STATIC))
/* "if-unmodified-since": "" */
#define GRPC_MDELEM_IF_UNMODIFIED_SINCE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[53], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[55], GRPC_MDELEM_STORAGE_STATIC))
/* "last-modified": "" */
#define GRPC_MDELEM_LAST_MODIFIED_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[54], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[56], GRPC_MDELEM_STORAGE_STATIC))
/* "lb-token": "" */
#define GRPC_MDELEM_LB_TOKEN_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[55], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[57], GRPC_MDELEM_STORAGE_STATIC))
/* "lb-cost-bin": "" */
#define GRPC_MDELEM_LB_COST_BIN_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[56], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[58], GRPC_MDELEM_STORAGE_STATIC))
/* "link": "" */
#define GRPC_MDELEM_LINK_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[57], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[59], GRPC_MDELEM_STORAGE_STATIC))
/* "location": "" */
#define GRPC_MDELEM_LOCATION_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[58], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[60], GRPC_MDELEM_STORAGE_STATIC))
/* "max-forwards": "" */
#define GRPC_MDELEM_MAX_FORWARDS_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[59], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[61], GRPC_MDELEM_STORAGE_STATIC))
/* "proxy-authenticate": "" */
#define GRPC_MDELEM_PROXY_AUTHENTICATE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[60], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[62], GRPC_MDELEM_STORAGE_STATIC))
/* "proxy-authorization": "" */
#define GRPC_MDELEM_PROXY_AUTHORIZATION_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[61], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[63], GRPC_MDELEM_STORAGE_STATIC))
/* "range": "" */
#define GRPC_MDELEM_RANGE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[62], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[64], GRPC_MDELEM_STORAGE_STATIC))
/* "referer": "" */
#define GRPC_MDELEM_REFERER_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[63], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[65], GRPC_MDELEM_STORAGE_STATIC))
/* "refresh": "" */
#define GRPC_MDELEM_REFRESH_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[64], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[66], GRPC_MDELEM_STORAGE_STATIC))
/* "retry-after": "" */
#define GRPC_MDELEM_RETRY_AFTER_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[65], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[67], GRPC_MDELEM_STORAGE_STATIC))
/* "server": "" */
#define GRPC_MDELEM_SERVER_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[66], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[68], GRPC_MDELEM_STORAGE_STATIC))
/* "set-cookie": "" */
#define GRPC_MDELEM_SET_COOKIE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[67], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[69], GRPC_MDELEM_STORAGE_STATIC))
/* "strict-transport-security": "" */
#define GRPC_MDELEM_STRICT_TRANSPORT_SECURITY_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[68], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[70], GRPC_MDELEM_STORAGE_STATIC))
/* "transfer-encoding": "" */
#define GRPC_MDELEM_TRANSFER_ENCODING_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[69], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[71], GRPC_MDELEM_STORAGE_STATIC))
/* "user-agent": "" */
#define GRPC_MDELEM_USER_AGENT_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[70], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[72], GRPC_MDELEM_STORAGE_STATIC))
/* "vary": "" */
#define GRPC_MDELEM_VARY_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[71], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[73], GRPC_MDELEM_STORAGE_STATIC))
/* "via": "" */
#define GRPC_MDELEM_VIA_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[72], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[74], GRPC_MDELEM_STORAGE_STATIC))
/* "www-authenticate": "" */
#define GRPC_MDELEM_WWW_AUTHENTICATE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[73], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[75], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[74], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[76], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "deflate" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[75], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[77], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity,deflate" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[76], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[78], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[77], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[79], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity,gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[78], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[80], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "deflate,gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE_COMMA_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[79], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[81], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity,deflate,gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[80], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[82], GRPC_MDELEM_STORAGE_STATIC))
+/* "accept-encoding": "identity" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_IDENTITY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[83], GRPC_MDELEM_STORAGE_STATIC))
+/* "accept-encoding": "gzip" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[84], GRPC_MDELEM_STORAGE_STATIC))
+/* "accept-encoding": "identity,gzip" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[85], GRPC_MDELEM_STORAGE_STATIC))
grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b);
typedef enum {
@@ -508,7 +526,10 @@ typedef enum {
GRPC_BATCH_GRPC_TAGS_BIN,
GRPC_BATCH_GRPC_TRACE_BIN,
GRPC_BATCH_CONTENT_TYPE,
+ GRPC_BATCH_CONTENT_ENCODING,
+ GRPC_BATCH_ACCEPT_ENCODING,
GRPC_BATCH_GRPC_INTERNAL_ENCODING_REQUEST,
+ GRPC_BATCH_GRPC_INTERNAL_STREAM_ENCODING_REQUEST,
GRPC_BATCH_USER_AGENT,
GRPC_BATCH_HOST,
GRPC_BATCH_LB_TOKEN,
@@ -533,7 +554,10 @@ typedef union {
struct grpc_linked_mdelem *grpc_tags_bin;
struct grpc_linked_mdelem *grpc_trace_bin;
struct grpc_linked_mdelem *content_type;
+ struct grpc_linked_mdelem *content_encoding;
+ struct grpc_linked_mdelem *accept_encoding;
struct grpc_linked_mdelem *grpc_internal_encoding_request;
+ struct grpc_linked_mdelem *grpc_internal_stream_encoding_request;
struct grpc_linked_mdelem *user_agent;
struct grpc_linked_mdelem *host;
struct grpc_linked_mdelem *lb_token;
@@ -547,9 +571,17 @@ typedef union {
GRPC_BATCH_CALLOUTS_COUNT) \
: GRPC_BATCH_CALLOUTS_COUNT)
+extern bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT];
+
extern const uint8_t grpc_static_accept_encoding_metadata[8];
#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) \
(GRPC_MAKE_MDELEM( \
&grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]], \
GRPC_MDELEM_STORAGE_STATIC))
+
+extern const uint8_t grpc_static_accept_stream_encoding_metadata[4];
+#define GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(algs) \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table \
+ [grpc_static_accept_stream_encoding_metadata[(algs)]], \
+ GRPC_MDELEM_STORAGE_STATIC))
#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */
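The new GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(algs) macro maps a bitset of enabled stream compression algorithms onto one of the three new static "accept-encoding" elements. A minimal usage sketch, assuming the conventional bit layout (bit 0 = identity, bit 1 = gzip) — the layout is an assumption, not something shown in this diff:

  /* Hypothetical sketch; the bit layout of |algs| is assumed. */
  uint32_t algs = (1u << 0) | (1u << 1); /* identity | gzip */
  grpc_mdelem md = GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(algs);
  /* With both bits set, this should select the static
     "accept-encoding: identity,gzip" element. */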
diff --git a/src/core/lib/transport/status_conversion.c b/src/core/lib/transport/status_conversion.c
index 9a76977e4b..a40d333284 100644
--- a/src/core/lib/transport/status_conversion.c
+++ b/src/core/lib/transport/status_conversion.c
@@ -18,7 +18,7 @@
#include "src/core/lib/transport/status_conversion.h"
-int grpc_status_to_http2_error(grpc_status_code status) {
+grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) {
switch (status) {
case GRPC_STATUS_OK:
return GRPC_HTTP2_NO_ERROR;
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 6c61f4b8d9..682a820b48 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -72,7 +72,8 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
cope with.
Throw this over to the executor (on a core-owned thread) and process it
there. */
- refcount->destroy.scheduler = grpc_executor_scheduler;
+ refcount->destroy.scheduler =
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
}
@@ -101,8 +102,11 @@ static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) {
grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount *refcount,
void *buffer, size_t length) {
slice_stream_ref(&refcount->slice_refcount);
- return (grpc_slice){.refcount = &refcount->slice_refcount,
- .data.refcounted = {.bytes = buffer, .length = length}};
+ grpc_slice res;
+ res.refcount = &refcount->slice_refcount;
+ res.data.refcounted.bytes = (uint8_t *)buffer;
+ res.data.refcounted.length = length;
+ return res;
}
static const grpc_slice_refcount_vtable stream_ref_slice_vtable = {
@@ -197,11 +201,6 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
then_schedule_closure);
}
-char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport) {
- return transport->vtable->get_peer(exec_ctx, transport);
-}
-
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport) {
return transport->vtable->get_endpoint(exec_ctx, transport);
@@ -214,24 +213,24 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
// is a function that must always unref cancel_error
// though it lives in lib, it handles transport stream ops sure
// it's grpc_transport_stream_op_batch_finish_with_failure
-
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch,
- grpc_error *error) {
+ grpc_error *error, grpc_call_combiner *call_combiner) {
if (batch->send_message) {
grpc_byte_stream_destroy(exec_ctx,
batch->payload->send_message.send_message);
}
if (batch->recv_message) {
- GRPC_CLOSURE_SCHED(exec_ctx,
- batch->payload->recv_message.recv_message_ready,
- GRPC_ERROR_REF(error));
+ GRPC_CALL_COMBINER_START(exec_ctx, call_combiner,
+ batch->payload->recv_message.recv_message_ready,
+ GRPC_ERROR_REF(error),
+ "failing recv_message_ready");
}
if (batch->recv_initial_metadata) {
- GRPC_CLOSURE_SCHED(
- exec_ctx,
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, call_combiner,
batch->payload->recv_initial_metadata.recv_initial_metadata_ready,
- GRPC_ERROR_REF(error));
+ GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready");
}
GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error);
if (batch->cancel_stream) {
@@ -247,13 +246,13 @@ typedef struct {
static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- made_transport_op *op = arg;
+ made_transport_op *op = (made_transport_op *)arg;
GRPC_CLOSURE_SCHED(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
gpr_free(op);
}
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
- made_transport_op *op = gpr_malloc(sizeof(*op));
+ made_transport_op *op = (made_transport_op *)gpr_malloc(sizeof(*op));
GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_op, op,
grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete;
@@ -271,7 +270,7 @@ typedef struct {
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- made_transport_stream_op *op = arg;
+ made_transport_stream_op *op = (made_transport_stream_op *)arg;
grpc_closure *c = op->inner_on_complete;
gpr_free(op);
GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_REF(error));
@@ -279,7 +278,8 @@ static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
grpc_closure *on_complete) {
- made_transport_stream_op *op = gpr_zalloc(sizeof(*op));
+ made_transport_stream_op *op =
+ (made_transport_stream_op *)gpr_zalloc(sizeof(*op));
op->op.payload = &op->payload;
GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_stream_op,
op, grpc_schedule_on_exec_ctx);
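With the new signature above, a caller that cannot forward a batch fails it under the call's combiner, so recv_message_ready and recv_initial_metadata_ready are delivered with the same ordering guarantees as successful completions. A minimal caller sketch, assuming a hypothetical call_data that stores the call combiner:

  /* Hypothetical filter code: fail a batch instead of forwarding it.
     call_data and its call_combiner member are assumptions. */
  typedef struct { grpc_call_combiner *call_combiner; } call_data;

  static void fail_batch(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                         grpc_transport_stream_op_batch *batch) {
    call_data *calld = (call_data *)elem->call_data;
    grpc_transport_stream_op_batch_finish_with_failure(
        exec_ctx, batch,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("call cancelled"),
        calld->call_combiner);
  }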
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 099138ea14..fbf5dcb8b5 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -22,6 +22,7 @@
#include <stddef.h>
#include "src/core/lib/channel/context.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset.h"
@@ -152,6 +153,9 @@ struct grpc_transport_stream_op_batch_payload {
/** Iff send_initial_metadata != NULL, flags associated with
send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
uint32_t send_initial_metadata_flags;
+ // If non-NULL, will be set by the transport to the peer string
+ // (a char*, which the caller takes ownership of).
+ gpr_atm *peer_string;
} send_initial_metadata;
struct {
@@ -176,6 +180,9 @@ struct grpc_transport_stream_op_batch_payload {
// immediately available. This may be a signal that we received a
// Trailers-Only response.
bool *trailing_metadata_available;
+ // If non-NULL, will be set by the transport to the peer string
+ // (a char*, which the caller takes ownership of).
+ gpr_atm *peer_string;
} recv_initial_metadata;
struct {
@@ -293,7 +300,7 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
- grpc_error *error);
+ grpc_error *error, grpc_call_combiner *call_combiner);
char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op);
char *grpc_transport_op_string(grpc_transport_op *op);
@@ -332,10 +339,6 @@ void grpc_transport_close(grpc_transport *transport);
/* Destroy the transport */
void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
-/* Get the transports peer */
-char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport);
-
/* Get the endpoint used by \a transport */
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport);
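Because peer_string is now published through a gpr_atm, the caller owns an atomic slot, hands its address to the transport in the batch payload, and later reads it with an acquire load. A sketch under those assumptions (the call_data layout is hypothetical):

  typedef struct { gpr_atm peer_string; /* holds a char*; 0 until set */ } call_data;

  static void request_peer(call_data *calld,
                           grpc_transport_stream_op_batch_payload *payload) {
    payload->recv_initial_metadata.peer_string = &calld->peer_string;
  }

  static const char *read_peer(call_data *calld) {
    return (const char *)gpr_atm_acq_load(&calld->peer_string); /* may be NULL */
  }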
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index fc772c6dd1..bbae69c223 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -59,9 +59,6 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_destroy */
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
- /* implementation of grpc_transport_get_peer */
- char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
-
/* implementation of grpc_transport_get_endpoint */
grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
} grpc_transport_vtable;
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index 7b18229ba6..858664715c 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -112,6 +112,13 @@ char *grpc_transport_stream_op_batch_string(
gpr_strvec_add(&b, tmp);
}
+ if (op->collect_stats) {
+ gpr_strvec_add(&b, gpr_strdup(" "));
+ gpr_asprintf(&tmp, "COLLECT_STATS:%p",
+ op->payload->collect_stats.collect_stats);
+ gpr_strvec_add(&b, tmp);
+ }
+
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
@@ -190,7 +197,7 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
return out;
}
-void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
char *str = grpc_transport_stream_op_batch_string(op);
diff --git a/src/core/plugin_registry/grpc_cronet_plugin_registry.c b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
index 322ebea111..1c09f54ad9 100644
--- a/src/core/plugin_registry/grpc_cronet_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
@@ -28,8 +28,8 @@ extern void grpc_client_channel_init(void);
extern void grpc_client_channel_shutdown(void);
extern void grpc_tsi_gts_init(void);
extern void grpc_tsi_gts_shutdown(void);
-extern void grpc_load_reporting_plugin_init(void);
-extern void grpc_load_reporting_plugin_shutdown(void);
+extern void grpc_server_load_reporting_plugin_init(void);
+extern void grpc_server_load_reporting_plugin_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_http_filters_init,
@@ -42,6 +42,6 @@ void grpc_register_built_in_plugins(void) {
grpc_client_channel_shutdown);
grpc_register_plugin(grpc_tsi_gts_init,
grpc_tsi_gts_shutdown);
- grpc_register_plugin(grpc_load_reporting_plugin_init,
- grpc_load_reporting_plugin_shutdown);
+ grpc_register_plugin(grpc_server_load_reporting_plugin_init,
+ grpc_server_load_reporting_plugin_shutdown);
}
diff --git a/src/core/plugin_registry/grpc_plugin_registry.c b/src/core/plugin_registry/grpc_plugin_registry.c
index fa9974952c..9cacf3d306 100644
--- a/src/core/plugin_registry/grpc_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_plugin_registry.c
@@ -44,8 +44,8 @@ extern void grpc_resolver_dns_native_init(void);
extern void grpc_resolver_dns_native_shutdown(void);
extern void grpc_resolver_sockaddr_init(void);
extern void grpc_resolver_sockaddr_shutdown(void);
-extern void grpc_load_reporting_plugin_init(void);
-extern void grpc_load_reporting_plugin_shutdown(void);
+extern void grpc_server_load_reporting_plugin_init(void);
+extern void grpc_server_load_reporting_plugin_shutdown(void);
extern void census_grpc_plugin_init(void);
extern void census_grpc_plugin_shutdown(void);
extern void grpc_max_age_filter_init(void);
@@ -82,8 +82,8 @@ void grpc_register_built_in_plugins(void) {
grpc_resolver_dns_native_shutdown);
grpc_register_plugin(grpc_resolver_sockaddr_init,
grpc_resolver_sockaddr_shutdown);
- grpc_register_plugin(grpc_load_reporting_plugin_init,
- grpc_load_reporting_plugin_shutdown);
+ grpc_register_plugin(grpc_server_load_reporting_plugin_init,
+ grpc_server_load_reporting_plugin_shutdown);
grpc_register_plugin(census_grpc_plugin_init,
census_grpc_plugin_shutdown);
grpc_register_plugin(grpc_max_age_filter_init,
diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
index 7eb599d81a..7b90d796d5 100644
--- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
@@ -36,8 +36,8 @@ extern void grpc_resolver_sockaddr_init(void);
extern void grpc_resolver_sockaddr_shutdown(void);
extern void grpc_resolver_fake_init(void);
extern void grpc_resolver_fake_shutdown(void);
-extern void grpc_load_reporting_plugin_init(void);
-extern void grpc_load_reporting_plugin_shutdown(void);
+extern void grpc_server_load_reporting_plugin_init(void);
+extern void grpc_server_load_reporting_plugin_shutdown(void);
extern void grpc_lb_policy_grpclb_init(void);
extern void grpc_lb_policy_grpclb_shutdown(void);
extern void grpc_lb_policy_pick_first_init(void);
@@ -72,8 +72,8 @@ void grpc_register_built_in_plugins(void) {
grpc_resolver_sockaddr_shutdown);
grpc_register_plugin(grpc_resolver_fake_init,
grpc_resolver_fake_shutdown);
- grpc_register_plugin(grpc_load_reporting_plugin_init,
- grpc_load_reporting_plugin_shutdown);
+ grpc_register_plugin(grpc_server_load_reporting_plugin_init,
+ grpc_server_load_reporting_plugin_shutdown);
grpc_register_plugin(grpc_lb_policy_grpclb_init,
grpc_lb_policy_grpclb_shutdown);
grpc_register_plugin(grpc_lb_policy_pick_first_init,
diff --git a/src/core/tsi/fake_transport_security.c b/src/core/tsi/fake_transport_security.c
index 967126ecee..64043fea08 100644
--- a/src/core/tsi/fake_transport_security.c
+++ b/src/core/tsi/fake_transport_security.c
@@ -25,7 +25,8 @@
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/useful.h>
-#include "src/core/tsi/transport_security.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/tsi/transport_security_grpc.h"
/* --- Constants. ---*/
#define TSI_FAKE_FRAME_HEADER_SIZE 4
@@ -74,6 +75,14 @@ typedef struct {
size_t max_frame_size;
} tsi_fake_frame_protector;
+typedef struct {
+ tsi_zero_copy_grpc_protector base;
+ grpc_slice_buffer header_sb;
+ grpc_slice_buffer protected_sb;
+ size_t max_frame_size;
+ size_t parsed_frame_size;
+} tsi_fake_zero_copy_grpc_protector;
+
/* --- Utils. ---*/
static const char *tsi_fake_handshake_message_strings[] = {
@@ -113,6 +122,28 @@ static void store32_little_endian(uint32_t value, unsigned char *buf) {
buf[0] = (unsigned char)((value)&0xFF);
}
+static uint32_t read_frame_size(const grpc_slice_buffer *sb) {
+ GPR_ASSERT(sb != NULL && sb->length >= TSI_FAKE_FRAME_HEADER_SIZE);
+ uint8_t frame_size_buffer[TSI_FAKE_FRAME_HEADER_SIZE];
+ uint8_t *buf = frame_size_buffer;
+ /* Copies the first 4 bytes to a temporary buffer. */
+ size_t remaining = TSI_FAKE_FRAME_HEADER_SIZE;
+ for (size_t i = 0; i < sb->count; i++) {
+ size_t slice_length = GRPC_SLICE_LENGTH(sb->slices[i]);
+ if (remaining <= slice_length) {
+ memcpy(buf, GRPC_SLICE_START_PTR(sb->slices[i]), remaining);
+ remaining = 0;
+ break;
+ } else {
+ memcpy(buf, GRPC_SLICE_START_PTR(sb->slices[i]), slice_length);
+ buf += slice_length;
+ remaining -= slice_length;
+ }
+ }
+ GPR_ASSERT(remaining == 0);
+ return load32_little_endian(frame_size_buffer);
+}
+
static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {
frame->offset = 0;
frame->needs_draining = needs_draining;
@@ -363,6 +394,84 @@ static const tsi_frame_protector_vtable frame_protector_vtable = {
fake_protector_unprotect, fake_protector_destroy,
};
+/* --- tsi_zero_copy_grpc_protector methods implementation. ---*/
+
+static tsi_result fake_zero_copy_grpc_protector_protect(
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *unprotected_slices,
+ grpc_slice_buffer *protected_slices) {
+ if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) {
+ return TSI_INVALID_ARGUMENT;
+ }
+ tsi_fake_zero_copy_grpc_protector *impl =
+ (tsi_fake_zero_copy_grpc_protector *)self;
+ /* Protects each frame. */
+ while (unprotected_slices->length > 0) {
+ size_t frame_length =
+ GPR_MIN(impl->max_frame_size,
+ unprotected_slices->length + TSI_FAKE_FRAME_HEADER_SIZE);
+ grpc_slice slice = GRPC_SLICE_MALLOC(TSI_FAKE_FRAME_HEADER_SIZE);
+ store32_little_endian((uint32_t)frame_length, GRPC_SLICE_START_PTR(slice));
+ grpc_slice_buffer_add(protected_slices, slice);
+ size_t data_length = frame_length - TSI_FAKE_FRAME_HEADER_SIZE;
+ grpc_slice_buffer_move_first(unprotected_slices, data_length,
+ protected_slices);
+ }
+ return TSI_OK;
+}
+
+static tsi_result fake_zero_copy_grpc_protector_unprotect(
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *protected_slices,
+ grpc_slice_buffer *unprotected_slices) {
+ if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) {
+ return TSI_INVALID_ARGUMENT;
+ }
+ tsi_fake_zero_copy_grpc_protector *impl =
+ (tsi_fake_zero_copy_grpc_protector *)self;
+ grpc_slice_buffer_move_into(protected_slices, &impl->protected_sb);
+ /* Unprotect each frame, if we get a full frame. */
+ while (impl->protected_sb.length >= TSI_FAKE_FRAME_HEADER_SIZE) {
+ if (impl->parsed_frame_size == 0) {
+ impl->parsed_frame_size = read_frame_size(&impl->protected_sb);
+ if (impl->parsed_frame_size <= 4) {
+ gpr_log(GPR_ERROR, "Invalid frame size.");
+ return TSI_DATA_CORRUPTED;
+ }
+ }
+ /* If we do not have a full frame, return with OK status. */
+ if (impl->protected_sb.length < impl->parsed_frame_size) break;
+ /* Strips header bytes. */
+ grpc_slice_buffer_move_first(&impl->protected_sb,
+ TSI_FAKE_FRAME_HEADER_SIZE, &impl->header_sb);
+ /* Moves data to unprotected slices. */
+ grpc_slice_buffer_move_first(
+ &impl->protected_sb,
+ impl->parsed_frame_size - TSI_FAKE_FRAME_HEADER_SIZE,
+ unprotected_slices);
+ impl->parsed_frame_size = 0;
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &impl->header_sb);
+ }
+ return TSI_OK;
+}
+
+static void fake_zero_copy_grpc_protector_destroy(
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self) {
+ if (self == NULL) return;
+ tsi_fake_zero_copy_grpc_protector *impl =
+ (tsi_fake_zero_copy_grpc_protector *)self;
+ grpc_slice_buffer_destroy_internal(exec_ctx, &impl->header_sb);
+ grpc_slice_buffer_destroy_internal(exec_ctx, &impl->protected_sb);
+ gpr_free(impl);
+}
+
+static const tsi_zero_copy_grpc_protector_vtable
+ zero_copy_grpc_protector_vtable = {
+ fake_zero_copy_grpc_protector_protect,
+ fake_zero_copy_grpc_protector_unprotect,
+ fake_zero_copy_grpc_protector_destroy,
+};
+
/* --- tsi_handshaker_result methods implementation. ---*/
typedef struct {
@@ -383,6 +492,15 @@ static tsi_result fake_handshaker_result_extract_peer(
return result;
}
+static tsi_result fake_handshaker_result_create_zero_copy_grpc_protector(
+ void *exec_ctx, const tsi_handshaker_result *self,
+ size_t *max_output_protected_frame_size,
+ tsi_zero_copy_grpc_protector **protector) {
+ *protector =
+ tsi_create_fake_zero_copy_grpc_protector(max_output_protected_frame_size);
+ return TSI_OK;
+}
+
static tsi_result fake_handshaker_result_create_frame_protector(
const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
tsi_frame_protector **protector) {
@@ -407,7 +525,7 @@ static void fake_handshaker_result_destroy(tsi_handshaker_result *self) {
static const tsi_handshaker_result_vtable handshaker_result_vtable = {
fake_handshaker_result_extract_peer,
- NULL, /* create_zero_copy_grpc_protector */
+ fake_handshaker_result_create_zero_copy_grpc_protector,
fake_handshaker_result_create_frame_protector,
fake_handshaker_result_get_unused_bytes,
fake_handshaker_result_destroy,
@@ -631,3 +749,16 @@ tsi_frame_protector *tsi_create_fake_frame_protector(
impl->base.vtable = &frame_protector_vtable;
return &impl->base;
}
+
+tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector(
+ size_t *max_protected_frame_size) {
+ tsi_fake_zero_copy_grpc_protector *impl = gpr_zalloc(sizeof(*impl));
+ grpc_slice_buffer_init(&impl->header_sb);
+ grpc_slice_buffer_init(&impl->protected_sb);
+ impl->max_frame_size = (max_protected_frame_size == NULL)
+ ? TSI_FAKE_DEFAULT_FRAME_SIZE
+ : *max_protected_frame_size;
+ impl->parsed_frame_size = 0;
+ impl->base.vtable = &zero_copy_grpc_protector_vtable;
+ return &impl->base;
+}
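Worked example of the fake framing above: each frame is a 4-byte little-endian length prefix that counts itself, followed by payload. Protecting a 10-byte message with max_frame_size = 8 therefore yields three frames — [08 00 00 00 | 4 data bytes], [08 00 00 00 | 4 data bytes], [06 00 00 00 | 2 data bytes] — and unprotect strips each header into header_sb, moves frame_length - 4 payload bytes out, and buffers partial frames in protected_sb until the rest arrives.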
diff --git a/src/core/tsi/fake_transport_security.h b/src/core/tsi/fake_transport_security.h
index 934b3cbeb2..6159708a84 100644
--- a/src/core/tsi/fake_transport_security.h
+++ b/src/core/tsi/fake_transport_security.h
@@ -39,6 +39,11 @@ tsi_handshaker *tsi_create_fake_handshaker(int is_client);
tsi_frame_protector *tsi_create_fake_frame_protector(
size_t *max_protected_frame_size);
+/* Creates a zero-copy protector directly without going through the handshake
+ * phase. */
+tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector(
+ size_t *max_protected_frame_size);
+
#ifdef __cplusplus
}
#endif
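A minimal round-trip sketch with the fake zero-copy protector; it assumes a grpc_exec_ctx is available and elides error handling:

  static void fake_zero_copy_round_trip(grpc_exec_ctx *exec_ctx) {
    tsi_zero_copy_grpc_protector *p =
        tsi_create_fake_zero_copy_grpc_protector(NULL /* default frame size */);
    grpc_slice_buffer plain, wire, out;
    grpc_slice_buffer_init(&plain);
    grpc_slice_buffer_init(&wire);
    grpc_slice_buffer_init(&out);
    grpc_slice_buffer_add(&plain, grpc_slice_from_copied_string("hello"));
    GPR_ASSERT(TSI_OK ==
               tsi_zero_copy_grpc_protector_protect(exec_ctx, p, &plain, &wire));
    GPR_ASSERT(TSI_OK ==
               tsi_zero_copy_grpc_protector_unprotect(exec_ctx, p, &wire, &out));
    /* out now holds the original "hello" bytes. */
    tsi_zero_copy_grpc_protector_destroy(exec_ctx, p);
    grpc_slice_buffer_destroy_internal(exec_ctx, &plain);
    grpc_slice_buffer_destroy_internal(exec_ctx, &wire);
    grpc_slice_buffer_destroy_internal(exec_ctx, &out);
  }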
diff --git a/src/core/tsi/ssl_transport_security.c b/src/core/tsi/ssl_transport_security.c
index 1fd65928f9..7ebf9dd96f 100644
--- a/src/core/tsi/ssl_transport_security.c
+++ b/src/core/tsi/ssl_transport_security.c
@@ -67,7 +67,13 @@
/* --- Structure definitions. ---*/
+struct tsi_ssl_handshaker_factory {
+ const tsi_ssl_handshaker_factory_vtable *vtable;
+ gpr_refcount refcount;
+};
+
struct tsi_ssl_client_handshaker_factory {
+ tsi_ssl_handshaker_factory base;
SSL_CTX *ssl_context;
unsigned char *alpn_protocol_list;
size_t alpn_protocol_list_length;
@@ -77,6 +83,7 @@ struct tsi_ssl_server_handshaker_factory {
+ tsi_ssl_handshaker_factory base;
/* Several contexts to support SNI.
The tsi_peer array contains the subject names of the server certificates
associated with the contexts at the same index. */
SSL_CTX **ssl_contexts;
tsi_peer *ssl_context_x509_subject_names;
size_t ssl_context_count;
@@ -90,6 +97,7 @@ typedef struct {
BIO *into_ssl;
BIO *from_ssl;
tsi_result result;
+ tsi_ssl_handshaker_factory *factory_ref;
} tsi_ssl_handshaker;
typedef struct {
@@ -846,6 +854,47 @@ static const tsi_frame_protector_vtable frame_protector_vtable = {
ssl_protector_destroy,
};
+/* --- tsi_ssl_handshaker_factory methods implementation. --- */
+
+static void tsi_ssl_handshaker_factory_destroy(
+ tsi_ssl_handshaker_factory *self) {
+ if (self == NULL) return;
+
+ if (self->vtable != NULL && self->vtable->destroy != NULL) {
+ self->vtable->destroy(self);
+ }
+ /* Note: we don't free(self) here because this object is always directly
+ * embedded in another object. If tsi_ssl_handshaker_factory_init ever
+ * allocates memory, it should be freed here. */
+}
+
+static tsi_ssl_handshaker_factory *tsi_ssl_handshaker_factory_ref(
+ tsi_ssl_handshaker_factory *self) {
+ if (self == NULL) return NULL;
+ gpr_refn(&self->refcount, 1);
+ return self;
+}
+
+static void tsi_ssl_handshaker_factory_unref(tsi_ssl_handshaker_factory *self) {
+ if (self == NULL) return;
+
+ if (gpr_unref(&self->refcount)) {
+ tsi_ssl_handshaker_factory_destroy(self);
+ }
+}
+
+static tsi_ssl_handshaker_factory_vtable handshaker_factory_vtable = {NULL};
+
+/* Initializes a tsi_ssl_handshaker_factory object. Caller is responsible for
+ * allocating memory for the factory. */
+static void tsi_ssl_handshaker_factory_init(
+ tsi_ssl_handshaker_factory *factory) {
+ GPR_ASSERT(factory != NULL);
+
+ factory->vtable = &handshaker_factory_vtable;
+ gpr_ref_init(&factory->refcount, 1);
+}
+
/* --- tsi_handshaker methods implementation. ---*/
static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
@@ -1013,6 +1062,7 @@ static tsi_result ssl_handshaker_create_frame_protector(
static void ssl_handshaker_destroy(tsi_handshaker *self) {
tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
SSL_free(impl->ssl); /* The BIO objects are owned by ssl */
+ tsi_ssl_handshaker_factory_unref(impl->factory_ref);
gpr_free(impl);
}
@@ -1030,6 +1080,7 @@ static const tsi_handshaker_vtable handshaker_vtable = {
static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
const char *server_name_indication,
+ tsi_ssl_handshaker_factory *factory,
tsi_handshaker **handshaker) {
SSL *ssl = SSL_new(ctx);
BIO *into_ssl = NULL;
@@ -1085,6 +1136,8 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
impl->from_ssl = from_ssl;
impl->result = TSI_HANDSHAKE_IN_PROGRESS;
impl->base.vtable = &handshaker_vtable;
+ impl->factory_ref = tsi_ssl_handshaker_factory_ref(factory);
+
*handshaker = &impl->base;
return TSI_OK;
}
@@ -1121,11 +1174,20 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
tsi_handshaker **handshaker) {
return create_tsi_ssl_handshaker(self->ssl_context, 1, server_name_indication,
- handshaker);
+ &self->base, handshaker);
}
-void tsi_ssl_client_handshaker_factory_destroy(
+void tsi_ssl_client_handshaker_factory_unref(
tsi_ssl_client_handshaker_factory *self) {
+ if (self == NULL) return;
+ tsi_ssl_handshaker_factory_unref(&self->base);
+}
+
+static void tsi_ssl_client_handshaker_factory_destroy(
+ tsi_ssl_handshaker_factory *factory) {
+ if (factory == NULL) return;
+ tsi_ssl_client_handshaker_factory *self =
+ (tsi_ssl_client_handshaker_factory *)factory;
if (self->ssl_context != NULL) SSL_CTX_free(self->ssl_context);
if (self->alpn_protocol_list != NULL) gpr_free(self->alpn_protocol_list);
gpr_free(self);
@@ -1150,11 +1212,21 @@ tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
if (self->ssl_context_count == 0) return TSI_INVALID_ARGUMENT;
/* Create the handshaker with the first context. We will switch if needed
because of SNI in ssl_server_handshaker_factory_servername_callback. */
- return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, handshaker);
+ return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, &self->base,
+ handshaker);
}
-void tsi_ssl_server_handshaker_factory_destroy(
+void tsi_ssl_server_handshaker_factory_unref(
tsi_ssl_server_handshaker_factory *self) {
+ if (self == NULL) return;
+ tsi_ssl_handshaker_factory_unref(&self->base);
+}
+
+static void tsi_ssl_server_handshaker_factory_destroy(
+ tsi_ssl_handshaker_factory *factory) {
+ if (factory == NULL) return;
+ tsi_ssl_server_handshaker_factory *self =
+ (tsi_ssl_server_handshaker_factory *)factory;
size_t i;
for (i = 0; i < self->ssl_context_count; i++) {
if (self->ssl_contexts[i] != NULL) {
@@ -1263,6 +1335,9 @@ static int server_handshaker_factory_npn_advertised_callback(
/* --- tsi_ssl_handshaker_factory constructors. --- */
+static tsi_ssl_handshaker_factory_vtable client_handshaker_factory_vtable = {
+ tsi_ssl_client_handshaker_factory_destroy};
+
tsi_result tsi_create_ssl_client_handshaker_factory(
const tsi_ssl_pem_key_cert_pair *pem_key_cert_pair,
const char *pem_root_certs, const char *cipher_suites,
@@ -1285,6 +1360,9 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
impl = gpr_zalloc(sizeof(*impl));
+ tsi_ssl_handshaker_factory_init(&impl->base);
+ impl->base.vtable = &client_handshaker_factory_vtable;
+
impl->ssl_context = ssl_context;
do {
@@ -1322,7 +1400,7 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
} while (0);
if (result != TSI_OK) {
- tsi_ssl_client_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
SSL_CTX_set_verify(ssl_context, SSL_VERIFY_PEER, NULL);
@@ -1332,6 +1410,9 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
return TSI_OK;
}
+static tsi_ssl_handshaker_factory_vtable server_handshaker_factory_vtable = {
+ tsi_ssl_server_handshaker_factory_destroy};
+
tsi_result tsi_create_ssl_server_handshaker_factory(
const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs, const char *pem_client_root_certs,
@@ -1364,12 +1445,15 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
}
impl = gpr_zalloc(sizeof(*impl));
+ tsi_ssl_handshaker_factory_init(&impl->base);
+ impl->base.vtable = &server_handshaker_factory_vtable;
+
impl->ssl_contexts = gpr_zalloc(num_key_cert_pairs * sizeof(SSL_CTX *));
impl->ssl_context_x509_subject_names =
gpr_zalloc(num_key_cert_pairs * sizeof(tsi_peer));
if (impl->ssl_contexts == NULL ||
impl->ssl_context_x509_subject_names == NULL) {
- tsi_ssl_server_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return TSI_OUT_OF_RESOURCES;
}
impl->ssl_context_count = num_key_cert_pairs;
@@ -1379,7 +1463,7 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
&impl->alpn_protocol_list,
&impl->alpn_protocol_list_length);
if (result != TSI_OK) {
- tsi_ssl_server_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
}
@@ -1451,10 +1535,11 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
} while (0);
if (result != TSI_OK) {
- tsi_ssl_server_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
}
+
*factory = impl;
return TSI_OK;
}
@@ -1501,3 +1586,15 @@ int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) {
return 0; /* Not found. */
}
+
+/* --- Testing support. --- */
+const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
+ tsi_ssl_handshaker_factory *factory,
+ tsi_ssl_handshaker_factory_vtable *new_vtable) {
+ GPR_ASSERT(factory != NULL);
+ GPR_ASSERT(factory->vtable != NULL);
+
+ const tsi_ssl_handshaker_factory_vtable *orig_vtable = factory->vtable;
+ factory->vtable = new_vtable;
+ return orig_vtable;
+}
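The vtable swap exists so tests can observe factory destruction. A test-only sketch; the counting wrapper and its globals are hypothetical:

  static const tsi_ssl_handshaker_factory_vtable *orig_vtable;
  static tsi_ssl_handshaker_factory_vtable counting_vtable;
  static int destroy_count = 0;

  static void counting_destroy(tsi_ssl_handshaker_factory *factory) {
    destroy_count++;
    if (orig_vtable->destroy != NULL) orig_vtable->destroy(factory);
  }

  static void install_destroy_counter(tsi_ssl_handshaker_factory *factory) {
    counting_vtable.destroy = counting_destroy;
    orig_vtable =
        tsi_ssl_handshaker_factory_swap_vtable(factory, &counting_vtable);
  }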
diff --git a/src/core/tsi/ssl_transport_security.h b/src/core/tsi/ssl_transport_security.h
index 177599930b..3abfdf5ed8 100644
--- a/src/core/tsi/ssl_transport_security.h
+++ b/src/core/tsi/ssl_transport_security.h
@@ -96,10 +96,10 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
tsi_handshaker **handshaker);
-/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
- while handshakers created with this factory are still in use. */
-void tsi_ssl_client_handshaker_factory_destroy(
- tsi_ssl_client_handshaker_factory *self);
+/* Decrements the reference count of the handshaker factory. The factory is
+ * destroyed once no references remain. */
+void tsi_ssl_client_handshaker_factory_unref(
+ tsi_ssl_client_handshaker_factory *self);
/* --- tsi_ssl_server_handshaker_factory object ---
@@ -158,9 +158,9 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
tsi_ssl_server_handshaker_factory *self, tsi_handshaker **handshaker);
-/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
- while handshakers created with this factory are still in use. */
-void tsi_ssl_server_handshaker_factory_destroy(
+/* Decrements the reference count of the handshaker factory. The factory is
+ * destroyed once no references remain. */
+void tsi_ssl_server_handshaker_factory_unref(
tsi_ssl_server_handshaker_factory *self);
/* Util that checks that an ssl peer matches a specific name.
@@ -170,6 +170,29 @@ void tsi_ssl_server_handshaker_factory_destroy(
- handle public suffix wildcard more strictly (e.g. *.co.uk) */
int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name);
+/* --- Testing support. ---
+
+ These functions and typedefs are not intended to be used outside of testing.
+ */
+
+/* Base type of client and server handshaker factories. */
+typedef struct tsi_ssl_handshaker_factory tsi_ssl_handshaker_factory;
+
+/* Function pointer to handshaker_factory destructor. */
+typedef void (*tsi_ssl_handshaker_factory_destructor)(
+ tsi_ssl_handshaker_factory *factory);
+
+/* Virtual table for tsi_ssl_handshaker_factory. */
+typedef struct {
+ tsi_ssl_handshaker_factory_destructor destroy;
+} tsi_ssl_handshaker_factory_vtable;
+
+/* Swaps the factory's vtable for new_vtable and returns the previous
+ vtable. */
+const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
+ tsi_ssl_handshaker_factory *factory,
+ tsi_ssl_handshaker_factory_vtable *new_vtable);
+
#ifdef __cplusplus
}
#endif
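Under this refcounting scheme, a caller may drop its factory reference as soon as its handshakers exist; each handshaker holds its own reference, so the factory is destroyed only after the last handshaker is. A sketch assuming a previously created factory (SNI value hypothetical):

  tsi_handshaker *handshaker = NULL;
  tsi_ssl_client_handshaker_factory_create_handshaker(
      factory, "foo.test.example.com", &handshaker);
  tsi_ssl_client_handshaker_factory_unref(factory); /* handshaker keeps a ref */
  /* ... drive the handshake ... */
  tsi_handshaker_destroy(handshaker); /* last ref dropped; factory destroyed */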
diff --git a/src/core/tsi/test_creds/BUILD b/src/core/tsi/test_creds/BUILD
index 4b0786d7b8..732f6d91b2 100644
--- a/src/core/tsi/test_creds/BUILD
+++ b/src/core/tsi/test_creds/BUILD
@@ -15,7 +15,15 @@
licenses(["notice"]) # Apache v2
exports_files([
- "ca.pem",
- "server1.key",
- "server1.pem",
+ "ca.pem",
+ "server1.key",
+ "server1.pem",
+ "server0.key",
+ "server0.pem",
+ "client.key",
+ "client.pem",
+ "badserver.key",
+ "badserver.pem",
+ "badclient.key",
+ "badclient.pem",
])
diff --git a/src/core/tsi/transport_security.h b/src/core/tsi/transport_security.h
index b0d7039850..3bba38149c 100644
--- a/src/core/tsi/transport_security.h
+++ b/src/core/tsi/transport_security.h
@@ -84,11 +84,17 @@ struct tsi_handshaker {
};
/* Base for tsi_handshaker_result implementations.
- See transport_security_interface.h for documentation. */
+ See transport_security_interface.h for documentation.
+ The exec_ctx parameter in create_zero_copy_grpc_protector is supposed to be
+ of type grpc_exec_ctx*, but we're using void* instead to avoid making the TSI
+ API depend on grpc. The create_zero_copy_grpc_protector() method is only used
+ in grpc, where we do need the exec_ctx passed through, but the API still
+ needs to compile in other applications, where grpc_exec_ctx is not defined.
+*/
typedef struct {
tsi_result (*extract_peer)(const tsi_handshaker_result *self, tsi_peer *peer);
tsi_result (*create_zero_copy_grpc_protector)(
- const tsi_handshaker_result *self,
+ void *exec_ctx, const tsi_handshaker_result *self,
size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector);
tsi_result (*create_frame_protector)(const tsi_handshaker_result *self,
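The void* indirection described above means a grpc-side implementation recovers the typed pointer with a cast. A hypothetical stub illustrating the pattern:

  /* Hypothetical implementation: cast the void* back on the grpc side. */
  static tsi_result my_create_zero_copy_grpc_protector(
      void *exec_ctx_arg, const tsi_handshaker_result *self,
      size_t *max_output_protected_frame_size,
      tsi_zero_copy_grpc_protector **protector) {
    grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)exec_ctx_arg;
    (void)exec_ctx; /* real implementations use it for slice-buffer cleanup */
    *protector = NULL;
    return TSI_UNIMPLEMENTED;
  }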
diff --git a/src/core/tsi/transport_security_grpc.c b/src/core/tsi/transport_security_grpc.c
index 5bcfdfa61f..affd995230 100644
--- a/src/core/tsi/transport_security_grpc.c
+++ b/src/core/tsi/transport_security_grpc.c
@@ -20,16 +20,18 @@
/* This method creates a tsi_zero_copy_grpc_protector object. */
tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
- const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
+ grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self,
+ size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector) {
- if (self == NULL || self->vtable == NULL || protector == NULL) {
+ if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
+ protector == NULL) {
return TSI_INVALID_ARGUMENT;
}
if (self->vtable->create_zero_copy_grpc_protector == NULL) {
return TSI_UNIMPLEMENTED;
}
return self->vtable->create_zero_copy_grpc_protector(
- self, max_output_protected_frame_size, protector);
+ exec_ctx, self, max_output_protected_frame_size, protector);
}
/* --- tsi_zero_copy_grpc_protector common implementation. ---
@@ -37,28 +39,33 @@ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
Calls specific implementation after state/input validation. */
tsi_result tsi_zero_copy_grpc_protector_protect(
- tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *unprotected_slices,
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *unprotected_slices,
grpc_slice_buffer *protected_slices) {
- if (self == NULL || self->vtable == NULL || unprotected_slices == NULL ||
- protected_slices == NULL) {
+ if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
+ unprotected_slices == NULL || protected_slices == NULL) {
return TSI_INVALID_ARGUMENT;
}
if (self->vtable->protect == NULL) return TSI_UNIMPLEMENTED;
- return self->vtable->protect(self, unprotected_slices, protected_slices);
+ return self->vtable->protect(exec_ctx, self, unprotected_slices,
+ protected_slices);
}
tsi_result tsi_zero_copy_grpc_protector_unprotect(
- tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *protected_slices,
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *protected_slices,
grpc_slice_buffer *unprotected_slices) {
- if (self == NULL || self->vtable == NULL || protected_slices == NULL ||
- unprotected_slices == NULL) {
+ if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
+ protected_slices == NULL || unprotected_slices == NULL) {
return TSI_INVALID_ARGUMENT;
}
if (self->vtable->unprotect == NULL) return TSI_UNIMPLEMENTED;
- return self->vtable->unprotect(self, protected_slices, unprotected_slices);
+ return self->vtable->unprotect(exec_ctx, self, protected_slices,
+ unprotected_slices);
}
-void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector *self) {
+void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx,
+ tsi_zero_copy_grpc_protector *self) {
if (self == NULL) return;
- self->vtable->destroy(self);
+ self->vtable->destroy(exec_ctx, self);
}
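Taken together, the three wrappers above give an exec_ctx-threaded round trip.
A sketch, assuming a protector created as in the previous example
(grpc_slice_buffer_destroy_internal is the exec_ctx-aware destroy from
slice_internal.h):

    #include <grpc/slice.h>
    #include <grpc/slice_buffer.h>
    #include <grpc/support/log.h>
    #include "src/core/lib/slice/slice_internal.h"
    #include "src/core/tsi/transport_security_grpc.h"

    static void round_trip(grpc_exec_ctx *exec_ctx,
                           tsi_zero_copy_grpc_protector *protector) {
      grpc_slice_buffer plain, wire, restored;
      grpc_slice_buffer_init(&plain);
      grpc_slice_buffer_init(&wire);
      grpc_slice_buffer_init(&restored);
      grpc_slice_buffer_add(&plain, grpc_slice_from_copied_string("hello"));
      /* Frame/encrypt, then deframe/decrypt, passing exec_ctx both ways. */
      GPR_ASSERT(tsi_zero_copy_grpc_protector_protect(exec_ctx, protector,
                                                      &plain, &wire) == TSI_OK);
      GPR_ASSERT(tsi_zero_copy_grpc_protector_unprotect(
                     exec_ctx, protector, &wire, &restored) == TSI_OK);
      grpc_slice_buffer_destroy_internal(exec_ctx, &plain);
      grpc_slice_buffer_destroy_internal(exec_ctx, &wire);
      grpc_slice_buffer_destroy_internal(exec_ctx, &restored);
    }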
diff --git a/src/core/tsi/transport_security_grpc.h b/src/core/tsi/transport_security_grpc.h
index 5ab5297cc4..ca6755c12f 100644
--- a/src/core/tsi/transport_security_grpc.h
+++ b/src/core/tsi/transport_security_grpc.h
@@ -30,7 +30,8 @@ extern "C" {
assuming there is no fatal error.
The caller is responsible for destroying the protector. */
tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
- const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
+ grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self,
+ size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector);
/* -- tsi_zero_copy_grpc_protector object -- */
@@ -42,8 +43,8 @@ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
- This method returns TSI_OK in case of success or a specific error code in
case of failure. */
tsi_result tsi_zero_copy_grpc_protector_protect(
- tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *unprotected_slices,
- grpc_slice_buffer *protected_slices);
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *unprotected_slices, grpc_slice_buffer *protected_slices);
/* Outputs unprotected bytes.
- protected_slices is the bytes of protected frames.
@@ -52,21 +53,24 @@ tsi_result tsi_zero_copy_grpc_protector_protect(
there is not enough data to output in which case unprotected_slices has 0
bytes. */
tsi_result tsi_zero_copy_grpc_protector_unprotect(
- tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *protected_slices,
- grpc_slice_buffer *unprotected_slices);
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *protected_slices, grpc_slice_buffer *unprotected_slices);
/* Destroys the tsi_zero_copy_grpc_protector object. */
-void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector *self);
+void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx,
+ tsi_zero_copy_grpc_protector *self);
/* Base for tsi_zero_copy_grpc_protector implementations. */
typedef struct {
- tsi_result (*protect)(tsi_zero_copy_grpc_protector *self,
+ tsi_result (*protect)(grpc_exec_ctx *exec_ctx,
+ tsi_zero_copy_grpc_protector *self,
grpc_slice_buffer *unprotected_slices,
grpc_slice_buffer *protected_slices);
- tsi_result (*unprotect)(tsi_zero_copy_grpc_protector *self,
+ tsi_result (*unprotect)(grpc_exec_ctx *exec_ctx,
+ tsi_zero_copy_grpc_protector *self,
grpc_slice_buffer *protected_slices,
grpc_slice_buffer *unprotected_slices);
- void (*destroy)(tsi_zero_copy_grpc_protector *self);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self);
} tsi_zero_copy_grpc_protector_vtable;
struct tsi_zero_copy_grpc_protector {
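For implementors, the updated vtable wires up as follows; my_protect,
my_unprotect and my_destroy are hypothetical names, and only the signatures
are dictated by the header.

    #include <grpc/support/alloc.h>
    #include "src/core/tsi/transport_security_grpc.h"

    static tsi_result my_protect(grpc_exec_ctx *exec_ctx,
                                 tsi_zero_copy_grpc_protector *self,
                                 grpc_slice_buffer *unprotected_slices,
                                 grpc_slice_buffer *protected_slices) {
      /* Frame and encrypt unprotected_slices into protected_slices. */
      return TSI_OK;
    }

    static tsi_result my_unprotect(grpc_exec_ctx *exec_ctx,
                                   tsi_zero_copy_grpc_protector *self,
                                   grpc_slice_buffer *protected_slices,
                                   grpc_slice_buffer *unprotected_slices) {
      /* Deframe and decrypt complete frames; may legitimately output 0 bytes
         when protected_slices does not yet hold a full frame. */
      return TSI_OK;
    }

    static void my_destroy(grpc_exec_ctx *exec_ctx,
                           tsi_zero_copy_grpc_protector *self) {
      /* Release any exec_ctx-aware resources, then the object itself. */
      gpr_free(self);
    }

    static const tsi_zero_copy_grpc_protector_vtable my_vtable = {
        my_protect, my_unprotect, my_destroy};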