author    Craig Tiller <ctiller@google.com>  2017-09-07 15:51:35 -0700
committer Craig Tiller <ctiller@google.com>  2017-09-07 15:51:35 -0700
commit    23af48d9721c8c3409e03c8620ba717a12921814 (patch)
tree      cbc2d82cf2ce907cc6be2c380454726d9e9cba93 /src
parent    561dc32247534d9204d9f68e92259759875c6d93 (diff)
parent    41630a29507c8dd5b6110f0397b346b7feab442b (diff)
Merge github.com:grpc/grpc into server_stats
Diffstat (limited to 'src')
-rw-r--r--  src/core/ext/census/context.c | 12
-rw-r--r--  src/core/ext/census/grpc_filter.c | 26
-rw-r--r--  src/core/ext/census/mlog.c | 3
-rw-r--r--  src/core/ext/census/resource.c | 22
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.c | 6
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.c | 588
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c | 1
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c | 36
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.c | 28
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h | 5
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.c | 112
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.h | 8
-rw-r--r--  src/core/ext/filters/http/client/http_client_filter.c | 10
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.c | 276
-rw-r--r--  src/core/ext/filters/http/server/http_server_filter.c | 58
-rw-r--r--  src/core/ext/filters/load_reporting/load_reporting.c | 12
-rw-r--r--  src/core/ext/filters/load_reporting/load_reporting_filter.c | 1
-rw-r--r--  src/core/ext/filters/max_age/max_age_filter.c | 3
-rw-r--r--  src/core/ext/filters/message_size/message_size_filter.c | 6
-rw-r--r--  src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c | 1
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 33
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.c | 11
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.c | 80
-rw-r--r--  src/core/ext/transport/inproc/inproc_transport.c | 12
-rw-r--r--  src/core/lib/channel/channel_stack.c | 16
-rw-r--r--  src/core/lib/channel/channel_stack.h | 11
-rw-r--r--  src/core/lib/channel/channel_stack_builder.c | 29
-rw-r--r--  src/core/lib/channel/channel_stack_builder.h | 10
-rw-r--r--  src/core/lib/channel/connected_channel.c | 92
-rw-r--r--  src/core/lib/debug/stats_data.c | 83
-rw-r--r--  src/core/lib/iomgr/call_combiner.c | 202
-rw-r--r--  src/core/lib/iomgr/call_combiner.h | 121
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.c | 45
-rw-r--r--  src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c | 1961
-rw-r--r--  src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h | 28
-rw-r--r--  src/core/lib/iomgr/ev_epoll_thread_pool_linux.c | 1184
-rw-r--r--  src/core/lib/iomgr/ev_epoll_thread_pool_linux.h | 28
-rw-r--r--  src/core/lib/iomgr/ev_posix.c | 4
-rw-r--r--  src/core/lib/iomgr/iomgr.c | 8
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c | 2
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.c | 203
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.c | 91
-rw-r--r--  src/core/lib/support/string.c | 13
-rw-r--r--  src/core/lib/support/string.h | 3
-rw-r--r--  src/core/lib/surface/call.c | 154
-rw-r--r--  src/core/lib/surface/call.h | 10
-rw-r--r--  src/core/lib/surface/call_log_batch.c | 2
-rw-r--r--  src/core/lib/surface/init.c | 2
-rw-r--r--  src/core/lib/surface/lame_client.cc | 19
-rw-r--r--  src/core/lib/surface/server.c | 2
-rw-r--r--  src/core/lib/transport/metadata_batch.c | 2
-rw-r--r--  src/core/lib/transport/metadata_batch.h | 1
-rw-r--r--  src/core/lib/transport/static_metadata.c | 25
-rw-r--r--  src/core/lib/transport/static_metadata.h | 2
-rw-r--r--  src/core/lib/transport/transport.c | 21
-rw-r--r--  src/core/lib/transport/transport.h | 13
-rw-r--r--  src/core/lib/transport/transport_impl.h | 3
-rw-r--r--  src/core/lib/transport/transport_op_string.c | 7
-rw-r--r--  src/core/tsi/test_creds/BUILD | 14
-rw-r--r--  src/cpp/client/channel_cc.cc | 202
-rw-r--r--  src/cpp/common/channel_filter.cc | 4
-rw-r--r--  src/cpp/common/channel_filter.h | 11
-rw-r--r--  src/cpp/server/server_cc.cc | 9
-rw-r--r--  src/objective-c/!ProtoCompiler-gRPCPlugin.podspec | 2
-rw-r--r--  src/objective-c/!ProtoCompiler.podspec | 2
-rw-r--r--  src/proto/grpc/core/BUILD | 24
-rw-r--r--  src/proto/grpc/testing/BUILD | 3
-rw-r--r--  src/python/grpcio/grpc_core_dependencies.py | 3
-rw-r--r--  src/python/grpcio_testing/grpc_testing/__init__.py | 289
-rw-r--r--  src/python/grpcio_testing/grpc_testing/_common.py | 68
-rw-r--r--  src/python/grpcio_testing/grpc_testing/_server/__init__.py | 20
-rw-r--r--  src/python/grpcio_testing/grpc_testing/_server/_handler.py | 215
-rw-r--r--  src/python/grpcio_testing/grpc_testing/_server/_rpc.py | 153
-rw-r--r--  src/python/grpcio_testing/grpc_testing/_server/_server.py | 149
-rw-r--r--  src/python/grpcio_testing/grpc_testing/_server/_server_rpc.py | 93
-rw-r--r--  src/python/grpcio_testing/grpc_testing/_server/_service.py | 88
-rw-r--r--  src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py | 74
-rw-r--r--  src/python/grpcio_tests/tests/testing/_server_application.py | 66
-rw-r--r--  src/python/grpcio_tests/tests/testing/_server_test.py | 169
-rw-r--r--  src/python/grpcio_tests/tests/tests.json | 1
-rw-r--r--  src/ruby/.rubocop_todo.yml | 565
-rwxr-xr-x  src/ruby/end2end/grpc_class_init_client.rb | 2
-rw-r--r--  src/ruby/ext/grpc/rb_grpc_imports.generated.c | 2
-rw-r--r--  src/ruby/ext/grpc/rb_grpc_imports.generated.h | 3
86 files changed, 3739 insertions, 4243 deletions
diff --git a/src/core/ext/census/context.c b/src/core/ext/census/context.c
index 1019b287d7..9b25a32e36 100644
--- a/src/core/ext/census/context.c
+++ b/src/core/ext/census/context.c
@@ -141,7 +141,7 @@ static char *decode_tag(struct raw_tag *tag, char *header, int offset) {
// Make a copy (in 'to') of an existing tag_set.
static void tag_set_copy(struct tag_set *to, const struct tag_set *from) {
memcpy(to, from, sizeof(struct tag_set));
- to->kvm = gpr_malloc(to->kvm_size);
+ to->kvm = (char *)gpr_malloc(to->kvm_size);
memcpy(to->kvm, from->kvm, from->kvm_used);
}
@@ -184,7 +184,7 @@ static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
if (tags->kvm_used + tag_size > tags->kvm_size) {
// allocate new memory if needed
tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE;
- char *new_kvm = gpr_malloc(tags->kvm_size);
+ char *new_kvm = (char *)gpr_malloc(tags->kvm_size);
if (tags->kvm_used > 0) memcpy(new_kvm, tags->kvm, tags->kvm_used);
gpr_free(tags->kvm);
tags->kvm = new_kvm;
@@ -274,7 +274,8 @@ static void tag_set_flatten(struct tag_set *tags) {
census_context *census_context_create(const census_context *base,
const census_tag *tags, int ntags,
census_context_status const **status) {
- census_context *context = gpr_malloc(sizeof(census_context));
+ census_context *context =
+ (census_context *)gpr_malloc(sizeof(census_context));
// If we are given a base, copy it into our new tag set. Otherwise set it
// to zero/NULL everything.
if (base == NULL) {
@@ -459,7 +460,7 @@ static void tag_set_decode(struct tag_set *tags, const char *buffer,
}
tags->kvm_used = size - header_size;
tags->kvm_size = tags->kvm_used + CENSUS_MAX_TAG_KV_LEN;
- tags->kvm = gpr_malloc(tags->kvm_size);
+ tags->kvm = (char *)gpr_malloc(tags->kvm_size);
if (tag_header_size != TAG_HEADER_SIZE) {
// something new in the tag information. I don't understand it, so
// don't copy it over.
@@ -481,7 +482,8 @@ static void tag_set_decode(struct tag_set *tags, const char *buffer,
}
census_context *census_context_decode(const char *buffer, size_t size) {
- census_context *context = gpr_malloc(sizeof(census_context));
+ census_context *context =
+ (census_context *)gpr_malloc(sizeof(census_context));
memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set));
if (buffer == NULL) {
memset(&context->tags[PROPAGATED_TAGS], 0, sizeof(struct tag_set));
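
The casts added throughout this file all follow one pattern: gpr_malloc returns void *, which C converts implicitly but C++ does not, so the explicit casts keep the file compilable under a C++ compiler. A minimal sketch of the pattern, with plain malloc standing in for gpr_malloc and an illustrative struct that is not from the tree:

    #include <stdlib.h>
    #include <string.h>

    typedef struct tag_buffer {
      char *kvm;
      size_t kvm_size;
    } tag_buffer;

    static void tag_buffer_init(tag_buffer *buf, size_t size) {
      buf->kvm_size = size;
      /* The explicit cast is a no-op in C but required when the file is
         compiled as C++, where void * does not convert implicitly. */
      buf->kvm = (char *)malloc(size);
      memset(buf->kvm, 0, size);
    }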
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 13fe2e6b1c..b37ab90389 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -60,8 +60,8 @@ static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
static void client_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (op->send_initial_metadata) {
extract_and_annotate_method_tag(
op->payload->send_initial_metadata.send_initial_metadata, calld, chand);
@@ -78,9 +78,9 @@ static void client_start_transport_op(grpc_exec_ctx *exec_ctx,
static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
GPR_TIMER_BEGIN("census-server:server_on_done_recv", 0);
- grpc_call_element *elem = ptr;
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ grpc_call_element *elem = (grpc_call_element *)ptr;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (error == GRPC_ERROR_NONE) {
extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
}
@@ -90,7 +90,7 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the op callback */
calld->recv_initial_metadata =
@@ -117,7 +117,7 @@ static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *d = elem->call_data;
+ call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = args->start_time;
@@ -128,7 +128,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *d = elem->call_data;
+ call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
}
@@ -136,7 +136,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *d = elem->call_data;
+ call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
d->start_ts = args->start_time;
@@ -150,7 +150,7 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *d = elem->call_data;
+ call_data *d = (call_data *)elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
}
@@ -158,14 +158,14 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(chand != NULL);
return GRPC_ERROR_NONE;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(chand != NULL);
}
@@ -179,7 +179,6 @@ const grpc_channel_filter grpc_client_census_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"census-client"};
@@ -193,6 +192,5 @@ const grpc_channel_filter grpc_server_census_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"census-server"};
diff --git a/src/core/ext/census/mlog.c b/src/core/ext/census/mlog.c
index 937ceb101b..4b8c8466b3 100644
--- a/src/core/ext/census/mlog.c
+++ b/src/core/ext/census/mlog.c
@@ -467,7 +467,8 @@ void census_log_initialize(size_t size_in_mb, int discard_old_records) {
g_log.blocks = (cl_block*)gpr_malloc_aligned(
g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
- g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
+ g_log.buffer =
+ (char*)gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
cl_block_list_initialize(&g_log.free_block_list);
cl_block_list_initialize(&g_log.dirty_block_list);
diff --git a/src/core/ext/census/resource.c b/src/core/ext/census/resource.c
index 1a676f0e1e..44a887231c 100644
--- a/src/core/ext/census/resource.c
+++ b/src/core/ext/census/resource.c
@@ -87,7 +87,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field,
gpr_log(GPR_INFO, "Zero-length Resource name.");
return false;
}
- vresource->name = gpr_malloc(stream->bytes_left + 1);
+ vresource->name = (char *)gpr_malloc(stream->bytes_left + 1);
vresource->name[stream->bytes_left] = '\0';
if (!pb_read(stream, (uint8_t *)vresource->name, stream->bytes_left)) {
return false;
@@ -106,7 +106,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field,
if (stream->bytes_left == 0) {
return true;
}
- vresource->description = gpr_malloc(stream->bytes_left + 1);
+ vresource->description = (char *)gpr_malloc(stream->bytes_left + 1);
vresource->description[stream->bytes_left] = '\0';
if (!pb_read(stream, (uint8_t *)vresource->description,
stream->bytes_left)) {
@@ -134,7 +134,8 @@ static bool validate_units_helper(pb_istream_t *stream, int *count,
// Have to allocate a new array of values. Normal case is 0 or 1, so
// this should normally not be an issue.
google_census_Resource_BasicUnit *new_bup =
- gpr_malloc((size_t)*count * sizeof(google_census_Resource_BasicUnit));
+ (google_census_Resource_BasicUnit *)gpr_malloc(
+ (size_t)*count * sizeof(google_census_Resource_BasicUnit));
if (*count != 1) {
memcpy(new_bup, *bup,
(size_t)(*count - 1) * sizeof(google_census_Resource_BasicUnit));
@@ -207,7 +208,8 @@ size_t allocate_resource(void) {
// Expand resources if needed.
if (n_resources == n_defined_resources) {
size_t new_n_resources = n_resources ? n_resources * 2 : 2;
- resource **new_resources = gpr_malloc(new_n_resources * sizeof(resource *));
+ resource **new_resources =
+ (resource **)gpr_malloc(new_n_resources * sizeof(resource *));
if (n_resources != 0) {
memcpy(new_resources, resources, n_resources * sizeof(resource *));
}
@@ -226,7 +228,7 @@ size_t allocate_resource(void) {
}
}
GPR_ASSERT(id < n_resources && resources[id] == NULL);
- resources[id] = gpr_malloc(sizeof(resource));
+ resources[id] = (resource *)gpr_malloc(sizeof(resource));
memset(resources[id], 0, sizeof(resource));
n_defined_resources++;
next_id = (id + 1) % n_resources;
@@ -276,22 +278,24 @@ int32_t define_resource(const resource *base) {
gpr_mu_lock(&resource_lock);
size_t id = allocate_resource();
size_t len = strlen(base->name) + 1;
- resources[id]->name = gpr_malloc(len);
+ resources[id]->name = (char *)gpr_malloc(len);
memcpy(resources[id]->name, base->name, len);
if (base->description) {
len = strlen(base->description) + 1;
- resources[id]->description = gpr_malloc(len);
+ resources[id]->description = (char *)gpr_malloc(len);
memcpy(resources[id]->description, base->description, len);
}
resources[id]->prefix = base->prefix;
resources[id]->n_numerators = base->n_numerators;
len = (size_t)base->n_numerators * sizeof(*base->numerators);
- resources[id]->numerators = gpr_malloc(len);
+ resources[id]->numerators =
+ (google_census_Resource_BasicUnit *)gpr_malloc(len);
memcpy(resources[id]->numerators, base->numerators, len);
resources[id]->n_denominators = base->n_denominators;
if (base->n_denominators != 0) {
len = (size_t)base->n_denominators * sizeof(*base->denominators);
- resources[id]->denominators = gpr_malloc(len);
+ resources[id]->denominators =
+ (google_census_Resource_BasicUnit *)gpr_malloc(len);
memcpy(resources[id]->denominators, base->denominators, len);
}
gpr_mu_unlock(&resource_lock);
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.c b/src/core/ext/filters/client_channel/channel_connectivity.c
index b83c95275f..0a9e90d12e 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.c
+++ b/src/core/ext/filters/client_channel/channel_connectivity.c
@@ -191,6 +191,12 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(wa);
}
+int grpc_channel_support_connectivity_watcher(grpc_channel *channel) {
+ grpc_channel_element *client_channel_elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+ return client_channel_elem->filter != &grpc_client_channel_filter ? 0 : 1;
+}
+
void grpc_channel_watch_connectivity_state(
grpc_channel *channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
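
The new helper above lets a caller check, before registering a watcher, whether the channel's stack actually ends in the client_channel filter. A hedged usage sketch; the channel, completion queue, deadline, and tag are assumed to be created elsewhere, and watch_if_supported is a placeholder name:

    #include <grpc/grpc.h>

    static void watch_if_supported(grpc_channel *channel,
                                   grpc_completion_queue *cq,
                                   gpr_timespec deadline, void *tag) {
      if (grpc_channel_support_connectivity_watcher(channel)) {
        grpc_connectivity_state state =
            grpc_channel_check_connectivity_state(channel, 0 /* no connect */);
        grpc_channel_watch_connectivity_state(channel, state, deadline, cq, tag);
      }
      /* else: e.g. a lame channel, whose top filter is not
         grpc_client_channel_filter, so watching is not supported */
    }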
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index 58e31d7b45..c24e52ec1d 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -796,7 +796,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
// send_message
// recv_trailing_metadata
// send_trailing_metadata
-#define MAX_WAITING_BATCHES 6
+// We also add room for a single cancel_stream batch.
+#define MAX_WAITING_BATCHES 7
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
@@ -807,24 +808,27 @@ typedef struct client_channel_call_data {
// State for handling deadlines.
// The code in deadline_filter.c requires this to be the first field.
// TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
- // and this struct both independently store a pointer to the call
- // stack and each has its own mutex. If/when we have time, find a way
- // to avoid this without breaking the grpc_deadline_state abstraction.
+ // and this struct both independently store pointers to the call stack
+ // and call combiner. If/when we have time, find a way to avoid this
+ // without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
grpc_slice path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
+ gpr_arena *arena;
+ grpc_call_stack *owning_call;
+ grpc_call_combiner *call_combiner;
+
grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params;
- /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest
- bit is 0), or a pointer to an error (if the lowest bit is 1) */
- gpr_atm subchannel_call_or_error;
- gpr_arena *arena;
+ grpc_subchannel_call *subchannel_call;
+ grpc_error *error;
grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending.
grpc_closure lb_pick_closure;
+ grpc_closure lb_pick_cancel_closure;
grpc_connected_subchannel *connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
@@ -832,10 +836,9 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
size_t waiting_for_pick_batches_count;
+ grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
- grpc_transport_stream_op_batch_payload *initial_metadata_payload;
-
- grpc_call_stack *owning_call;
+ grpc_transport_stream_op_batch *initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
@@ -843,55 +846,42 @@ typedef struct client_channel_call_data {
grpc_closure *original_on_complete;
} call_data;
-typedef struct {
- grpc_subchannel_call *subchannel_call;
- grpc_error *error;
-} call_or_error;
-
-static call_or_error get_call_or_error(call_data *p) {
- gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error);
- if (c == 0)
- return (call_or_error){NULL, NULL};
- else if (c & 1)
- return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)};
- else
- return (call_or_error){(grpc_subchannel_call *)c, NULL};
+grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
+ grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ return calld->subchannel_call;
}
-static bool set_call_or_error(call_data *p, call_or_error coe) {
- // this should always be under a lock
- call_or_error existing = get_call_or_error(p);
- if (existing.error != GRPC_ERROR_NONE) {
- GRPC_ERROR_UNREF(coe.error);
- return false;
- }
- GPR_ASSERT(existing.subchannel_call == NULL);
- if (coe.error != GRPC_ERROR_NONE) {
- GPR_ASSERT(coe.subchannel_call == NULL);
- gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_add(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ if (batch->send_initial_metadata) {
+ GPR_ASSERT(calld->initial_metadata_batch == NULL);
+ calld->initial_metadata_batch = batch;
} else {
- GPR_ASSERT(coe.subchannel_call != NULL);
- gpr_atm_rel_store(&p->subchannel_call_or_error,
- (gpr_atm)coe.subchannel_call);
+ GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
+ batch;
}
- return true;
}
-grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
- grpc_call_element *call_elem) {
- return get_call_or_error(call_elem->call_data).subchannel_call;
-}
-
-static void waiting_for_pick_batches_add_locked(
- call_data *calld, grpc_transport_stream_op_batch *batch) {
- GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
- batch;
+// This is called via the call combiner, so access to calld is synchronized.
+static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ call_data *calld = arg;
+ if (calld->waiting_for_pick_batches_count > 0) {
+ --calld->waiting_for_pick_batches_count;
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx,
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
+ GRPC_ERROR_REF(error), calld->call_combiner);
+ }
}
-static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_error *error) {
call_data *calld = elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
@@ -900,34 +890,60 @@ static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
grpc_error_string(error));
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
+ GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+ fail_pending_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ &calld->handle_pending_batch_in_call_combiner[i],
+ GRPC_ERROR_REF(error),
+ "waiting_for_pick_batches_fail");
+ }
+ if (calld->initial_metadata_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
+ exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
+ calld->call_combiner);
+ } else {
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "waiting_for_pick_batches_fail");
}
- calld->waiting_for_pick_batches_count = 0;
GRPC_ERROR_UNREF(error);
}
-static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- if (calld->waiting_for_pick_batches_count == 0) return;
- call_or_error coe = get_call_or_error(calld);
- if (coe.error != GRPC_ERROR_NONE) {
- waiting_for_pick_batches_fail_locked(exec_ctx, elem,
- GRPC_ERROR_REF(coe.error));
- return;
+// This is called via the call combiner, so access to calld is synchronized.
+static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *ignored) {
+ call_data *calld = arg;
+ if (calld->waiting_for_pick_batches_count > 0) {
+ --calld->waiting_for_pick_batches_count;
+ grpc_subchannel_call_process_op(
+ exec_ctx, calld->subchannel_call,
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
}
+}
+
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
" pending batches to subchannel_call=%p",
- elem->channel_data, calld, calld->waiting_for_pick_batches_count,
- coe.subchannel_call);
+ chand, calld, calld->waiting_for_pick_batches_count,
+ calld->subchannel_call);
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
- calld->waiting_for_pick_batches[i]);
- }
- calld->waiting_for_pick_batches_count = 0;
+ GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+ run_pending_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ &calld->handle_pending_batch_in_call_combiner[i],
+ GRPC_ERROR_NONE,
+ "waiting_for_pick_batches_resume");
+ }
+ GPR_ASSERT(calld->initial_metadata_batch != NULL);
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
+ calld->initial_metadata_batch);
}
// Applies service config to the call. Must be invoked once we know
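
The add/fail/resume helpers above all funnel work through the new call combiner. One detail worth spelling out: each queued batch gets its own closure slot in handle_pending_batch_in_call_combiner, because a grpc_closure cannot be reused while it is still queued, and each GRPC_CALL_COMBINER_START consumes exactly one closure per synchronized callback. A hedged distillation of that loop; schedule_all_pending is not a function in the tree:

    #include "src/core/lib/iomgr/call_combiner.h"

    static void schedule_all_pending(grpc_exec_ctx *exec_ctx, call_data *calld,
                                     grpc_iomgr_cb_func cb) {
      for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
        /* One closure per batch: closure i stays queued until the call
           combiner runs it, so it cannot be shared across iterations. */
        GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i], cb,
                          calld, grpc_schedule_on_exec_ctx);
        GRPC_CALL_COMBINER_START(
            exec_ctx, calld->call_combiner,
            &calld->handle_pending_batch_in_call_combiner[i], GRPC_ERROR_NONE,
            "drain pending batches");
      }
    }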
@@ -968,29 +984,28 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
+ channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call = NULL;
const grpc_connected_subchannel_call_args call_args = {
.pollent = calld->pollent,
.path = calld->path,
.start_time = calld->call_start_time,
.deadline = calld->deadline,
.arena = calld->arena,
- .context = calld->subchannel_call_context};
+ .context = calld->subchannel_call_context,
+ .call_combiner = calld->call_combiner};
grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
+ exec_ctx, calld->connected_subchannel, &call_args,
+ &calld->subchannel_call);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
- elem->channel_data, calld, subchannel_call,
- grpc_error_string(new_error));
+ chand, calld, calld->subchannel_call, grpc_error_string(new_error));
}
- GPR_ASSERT(set_call_or_error(
- calld, (call_or_error){.subchannel_call = subchannel_call}));
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error);
+ waiting_for_pick_batches_fail(exec_ctx, elem, new_error);
} else {
- waiting_for_pick_batches_resume_locked(exec_ctx, elem);
+ waiting_for_pick_batches_resume(exec_ctx, elem);
}
GRPC_ERROR_UNREF(error);
}
@@ -1002,60 +1017,27 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand = elem->channel_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
- call_or_error coe = get_call_or_error(calld);
if (calld->connected_subchannel == NULL) {
// Failed to create subchannel.
- grpc_error *failure =
- error == GRPC_ERROR_NONE
- ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy")
- : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Failed to create subchannel", &error, 1);
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = error == GRPC_ERROR_NONE
+ ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy")
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to create subchannel", &error, 1);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failed to create subchannel: error=%s", chand,
- calld, grpc_error_string(failure));
- }
- set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure);
- } else if (coe.error != GRPC_ERROR_NONE) {
- /* already cancelled before subchannel became ready */
- grpc_error *child_errors[] = {error, coe.error};
- grpc_error *cancellation_error =
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Cancelled before creating subchannel", child_errors,
- GPR_ARRAY_SIZE(child_errors));
- /* if due to deadline, attach the deadline exceeded status to the error */
- if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
- cancellation_error =
- grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_DEADLINE_EXCEEDED);
- }
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: cancelled before subchannel became ready: %s",
- chand, calld, grpc_error_string(cancellation_error));
+ calld, grpc_error_string(calld->error));
}
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error);
+ waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error));
} else {
/* Create call on subchannel. */
create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
GRPC_ERROR_UNREF(error);
}
-static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call =
- get_call_or_error(calld).subchannel_call;
- if (subchannel_call == NULL) {
- return NULL;
- } else {
- return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
- }
-}
-
/** Return true if subchannel is available immediately (in which case
subchannel_ready_locked() should not be called), or false otherwise (in
which case subchannel_ready_locked() should be called when the subchannel
@@ -1065,39 +1047,78 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
typedef struct {
grpc_call_element *elem;
- bool cancelled;
+ bool finished;
grpc_closure closure;
+ grpc_closure cancel_closure;
} pick_after_resolver_result_args;
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error) {
+ pick_after_resolver_result_args *args = arg;
+ if (args->finished) {
+ gpr_free(args);
+ return;
+ }
+ args->finished = true;
+ grpc_call_element *elem = args->elem;
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ // If we don't yet have a resolver result, then a closure for
+ // pick_after_resolver_result_done_locked() will have been added to
+ // chand->waiting_for_resolver_result_closures, and it may not be invoked
+ // until after this call has been destroyed. We mark the operation as
+ // finished, so that when pick_after_resolver_result_done_locked()
+ // is called, it will be a no-op. We also immediately invoke
+ // subchannel_ready_locked() to propagate the error back to the caller.
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: cancelling pick waiting for resolver result",
+ chand, calld);
+ }
+ // Note: Although we are not in the call combiner here, we are
+ // basically stealing the call combiner from the pending pick, so
+ // it's safe to call subchannel_ready_locked() here -- we are
+ // essentially calling it here instead of calling it in
+ // pick_after_resolver_result_done_locked().
+ subchannel_ready_locked(exec_ctx, elem,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
+}
+
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
pick_after_resolver_result_args *args = arg;
- if (args->cancelled) {
+ if (args->finished) {
/* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "call cancelled before resolver result");
}
+ gpr_free(args);
+ return;
+ }
+ args->finished = true;
+ grpc_call_element *elem = args->elem;
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ if (error != GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
+ chand, calld);
+ }
+ subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
} else {
- channel_data *chand = args->elem->channel_data;
- call_data *calld = args->elem->call_data;
- if (error != GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
- chand, calld);
- }
- subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
- } else {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
- chand, calld);
- }
- if (pick_subchannel_locked(exec_ctx, args->elem)) {
- subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
- }
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
+ chand, calld);
+ }
+ if (pick_subchannel_locked(exec_ctx, elem)) {
+ subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
}
- gpr_free(args);
}
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
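
The done and cancel callbacks above share one heap-allocated args struct and a finished flag. Both run under the client_channel combiner, so no extra locking is needed: whichever callback runs first does the work and sets the flag; whichever runs second only frees the argument. A self-contained sketch of that lifetime rule, with illustrative types and names:

    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct shared_args { bool finished; } shared_args;

    /* Called from each of the two competing callbacks. */
    static void run_once_then_free(shared_args *args, void (*work)(void)) {
      if (args->finished) { /* the other callback already ran */
        free(args);
        return;
      }
      args->finished = true;
      work(); /* pick-done or cancel handling */
    }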
@@ -1116,41 +1137,34 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
args, grpc_combiner_scheduler(chand->combiner));
grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
&args->closure, GRPC_ERROR_NONE);
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&args->cancel_closure,
+ pick_after_resolver_result_cancel_locked, args,
+ grpc_combiner_scheduler(chand->combiner)));
}
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- // If we don't yet have a resolver result, then a closure for
- // pick_after_resolver_result_done_locked() will have been added to
- // chand->waiting_for_resolver_result_closures, and it may not be invoked
- // until after this call has been destroyed. We mark the operation as
- // cancelled, so that when pick_after_resolver_result_done_locked()
- // is called, it will be a no-op. We also immediately invoke
- // subchannel_ready_locked() to propagate the error back to the caller.
- for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
- closure != NULL; closure = closure->next_data.next) {
- pick_after_resolver_result_args *args = closure->cb_arg;
- if (!args->cancelled && args->elem == elem) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: "
- "cancelling pick waiting for resolver result",
- chand, calld);
- }
- args->cancelled = true;
- subchannel_ready_locked(exec_ctx, elem,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick cancelled", &error, 1));
+ if (error != GRPC_ERROR_NONE && calld->lb_policy != NULL) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
+ chand, calld, calld->lb_policy);
}
+ grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
+ &calld->connected_subchannel,
+ GRPC_ERROR_REF(error));
}
- GRPC_ERROR_UNREF(error);
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
}
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
-// Unrefs the LB policy after invoking subchannel_ready_locked().
+// Unrefs the LB policy and invokes subchannel_ready_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
@@ -1194,24 +1208,17 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
}
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
+ } else {
+ GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
+ pick_callback_cancel_locked, elem,
+ grpc_combiner_scheduler(chand->combiner)));
}
return pick_done;
}
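
Both async pick paths arm cancellation the same way: a closure is registered with grpc_call_combiner_set_notify_on_cancel(), and the call combiner invokes it with the cancellation error if the call is cancelled while the operation is still pending. A hedged sketch of just the registration step; arm_cancellation and on_cancel are placeholders:

    #include "src/core/lib/iomgr/call_combiner.h"

    static void arm_cancellation(grpc_exec_ctx *exec_ctx,
                                 grpc_call_element *elem, grpc_closure *storage,
                                 grpc_iomgr_cb_func on_cancel) {
      channel_data *chand = (channel_data *)elem->channel_data;
      call_data *calld = (call_data *)elem->call_data;
      /* The closure runs under the channel combiner, carrying the
         cancellation error, if the call is cancelled before the pick
         completes; otherwise the registration is effectively a no-op. */
      grpc_call_combiner_set_notify_on_cancel(
          exec_ctx, calld->call_combiner,
          GRPC_CLOSURE_INIT(storage, on_cancel, elem,
                            grpc_combiner_scheduler(chand->combiner)));
    }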
-static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- GPR_ASSERT(calld->lb_policy != NULL);
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
- chand, calld, calld->lb_policy);
- }
- grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
- &calld->connected_subchannel, error);
-}
-
static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
GPR_TIMER_BEGIN("pick_subchannel", 0);
@@ -1224,7 +1231,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
// Otherwise, if the service config specified a value for this
// method, use that.
uint32_t initial_metadata_flags =
- calld->initial_metadata_payload->send_initial_metadata
+ calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata_flags;
const bool wait_for_ready_set_from_api =
initial_metadata_flags &
@@ -1241,7 +1248,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
}
}
const grpc_lb_policy_pick_args inputs = {
- calld->initial_metadata_payload->send_initial_metadata
+ calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata,
initial_metadata_flags, &calld->lb_token_mdelem};
pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
@@ -1258,91 +1265,33 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
return pick_done;
}
-static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error_ignored) {
- GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
- grpc_transport_stream_op_batch *batch = arg;
- grpc_call_element *elem = batch->handler_private.extra_arg;
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- /* need to recheck that another thread hasn't set the call */
- call_or_error coe = get_call_or_error(calld);
- if (coe.error != GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(coe.error));
- }
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch, GRPC_ERROR_REF(coe.error));
- goto done;
- }
- if (coe.subchannel_call != NULL) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
- calld, coe.subchannel_call);
- }
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
- goto done;
- }
- // Add to waiting-for-pick list. If we succeed in getting a
- // subchannel call below, we'll handle this batch (along with any
- // other waiting batches) in waiting_for_pick_batches_resume_locked().
- waiting_for_pick_batches_add_locked(calld, batch);
- // If this is a cancellation, cancel the pending pick (if any) and
- // fail any pending batches.
- if (batch->cancel_stream) {
- grpc_error *error = batch->payload->cancel_stream.cancel_error;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
- calld, grpc_error_string(error));
- }
- /* Stash a copy of cancel_error in our call data, so that we can use
- it for subsequent operations. This ensures that if the call is
- cancelled before any batches are passed down (e.g., if the deadline
- is in the past when the call starts), we can return the right
- error to the caller when the first batch does get passed down. */
- set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
- if (calld->lb_policy != NULL) {
- pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error_ignored) {
+ GPR_TIMER_BEGIN("start_pick_locked", 0);
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ GPR_ASSERT(calld->connected_subchannel == NULL);
+ if (pick_subchannel_locked(exec_ctx, elem)) {
+ // Pick was returned synchronously.
+ if (calld->connected_subchannel == NULL) {
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy");
+ waiting_for_pick_batches_fail(exec_ctx, elem,
+ GRPC_ERROR_REF(calld->error));
} else {
- pick_after_resolver_result_cancel_locked(exec_ctx, elem,
- GRPC_ERROR_REF(error));
- }
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- goto done;
- }
- /* if we don't have a subchannel, try to get one */
- if (batch->send_initial_metadata) {
- GPR_ASSERT(calld->connected_subchannel == NULL);
- calld->initial_metadata_payload = batch->payload;
- GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
- /* If a subchannel is not available immediately, the polling entity from
- call_data should be provided to channel_data's interested_parties, so
- that IO of the lb_policy and resolver could be done under it. */
- if (pick_subchannel_locked(exec_ctx, elem)) {
- // Pick was returned synchronously.
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
- if (calld->connected_subchannel == NULL) {
- grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy");
- set_call_or_error(calld,
- (call_or_error){.error = GRPC_ERROR_REF(error)});
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, error);
- } else {
- // Create subchannel call.
- create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
- }
- } else {
- grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
- chand->interested_parties);
+ // Create subchannel call.
+ create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
+ } else {
+ // Pick will be done asynchronously. Add the call's polling entity to
+ // the channel's interested_parties, so that I/O for the resolver
+ // and LB policy can be done under it.
+ grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
}
-done:
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
- "start_transport_stream_op_batch");
- GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
+ GPR_TIMER_END("start_pick_locked", 0);
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -1365,27 +1314,49 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_ERROR_REF(error));
}
-/* The logic here is fairly complicated, due to (a) the fact that we
- need to handle the case where we receive the send op before the
- initial metadata op, and (b) the need for efficiency, especially in
- the streaming case.
-
- We use double-checked locking to initially see if initialization has been
- performed. If it has not, we acquire the combiner and perform initialization.
- If it has, we proceed on the fast path. */
static void cc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace) ||
- GRPC_TRACER_ON(grpc_trace_channel)) {
- grpc_call_log_op(GPR_INFO, elem, batch);
- }
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch);
}
+ GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
+ // If we've previously been cancelled, immediately fail any new batches.
+ if (calld->error != GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
+ chand, calld, grpc_error_string(calld->error));
+ }
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
+ goto done;
+ }
+ if (batch->cancel_stream) {
+ // Stash a copy of cancel_error in our call data, so that we can use
+ // it for subsequent operations. This ensures that if the call is
+ // cancelled before any batches are passed down (e.g., if the deadline
+ // is in the past when the call starts), we can return the right
+ // error to the caller when the first batch does get passed down.
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
+ calld, grpc_error_string(calld->error));
+ }
+ // If we have a subchannel call, send the cancellation batch down.
+ // Otherwise, fail all pending batches.
+ if (calld->subchannel_call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+ } else {
+ waiting_for_pick_batches_add(calld, batch);
+ waiting_for_pick_batches_fail(exec_ctx, elem,
+ GRPC_ERROR_REF(calld->error));
+ }
+ goto done;
+ }
// Intercept on_complete for recv_trailing_metadata so that we can
// check retry throttle status.
if (batch->recv_trailing_metadata) {
@@ -1395,38 +1366,43 @@ static void cc_start_transport_stream_op_batch(
grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete;
}
- /* try to (atomically) get the call */
- call_or_error coe = get_call_or_error(calld);
- GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
- if (coe.error != GRPC_ERROR_NONE) {
+ // Check if we've already gotten a subchannel call.
+ // Note that once we have completed the pick, we do not need to enter
+ // the channel combiner, which is more efficient (especially for
+ // streaming calls).
+ if (calld->subchannel_call != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(coe.error));
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
+ calld, calld->subchannel_call);
}
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch, GRPC_ERROR_REF(coe.error));
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
goto done;
}
- if (coe.subchannel_call != NULL) {
+ // We do not yet have a subchannel call.
+ // Add the batch to the waiting-for-pick list.
+ waiting_for_pick_batches_add(calld, batch);
+ // For batches containing a send_initial_metadata op, enter the channel
+ // combiner to start a pick.
+ if (batch->send_initial_metadata) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
+ }
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
+ elem, grpc_combiner_scheduler(chand->combiner)),
+ GRPC_ERROR_NONE);
+ } else {
+ // For all other batches, release the call combiner.
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
- calld, coe.subchannel_call);
+                "chand=%p calld=%p: saved batch, yielding call combiner", chand,
+ calld);
}
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
- goto done;
- }
- /* we failed; lock and figure out what to do */
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "batch does not include send_initial_metadata");
}
- GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
- batch->handler_private.extra_arg = elem;
- GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
- start_transport_stream_op_batch_locked, batch,
- grpc_combiner_scheduler(chand->combiner)),
- GRPC_ERROR_NONE);
done:
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
}
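
The rewritten batch entry point above has three outcomes: fail immediately if the call was already cancelled, take the combiner-free fast path if the subchannel call exists, or queue the batch and enter the channel combiner only for send_initial_metadata. A self-contained sketch of that dispatch, with stand-in types replacing the real grpc ones:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct batch { bool send_initial_metadata; } batch;
    typedef struct call {
      bool cancelled;
      void *subchannel_call; /* non-NULL once the pick completed */
      batch *pending[7];     /* MAX_WAITING_BATCHES */
      size_t pending_count;
    } call;

    static void dispatch(call *c, batch *b, void (*fail)(batch *),
                         void (*send)(void *, batch *),
                         void (*start_pick)(call *)) {
      if (c->cancelled) {
        fail(b); /* previously cancelled: fail new batches immediately */
      } else if (c->subchannel_call != NULL) {
        send(c->subchannel_call, b); /* fast path: no channel combiner */
      } else {
        c->pending[c->pending_count++] = b; /* wait for the pick */
        if (b->send_initial_metadata) start_pick(c);
      }
    }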
@@ -1441,10 +1417,12 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
- calld->owning_call = args->call_stack;
calld->arena = args->arena;
+ calld->owning_call = args->call_stack;
+ calld->call_combiner = args->call_combiner;
if (chand->deadline_checking_enabled) {
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack, calld->deadline);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+ args->call_combiner, calld->deadline);
}
return GRPC_ERROR_NONE;
}
@@ -1463,13 +1441,12 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
if (calld->method_params != NULL) {
method_parameters_unref(calld->method_params);
}
- call_or_error coe = get_call_or_error(calld);
- GRPC_ERROR_UNREF(coe.error);
- if (coe.subchannel_call != NULL) {
- grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call,
+ GRPC_ERROR_UNREF(calld->error);
+ if (calld->subchannel_call != NULL) {
+ grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
then_schedule_closure);
then_schedule_closure = NULL;
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call,
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
"client_channel_destroy_call");
}
GPR_ASSERT(calld->lb_policy == NULL);
@@ -1508,7 +1485,6 @@ const grpc_channel_filter grpc_client_channel_filter = {
sizeof(channel_data),
cc_init_channel_elem,
cc_destroy_channel_elem,
- cc_get_peer,
cc_get_channel_info,
"client-channel",
};
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
index 568bb2ba8d..299f26b4de 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
@@ -132,6 +132,5 @@ const grpc_channel_filter grpc_client_load_reporting_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client_load_reporting"};
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
index 56ed4371a9..3ff081a514 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
@@ -32,6 +32,7 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
@@ -125,7 +126,6 @@ static const grpc_resolver_vtable fake_resolver_vtable = {
struct grpc_fake_resolver_response_generator {
fake_resolver* resolver; // Set by the fake_resolver constructor to itself.
- grpc_channel_args* next_response;
gpr_refcount refcount;
};
@@ -151,19 +151,26 @@ void grpc_fake_resolver_response_generator_unref(
}
}
-static void set_response_cb(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
- grpc_fake_resolver_response_generator* generator =
- (grpc_fake_resolver_response_generator*)arg;
+typedef struct set_response_closure_arg {
+ grpc_closure set_response_closure;
+ grpc_fake_resolver_response_generator* generator;
+ grpc_channel_args* next_response;
+} set_response_closure_arg;
+
+static void set_response_closure_fn(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ set_response_closure_arg* closure_arg = arg;
+ grpc_fake_resolver_response_generator* generator = closure_arg->generator;
fake_resolver* r = generator->resolver;
if (r->next_results != NULL) {
grpc_channel_args_destroy(exec_ctx, r->next_results);
}
- r->next_results = generator->next_response;
+ r->next_results = closure_arg->next_response;
if (r->results_upon_error != NULL) {
grpc_channel_args_destroy(exec_ctx, r->results_upon_error);
}
- r->results_upon_error = grpc_channel_args_copy(generator->next_response);
+ r->results_upon_error = grpc_channel_args_copy(closure_arg->next_response);
+ gpr_free(closure_arg);
fake_resolver_maybe_finish_next_locked(exec_ctx, r);
}
@@ -171,12 +178,15 @@ void grpc_fake_resolver_response_generator_set_response(
grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator,
grpc_channel_args* next_response) {
GPR_ASSERT(generator->resolver != NULL);
- generator->next_response = grpc_channel_args_copy(next_response);
- GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_CREATE(set_response_cb, generator,
- grpc_combiner_scheduler(
- generator->resolver->base.combiner)),
- GRPC_ERROR_NONE);
+ set_response_closure_arg* closure_arg = gpr_zalloc(sizeof(*closure_arg));
+ closure_arg->generator = generator;
+ closure_arg->next_response = grpc_channel_args_copy(next_response);
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
+ set_response_closure_fn, closure_arg,
+ grpc_combiner_scheduler(
+ generator->resolver->base.combiner)),
+ GRPC_ERROR_NONE);
}
static void* response_generator_arg_copy(void* p) {
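
The refactoring above replaces a single next_response field on the shared generator, which back-to-back set_response calls could clobber, with a per-call heap allocation that embeds its own closure and is freed by the callback. A hedged sketch of that allocation pattern; one_shot_arg and schedule_one_shot are placeholder names, and payload stands in for the copied channel args:

    #include <grpc/support/alloc.h>
    #include "src/core/lib/iomgr/closure.h"

    typedef struct one_shot_arg {
      grpc_closure closure; /* embedded: no second allocation needed */
      void *payload;        /* stand-in for the copied channel args */
    } one_shot_arg;

    static void schedule_one_shot(grpc_exec_ctx *exec_ctx,
                                  grpc_closure_scheduler *scheduler,
                                  grpc_iomgr_cb_func cb, void *payload) {
      one_shot_arg *arg = (one_shot_arg *)gpr_zalloc(sizeof(*arg));
      arg->payload = payload;
      /* The callback owns 'arg' and must gpr_free() it when done. */
      GRPC_CLOSURE_SCHED(exec_ctx,
                         GRPC_CLOSURE_INIT(&arg->closure, cb, arg, scheduler),
                         GRPC_ERROR_NONE);
    }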
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c
index 5788819331..6b5b383efd 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.c
@@ -724,20 +724,14 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
-char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *call) {
- grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
- grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
- return top_elem->filter->get_peer(exec_ctx, top_elem);
-}
-
void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call,
- grpc_transport_stream_op_batch *op) {
+ grpc_transport_stream_op_batch *batch) {
GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0);
grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
- top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, op);
+ GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
+ top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, batch);
GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
}
@@ -760,13 +754,15 @@ grpc_error *grpc_connected_subchannel_create_call(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
- const grpc_call_element_args call_args = {.call_stack = callstk,
- .server_transport_data = NULL,
- .context = args->context,
- .path = args->path,
- .start_time = args->start_time,
- .deadline = args->deadline,
- .arena = args->arena};
+ const grpc_call_element_args call_args = {
+ .call_stack = callstk,
+ .server_transport_data = NULL,
+ .context = args->context,
+ .path = args->path,
+ .start_time = args->start_time,
+ .deadline = args->deadline,
+ .arena = args->arena,
+ .call_combiner = args->call_combiner};
grpc_error *error = grpc_call_stack_init(
exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
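The new call_combiner field threaded through grpc_connected_subchannel_call_args and grpc_call_element_args is captured by each filter during call-element initialization; the recurring shape throughout this diff is:

    typedef struct {
      grpc_call_combiner *call_combiner;  // owned by the call, shared by filters
      // ... other per-call state ...
    } call_data;

    static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem,
                                      const grpc_call_element_args *args) {
      call_data *calld = (call_data *)elem->call_data;  // zero-initialized
      calld->call_combiner = args->call_combiner;
      return GRPC_ERROR_NONE;
    }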
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 6d2abb04df..51d712f6a7 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -106,6 +106,7 @@ typedef struct {
gpr_timespec deadline;
gpr_arena *arena;
grpc_call_context_element *context;
+ grpc_call_combiner *call_combiner;
} grpc_connected_subchannel_call_args;
grpc_error *grpc_connected_subchannel_create_call(
@@ -150,10 +151,6 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call,
grpc_transport_stream_op_batch *op);
-/** continue querying for peer */
-char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *subchannel_call);
-
/** Must be called once per call. Sets the 'then_schedule_closure' argument for
call stack destruction. */
void grpc_subchannel_call_set_cleanup_closure(
diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.c
index 6789903c95..565b0679dc 100644
--- a/src/core/ext/filters/deadline/deadline_filter.c
+++ b/src/core/ext/filters/deadline/deadline_filter.c
@@ -34,22 +34,56 @@
// grpc_deadline_state
//
+// The on_complete callback used when sending a cancel_stream batch down the
+// filter stack. Yields the call combiner when the batch returns.
+static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* ignored) {
+ grpc_deadline_state* deadline_state = arg;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+ "got on_complete from cancel_stream batch");
+ GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
+}
+
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
+static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = arg;
+ grpc_deadline_state* deadline_state = elem->call_data;
+ grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
+ deadline_state, grpc_schedule_on_exec_ctx));
+ batch->cancel_stream = true;
+ batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+}
+
// Timer callback.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
if (error != GRPC_ERROR_CANCELLED) {
- grpc_call_element_signal_error(
- exec_ctx, elem,
- grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED));
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
+ grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner,
+ GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
+ send_cancel_op_in_call_combiner, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
+ &deadline_state->timer_callback, error,
+ "deadline exceeded -- sending cancel_stream op");
+ } else {
+ GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack,
+ "deadline_timer");
}
- GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
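timer_callback shows the general contract of the new call combiner: GRPC_CALL_COMBINER_START queues a closure to run with exclusive access to the call's state, and exactly one matching GRPC_CALL_COMBINER_STOP must follow (here from yield_call_combiner, once the cancel batch completes) so the next queued closure can run. Reduced to a skeleton, where the reason strings are free-form trace labels:

    GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, &some_closure,
                             GRPC_ERROR_NONE, "why the combiner is needed");
    // ... later, from some_closure or a completion it arranged,
    // exactly one matching yield:
    GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, "work finished");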
// Starts the deadline timer.
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
@@ -58,51 +92,39 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
return;
}
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
- grpc_deadline_timer_state cur_state;
grpc_closure* closure = NULL;
-retry:
- cur_state =
- (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
- switch (cur_state) {
+ switch (deadline_state->timer_state) {
case GRPC_DEADLINE_STATE_PENDING:
// Note: We do not start the timer if one is already pending.
return;
case GRPC_DEADLINE_STATE_FINISHED:
- if (gpr_atm_rel_cas(&deadline_state->timer_state,
- GRPC_DEADLINE_STATE_FINISHED,
- GRPC_DEADLINE_STATE_PENDING)) {
- // If we've already created and destroyed a timer, we always create a
- // new closure: we have no other guarantee that the inlined closure is
- // not in use (it may hold a pending call to timer_callback)
- closure = GRPC_CLOSURE_CREATE(timer_callback, elem,
- grpc_schedule_on_exec_ctx);
- } else {
- goto retry;
- }
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
+ // If we've already created and destroyed a timer, we always create a
+ // new closure: we have no other guarantee that the inlined closure is
+ // not in use (it may hold a pending call to timer_callback)
+ closure =
+ GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx);
break;
case GRPC_DEADLINE_STATE_INITIAL:
- if (gpr_atm_rel_cas(&deadline_state->timer_state,
- GRPC_DEADLINE_STATE_INITIAL,
- GRPC_DEADLINE_STATE_PENDING)) {
- closure =
- GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
- elem, grpc_schedule_on_exec_ctx);
- } else {
- goto retry;
- }
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
+ closure =
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
+ elem, grpc_schedule_on_exec_ctx);
break;
}
- GPR_ASSERT(closure);
+ GPR_ASSERT(closure != NULL);
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
gpr_now(GPR_CLOCK_MONOTONIC));
}
// Cancels the deadline timer.
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
- if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
- GRPC_DEADLINE_STATE_FINISHED)) {
+ if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) {
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED;
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
} else {
// timer was either in STATE_INITIAL (nothing to cancel)
@@ -131,6 +153,7 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
+ bool in_call_combiner;
grpc_call_element* elem;
gpr_timespec deadline;
grpc_closure closure;
@@ -138,15 +161,29 @@ struct start_timer_after_init_state {
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
struct start_timer_after_init_state* state = arg;
+ grpc_deadline_state* deadline_state = state->elem->call_data;
+ if (!state->in_call_combiner) {
+ // We are initially called without holding the call combiner, so we
+ // need to bounce ourselves into it.
+ state->in_call_combiner = true;
+ GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
+ &state->closure, GRPC_ERROR_REF(error),
+ "scheduling deadline timer");
+ return;
+ }
start_timer_if_needed(exec_ctx, state->elem, state->deadline);
gpr_free(state);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+ "done scheduling deadline timer");
}
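start_timer_after_init is first scheduled on the plain exec_ctx (call stack initialization has not finished, so the combiner cannot be held yet) and uses the in_call_combiner flag to reschedule itself into the combiner on a second pass. The trampoline reduces to this sketch; work_state and its fields are hypothetical stand-ins:

    static void do_work(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
      work_state *st = (work_state *)arg;
      if (!st->in_call_combiner) {
        st->in_call_combiner = true;  // the second invocation runs synchronized
        GRPC_CALL_COMBINER_START(exec_ctx, st->call_combiner, &st->closure,
                                 GRPC_ERROR_REF(error), "bounce into combiner");
        return;
      }
      // ... safely touch per-call state here ...
      GRPC_CALL_COMBINER_STOP(exec_ctx, st->call_combiner, "work done");
      gpr_free(st);
    }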
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
deadline_state->call_stack = call_stack;
+ deadline_state->call_combiner = call_combiner;
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -158,7 +195,7 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
// call stack initialization is finished. To avoid that problem, we
// create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done.
- struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
+ struct start_timer_after_init_state* state = gpr_zalloc(sizeof(*state));
state->elem = elem;
state->deadline = deadline;
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
@@ -232,7 +269,8 @@ typedef struct server_call_data {
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_element_args* args) {
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack, args->deadline);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+ args->call_combiner, args->deadline);
return GRPC_ERROR_NONE;
}
@@ -310,7 +348,6 @@ const grpc_channel_filter grpc_client_deadline_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"deadline",
};
@@ -325,7 +362,6 @@ const grpc_channel_filter grpc_server_deadline_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"deadline",
};
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index 420bf7065a..3eb102ad28 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -31,7 +31,8 @@ typedef enum grpc_deadline_timer_state {
typedef struct grpc_deadline_state {
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
- gpr_atm timer_state;
+ grpc_call_combiner* call_combiner;
+ grpc_deadline_timer_state timer_state;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when the call is complete.
@@ -50,6 +51,7 @@ typedef struct grpc_deadline_state {
// assumes elem->call_data is zero'd
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner,
gpr_timespec deadline);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
@@ -61,6 +63,8 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
// to ensure that the timer callback is not invoked while it is in the
// process of being reset, which means that attempting to increase the
// deadline may result in the timer being called twice.
+//
+// Note: Must be called while holding the call combiner.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline);
@@ -70,6 +74,8 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
//
// Note: It is the caller's responsibility to chain to the next filter if
// necessary after this function returns.
+//
+// Note: Must be called while holding the call combiner.
void grpc_deadline_state_client_start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* op);
diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.c
index 3ca01a41b5..2d7429c41e 100644
--- a/src/core/ext/filters/http/client/http_client_filter.c
+++ b/src/core/ext/filters/http/client/http_client_filter.c
@@ -36,6 +36,7 @@
static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
@@ -215,13 +216,13 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
return;
}
error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
return;
}
// There may or may not be more to read, but we don't care. If we got
@@ -302,7 +303,6 @@ static void hc_start_transport_stream_op_batch(
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
- GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
if (batch->recv_initial_metadata) {
/* substitute our callback for the higher callback */
@@ -414,7 +414,7 @@ static void hc_start_transport_stream_op_batch(
done:
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
} else if (!batch_will_be_handled_asynchronously) {
grpc_call_next_op(exec_ctx, elem, batch);
}
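Every failure path in this filter now passes the call combiner to grpc_transport_stream_op_batch_finish_with_failure; judging from these call sites, the helper fails the batch's completion callbacks and yields the combiner on the caller's behalf, so the standard error exit is simply:

    if (error != GRPC_ERROR_NONE) {
      grpc_transport_stream_op_batch_finish_with_failure(
          exec_ctx, batch, error, calld->call_combiner);  // consumes error
      return;  // neither batch nor error may be touched afterwards
    }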
@@ -426,6 +426,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = (call_data *)elem->call_data;
+ calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -565,6 +566,5 @@ const grpc_channel_filter grpc_http_client_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"http-client"};
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.c
index a32819bfe4..98a503cafc 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.c
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.c
@@ -35,35 +35,29 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
-#define INITIAL_METADATA_UNSEEN 0
-#define HAS_COMPRESSION_ALGORITHM 2
-#define NO_COMPRESSION_ALGORITHM 4
-
-#define CANCELLED_BIT ((gpr_atm)1)
+typedef enum {
+ // Initial metadata not yet seen.
+ INITIAL_METADATA_UNSEEN = 0,
+ // Initial metadata seen; compression algorithm set.
+ HAS_COMPRESSION_ALGORITHM,
+ // Initial metadata seen; no compression algorithm set.
+ NO_COMPRESSION_ALGORITHM,
+} initial_metadata_state;
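For contrast, the deleted gpr_atm scheme multiplexed all of this into a single word: the three small constants above, a raw pointer to a stalled send_message batch, or an error pointer tagged with CANCELLED_BIT. Decoding looked roughly like the sketch below (reconstructed from the removed comment block, not a verbatim quote); under the call combiner the same information fits in two plain fields, send_initial_metadata_state and cancel_error.

    gpr_atm cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
    if (cur & CANCELLED_BIT) {
      grpc_error *cancel_error = (grpc_error *)(cur & ~CANCELLED_BIT);
      // cancelled: fail any pending work with cancel_error
    } else if (cur != INITIAL_METADATA_UNSEEN &&
               cur != HAS_COMPRESSION_ALGORITHM &&
               cur != NO_COMPRESSION_ALGORITHM) {
      grpc_transport_stream_op_batch *stalled =
          (grpc_transport_stream_op_batch *)cur;  // a waiting send_message
    }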
typedef struct call_data {
- grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
+ grpc_call_combiner *call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
grpc_linked_mdelem accept_stream_encoding_storage;
- uint32_t remaining_slice_bytes;
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
-
- /* Atomic recording the state of initial metadata; allowed values:
- INITIAL_METADATA_UNSEEN - initial metadata op not seen
- HAS_COMPRESSION_ALGORITHM - initial metadata seen; compression algorithm
- set
- NO_COMPRESSION_ALGORITHM - initial metadata seen; no compression algorithm
- set
- pointer - a stalled op containing a send_message that's waiting on initial
- metadata
- pointer | CANCELLED_BIT - request was cancelled with error pointed to */
- gpr_atm send_initial_metadata_state;
-
+ initial_metadata_state send_initial_metadata_state;
+ grpc_error *cancel_error;
+ grpc_closure start_send_message_batch_in_call_combiner;
grpc_transport_stream_op_batch *send_message_batch;
+ grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_slice_buffer_stream replacement_stream;
grpc_closure *original_send_message_on_complete;
grpc_closure send_message_on_complete;
@@ -92,13 +86,13 @@ static bool skip_compression(grpc_call_element *elem, uint32_t flags,
channel_data *channeld = elem->channel_data;
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
- return 1;
+ return true;
}
if (has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
- return 1;
+ return true;
}
- return 0; /* we have an actual call-specific algorithm */
+ return false; /* we have an actual call-specific algorithm */
}
/* no per-call compression override */
return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
@@ -226,6 +220,18 @@ static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
+static void send_message_batch_continue(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *calld = (call_data *)elem->call_data;
+ // Note: The call to grpc_call_next_op() results in yielding the
+ // call combiner, so we need to clear calld->send_message_batch
+ // before we do that.
+ grpc_transport_stream_op_batch *send_message_batch =
+ calld->send_message_batch;
+ calld->send_message_batch = NULL;
+ grpc_call_next_op(exec_ctx, elem, send_message_batch);
+}
+
static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
@@ -234,8 +240,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_init(&tmp);
uint32_t send_flags =
calld->send_message_batch->payload->send_message.send_message->flags;
- const bool did_compress = grpc_msg_compress(
- exec_ctx, calld->compression_algorithm, &calld->slices, &tmp);
+ bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm,
+ &calld->slices, &tmp);
if (did_compress) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
char *algo_name;
@@ -273,7 +279,19 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
calld->original_send_message_on_complete =
calld->send_message_batch->on_complete;
calld->send_message_batch->on_complete = &calld->send_message_on_complete;
- grpc_call_next_op(exec_ctx, elem, calld->send_message_batch);
+ send_message_batch_continue(exec_ctx, elem);
+}
+
+static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error) {
+ call_data *calld = arg;
+ if (calld->send_message_batch != NULL) {
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
+ calld->call_combiner);
+ calld->send_message_batch = NULL;
+ }
}
// Pulls a slice from the send_message byte stream and adds it to calld->slices.
@@ -293,21 +311,25 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
// If all data has been read, invokes finish_send_message(). Otherwise,
// an async call to grpc_byte_stream_next() has been started, which will
// eventually result in calling on_send_message_next_done().
-static grpc_error *continue_reading_send_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
+static void continue_reading_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
while (grpc_byte_stream_next(
exec_ctx, calld->send_message_batch->payload->send_message.send_message,
~(size_t)0, &calld->on_send_message_next_done)) {
grpc_error *error = pull_slice_from_send_message(exec_ctx, calld);
- if (error != GRPC_ERROR_NONE) return error;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem);
break;
}
}
- return GRPC_ERROR_NONE;
}
// Async callback for grpc_byte_stream_next().
@@ -315,46 +337,37 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- if (error != GRPC_ERROR_NONE) goto fail;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ return;
+ }
error = pull_slice_from_send_message(exec_ctx, calld);
- if (error != GRPC_ERROR_NONE) goto fail;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem);
} else {
- // This will either finish reading all of the data and invoke
- // finish_send_message(), or else it will make an async call to
- // grpc_byte_stream_next(), which will eventually result in calling
- // this function again.
- error = continue_reading_send_message(exec_ctx, elem);
- if (error != GRPC_ERROR_NONE) goto fail;
+ continue_reading_send_message(exec_ctx, elem);
}
- return;
-fail:
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
}
-static void start_send_message_batch(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch,
- bool has_compression_algorithm) {
+static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *unused) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- if (!skip_compression(elem, batch->payload->send_message.send_message->flags,
- has_compression_algorithm)) {
- calld->send_message_batch = batch;
- // This will either finish reading all of the data and invoke
- // finish_send_message(), or else it will make an async call to
- // grpc_byte_stream_next(), which will eventually result in calling
- // on_send_message_next_done().
- grpc_error *error = continue_reading_send_message(exec_ctx, elem);
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
- }
+ if (skip_compression(
+ elem,
+ calld->send_message_batch->payload->send_message.send_message->flags,
+ calld->send_initial_metadata_state == HAS_COMPRESSION_ALGORITHM)) {
+ send_message_batch_continue(exec_ctx, elem);
} else {
- /* pass control down the stack */
- grpc_call_next_op(exec_ctx, elem, batch);
+ continue_reading_send_message(exec_ctx, elem);
}
}
@@ -362,95 +375,80 @@ static void compress_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
-
GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
-
+ // Handle cancel_stream.
if (batch->cancel_stream) {
- // TODO(roth): As part of the upcoming call combiner work, change
- // this to call grpc_byte_stream_shutdown() on the incoming byte
- // stream, to cancel any in-flight calls to grpc_byte_stream_next().
- GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
- gpr_atm cur = gpr_atm_full_xchg(
- &calld->send_initial_metadata_state,
- CANCELLED_BIT | (gpr_atm)batch->payload->cancel_stream.cancel_error);
- switch (cur) {
- case HAS_COMPRESSION_ALGORITHM:
- case NO_COMPRESSION_ALGORITHM:
- case INITIAL_METADATA_UNSEEN:
- break;
- default:
- if ((cur & CANCELLED_BIT) == 0) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, (grpc_transport_stream_op_batch *)cur,
- GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error));
- } else {
- GRPC_ERROR_UNREF((grpc_error *)(cur & ~CANCELLED_BIT));
- }
- break;
+ GRPC_ERROR_UNREF(calld->cancel_error);
+ calld->cancel_error =
+ GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ if (calld->send_message_batch != NULL) {
+ if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_REF(calld->cancel_error), "failing send_message op");
+ } else {
+ grpc_byte_stream_shutdown(
+ exec_ctx,
+ calld->send_message_batch->payload->send_message.send_message,
+ GRPC_ERROR_REF(calld->cancel_error));
+ }
}
+ } else if (calld->cancel_error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, batch, GRPC_ERROR_REF(calld->cancel_error),
+ calld->call_combiner);
+ goto done;
}
-
+ // Handle send_initial_metadata.
if (batch->send_initial_metadata) {
+ GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN);
bool has_compression_algorithm;
grpc_error *error = process_send_initial_metadata(
exec_ctx, elem,
batch->payload->send_initial_metadata.send_initial_metadata,
&has_compression_algorithm);
if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- error);
- return;
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+ calld->call_combiner);
+ goto done;
}
- gpr_atm cur;
- retry_send_im:
- cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
- GPR_ASSERT(cur != HAS_COMPRESSION_ALGORITHM &&
- cur != NO_COMPRESSION_ALGORITHM);
- if ((cur & CANCELLED_BIT) == 0) {
- if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
- has_compression_algorithm
- ? HAS_COMPRESSION_ALGORITHM
- : NO_COMPRESSION_ALGORITHM)) {
- goto retry_send_im;
- }
- if (cur != INITIAL_METADATA_UNSEEN) {
- start_send_message_batch(exec_ctx, elem,
- (grpc_transport_stream_op_batch *)cur,
- has_compression_algorithm);
- }
+ calld->send_initial_metadata_state = has_compression_algorithm
+ ? HAS_COMPRESSION_ALGORITHM
+ : NO_COMPRESSION_ALGORITHM;
+ // If we had previously received a batch containing a send_message op,
+ // handle it now. Note that we need to re-enter the call combiner
+ // for this, since we can't send two batches down while holding the
+ // call combiner, since the connected_channel filter (at the bottom of
+ // the call stack) will release the call combiner for each batch it sees.
+ if (calld->send_message_batch != NULL) {
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, calld->call_combiner,
+ &calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE,
+ "starting send_message after send_initial_metadata");
}
}
+ // Handle send_message.
if (batch->send_message) {
- gpr_atm cur;
- retry_send:
- cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
- switch (cur) {
- case INITIAL_METADATA_UNSEEN:
- if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
- (gpr_atm)batch)) {
- goto retry_send;
- }
- break;
- case HAS_COMPRESSION_ALGORITHM:
- case NO_COMPRESSION_ALGORITHM:
- start_send_message_batch(exec_ctx, elem, batch,
- cur == HAS_COMPRESSION_ALGORITHM);
- break;
- default:
- if (cur & CANCELLED_BIT) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch,
- GRPC_ERROR_REF((grpc_error *)(cur & ~CANCELLED_BIT)));
- } else {
- /* >1 send_message concurrently */
- GPR_UNREACHABLE_CODE(break);
- }
+ GPR_ASSERT(calld->send_message_batch == NULL);
+ calld->send_message_batch = batch;
+ // If we have not yet seen send_initial_metadata, then we have to
+ // wait. We save the batch in calld and then drop the call
+ // combiner, which we'll have to pick up again later when we get
+ // send_initial_metadata.
+ if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
+ GRPC_CALL_COMBINER_STOP(
+ exec_ctx, calld->call_combiner,
+ "send_message batch pending send_initial_metadata");
+ goto done;
}
+ start_send_message_batch(exec_ctx, elem, GRPC_ERROR_NONE);
} else {
- /* pass control down the stack */
+ // Pass control down the stack.
grpc_call_next_op(exec_ctx, elem, batch);
}
-
+done:
GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
}
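The cancel_stream branch above also resolves the old TODO: when a send_message is already being drained, cancellation aborts the in-flight read by shutting down the byte stream, which forces the pending grpc_byte_stream_next() completion (on_send_message_next_done) to run with the cancellation error. In isolation:

    // Abort an in-flight grpc_byte_stream_next(); its completion closure
    // is invoked with this error instead of fresh data.
    grpc_byte_stream_shutdown(
        exec_ctx, calld->send_message_batch->payload->send_message.send_message,
        GRPC_ERROR_REF(calld->cancel_error));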
@@ -458,16 +456,16 @@ static void compress_start_transport_stream_op_batch(
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
-
- /* initialize members */
+ call_data *calld = (call_data *)elem->call_data;
+ calld->call_combiner = args->call_combiner;
+ calld->cancel_error = GRPC_ERROR_NONE;
grpc_slice_buffer_init(&calld->slices);
+ GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner,
+ start_send_message_batch, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
elem, grpc_schedule_on_exec_ctx);
-
return GRPC_ERROR_NONE;
}
@@ -475,14 +473,9 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- /* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
- gpr_atm imstate =
- gpr_atm_no_barrier_load(&calld->send_initial_metadata_state);
- if (imstate & CANCELLED_BIT) {
- GRPC_ERROR_UNREF((grpc_error *)(imstate & ~CANCELLED_BIT));
- }
+ GRPC_ERROR_UNREF(calld->cancel_error);
}
/* Constructor for channel_data */
@@ -550,6 +543,5 @@ const grpc_channel_filter grpc_message_compress_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
- "compress"};
+ "message_compress"};
diff --git a/src/core/ext/filters/http/server/http_server_filter.c b/src/core/ext/filters/http/server/http_server_filter.c
index b145f12aff..a10e69ba59 100644
--- a/src/core/ext/filters/http/server/http_server_filter.c
+++ b/src/core/ext/filters/http/server/http_server_filter.c
@@ -32,6 +32,8 @@
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
+
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
@@ -281,7 +283,11 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
- GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
+ // Re-enter call combiner for recv_message_ready, since the surface
+ // code will release the call combiner for each callback it receives.
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ calld->recv_message_ready, GRPC_ERROR_REF(err),
+ "resuming recv_message_ready from on_complete");
calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true;
}
@@ -293,15 +299,20 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (calld->seen_path_with_query) {
- /* do nothing. This is probably a GET request, and payload will be returned
- in hs_on_complete callback. */
+ // Do nothing. This is probably a GET request, and payload will be
+ // returned in hs_on_complete callback.
+ // Note that we release the call combiner here, so that other
+ // callbacks can run.
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "pausing recv_message_ready until on_complete");
} else {
GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
}
}
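hs_recv_message_ready and hs_on_complete together illustrate the pause/resume idiom for chained callbacks: GRPC_CALL_COMBINER_STOP releases the combiner without delivering the saved closure, and a later GRPC_CALL_COMBINER_START re-enters the combiner to deliver it. Stripped of the surrounding filter logic:

    // Pause: yield the combiner, holding calld->recv_message_ready for later.
    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
                            "pausing recv_message_ready until on_complete");
    // Resume (from a subsequent combiner callback): re-enter so the saved
    // closure runs with the exclusion it expects.
    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
                             calld->recv_message_ready, GRPC_ERROR_REF(err),
                             "resuming recv_message_ready from on_complete");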
-static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
@@ -323,10 +334,7 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_initial_metadata.send_initial_metadata));
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return;
- }
+ if (error != GRPC_ERROR_NONE) return error;
}
if (op->recv_initial_metadata) {
@@ -359,21 +367,25 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_error *error = server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_trailing_metadata.send_trailing_metadata);
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return;
- }
+ if (error != GRPC_ERROR_NONE) return error;
}
+
+ return GRPC_ERROR_NONE;
}
-static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- GPR_TIMER_BEGIN("hs_start_transport_op", 0);
- hs_mutate_op(exec_ctx, elem, op);
- grpc_call_next_op(exec_ctx, elem, op);
- GPR_TIMER_END("hs_start_transport_op", 0);
+static void hs_start_transport_stream_op_batch(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
+ call_data *calld = elem->call_data;
+ GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
+ grpc_error *error = hs_mutate_op(exec_ctx, elem, op);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error,
+ calld->call_combiner);
+ } else {
+ grpc_call_next_op(exec_ctx, elem, op);
+ }
+ GPR_TIMER_END("hs_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
@@ -383,6 +395,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
+ calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem,
@@ -414,7 +427,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_http_server_filter = {
- hs_start_transport_op,
+ hs_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
@@ -423,6 +436,5 @@ const grpc_channel_filter grpc_http_server_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"http-server"};
diff --git a/src/core/ext/filters/load_reporting/load_reporting.c b/src/core/ext/filters/load_reporting/load_reporting.c
index 9745763c91..b42aa99cdb 100644
--- a/src/core/ext/filters/load_reporting/load_reporting.c
+++ b/src/core/ext/filters/load_reporting/load_reporting.c
@@ -42,9 +42,15 @@ static bool maybe_add_load_reporting_filter(grpc_exec_ctx *exec_ctx,
void *arg) {
const grpc_channel_args *args =
grpc_channel_stack_builder_get_channel_arguments(builder);
- if (is_load_reporting_enabled(args)) {
- return grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ const grpc_channel_filter *filter = arg;
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_iterator_find(builder, filter->name);
+ const bool already_has_load_reporting_filter =
+ !grpc_channel_stack_builder_iterator_is_end(it);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ if (is_load_reporting_enabled(args) && !already_has_load_reporting_filter) {
+ return grpc_channel_stack_builder_prepend_filter(builder, filter, NULL,
+ NULL);
}
return true;
}
diff --git a/src/core/ext/filters/load_reporting/load_reporting_filter.c b/src/core/ext/filters/load_reporting/load_reporting_filter.c
index 08474efb2e..17e946937f 100644
--- a/src/core/ext/filters/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/filters/load_reporting/load_reporting_filter.c
@@ -223,6 +223,5 @@ const grpc_channel_filter grpc_load_reporting_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"load_reporting"};
diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c
index 7d748b9c32..450f67746f 100644
--- a/src/core/ext/filters/max_age/max_age_filter.c
+++ b/src/core/ext/filters/max_age/max_age_filter.c
@@ -273,7 +273,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- channel_data* chand = elem->channel_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
decrease_call_count(exec_ctx, chand);
}
@@ -391,7 +391,6 @@ const grpc_channel_filter grpc_max_age_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"max_age"};
diff --git a/src/core/ext/filters/message_size/message_size_filter.c b/src/core/ext/filters/message_size/message_size_filter.c
index 846c7df69a..47763b1deb 100644
--- a/src/core/ext/filters/message_size/message_size_filter.c
+++ b/src/core/ext/filters/message_size/message_size_filter.c
@@ -68,6 +68,7 @@ static void* message_size_limits_create_from_json(const grpc_json* json) {
}
typedef struct call_data {
+ grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
@@ -131,7 +132,8 @@ static void start_transport_stream_op_batch(
exec_ctx, op,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_RESOURCE_EXHAUSTED));
+ GRPC_STATUS_RESOURCE_EXHAUSTED),
+ calld->call_combiner);
gpr_free(message_string);
return;
}
@@ -152,6 +154,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
const grpc_call_element_args* args) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
+ calld->call_combiner = args->call_combiner;
calld->next_recv_message_ready = NULL;
GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -259,7 +262,6 @@ const grpc_channel_filter grpc_message_size_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"message_size"};
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
index b4d2cb4b8c..c8b2fe5f99 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
@@ -177,7 +177,6 @@ const grpc_channel_filter grpc_workaround_cronet_compression_filter = {
0,
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"workaround_cronet_compression"};
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 3277cefc2d..7ff77b7cd9 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -1375,6 +1375,10 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
"send_initial_metadata_finished");
}
}
+ if (op_payload->send_initial_metadata.peer_string != NULL) {
+ gpr_atm_rel_store(op_payload->send_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ }
}
if (op->send_message) {
@@ -1384,11 +1388,18 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
if (s->write_closed) {
+ // Return an error unless the client has already received trailing
+ // metadata from the server, since an application using a
+ // streaming call might send another message before getting a
+ // recv_message failure, breaking out of its loop, and then
+ // starting recv_trailing_metadata.
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Attempt to send message after stream was closed",
- &s->write_closed_error, 1),
+ t->is_client && s->received_trailing_metadata
+ ? GRPC_ERROR_NONE
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Attempt to send message after stream was closed",
+ &s->write_closed_error, 1),
"fetching_send_message_finished");
} else {
GPR_ASSERT(s->fetching_send_message == NULL);
@@ -1476,6 +1487,10 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
op_payload->recv_initial_metadata.recv_initial_metadata;
s->trailing_metadata_available =
op_payload->recv_initial_metadata.trailing_metadata_available;
+ if (op_payload->recv_initial_metadata.peer_string != NULL) {
+ gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ }
grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
}
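With the get_peer vtable entry gone, the transport publishes its peer string through a gpr_atm slot supplied in the batch payload. The release store above pairs with an acquire load on the reading side; a sketch of the consumer, assuming the slot's owner eventually gpr_free()s the published copy:

    char *peer = (char *)gpr_atm_acq_load(&peer_string_atm);  // NULL until published
    if (peer != NULL) {
      // ... report the peer; the slot retains ownership of the copy ...
    }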
@@ -1836,8 +1851,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
}
}
- if (s->read_closed && s->frame_storage.length == 0 &&
- (!pending_data || s->seen_error) &&
+ if (s->read_closed && s->frame_storage.length == 0 && !pending_data &&
s->recv_trailing_metadata_finished != NULL) {
grpc_chttp2_incoming_metadata_buffer_publish(
exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata);
@@ -2944,14 +2958,6 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
/*******************************************************************************
- * INTEGRATION GLUE
- */
-
-static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
-}
-
-/*******************************************************************************
* MONITORING
*/
static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx,
@@ -2968,7 +2974,6 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
perform_transport_op,
destroy_stream,
destroy_transport,
- chttp2_get_peer,
chttp2_get_endpoint};
grpc_transport *grpc_create_chttp2_transport(
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 3c41a8958f..9fff30d54f 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -509,6 +509,8 @@ struct grpc_chttp2_stream {
/** Are we buffering writes on this stream? If yes, we won't become writable
until there's enough queued up in the flow_controlled_buffer */
bool write_buffering;
+ /** Has trailing metadata been received. */
+ bool received_trailing_metadata;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 18d163ee98..19bd86fd0c 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -623,6 +623,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
*s->trailing_metadata_available = true;
}
t->hpack_parser.on_header = on_trailing_header;
+ s->received_trailing_metadata = true;
} else {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
t->hpack_parser.on_header = on_initial_header;
@@ -631,6 +632,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
case 1:
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
t->hpack_parser.on_header = on_trailing_header;
+ s->received_trailing_metadata = true;
break;
case 2:
gpr_log(GPR_ERROR, "too many header frames received");
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index c16ffaa5ef..5e9d97d485 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -156,17 +156,8 @@ static uint32_t target_write_size(grpc_chttp2_transport *t) {
}
// Returns true if initial_metadata contains only default headers.
-//
-// TODO(roth): The fact that we hard-code these particular headers here
-// is fairly ugly. Need some better way to know which headers are
-// default, maybe via a bit in the static metadata table?
static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) {
- int num_default_fields =
- (initial_metadata->idx.named.status != NULL) +
- (initial_metadata->idx.named.content_type != NULL) +
- (initial_metadata->idx.named.grpc_encoding != NULL) +
- (initial_metadata->idx.named.grpc_accept_encoding != NULL);
- return (size_t)num_default_fields == initial_metadata->list.count;
+ return initial_metadata->list.default_count == initial_metadata->list.count;
}
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index abb558982b..587a3b83b5 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -187,9 +187,34 @@ struct stream_obj {
/* Mutex to protect storage */
gpr_mu mu;
+
+ /* Refcount object of the stream */
+ grpc_stream_refcount *refcount;
};
typedef struct stream_obj stream_obj;
+#ifndef NDEBUG
+#define GRPC_CRONET_STREAM_REF(stream, reason) \
+ grpc_cronet_stream_ref((stream), (reason))
+#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \
+ grpc_cronet_stream_unref((exec_ctx), (stream), (reason))
+void grpc_cronet_stream_ref(stream_obj *s, const char *reason) {
+ grpc_stream_ref(s->refcount, reason);
+}
+void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s,
+ const char *reason) {
+ grpc_stream_unref(exec_ctx, s->refcount, reason);
+}
+#else
+#define GRPC_CRONET_STREAM_REF(stream, reason) grpc_cronet_stream_ref((stream))
+#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \
+ grpc_cronet_stream_unref((exec_ctx), (stream))
+void grpc_cronet_stream_ref(stream_obj *s) { grpc_stream_ref(s->refcount); }
+void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s) {
+ grpc_stream_unref(exec_ctx, s->refcount);
+}
+#endif
+
static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
struct op_and_state *oas);
@@ -346,13 +371,12 @@ static void remove_from_storage(struct stream_obj *s,
This can get executed from the Cronet network thread via cronet callback
or on the application supplied thread via the perform_stream_op function.
*/
-static void execute_from_storage(stream_obj *s) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+static void execute_from_storage(grpc_exec_ctx *exec_ctx, stream_obj *s) {
gpr_mu_lock(&s->mu);
for (struct op_and_state *curr = s->storage.head; curr != NULL;) {
CRONET_LOG(GPR_DEBUG, "calling op at %p. done = %d", curr, curr->done);
GPR_ASSERT(curr->done == 0);
- enum e_op_result result = execute_stream_op(&exec_ctx, curr);
+ enum e_op_result result = execute_stream_op(exec_ctx, curr);
CRONET_LOG(GPR_DEBUG, "execute_stream_op[%p] returns %s", curr,
op_result_string(result));
/* if this op is done, then remove it and free memory */
@@ -369,7 +393,6 @@ static void execute_from_storage(stream_obj *s) {
}
}
gpr_mu_unlock(&s->mu);
- grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -377,6 +400,8 @@ static void execute_from_storage(stream_obj *s) {
*/
static void on_failed(bidirectional_stream *stream, int net_error) {
CRONET_LOG(GPR_DEBUG, "on_failed(%p, %d)", stream, net_error);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
bidirectional_stream_destroy(s->cbs);
@@ -392,7 +417,9 @@ static void on_failed(bidirectional_stream *stream, int net_error) {
}
null_and_maybe_free_read_buffer(s);
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -400,6 +427,8 @@ static void on_failed(bidirectional_stream *stream, int net_error) {
*/
static void on_canceled(bidirectional_stream *stream) {
CRONET_LOG(GPR_DEBUG, "on_canceled(%p)", stream);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
bidirectional_stream_destroy(s->cbs);
@@ -415,7 +444,9 @@ static void on_canceled(bidirectional_stream *stream) {
}
null_and_maybe_free_read_buffer(s);
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -423,6 +454,8 @@ static void on_canceled(bidirectional_stream *stream) {
*/
static void on_succeeded(bidirectional_stream *stream) {
CRONET_LOG(GPR_DEBUG, "on_succeeded(%p)", stream);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
stream_obj *s = (stream_obj *)stream->annotation;
gpr_mu_lock(&s->mu);
bidirectional_stream_destroy(s->cbs);
@@ -430,7 +463,9 @@ static void on_succeeded(bidirectional_stream *stream) {
s->cbs = NULL;
null_and_maybe_free_read_buffer(s);
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");
+ grpc_exec_ctx_finish(&exec_ctx);
}
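execute_from_storage no longer creates its own exec_ctx, so each Cronet network-thread callback now builds a local one, drains the op storage, drops its stream ref (in the terminal callbacks above), and flushes before returning. The common skeleton:

    static void on_some_cronet_event(bidirectional_stream *stream) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      stream_obj *s = (stream_obj *)stream->annotation;
      // ... update s->state under s->mu ...
      execute_from_storage(&exec_ctx, s);
      GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");  // terminal callbacks only
      grpc_exec_ctx_finish(&exec_ctx);  // run scheduled closures before returning
    }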
/*
@@ -438,6 +473,7 @@ static void on_succeeded(bidirectional_stream *stream) {
*/
static void on_stream_ready(bidirectional_stream *stream) {
CRONET_LOG(GPR_DEBUG, "W: on_stream_ready(%p)", stream);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
stream_obj *s = (stream_obj *)stream->annotation;
grpc_cronet_transport *t = (grpc_cronet_transport *)s->curr_ct;
gpr_mu_lock(&s->mu);
@@ -457,7 +493,8 @@ static void on_stream_ready(bidirectional_stream *stream) {
}
}
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -513,14 +550,15 @@ static void on_response_headers_received(
s->state.pending_read_from_cronet = true;
}
gpr_mu_unlock(&s->mu);
+ execute_from_storage(&exec_ctx, s);
grpc_exec_ctx_finish(&exec_ctx);
- execute_from_storage(s);
}
/*
Cronet callback
*/
static void on_write_completed(bidirectional_stream *stream, const char *data) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
stream_obj *s = (stream_obj *)stream->annotation;
CRONET_LOG(GPR_DEBUG, "W: on_write_completed(%p, %s)", stream, data);
gpr_mu_lock(&s->mu);
@@ -530,7 +568,8 @@ static void on_write_completed(bidirectional_stream *stream, const char *data) {
}
s->state.state_callback_received[OP_SEND_MESSAGE] = true;
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -538,6 +577,7 @@ static void on_write_completed(bidirectional_stream *stream, const char *data) {
*/
static void on_read_completed(bidirectional_stream *stream, char *data,
int count) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
stream_obj *s = (stream_obj *)stream->annotation;
CRONET_LOG(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream, data,
count);
@@ -563,14 +603,15 @@ static void on_read_completed(bidirectional_stream *stream, char *data,
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
}
} else {
null_and_maybe_free_read_buffer(s);
s->state.rs.read_stream_closed = true;
gpr_mu_unlock(&s->mu);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
}
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -625,12 +666,11 @@ static void on_response_trailers_received(
s->state.state_op_done[OP_SEND_TRAILING_METADATA] = true;
gpr_mu_unlock(&s->mu);
- grpc_exec_ctx_finish(&exec_ctx);
} else {
gpr_mu_unlock(&s->mu);
- grpc_exec_ctx_finish(&exec_ctx);
- execute_from_storage(s);
+ execute_from_storage(&exec_ctx, s);
}
+ grpc_exec_ctx_finish(&exec_ctx);
}
/*
@@ -1313,6 +1353,9 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_stream_refcount *refcount,
const void *server_data, gpr_arena *arena) {
stream_obj *s = (stream_obj *)gs;
+
+ s->refcount = refcount;
+ GRPC_CRONET_STREAM_REF(s, "cronet transport");
memset(&s->storage, 0, sizeof(s->storage));
s->storage.head = NULL;
memset(&s->state, 0, sizeof(s->state));
@@ -1370,7 +1413,7 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
}
stream_obj *s = (stream_obj *)gs;
add_to_storage(s, op);
- execute_from_storage(s);
+ execute_from_storage(exec_ctx, s);
}
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
@@ -1386,10 +1429,6 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
- return NULL;
-}
-
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *gt) {
return NULL;
@@ -1408,7 +1447,6 @@ static const grpc_transport_vtable grpc_cronet_vtable = {
perform_op,
destroy_stream,
destroy_transport,
- get_peer,
get_endpoint};
grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.c
index 6f4b429ee2..b2d6f2d0c9 100644
--- a/src/core/ext/transport/inproc/inproc_transport.c
+++ b/src/core/ext/transport/inproc/inproc_transport.c
@@ -1251,20 +1251,14 @@ static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
// Nothing to do here
}
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return gpr_strdup("inproc");
-}
-
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return NULL;
}
static const grpc_transport_vtable inproc_vtable = {
- sizeof(inproc_stream), "inproc",
- init_stream, set_pollset,
- set_pollset_set, perform_stream_op,
- perform_transport_op, destroy_stream,
- destroy_transport, get_peer,
+ sizeof(inproc_stream), "inproc", init_stream,
+ set_pollset, set_pollset_set, perform_stream_op,
+ perform_transport_op, destroy_stream, destroy_transport,
get_endpoint};
/*******************************************************************************
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 0f8e33c4be..775c8bc667 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -233,15 +233,10 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
grpc_call_element *next_elem = elem + 1;
+ GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op);
next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op);
}
-char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- grpc_call_element *next_elem = elem + 1;
- return next_elem->filter->get_peer(exec_ctx, next_elem);
-}
-
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *channel_info) {
@@ -265,12 +260,3 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_call_stack)));
}
-
-void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(NULL);
- op->cancel_stream = true;
- op->payload->cancel_stream.cancel_error = error;
- elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
-}
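grpc_call_element_signal_error() is removed because injecting a batch at an arbitrary element without holding the call combiner is no longer safe. Callers now build the cancel batch themselves from inside the combiner, as the deadline filter earlier in this diff does:

    // Inside a call-combiner callback (cf. send_cancel_op_in_call_combiner):
    grpc_transport_stream_op_batch *batch =
        grpc_make_transport_stream_op(&on_complete_closure);
    batch->cancel_stream = true;
    batch->payload->cancel_stream.cancel_error = error;  // batch takes ownership
    elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);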
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index a80f8aa826..ae1cac31f7 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -40,6 +40,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/transport.h"
@@ -71,6 +72,7 @@ typedef struct {
gpr_timespec start_time;
gpr_timespec deadline;
gpr_arena *arena;
+ grpc_call_combiner *call_combiner;
} grpc_call_element_args;
typedef struct {
@@ -150,9 +152,6 @@ typedef struct {
void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem);
- /* Implement grpc_call_get_peer() */
- char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
-
/* Implement grpc_channel_get_info() */
void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
const grpc_channel_info *channel_info);
@@ -271,8 +270,6 @@ void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
stack */
void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_transport_op *op);
-/* Pass through a request to get_peer to the next child element */
-char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* Pass through a request to get_channel_info() to the next child element */
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -288,10 +285,6 @@ void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op);
-void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
- grpc_call_element *cur_elem,
- grpc_error *error);
-
extern grpc_tracer_flag grpc_trace_channel;
#define GRPC_CALL_LOG_OP(sev, elem, op) \
diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c
index c369e33073..7f2b8e07ce 100644
--- a/src/core/lib/channel/channel_stack_builder.c
+++ b/src/core/lib/channel/channel_stack_builder.c
@@ -124,6 +124,20 @@ bool grpc_channel_stack_builder_move_prev(
return true;
}
+grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find(
+ grpc_channel_stack_builder *builder, const char *filter_name) {
+ GPR_ASSERT(filter_name != NULL);
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_create_iterator_at_first(builder);
+ while (grpc_channel_stack_builder_move_next(it)) {
+ if (grpc_channel_stack_builder_iterator_is_end(it)) break;
+ const char *filter_name_at_it =
+ grpc_channel_stack_builder_iterator_filter_name(it);
+ if (strcmp(filter_name, filter_name_at_it) == 0) break;
+ }
+ return it;
+}
+
bool grpc_channel_stack_builder_move_prev(
grpc_channel_stack_builder_iterator *iterator);
@@ -169,6 +183,21 @@ bool grpc_channel_stack_builder_append_filter(
return ok;
}
+bool grpc_channel_stack_builder_remove_filter(
+ grpc_channel_stack_builder *builder, const char *filter_name) {
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_iterator_find(builder, filter_name);
+ if (grpc_channel_stack_builder_iterator_is_end(it)) {
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return false;
+ }
+ it->node->prev->next = it->node->next;
+ it->node->next->prev = it->node->prev;
+ gpr_free(it->node);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return true;
+}
+
bool grpc_channel_stack_builder_prepend_filter(
grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
grpc_post_filter_create_init_func post_init_func, void *user_data) {
diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h
index d43e427962..fdff2a2b6d 100644
--- a/src/core/lib/channel/channel_stack_builder.h
+++ b/src/core/lib/channel/channel_stack_builder.h
@@ -95,6 +95,11 @@ bool grpc_channel_stack_builder_move_next(
bool grpc_channel_stack_builder_move_prev(
grpc_channel_stack_builder_iterator *iterator);
+/// Return an iterator at \a filter_name, or at the end of the list if not
+/// found.
+grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find(
+ grpc_channel_stack_builder *builder, const char *filter_name);
+
typedef void (*grpc_post_filter_create_init_func)(
grpc_channel_stack *channel_stack, grpc_channel_element *elem, void *arg);
@@ -132,6 +137,11 @@ bool grpc_channel_stack_builder_append_filter(
grpc_post_filter_create_init_func post_init_func,
void *user_data) GRPC_MUST_USE_RESULT;
+/// Remove any filter whose name is \a filter_name from \a builder. Returns
+/// true if \a filter_name was found and removed.
+bool grpc_channel_stack_builder_remove_filter(
+ grpc_channel_stack_builder *builder, const char *filter_name);
+
/// Terminate iteration and destroy \a iterator
void grpc_channel_stack_builder_iterator_destroy(
grpc_channel_stack_builder_iterator *iterator);
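For illustration, a minimal sketch of how a channel-init hook might use the new removal API; the hook name and filter name below are hypothetical, not part of this commit:

/* Hypothetical channel-init hook (sketch). */
static bool maybe_strip_filter(grpc_exec_ctx *exec_ctx,
                               grpc_channel_stack_builder *builder,
                               void *arg) {
  /* Removes the first filter registered under "some_filter"; returns
     false and leaves the stack untouched when no such filter exists. */
  if (!grpc_channel_stack_builder_remove_filter(builder, "some_filter")) {
    /* Not present in this stack; nothing to do. */
  }
  return true;
}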
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index af06ca802e..8285226fc4 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -36,7 +36,57 @@ typedef struct connected_channel_channel_data {
grpc_transport *transport;
} channel_data;
-typedef struct connected_channel_call_data { void *unused; } call_data;
+typedef struct {
+ grpc_closure closure;
+ grpc_closure *original_closure;
+ grpc_call_combiner *call_combiner;
+ const char *reason;
+} callback_state;
+
+typedef struct connected_channel_call_data {
+ grpc_call_combiner *call_combiner;
+ // Closures used for returning results on the call combiner.
+ callback_state on_complete[6]; // Max number of pending batches.
+ callback_state recv_initial_metadata_ready;
+ callback_state recv_message_ready;
+} call_data;
+
+static void run_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ callback_state *state = (callback_state *)arg;
+ GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner,
+ state->original_closure, GRPC_ERROR_REF(error),
+ state->reason);
+}
+
+static void run_cancel_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ run_in_call_combiner(exec_ctx, arg, error);
+ gpr_free(arg);
+}
+
+static void intercept_callback(call_data *calld, callback_state *state,
+ bool free_when_done, const char *reason,
+ grpc_closure **original_closure) {
+ state->original_closure = *original_closure;
+ state->call_combiner = calld->call_combiner;
+ state->reason = reason;
+ *original_closure = GRPC_CLOSURE_INIT(
+ &state->closure,
+ free_when_done ? run_cancel_in_call_combiner : run_in_call_combiner,
+ state, grpc_schedule_on_exec_ctx);
+}
+
+static callback_state *get_state_for_batch(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ if (batch->send_initial_metadata) return &calld->on_complete[0];
+ if (batch->send_message) return &calld->on_complete[1];
+ if (batch->send_trailing_metadata) return &calld->on_complete[2];
+ if (batch->recv_initial_metadata) return &calld->on_complete[3];
+ if (batch->recv_message) return &calld->on_complete[4];
+ if (batch->recv_trailing_metadata) return &calld->on_complete[5];
+ GPR_UNREACHABLE_CODE(return NULL);
+}
/* We perform a small hack to locate transport data alongside the connected
channel data in call allocations, to allow everything to be pulled in minimal
@@ -49,13 +99,38 @@ typedef struct connected_channel_call_data { void *unused; } call_data;
into transport stream operations */
static void con_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+ grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-
+ if (batch->recv_initial_metadata) {
+ callback_state *state = &calld->recv_initial_metadata_ready;
+ intercept_callback(
+ calld, state, false, "recv_initial_metadata_ready",
+ &batch->payload->recv_initial_metadata.recv_initial_metadata_ready);
+ }
+ if (batch->recv_message) {
+ callback_state *state = &calld->recv_message_ready;
+ intercept_callback(calld, state, false, "recv_message_ready",
+ &batch->payload->recv_message.recv_message_ready);
+ }
+ if (batch->cancel_stream) {
+ // There can be more than one cancellation batch in flight at any
+ // given time, so we can't just pick out a fixed index into
+ // calld->on_complete like we can for the other ops. However,
+ // cancellation isn't in the fast path, so we just allocate a new
+ // closure for each one.
+ callback_state *state = (callback_state *)gpr_malloc(sizeof(*state));
+ intercept_callback(calld, state, true, "on_complete (cancel_stream)",
+ &batch->on_complete);
+ } else {
+ callback_state *state = get_state_for_batch(calld, batch);
+ intercept_callback(calld, state, false, "on_complete", &batch->on_complete);
+ }
grpc_transport_perform_stream_op(exec_ctx, chand->transport,
- TRANSPORT_STREAM_FROM_CALL_DATA(calld), op);
+ TRANSPORT_STREAM_FROM_CALL_DATA(calld),
+ batch);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "passed batch to transport");
}
static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -71,6 +146,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
+ calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
&args->call_stack->refcount, args->server_transport_data, args->arena);
@@ -118,11 +194,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
}
-static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- return grpc_transport_get_peer(exec_ctx, chand->transport);
-}
-
/* No-op. */
static void con_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -138,7 +209,6 @@ const grpc_channel_filter grpc_connected_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- con_get_peer,
con_get_channel_info,
"connected",
};
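The interception above is an instance of a generic wrap-and-forward pattern: swap a caller's callback for a wrapper that reacquires the per-call serialization context before chaining to the original. A self-contained sketch of just that pattern, with all names illustrative:

typedef void (*cb_fn)(void *arg);

typedef struct {
  cb_fn original_cb;
  void *original_arg;
} wrapper_state;

static void wrapped_cb(void *arg) {
  wrapper_state *state = (wrapper_state *)arg;
  /* ... re-enter the per-call serialization context here ... */
  state->original_cb(state->original_arg);
}

static void intercept(cb_fn *cb, void **cb_arg, wrapper_state *state) {
  state->original_cb = *cb;     /* remember where to forward */
  state->original_arg = *cb_arg;
  *cb = wrapped_cb;             /* the caller now invokes the wrapper */
  *cb_arg = state;
}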
diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c
index 73703208da..6d00fc8880 100644
--- a/src/core/lib/debug/stats_data.c
+++ b/src/core/lib/debug/stats_data.c
@@ -56,36 +56,37 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"tcp_read_offer", "tcp_read_iov_size", "http2_send_message_size",
"server_cqs_checked",
};
-const int grpc_stats_table_0[64] = {
- 0, 1, 2, 3, 4, 6, 8, 11,
- 15, 20, 26, 34, 44, 57, 74, 96,
- 124, 160, 206, 265, 341, 439, 565, 727,
- 935, 1202, 1546, 1988, 2556, 3286, 4225, 5432,
- 6983, 8977, 11540, 14834, 19069, 24513, 31510, 40505,
- 52067, 66929, 86033, 110590, 142157, 182734, 234893, 301940,
- 388125, 498910, 641316, 824370, 1059674, 1362141, 1750943, 2250722,
- 2893155, 3718960, 4780478, 6144988, 7898976, 10153611, 13051794, 16777216};
+const int grpc_stats_table_0[65] = {
+ 0, 1, 2, 3, 4, 6, 8, 11,
+ 15, 20, 26, 34, 44, 57, 73, 94,
+ 121, 155, 199, 255, 327, 419, 537, 688,
+ 881, 1128, 1444, 1848, 2365, 3026, 3872, 4954,
+ 6338, 8108, 10373, 13270, 16976, 21717, 27782, 35541,
+ 45467, 58165, 74409, 95189, 121772, 155778, 199281, 254933,
+ 326126, 417200, 533707, 682750, 873414, 1117323, 1429345, 1828502,
+ 2339127, 2992348, 3827987, 4896985, 6264509, 8013925, 10251880, 13114801,
+ 16777216};
const uint8_t grpc_stats_table_1[87] = {
- 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10,
- 11, 12, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 19, 20, 21, 21, 22, 23,
- 24, 24, 25, 25, 26, 27, 28, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 35,
- 36, 36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 46, 46, 47, 47,
- 48, 49, 50, 50, 51, 52, 52, 53, 54, 55, 55, 56, 57, 57, 58};
-const int grpc_stats_table_2[64] = {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
- 15, 17, 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, 49,
- 54, 59, 64, 70, 76, 83, 90, 98, 106, 115, 125, 136, 147,
- 159, 172, 186, 201, 218, 236, 255, 276, 299, 323, 349, 377, 408,
- 441, 477, 515, 556, 601, 649, 701, 757, 817, 881, 950, 1024};
-const uint8_t grpc_stats_table_3[104] = {
- 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
- 7, 7, 7, 8, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14,
- 15, 15, 16, 16, 16, 17, 18, 18, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24,
- 24, 24, 25, 25, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33,
- 34, 34, 35, 36, 36, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 42, 43,
- 44, 45, 45, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51, 51};
-const int grpc_stats_table_4[8] = {0, 1, 2, 4, 8, 16, 32, 64};
-const uint8_t grpc_stats_table_5[4] = {0, 1, 2, 3};
+ 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11,
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23,
+ 24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36,
+ 36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 45, 46, 47, 48, 48,
+ 49, 50, 51, 51, 52, 53, 53, 54, 55, 56, 56, 57, 58, 58, 59};
+const int grpc_stats_table_2[65] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 43, 47,
+ 51, 56, 61, 66, 72, 78, 85, 92, 100, 109, 118, 128, 139,
+ 151, 164, 178, 193, 209, 226, 244, 264, 285, 308, 333, 359, 387,
+ 418, 451, 486, 524, 565, 609, 656, 707, 762, 821, 884, 952, 1024};
+const uint8_t grpc_stats_table_3[102] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23,
+ 23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
+ 32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+ 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
+const int grpc_stats_table_4[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
+const uint8_t grpc_stats_table_5[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
@@ -98,7 +99,7 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
- if (_val.uint < 4682617712558473216ull) {
+ if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_0[bucket];
@@ -113,7 +114,7 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
}
void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
- if (value < 12) {
+ if (value < 13) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value);
return;
@@ -123,9 +124,9 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
- if (_val.uint < 4637300241308057600ull) {
+ if (_val.uint < 4637863191261478912ull) {
int bucket =
- grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 12;
+ grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -148,7 +149,7 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
- if (_val.uint < 4682617712558473216ull) {
+ if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_0[bucket];
@@ -173,7 +174,7 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
- if (_val.uint < 4682617712558473216ull) {
+ if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_0[bucket];
@@ -188,7 +189,7 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
}
void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
- if (value < 12) {
+ if (value < 13) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE,
value);
return;
@@ -198,9 +199,9 @@ void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) {
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
- if (_val.uint < 4637300241308057600ull) {
+ if (_val.uint < 4637863191261478912ull) {
int bucket =
- grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 12;
+ grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE,
@@ -224,7 +225,7 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
- if (_val.uint < 4682617712558473216ull) {
+ if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_0[bucket];
@@ -250,9 +251,9 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
- if (_val.uint < 4627448617123184640ull) {
+ if (_val.uint < 4625196817309499392ull) {
int bucket =
- grpc_stats_table_5[((_val.uint - 4613937818241073152ull) >> 52)] + 3;
+ grpc_stats_table_5[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
_bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
diff --git a/src/core/lib/iomgr/call_combiner.c b/src/core/lib/iomgr/call_combiner.c
new file mode 100644
index 0000000000..48d8eaec18
--- /dev/null
+++ b/src/core/lib/iomgr/call_combiner.c
@@ -0,0 +1,202 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/iomgr/call_combiner.h"
+
+#include <grpc/support/log.h>
+
+grpc_tracer_flag grpc_call_combiner_trace =
+ GRPC_TRACER_INITIALIZER(false, "call_combiner");
+
+static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) {
+ if (cancel_state & 1) {
+ return (grpc_error*)(cancel_state & ~(gpr_atm)1);
+ }
+ return GRPC_ERROR_NONE;
+}
+
+static gpr_atm encode_cancel_state_error(grpc_error* error) {
+ return (gpr_atm)1 | (gpr_atm)error;
+}
+
+void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
+ gpr_mpscq_init(&call_combiner->queue);
+}
+
+void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
+ gpr_mpscq_destroy(&call_combiner->queue);
+ GRPC_ERROR_UNREF(decode_cancel_state_error(call_combiner->cancel_state));
+}
+
+#ifndef NDEBUG
+#define DEBUG_ARGS , const char *file, int line
+#define DEBUG_FMT_STR "%s:%d: "
+#define DEBUG_FMT_ARGS , file, line
+#else
+#define DEBUG_ARGS
+#define DEBUG_FMT_STR
+#define DEBUG_FMT_ARGS
+#endif
+
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure,
+ grpc_error* error DEBUG_ARGS,
+ const char* reason) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
+ "%s] error=%s",
+ call_combiner, closure DEBUG_FMT_ARGS, reason,
+ grpc_error_string(error));
+ }
+ size_t prev_size =
+ (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ prev_size + 1);
+ }
+ if (prev_size == 0) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY");
+ }
+ // Queue was empty, so execute this closure immediately.
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
+ } else {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " QUEUING");
+ }
+ // Queue was not empty, so add closure to queue.
+ closure->error_data.error = error;
+ gpr_mpscq_push(&call_combiner->queue, (gpr_mpscq_node*)closure);
+ }
+}
+
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner DEBUG_ARGS,
+ const char* reason) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
+ call_combiner DEBUG_FMT_ARGS, reason);
+ }
+ size_t prev_size =
+ (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ prev_size - 1);
+ }
+ GPR_ASSERT(prev_size >= 1);
+ if (prev_size > 1) {
+ while (true) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " checking queue");
+ }
+ bool empty;
+ grpc_closure* closure = (grpc_closure*)gpr_mpscq_pop_and_check_end(
+ &call_combiner->queue, &empty);
+ if (closure == NULL) {
+ // This can happen either due to a race condition within the mpscq
+ // code or because of a race with grpc_call_combiner_start().
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " queue returned no result; checking again");
+ }
+ continue;
+ }
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s",
+ closure, grpc_error_string(closure->error_data.error));
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error);
+ break;
+ }
+ } else if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " queue empty");
+ }
+}
+
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure) {
+ while (true) {
+ // Decode original state.
+ gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
+ grpc_error* original_error = decode_cancel_state_error(original_state);
+ // If error is set, invoke the cancellation closure immediately.
+ // Otherwise, store the new closure.
+ if (original_error != GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling notify_on_cancel callback=%p "
+ "for pre-existing cancellation",
+ call_combiner, closure);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error));
+ break;
+ } else {
+ if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
+ (gpr_atm)closure)) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p",
+ call_combiner, closure);
+ }
+ // If we replaced an earlier closure, invoke the original
+ // closure with GRPC_ERROR_NONE. This allows callers to clean
+ // up any resources they may be holding for the callback.
+ if (original_state != 0) {
+ closure = (grpc_closure*)original_state;
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling old cancel callback=%p",
+ call_combiner, closure);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+ }
+ break;
+ }
+ }
+ // cas failed, try again.
+ }
+}
+
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_error* error) {
+ while (true) {
+ gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
+ grpc_error* original_error = decode_cancel_state_error(original_state);
+ if (original_error != GRPC_ERROR_NONE) {
+ GRPC_ERROR_UNREF(error);
+ break;
+ }
+ if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
+ encode_cancel_state_error(error))) {
+ if (original_state != 0) {
+ grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling notify_on_cancel callback=%p",
+ call_combiner, notify_on_cancel);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error));
+ }
+ break;
+ }
+ // cas failed, try again.
+ }
+}
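cancel_state packs either a grpc_closure* (low bit 0) or a grpc_error* (low bit 1) into one atomic word; pointer alignment guarantees the low bit is otherwise always zero. A standalone sketch of the tagging scheme, with types simplified to uintptr_t:

#include <assert.h>
#include <stdint.h>

typedef uintptr_t tagged_word;

static tagged_word encode_error(void *error) {
  return (tagged_word)error | 1; /* low bit 1 => error pointer */
}

static int is_error(tagged_word w) { return (w & 1) != 0; }

static void *decode_error(tagged_word w) {
  return (void *)(w & ~(tagged_word)1); /* strip the tag bit */
}

int main(void) {
  static int some_error; /* stand-in for an error object */
  tagged_word w = encode_error(&some_error);
  assert(is_error(w));
  assert(decode_error(w) == &some_error);
  /* A closure pointer (low bit 0) would be stored untagged. */
  return 0;
}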
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
new file mode 100644
index 0000000000..5cfb3f0c07
--- /dev/null
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
+#define GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
+
+#include <stddef.h>
+
+#include <grpc/support/atm.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/support/mpscq.h"
+
+// A simple, lock-free mechanism for serializing activity related to a
+// single call. This is similar to a combiner but is more lightweight.
+//
+// It requires the callback (or, in the common case where the callback
+// actually kicks off a chain of callbacks, the last callback in that
+// chain) to explicitly indicate (by calling GRPC_CALL_COMBINER_STOP())
+// when it is done with the action that was kicked off by the original
+// callback.
+
+extern grpc_tracer_flag grpc_call_combiner_trace;
+
+typedef struct {
+ gpr_atm size; // size_t, num closures in queue or currently executing
+ gpr_mpscq queue;
+ // Either 0 (if not cancelled and no cancellation closure set),
+ // a grpc_closure* (if the lowest bit is 0),
+ // or a grpc_error* (if the lowest bit is 1).
+ gpr_atm cancel_state;
+} grpc_call_combiner;
+
+// Assumes memory was initialized to zero.
+void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
+
+void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner);
+
+#ifndef NDEBUG
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ __FILE__, __LINE__, (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \
+ (reason))
+/// Starts processing \a closure on \a call_combiner.
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure, grpc_error* error,
+ const char* file, int line, const char* reason);
+/// Yields the call combiner to the next closure in the queue, if any.
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ const char* file, int line, const char* reason);
+#else
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason))
+/// Starts processing \a closure on \a call_combiner.
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure, grpc_error* error,
+ const char* reason);
+/// Yields the call combiner to the next closure in the queue, if any.
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ const char* reason);
+#endif
+
+/// Registers \a closure to be invoked by \a call_combiner when
+/// grpc_call_combiner_cancel() is called.
+///
+/// Once a closure is registered, it will always be scheduled exactly
+/// once; this allows the closure to hold references that will be freed
+/// regardless of whether or not the call was cancelled. If a cancellation
+/// does occur, the closure will be scheduled with the cancellation error;
+/// otherwise, it will be scheduled with GRPC_ERROR_NONE.
+///
+/// The closure will be scheduled in the following cases:
+/// - If grpc_call_combiner_cancel() was called prior to registering the
+/// closure, it will be scheduled immediately with the cancellation error.
+/// - If grpc_call_combiner_cancel() is called after registering the
+/// closure, the closure will be scheduled with the cancellation error.
+/// - If grpc_call_combiner_set_notify_on_cancel() is called again to
+/// register a new cancellation closure, the previous cancellation
+/// closure will be scheduled with GRPC_ERROR_NONE.
+///
+/// If \a closure is NULL, then no closure will be invoked on
+/// cancellation; this effectively unregisters the previously set closure.
+/// However, most filters will not need to explicitly unregister their
+/// callbacks, as this is done automatically when the call is destroyed.
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure);
+
+/// Indicates that the call has been cancelled.
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_error* error);
+
+#endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */
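A minimal sketch of the start/stop contract from a filter's perspective; the names and call_data layout below are illustrative, not part of this commit:

typedef struct {
  grpc_call_combiner *call_combiner;
  grpc_closure do_work;
} my_call_data;

static void my_do_work(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  my_call_data *calld = (my_call_data *)arg;
  /* ... work that must be serialized with other activity on the call ... */
  /* The last closure in a chain must yield the combiner, or queued
     closures never run. */
  GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, "my_do_work done");
}

static void my_schedule_work(grpc_exec_ctx *exec_ctx, my_call_data *calld) {
  GRPC_CLOSURE_INIT(&calld->do_work, my_do_work, calld,
                    grpc_schedule_on_exec_ctx);
  /* Runs immediately if the combiner is idle; otherwise queues. */
  GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, &calld->do_work,
                           GRPC_ERROR_NONE, "my_schedule_work");
}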
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c
index 7f053fa728..b76eb9e1c9 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.c
+++ b/src/core/lib/iomgr/ev_epoll1_linux.c
@@ -698,22 +698,30 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_mu_unlock(&pollset->mu);
goto retry_lock_neighbourhood;
}
- pollset->seen_inactive = false;
- if (neighbourhood->active_root == NULL) {
- neighbourhood->active_root = pollset->next = pollset->prev = pollset;
- /* TODO: sreek. Why would this worker state be other than UNKICKED
- * here ? (since the worker isn't added to the pollset yet, there is no
- * way it can be "found" by other threads to get kicked). */
-
- /* If there is no designated poller, make this the designated poller */
- if (worker->kick_state == UNKICKED &&
- gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
- SET_KICK_STATE(worker, DESIGNATED_POLLER);
+
+ /* In the brief time we released the pollset locks above, the worker MAY
+ have been kicked. In this case, the worker should get out of this
+ pollset ASAP and hence this should neither add the pollset to the
+ neighbourhood nor mark the pollset as active.
+
+ On a side note, the only way a worker's kick state could have changed
+ at this point is if it were "kicked specifically". Since the worker has
+ not added itself to the pollset yet (by calling worker_insert()), it is
+ not visible in the "kick any" path yet */
+ if (worker->kick_state == UNKICKED) {
+ pollset->seen_inactive = false;
+ if (neighbourhood->active_root == NULL) {
+ neighbourhood->active_root = pollset->next = pollset->prev = pollset;
+ /* Make this the designated poller if there isn't one already */
+ if (worker->kick_state == UNKICKED &&
+ gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
+ SET_KICK_STATE(worker, DESIGNATED_POLLER);
+ }
+ } else {
+ pollset->next = neighbourhood->active_root;
+ pollset->prev = pollset->next->prev;
+ pollset->next->prev = pollset->prev->next = pollset;
}
- } else {
- pollset->next = neighbourhood->active_root;
- pollset->prev = pollset->next->prev;
- pollset->next->prev = pollset->prev->next = pollset;
}
}
if (is_reassigning) {
@@ -1001,6 +1009,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_log(GPR_ERROR, "%s", tmp);
gpr_free(tmp);
}
+
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
grpc_pollset_worker *root_worker = pollset->root_worker;
@@ -1076,7 +1085,11 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
}
goto done;
}
- } else if (specific_worker->kick_state == KICKED) {
+
+ GPR_UNREACHABLE_CODE(goto done);
+ }
+
+ if (specific_worker->kick_state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. specific worker already kicked");
}
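The designated-poller handoff in begin_worker() above is a claim-if-unclaimed compare-and-swap; in isolation the pattern looks like this (names illustrative):

#include <grpc/support/atm.h>

/* At most one worker wins the designated-poller slot; the CAS succeeds
   only when the slot is currently empty (0). */
static int try_claim_poller_slot(gpr_atm *slot, gpr_atm self) {
  return gpr_atm_no_barrier_cas(slot, 0, self);
}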
diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
deleted file mode 100644
index e2e3cd9003..0000000000
--- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
+++ /dev/null
@@ -1,1961 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-/* This polling engine is only relevant on linux kernels supporting epoll() */
-#ifdef GRPC_LINUX_EPOLL
-
-#include "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h"
-
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-#include <poll.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include <sys/epoll.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/tls.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/debug/stats.h"
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/lockfree_event.h"
-#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
-#include "src/core/lib/support/env.h"
-
-#define GRPC_POLLING_TRACE(fmt, ...) \
- if (GRPC_TRACER_ON(grpc_polling_trace)) { \
- gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
- }
-
-#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
-
-/* The maximum number of polling threads per polling island. By default no
- limit */
-static int g_max_pollers_per_pi = INT_MAX;
-
-static int grpc_wakeup_signal = -1;
-static bool is_grpc_wakeup_signal_initialized = false;
-
-/* Implements the function defined in grpc_posix.h. This function might be
- * called even before grpc_init() to set a different signal to use. If
- * signum == -1, the use of signals is disabled */
-static void grpc_use_signal(int signum) {
- grpc_wakeup_signal = signum;
- is_grpc_wakeup_signal_initialized = true;
-
- if (grpc_wakeup_signal < 0) {
- gpr_log(GPR_INFO,
- "Use of signals is disabled. Epoll engine will not be used");
- } else {
- gpr_log(GPR_INFO, "epoll engine will be using signal: %d",
- grpc_wakeup_signal);
- }
-}
-
-struct polling_island;
-
-typedef enum {
- POLL_OBJ_FD,
- POLL_OBJ_POLLSET,
- POLL_OBJ_POLLSET_SET
-} poll_obj_type;
-
-typedef struct poll_obj {
-#ifndef NDEBUG
- poll_obj_type obj_type;
-#endif
- gpr_mu mu;
- struct polling_island *pi;
-} poll_obj;
-
-static const char *poll_obj_string(poll_obj_type po_type) {
- switch (po_type) {
- case POLL_OBJ_FD:
- return "fd";
- case POLL_OBJ_POLLSET:
- return "pollset";
- case POLL_OBJ_POLLSET_SET:
- return "pollset_set";
- }
-
- GPR_UNREACHABLE_CODE(return "UNKNOWN");
-}
-
-/*******************************************************************************
- * Fd Declarations
- */
-
-#define FD_FROM_PO(po) ((grpc_fd *)(po))
-
-struct grpc_fd {
- poll_obj po;
-
- int fd;
- /* refst format:
- bit 0 : 1=Active / 0=Orphaned
- bits 1-n : refcount
- Ref/Unref by two to avoid altering the orphaned bit */
- gpr_atm refst;
-
- /* The fd is either closed or we relinquished control of it. In either
- case, this indicates that the 'fd' on this structure is no longer
- valid */
- bool orphaned;
-
- gpr_atm read_closure;
- gpr_atm write_closure;
-
- struct grpc_fd *freelist_next;
- grpc_closure *on_done_closure;
-
- /* The pollset that last noticed that the fd is readable. The actual type
- * stored in this is (grpc_pollset *) */
- gpr_atm read_notifier_pollset;
-
- grpc_iomgr_object iomgr_object;
-};
-
-/* Reference counting for fds */
-#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
- int line);
-#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
-#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
-#else
-static void fd_ref(grpc_fd *fd);
-static void fd_unref(grpc_fd *fd);
-#define GRPC_FD_REF(fd, reason) fd_ref(fd)
-#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
-#endif
-
-static void fd_global_init(void);
-static void fd_global_shutdown(void);
-
-/*******************************************************************************
- * Polling island Declarations
- */
-
-#ifndef NDEBUG
-
-#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
-#define PI_UNREF(exec_ctx, p, r) \
- pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-
-#else
-
-#define PI_ADD_REF(p, r) pi_add_ref((p))
-#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
-
-#endif
-
-typedef struct worker_node {
- struct worker_node *next;
- struct worker_node *prev;
-} worker_node;
-
-/* This is also used as grpc_workqueue (by directly casting it) */
-typedef struct polling_island {
- gpr_mu mu;
- /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
- the refcount.
- Once the ref count becomes zero, this structure is destroyed which means
- we should ensure that there is never a scenario where a PI_ADD_REF() is
- racing with a PI_UNREF() that just made the ref_count zero. */
- gpr_atm ref_count;
-
- /* Pointer to the polling_island this merged into.
- * merged_to value is only set once in a polling_island's lifetime (and then
- * only if the island is merged with another island). Because of this, we can
- * use gpr_atm type here so that we can do atomic access on this and reduce
- * lock contention on 'mu' mutex.
- *
- * Note that if this field is not NULL (i.e not 0), all the remaining fields
- * (except mu and ref_count) are invalid and must be ignored. */
- gpr_atm merged_to;
-
- /* Number of threads currently polling on this island */
- gpr_atm poller_count;
-
- /* The list of workers waiting to do polling on this polling island */
- gpr_mu worker_list_mu;
- worker_node worker_list_head;
-
- /* The fd of the underlying epoll set */
- int epoll_fd;
-
- /* The file descriptors in the epoll set */
- size_t fd_cnt;
- size_t fd_capacity;
- grpc_fd **fds;
-} polling_island;
-
-/*******************************************************************************
- * Pollset Declarations
- */
-#define WORKER_FROM_WORKER_LIST_NODE(p) \
- (struct grpc_pollset_worker *)(((char *)(p)) - \
- offsetof(grpc_pollset_worker, pi_list_link))
-struct grpc_pollset_worker {
- /* Thread id of this worker */
- pthread_t pt_id;
-
- /* Used to prevent a worker from getting kicked multiple times */
- gpr_atm is_kicked;
-
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
-
- /* Indicates if it is this worker's turn to do epoll */
- gpr_atm is_polling_turn;
-
- /* Node in the polling island's worker list. */
- worker_node pi_list_link;
-};
-
-struct grpc_pollset {
- poll_obj po;
-
- grpc_pollset_worker root_worker;
- bool kicked_without_pollers;
-
- bool shutting_down; /* Is the pollset shutting down? */
- bool finish_shutdown_called; /* Has finish_shutdown_locked() been called? */
- grpc_closure *shutdown_done; /* Called after shutdown is complete */
-};
-
-/*******************************************************************************
- * Pollset-set Declarations
- */
-struct grpc_pollset_set {
- poll_obj po;
-};
-
-/*******************************************************************************
- * Common helpers
- */
-
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
- if (error == GRPC_ERROR_NONE) return true;
- if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
- }
- *composite = grpc_error_add_child(*composite, error);
- return false;
-}
-
-/*******************************************************************************
- * Polling island Definitions
- */
-
-/* The wakeup fd that is used to wake up all threads in a Polling island. This
- is useful in the polling island merge operation where we need to wakeup all
- the threads currently polling the smaller polling island (so that they can
- start polling the new/merged polling island)
-
- NOTE: This fd is initialized to be readable and MUST NOT be consumed i.e the
- threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
-static grpc_wakeup_fd polling_island_wakeup_fd;
-
-/* The polling island being polled right now.
- See comments in workqueue_maybe_wakeup for why this is tracked. */
-static __thread polling_island *g_current_thread_polling_island;
-
-/* Forward declaration */
-static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
-
-#ifdef GRPC_TSAN
-/* Currently TSAN may incorrectly flag data races between epoll_ctl and
- epoll_wait for any grpc_fd structs that are added to the epoll set via
- epoll_ctl and are returned (within a very short window) via epoll_wait().
-
- To work around this race, we establish a happens-before relation between
- the code just-before epoll_ctl() and the code after epoll_wait() by using
- this atomic */
-gpr_atm g_epoll_sync;
-#endif /* defined(GRPC_TSAN) */
-
-static void pi_add_ref(polling_island *pi);
-static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-
-#ifndef NDEBUG
-static void pi_add_ref_dbg(polling_island *pi, const char *reason,
- const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- pi, old_cnt, old_cnt + 1, reason, file, line);
- }
- pi_add_ref(pi);
-}
-
-static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
- const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- pi, old_cnt, (old_cnt - 1), reason, file, line);
- }
- pi_unref(exec_ctx, pi);
-}
-#endif
-
-static void pi_add_ref(polling_island *pi) {
- gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1);
-}
-
-static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
- /* If ref count went to zero, delete the polling island.
- Note that this deletion need not be done under a lock. Once the ref count goes
- to zero, we are guaranteed that no one else holds a reference to the
- polling island (and that there is no racing pi_add_ref() call either).
-
- Also, if we are deleting the polling island and the merged_to field is
- non-empty, we should remove a ref to the merged_to polling island
- */
- if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- polling_island_delete(exec_ctx, pi);
- if (next != NULL) {
- PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
- }
- }
-}
-
-static void worker_node_init(worker_node *node) {
- node->next = node->prev = node;
-}
-
-/* Not thread safe. Do under a list-level lock */
-static void push_back_worker_node(worker_node *head, worker_node *node) {
- node->next = head;
- node->prev = head->prev;
- head->prev->next = node;
- head->prev = node;
-}
-
-/* Not thread safe. Do under a list-level lock */
-static void remove_worker_node(worker_node *node) {
- node->next->prev = node->prev;
- node->prev->next = node->next;
- /* If node's next and prev point to itself, the node is considered detached
- * from the list */
- node->next = node->prev = node;
-}
-
-/* Not thread safe. Do under a list-level lock */
-static worker_node *pop_front_worker_node(worker_node *head) {
- worker_node *node = head->next;
- if (node != head) {
- remove_worker_node(node);
- } else {
- node = NULL;
- }
-
- return node;
-}
-
-/* Returns true if the node's next and prev are pointing to itself (which
- indicates that the node is not in the list) */
-static bool is_worker_node_detached(worker_node *node) {
- return (node->next == node->prev && node->next == node);
-}
-
-/* The caller is expected to hold pi->mu lock before calling this function
- */
-static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
- size_t fd_count, bool add_fd_refs,
- grpc_error **error) {
- int err;
- size_t i;
- struct epoll_event ev;
- char *err_msg;
- const char *err_desc = "polling_island_add_fds";
-
-#ifdef GRPC_TSAN
- /* See the definition of g_epoll_sync for more context */
- gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0);
-#endif /* defined(GRPC_TSAN) */
-
- for (i = 0; i < fd_count; i++) {
- ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
- ev.data.ptr = fds[i];
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev);
-
- if (err < 0) {
- if (errno != EEXIST) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)",
- pi->epoll_fd, fds[i]->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-
- continue;
- }
-
- if (pi->fd_cnt == pi->fd_capacity) {
- pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
- pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
- }
-
- pi->fds[pi->fd_cnt++] = fds[i];
- if (add_fd_refs) {
- GRPC_FD_REF(fds[i], "polling_island");
- }
- }
-}
-
-/* The caller is expected to hold pi->mu before calling this */
-static void polling_island_add_wakeup_fd_locked(polling_island *pi,
- grpc_wakeup_fd *wakeup_fd,
- grpc_error **error) {
- struct epoll_event ev;
- int err;
- char *err_msg;
- const char *err_desc = "polling_island_add_wakeup_fd";
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
- ev.data.ptr = wakeup_fd;
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD,
- GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
- if (err < 0 && errno != EEXIST) {
- gpr_asprintf(&err_msg,
- "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
- "error: %d (%s)",
- pi->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno,
- strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-}
-
-/* The caller is expected to hold pi->mu lock before calling this function */
-static void polling_island_remove_all_fds_locked(polling_island *pi,
- bool remove_fd_refs,
- grpc_error **error) {
- int err;
- size_t i;
- char *err_msg;
- const char *err_desc = "polling_island_remove_fds";
-
- for (i = 0; i < pi->fd_cnt; i++) {
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(&err_msg,
- "epoll_ctl (epoll_fd: %d) delete fds[%zu]: %d failed with "
- "error: %d (%s)",
- pi->epoll_fd, i, pi->fds[i]->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-
- if (remove_fd_refs) {
- GRPC_FD_UNREF(pi->fds[i], "polling_island");
- }
- }
-
- pi->fd_cnt = 0;
-}
-
-/* The caller is expected to hold pi->mu lock before calling this function */
-static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd,
- bool is_fd_closed,
- grpc_error **error) {
- int err;
- size_t i;
- char *err_msg;
- const char *err_desc = "polling_island_remove_fd";
-
- /* If fd is already closed, then it would have been automatically removed
- from the epoll set */
- if (!is_fd_closed) {
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
- pi->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
- }
-
- for (i = 0; i < pi->fd_cnt; i++) {
- if (pi->fds[i] == fd) {
- pi->fds[i] = pi->fds[--pi->fd_cnt];
- GRPC_FD_UNREF(fd, "polling_island");
- break;
- }
- }
-}
-
-/* Might return NULL in case of an error */
-static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
- grpc_fd *initial_fd,
- grpc_error **error) {
- polling_island *pi = NULL;
- const char *err_desc = "polling_island_create";
-
- *error = GRPC_ERROR_NONE;
-
- pi = gpr_malloc(sizeof(*pi));
- gpr_mu_init(&pi->mu);
- pi->fd_cnt = 0;
- pi->fd_capacity = 0;
- pi->fds = NULL;
- pi->epoll_fd = -1;
-
- gpr_atm_rel_store(&pi->ref_count, 0);
- gpr_atm_rel_store(&pi->poller_count, 0);
- gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
-
- gpr_mu_init(&pi->worker_list_mu);
- worker_node_init(&pi->worker_list_head);
-
- pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
-
- if (pi->epoll_fd < 0) {
- append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc);
- goto done;
- }
-
- if (initial_fd != NULL) {
- polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
- }
-
-done:
- if (*error != GRPC_ERROR_NONE) {
- polling_island_delete(exec_ctx, pi);
- pi = NULL;
- }
- return pi;
-}
-
-static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
- GPR_ASSERT(pi->fd_cnt == 0);
-
- if (pi->epoll_fd >= 0) {
- close(pi->epoll_fd);
- }
- gpr_mu_destroy(&pi->mu);
- gpr_mu_destroy(&pi->worker_list_mu);
- GPR_ASSERT(is_worker_node_detached(&pi->worker_list_head));
-
- gpr_free(pi->fds);
- gpr_free(pi);
-}
-
-/* Attempts to get the last polling island in the linked list (linked by the
- * 'merged_to' field). Since this does not lock the polling island, there are no
- * guarantees that the island returned is the last island */
-static polling_island *polling_island_maybe_get_latest(polling_island *pi) {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- while (next != NULL) {
- pi = next;
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- }
-
- return pi;
-}
-
-/* Gets the lock on the *latest* polling island i.e the last polling island in
- the linked list (linked by the 'merged_to' field). Call gpr_mu_unlock on the
- returned polling island's mu.
- Usage: To lock/unlock polling island "pi", do the following:
- polling_island *pi_latest = polling_island_lock(pi);
- ...
- ... critical section ..
- ...
- gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. NOT pi->mu */
-static polling_island *polling_island_lock(polling_island *pi) {
- polling_island *next = NULL;
-
- while (true) {
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- if (next == NULL) {
- /* Looks like 'pi' is the last node in the linked list but unless we check
- this by holding the pi->mu lock, we cannot be sure (i.e without the
- pi->mu lock, we don't prevent island merges).
- To be absolutely sure, check once more by holding the pi->mu lock */
- gpr_mu_lock(&pi->mu);
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- if (next == NULL) {
- /* pi is in fact the last node and we have the pi->mu lock. We're done */
- break;
- }
-
- /* pi->merged_to is not NULL i.e pi isn't the last node anymore. pi->mu
- * isn't the lock we are interested in. Continue traversing the list */
- gpr_mu_unlock(&pi->mu);
- }
-
- pi = next;
- }
-
- return pi;
-}
-
-/* Gets the lock on the *latest* polling islands in the linked lists pointed by
- *p and *q (and also updates *p and *q to point to the latest polling islands)
-
- This function is needed because calling the following block of code to obtain
- locks on polling islands (*p and *q) is prone to deadlocks.
- {
- polling_island_lock(*p, true);
- polling_island_lock(*q, true);
- }
-
- Usage/example:
- polling_island *p1;
- polling_island *p2;
- ..
- polling_island_lock_pair(&p1, &p2);
- ..
- .. Critical section with both p1 and p2 locked
- ..
- // Release locks: Always call polling_island_unlock_pair() to release locks
- polling_island_unlock_pair(p1, p2);
-*/
-static void polling_island_lock_pair(polling_island **p, polling_island **q) {
- polling_island *pi_1 = *p;
- polling_island *pi_2 = *q;
- polling_island *next_1 = NULL;
- polling_island *next_2 = NULL;
-
- /* The algorithm is simple:
- - Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and
- keep updating pi_1 and pi_2)
- - Then obtain locks on the islands by following a lock order rule of
- locking polling_island with lower address first
- Special case: Before obtaining the locks, check if pi_1 and pi_2 are
- pointing to the same island. If that is the case, we can just call
- polling_island_lock()
- - After obtaining both the locks, double check that the polling islands
- are still the last polling islands in their respective linked lists
- (this is because there might have been polling island merges before
- we got the lock)
- - If the polling islands are the last islands, we are done. If not,
- release the locks and continue the process from the first step */
- while (true) {
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- while (next_1 != NULL) {
- pi_1 = next_1;
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- }
-
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
- while (next_2 != NULL) {
- pi_2 = next_2;
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
- }
-
- if (pi_1 == pi_2) {
- pi_1 = pi_2 = polling_island_lock(pi_1);
- break;
- }
-
- if (pi_1 < pi_2) {
- gpr_mu_lock(&pi_1->mu);
- gpr_mu_lock(&pi_2->mu);
- } else {
- gpr_mu_lock(&pi_2->mu);
- gpr_mu_lock(&pi_1->mu);
- }
-
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
- if (next_1 == NULL && next_2 == NULL) {
- break;
- }
-
- gpr_mu_unlock(&pi_1->mu);
- gpr_mu_unlock(&pi_2->mu);
- }
-
- *p = pi_1;
- *q = pi_2;
-}
-
-static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
- if (p == q) {
- gpr_mu_unlock(&p->mu);
- } else {
- gpr_mu_unlock(&p->mu);
- gpr_mu_unlock(&q->mu);
- }
-}
-
-static polling_island *polling_island_merge(polling_island *p,
- polling_island *q,
- grpc_error **error) {
- /* Get locks on both the polling islands */
- polling_island_lock_pair(&p, &q);
-
- if (p != q) {
- /* Make sure that p points to the polling island with fewer fds than q */
- if (p->fd_cnt > q->fd_cnt) {
- GPR_SWAP(polling_island *, p, q);
- }
-
- /* Merge p with q i.e move all the fds from p (The one with fewer fds) to q
- Note that the refcounts on the fds being moved will not change here.
- This is why the last param in the following two functions is 'false') */
- polling_island_add_fds_locked(q, p->fds, p->fd_cnt, false, error);
- polling_island_remove_all_fds_locked(p, false, error);
-
- /* Wakeup all the pollers (if any) on p so that they pickup this change */
- polling_island_add_wakeup_fd_locked(p, &polling_island_wakeup_fd, error);
-
- /* Add the 'merged_to' link from p --> q */
- gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
- PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
- }
- /* else if p == q, nothing needs to be done */
-
- polling_island_unlock_pair(p, q);
-
- /* Return the merged polling island (Note that no merge would have happened
- if p == q which is ok) */
- return q;
-}
-
-static grpc_error *polling_island_global_init() {
- grpc_error *error = GRPC_ERROR_NONE;
-
- error = grpc_wakeup_fd_init(&polling_island_wakeup_fd);
- if (error == GRPC_ERROR_NONE) {
- error = grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd);
- }
-
- return error;
-}
-
-static void polling_island_global_shutdown() {
- grpc_wakeup_fd_destroy(&polling_island_wakeup_fd);
-}
-
-/*******************************************************************************
- * Fd Definitions
- */
-
-/* We need to keep a freelist not because of any concerns of malloc performance
- * but instead so that implementations with multiple threads in (for example)
- * epoll_wait deal with the race between pollset removal and incoming poll
- * notifications.
- *
- * The problem is that the poller ultimately holds a reference to this
- * object, so it is very difficult to know when it is safe to free it, at least
- * without some expensive synchronization.
- *
- * If we keep the object freelisted, in the worst case losing this race just
- * becomes a spurious read notification on a reused fd.
- */
-
-/* The alarm system needs to be able to wakeup 'some poller' sometimes
- * (specifically when a new alarm needs to be triggered earlier than the next
- * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
- * case occurs. */
-
-static grpc_fd *fd_freelist = NULL;
-static gpr_mu fd_freelist_mu;
-
-#ifndef NDEBUG
-#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
-static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
- int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG,
- "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
- fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
- }
-#else
-#define REF_BY(fd, n, reason) ref_by(fd, n)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n)
-static void ref_by(grpc_fd *fd, int n) {
-#endif
- GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
-}
-
-#ifndef NDEBUG
-static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
- int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG,
- "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
- fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
- }
-#else
-static void unref_by(grpc_fd *fd, int n) {
-#endif
- gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
- if (old == n) {
- /* Add the fd to the freelist */
- gpr_mu_lock(&fd_freelist_mu);
- fd->freelist_next = fd_freelist;
- fd_freelist = fd;
- grpc_iomgr_unregister_object(&fd->iomgr_object);
-
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
-
- gpr_mu_unlock(&fd_freelist_mu);
- } else {
- GPR_ASSERT(old > n);
- }
-}
-
-/* Increment refcount by two to avoid changing the orphan bit */
-#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
- int line) {
- ref_by(fd, 2, reason, file, line);
-}
-
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
- int line) {
- unref_by(fd, 2, reason, file, line);
-}
-#else
-static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
-static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
-#endif
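-
-/* Editorial note (illustrative, not from the original source): one way to read
-   the refst encoding used above is
-       refst = 2 * (number of external refs) + alive_bit
-   where alive_bit is 1 from fd_create() until fd_orphan() runs. External
-   ref/unref always move the count in steps of 2, leaving bit 0 untouched;
-   fd_orphan() adds 1 (flipping the parity) and later drops 2, so the struct is
-   freelisted only once the last external ref and the orphan step are both
-   gone. A hypothetical predicate over this encoding: */
-static bool fd_not_yet_orphaned(gpr_atm refst) { return (refst & 1) == 1; }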
-
-static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
-
-static void fd_global_shutdown(void) {
- gpr_mu_lock(&fd_freelist_mu);
- gpr_mu_unlock(&fd_freelist_mu);
- while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- gpr_mu_destroy(&fd->po.mu);
- gpr_free(fd);
- }
- gpr_mu_destroy(&fd_freelist_mu);
-}
-
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = NULL;
-
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist != NULL) {
- new_fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- }
- gpr_mu_unlock(&fd_freelist_mu);
-
- if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&new_fd->po.mu);
- }
-
-  /* Note: It is not strictly necessary to take the new_fd->po.mu lock here.
-   * If this is a newly created fd (or an fd we got from the freelist), no one
-   * else could be holding a lock on it anyway. */
- gpr_mu_lock(&new_fd->po.mu);
- new_fd->po.pi = NULL;
-#ifndef NDEBUG
- new_fd->po.obj_type = POLL_OBJ_FD;
-#endif
-
- gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
- new_fd->fd = fd;
- new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
- gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
-
- new_fd->freelist_next = NULL;
- new_fd->on_done_closure = NULL;
-
- gpr_mu_unlock(&new_fd->po.mu);
-
- char *fd_name;
- gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
- grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
- }
-#endif
- gpr_free(fd_name);
- return new_fd;
-}
-
-static int fd_wrapped_fd(grpc_fd *fd) {
- int ret_fd = -1;
- gpr_mu_lock(&fd->po.mu);
- if (!fd->orphaned) {
- ret_fd = fd->fd;
- }
- gpr_mu_unlock(&fd->po.mu);
-
- return ret_fd;
-}
-
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
- grpc_error *error = GRPC_ERROR_NONE;
- polling_island *unref_pi = NULL;
-
- gpr_mu_lock(&fd->po.mu);
- fd->on_done_closure = on_done;
-
-  /* Remove the active status but keep it referenced. We want this grpc_fd
-     struct to be alive (and not added to the freelist) until the end of this
-     function */
- REF_BY(fd, 1, reason);
-
- /* Remove the fd from the polling island:
-     - Get a lock on the latest polling island (i.e. the last island in the
-       linked list pointed to by fd->po.pi). This is the island that
- would actually contain the fd
- - Remove the fd from the latest polling island
- - Unlock the latest polling island
- - Set fd->po.pi to NULL (but remove the ref on the polling island
- before doing this.) */
- if (fd->po.pi != NULL) {
- polling_island *pi_latest = polling_island_lock(fd->po.pi);
- polling_island_remove_fd_locked(pi_latest, fd, already_closed, &error);
- gpr_mu_unlock(&pi_latest->mu);
-
- unref_pi = fd->po.pi;
- fd->po.pi = NULL;
- }
-
- /* If release_fd is not NULL, we should be relinquishing control of the file
- descriptor fd->fd (but we still own the grpc_fd structure). */
- if (release_fd != NULL) {
- *release_fd = fd->fd;
- } else {
- close(fd->fd);
- }
-
- fd->orphaned = true;
-
- GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
-
- gpr_mu_unlock(&fd->po.mu);
- UNREF_BY(fd, 2, reason); /* Drop the reference */
- if (unref_pi != NULL) {
- /* Unref stale polling island here, outside the fd lock above.
- The polling island owns a workqueue which owns an fd, and unreffing
- inside the lock can cause an eventual lock loop that makes TSAN very
- unhappy. */
- PI_UNREF(exec_ctx, unref_pi, "fd_orphan");
- }
- GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
- GRPC_ERROR_UNREF(error);
-}
-
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
- gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
- return (grpc_pollset *)notifier;
-}
-
-static bool fd_is_shutdown(grpc_fd *fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
-}
-
-/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
- shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
- }
- GRPC_ERROR_UNREF(why);
-}
-
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
-}
-
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
-}
-
-/*******************************************************************************
- * Pollset Definitions
- */
-GPR_TLS_DECL(g_current_thread_pollset);
-GPR_TLS_DECL(g_current_thread_worker);
-static __thread bool g_initialized_sigmask;
-static __thread sigset_t g_orig_sigmask;
-static __thread sigset_t g_wakeup_sig_set;
-
-static void sig_handler(int sig_num) {
-#ifdef GRPC_EPOLL_DEBUG
- gpr_log(GPR_INFO, "Received signal %d", sig_num);
-#endif
-}
-
-static void pollset_worker_init(grpc_pollset_worker *worker) {
- worker->pt_id = pthread_self();
- worker->next = worker->prev = NULL;
- gpr_atm_no_barrier_store(&worker->is_kicked, (gpr_atm)0);
- gpr_atm_no_barrier_store(&worker->is_polling_turn, (gpr_atm)0);
- worker_node_init(&worker->pi_list_link);
-}
-
-static void poller_kick_init() { signal(grpc_wakeup_signal, sig_handler); }
-
-/* Global state management */
-static grpc_error *pollset_global_init(void) {
- gpr_tls_init(&g_current_thread_pollset);
- gpr_tls_init(&g_current_thread_worker);
- poller_kick_init();
- return GRPC_ERROR_NONE;
-}
-
-static void pollset_global_shutdown(void) {
- gpr_tls_destroy(&g_current_thread_pollset);
- gpr_tls_destroy(&g_current_thread_worker);
-}
-
-static grpc_error *worker_kick(grpc_pollset_worker *worker,
- gpr_atm *is_kicked) {
- grpc_error *err = GRPC_ERROR_NONE;
-
- /* Kick the worker only if it was not already kicked */
- if (gpr_atm_no_barrier_cas(is_kicked, (gpr_atm)0, (gpr_atm)1)) {
- GRPC_POLLING_TRACE(
- "pollset_worker_kick: Kicking worker: %p (thread id: %ld)",
- (void *)worker, (long int)worker->pt_id);
- int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal);
- if (err_num != 0) {
- err = GRPC_OS_ERROR(err_num, "pthread_kill");
- }
- }
- return err;
-}
-
-static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
- return worker_kick(worker, &worker->is_kicked);
-}
-
-static grpc_error *poller_kick(grpc_pollset_worker *worker) {
- return worker_kick(worker, &worker->is_polling_turn);
-}
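-
-/* Editorial sketch (hypothetical, not part of this file): the CAS inside
-   worker_kick() is the usual "latch" idiom - many threads may race to set the
-   flag, but exactly one observes the 0 -> 1 transition and performs the side
-   effect (here, pthread_kill). Reduced to its essence: */
-static bool latch_once(gpr_atm *flag) {
-  /* True for exactly one caller; later callers find the flag already set */
-  return gpr_atm_no_barrier_cas(flag, (gpr_atm)0, (gpr_atm)1);
-}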
-
-/* Return 1 if the pollset has active threads in pollset_work (pollset must
- * be locked) */
-static int pollset_has_workers(grpc_pollset *p) {
- return p->root_worker.next != &p->root_worker;
-}
-
-static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev->next = worker->next;
- worker->next->prev = worker->prev;
-}
-
-static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
- if (pollset_has_workers(p)) {
- grpc_pollset_worker *w = p->root_worker.next;
- remove_worker(p, w);
- return w;
- } else {
- return NULL;
- }
-}
-
-static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->next = &p->root_worker;
- worker->prev = worker->next->prev;
- worker->prev->next = worker->next->prev = worker;
-}
-
-static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev = &p->root_worker;
- worker->next = worker->prev->next;
- worker->prev->next = worker->next->prev = worker;
-}
-
-/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
- GPR_TIMER_BEGIN("pollset_kick", 0);
- grpc_error *error = GRPC_ERROR_NONE;
- const char *err_desc = "Kick Failure";
- grpc_pollset_worker *worker = specific_worker;
- if (worker != NULL) {
- if (worker == GRPC_POLLSET_KICK_BROADCAST) {
- if (pollset_has_workers(p)) {
- GPR_TIMER_BEGIN("pollset_kick.broadcast", 0);
- for (worker = p->root_worker.next; worker != &p->root_worker;
- worker = worker->next) {
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- GPR_TIMER_END("pollset_kick.broadcast", 0);
- } else {
- p->kicked_without_pollers = true;
- }
- } else {
- GPR_TIMER_MARK("kicked_specifically", 0);
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) {
- /* Since worker == NULL, it means that we can kick "any" worker on this
- pollset 'p'. If 'p' happens to be the same pollset this thread is
- currently polling (i.e in pollset_work() function), then there is no need
- to kick any other worker since the current thread can just absorb the
- kick. This is the reason why we enter this case only when
-       g_current_thread_pollset != p */
-
- GPR_TIMER_MARK("kick_anonymous", 0);
- worker = pop_front_worker(p);
- if (worker != NULL) {
- GPR_TIMER_MARK("finally_kick", 0);
- push_back_worker(p, worker);
- append_error(&error, pollset_worker_kick(worker), err_desc);
- } else {
- GPR_TIMER_MARK("kicked_no_pollers", 0);
- p->kicked_without_pollers = true;
- }
- }
-
- GPR_TIMER_END("pollset_kick", 0);
- GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error));
- return error;
-}
-
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- gpr_mu_init(&pollset->po.mu);
- *mu = &pollset->po.mu;
- pollset->po.pi = NULL;
-#ifndef NDEBUG
- pollset->po.obj_type = POLL_OBJ_POLLSET;
-#endif
-
- pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
- pollset->kicked_without_pollers = false;
-
- pollset->shutting_down = false;
- pollset->finish_shutdown_called = false;
- pollset->shutdown_done = NULL;
-}
-
-/* Convert millis to timespec (clock-type is assumed to be GPR_TIMESPAN) */
-static struct timespec millis_to_timespec(int millis) {
- struct timespec linux_ts;
- gpr_timespec gpr_ts;
-
- if (millis == -1) {
- gpr_ts = gpr_inf_future(GPR_TIMESPAN);
- } else {
- gpr_ts = gpr_time_from_millis(millis, GPR_TIMESPAN);
- }
-
- linux_ts.tv_sec = (time_t)gpr_ts.tv_sec;
- linux_ts.tv_nsec = gpr_ts.tv_nsec;
- return linux_ts;
-}
-
-/* Convert a timespec to milliseconds:
- - Very small or negative poll times are clamped to zero to do a non-blocking
- poll (which becomes spin polling)
- - Other small values are rounded up to one millisecond
-   - Polls longer than a millisecond are rounded up to the next nearest
-     millisecond to avoid spinning
- - Infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
-
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
- return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- int millis = gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
- return millis >= 1 ? millis : 1;
-}
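-
-/* Worked examples for the conversion above (editorial illustration):
-     deadline <= now + 10us (or already past)  ->  0  (non-blocking poll)
-     deadline == now + 500us                   ->  1  (rounded up to 1 ms)
-     deadline == now + 10.2ms                  -> 11  (rounded up, no spin)
-     deadline == infinite future               -> -1  (block indefinitely) */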
-
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_pollset *notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
-
- /* Note, it is possible that fd_become_readable might be called twice with
- different 'notifier's when an fd becomes readable and it is in two epoll
- sets (This can happen briefly during polling island merges). In such cases
-     it does not really matter which notifier is set as the
-     read_notifier_pollset (they would both point to the same polling island
-     anyway) */
- /* Use release store to match with acquire load in fd_get_read_notifier */
- gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
-}
-
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
-}
-
-static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
- grpc_pollset *ps, char *reason) {
- if (ps->po.pi != NULL) {
- PI_UNREF(exec_ctx, ps->po.pi, reason);
- }
- ps->po.pi = NULL;
-}
-
-static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- /* The pollset cannot have any workers if we are at this stage */
- GPR_ASSERT(!pollset_has_workers(pollset));
-
- pollset->finish_shutdown_called = true;
-
- /* Release the ref and set pollset->po.pi to NULL */
- pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
- GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
-}
-
-/* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
- GPR_TIMER_BEGIN("pollset_shutdown", 0);
- GPR_ASSERT(!pollset->shutting_down);
- pollset->shutting_down = true;
- pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
-
- /* If the pollset has any workers, we cannot call finish_shutdown_locked()
- because it would release the underlying polling island. In such a case, we
- let the last worker call finish_shutdown_locked() from pollset_work() */
- if (!pollset_has_workers(pollset)) {
- GPR_ASSERT(!pollset->finish_shutdown_called);
- GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
- }
- GPR_TIMER_END("pollset_shutdown", 0);
-}
-
-/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
- * than destroying the mutex, there is nothing special that needs to be done
- * here */
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- GPR_ASSERT(!pollset_has_workers(pollset));
- gpr_mu_destroy(&pollset->po.mu);
-}
-
-/* NOTE: This function may modify 'now' */
-static bool acquire_polling_lease(grpc_exec_ctx *exec_ctx,
- grpc_pollset_worker *worker,
- polling_island *pi, gpr_timespec deadline,
- gpr_timespec *now) {
- bool is_lease_acquired = false;
-
- gpr_mu_lock(&pi->worker_list_mu); // LOCK
- long num_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
-
- if (num_pollers >= g_max_pollers_per_pi) {
- push_back_worker_node(&pi->worker_list_head, &worker->pi_list_link);
- gpr_mu_unlock(&pi->worker_list_mu); // UNLOCK
-
- bool is_timeout = false;
- int ret;
- int timeout_ms = poll_deadline_to_millis_timeout(deadline, *now);
- if (timeout_ms == -1) {
- ret = sigwaitinfo(&g_wakeup_sig_set, NULL);
- } else {
- struct timespec sigwait_timeout = millis_to_timespec(timeout_ms);
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx);
- ret = sigtimedwait(&g_wakeup_sig_set, NULL, &sigwait_timeout);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
- }
-
- if (ret == -1) {
- if (errno == EAGAIN) {
- is_timeout = true;
- } else {
- /* NOTE: This should not happen. If we see these log messages, it means
-           we are most likely doing something incorrect in the setup needed
- for sigwaitinfo/sigtimedwait */
- gpr_log(GPR_ERROR,
- "sigtimedwait failed with retcode: %d (timeout_ms: %d)", errno,
- timeout_ms);
- }
- }
-
-    /* Did the worker come out of sigtimedwait because a thread that just
-       exited epoll kicked it (see the release_polling_lease function)? */
- bool is_polling_turn = gpr_atm_acq_load(&worker->is_polling_turn);
-
-    /* Did the worker come out of sigtimedwait because a thread alerted it that
-       some completion event was (likely) available in the completion queue? */
- bool is_kicked = gpr_atm_no_barrier_load(&worker->is_kicked);
-
- if (is_kicked || is_timeout) {
- *now = deadline; /* Essentially make the epoll timeout = 0 */
- } else if (is_polling_turn) {
- *now = gpr_now(GPR_CLOCK_MONOTONIC); /* Reduce the epoll timeout */
- }
-
- gpr_mu_lock(&pi->worker_list_mu); // LOCK
- /* The node might have already been removed from the list by the poller
- that kicked this. However it is safe to call 'remove_worker_node' on
- an already detached node */
- remove_worker_node(&worker->pi_list_link);
-    /* It is important to read num_pollers again under the lock so that we
-     * have the latest value, one that cannot change while we are doing the
-     * "(num_pollers < g_max_pollers_per_pi)" check a few lines below */
- num_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
- }
-
- if (num_pollers < g_max_pollers_per_pi) {
- gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
- is_lease_acquired = true;
- }
-
- gpr_mu_unlock(&pi->worker_list_mu); // UNLOCK
- return is_lease_acquired;
-}
-
-static void release_polling_lease(polling_island *pi, grpc_error **error) {
- gpr_mu_lock(&pi->worker_list_mu);
-
- gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
- worker_node *node = pop_front_worker_node(&pi->worker_list_head);
- if (node != NULL) {
- grpc_pollset_worker *next_worker = WORKER_FROM_WORKER_LIST_NODE(node);
- append_error(error, poller_kick(next_worker), "poller kick error");
- }
-
- gpr_mu_unlock(&pi->worker_list_mu);
-}
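-
-/* Editorial sketch (hypothetical caller, not part of this file): the two
-   functions above form an acquire/release bracket that caps the number of
-   concurrent epoll_pwait() callers per polling island:
-
-     if (acquire_polling_lease(exec_ctx, worker, pi, deadline, &now)) {
-       ...epoll_pwait(...)...;             // <= g_max_pollers_per_pi here
-       release_polling_lease(pi, &error);  // wake one queued waiter, if any
-     }
-*/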
-
-#define GRPC_EPOLL_MAX_EVENTS 100
-static void pollset_do_epoll_pwait(grpc_exec_ctx *exec_ctx, int epoll_fd,
- grpc_pollset *pollset, polling_island *pi,
- grpc_pollset_worker *worker,
- gpr_timespec now, gpr_timespec deadline,
- sigset_t *sig_mask, grpc_error **error) {
- /* Only g_max_pollers_per_pi threads can be doing polling in parallel.
- If we cannot get a lease, we cannot continue to do epoll_pwait() */
- if (!acquire_polling_lease(exec_ctx, worker, pi, deadline, &now)) {
- return;
- }
-
- struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
- int ep_rv;
- char *err_msg;
- const char *err_desc = "pollset_work_and_unlock";
-
- /* timeout_ms is the time between 'now' and 'deadline' */
- int timeout_ms = poll_deadline_to_millis_timeout(deadline, now);
-
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
- ep_rv =
- epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
-
- /* Give back the lease right away so that some other thread can enter */
- release_polling_lease(pi, error);
-
- if (ep_rv < 0) {
- if (errno != EINTR) {
- gpr_asprintf(&err_msg,
- "epoll_wait() epoll fd: %d failed with error: %d (%s)",
- epoll_fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- } else {
-      /* We were interrupted. Save an iteration by doing a zero-timeout
- epoll_wait to see if there are any other events of interest */
- GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker: %p received kick",
- (void *)pollset, (void *)worker);
- ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
- }
- }
-
-#ifdef GRPC_TSAN
-  /* See the definition of g_epoll_sync for more details */
- gpr_atm_acq_load(&g_epoll_sync);
-#endif /* defined(GRPC_TSAN) */
-
- for (int i = 0; i < ep_rv; ++i) {
- void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &polling_island_wakeup_fd) {
- GRPC_POLLING_TRACE(
- "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
- "%d) got merged",
- (void *)pollset, (void *)worker, epoll_fd);
- /* This means that our polling island is merged with a different
- island. We do not have to do anything here since the subsequent call
- to the function pollset_work_and_unlock() will pick up the correct
- epoll_fd */
- } else {
- grpc_fd *fd = data_ptr;
- int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
- int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
- int write_ev = ep_ev[i].events & EPOLLOUT;
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd, pollset);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
- }
- }
-}
-
-/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
-static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- gpr_timespec now, gpr_timespec deadline,
- sigset_t *sig_mask, grpc_error **error) {
- int epoll_fd = -1;
- polling_island *pi = NULL;
- GPR_TIMER_BEGIN("pollset_work_and_unlock", 0);
-
-  /* We need to get the epoll_fd to wait on. The epoll_fd is inside the
-     latest polling island pointed to by pollset->po.pi
-
- Since epoll_fd is immutable, it is safe to read it without a lock on the
- polling island. There is however a possibility that the polling island from
- which we got the epoll_fd, got merged with another island in the meantime.
- This is okay because in such a case, we will wakeup right-away from
- epoll_pwait() (because any merge will poison the old polling island's epoll
- set 'polling_island_wakeup_fd') and then pick up the latest polling_island
-     the next time this function (pollset_work_and_unlock()) is called */
-
- if (pollset->po.pi == NULL) {
- pollset->po.pi = polling_island_create(exec_ctx, NULL, error);
- if (pollset->po.pi == NULL) {
- GPR_TIMER_END("pollset_work_and_unlock", 0);
- return; /* Fatal error. Cannot continue */
- }
-
- PI_ADD_REF(pollset->po.pi, "ps");
- GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p",
- (void *)pollset, (void *)pollset->po.pi);
- }
-
- pi = polling_island_maybe_get_latest(pollset->po.pi);
- epoll_fd = pi->epoll_fd;
-
-  /* Update pollset->po.pi since the island pointed to by pollset->po.pi
-     may be older than the one pointed to by pi */
- if (pollset->po.pi != pi) {
- /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
- polling island to be deleted */
- PI_ADD_REF(pi, "ps");
- PI_UNREF(exec_ctx, pollset->po.pi, "ps");
- pollset->po.pi = pi;
- }
-
- /* Add an extra ref so that the island does not get destroyed (which means
-     the epoll_fd won't be closed) while we are doing an epoll_wait() on the
- epoll_fd */
- PI_ADD_REF(pi, "ps_work");
- gpr_mu_unlock(&pollset->po.mu);
-
- g_current_thread_polling_island = pi;
- pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now, deadline,
- sig_mask, error);
- g_current_thread_polling_island = NULL;
-
- GPR_ASSERT(pi != NULL);
-
- /* Before leaving, release the extra ref we added to the polling island. It
- is important to use "pi" here (i.e our old copy of pollset->po.pi
- that we got before releasing the polling island lock). This is because
-     pollset->po.pi pointer might get updated in other parts of the
- code when there is an island merge while we are doing epoll_wait() above */
- PI_UNREF(exec_ctx, pi, "ps_work");
-
- GPR_TIMER_END("pollset_work_and_unlock", 0);
-}
-
-/* pollset->po.mu lock must be held by the caller before calling this.
- The function pollset_work() may temporarily release the lock (pollset->po.mu)
- during the course of its execution but it will always re-acquire the lock and
- ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
- GPR_TIMER_BEGIN("pollset_work", 0);
- grpc_error *error = GRPC_ERROR_NONE;
-
- grpc_pollset_worker worker;
- pollset_worker_init(&worker);
-
- if (worker_hdl) *worker_hdl = &worker;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
-
- if (pollset->kicked_without_pollers) {
- /* If the pollset was kicked without pollers, pretend that the current
- worker got the kick and skip polling. A kick indicates that there is some
- work that needs attention like an event on the completion queue or an
- alarm */
- GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
-    pollset->kicked_without_pollers = false;
- } else if (!pollset->shutting_down) {
-    /* We use the POSIX signal with number 'grpc_wakeup_signal' for waking up
- (i.e 'kicking') a worker in the pollset. A 'kick' is a way to inform the
- worker that there is some pending work that needs immediate attention
- (like an event on the completion queue, or a polling island merge that
- results in a new epoll-fd to wait on) and that the worker should not
- spend time waiting in epoll_pwait().
-
- A worker can be kicked anytime from the point it is added to the pollset
- via push_front_worker() (or push_back_worker()) to the point it is
- removed via remove_worker().
-       If the worker is kicked before or while it calls epoll_pwait(), it
-       should immediately exit from epoll_pwait(). If the worker is kicked
-       after it returns from epoll_pwait(), then nothing really needs to be
-       done.
-
- To accomplish this, we mask 'grpc_wakeup_signal' on this thread at all
- times *except* when it is in epoll_pwait(). This way, the worker never
- misses acting on a kick */
-
- if (!g_initialized_sigmask) {
- sigemptyset(&g_wakeup_sig_set);
- sigaddset(&g_wakeup_sig_set, grpc_wakeup_signal);
- pthread_sigmask(SIG_BLOCK, &g_wakeup_sig_set, &g_orig_sigmask);
- sigdelset(&g_orig_sigmask, grpc_wakeup_signal);
- g_initialized_sigmask = true;
- /* new_mask: The new thread mask which blocks 'grpc_wakeup_signal'.
- This is the mask used at all times *except during
-                   epoll_wait()*
- g_orig_sigmask: The thread mask which allows 'grpc_wakeup_signal' and
- this is the mask to use *during epoll_wait()*
-
- The new_mask is set on the worker before it is added to the pollset
- (i.e before it can be kicked) */
- }
-
- push_front_worker(pollset, &worker); /* Add worker to pollset */
-
- pollset_work_and_unlock(exec_ctx, pollset, &worker, now, deadline,
- &g_orig_sigmask, &error);
- grpc_exec_ctx_flush(exec_ctx);
-
- gpr_mu_lock(&pollset->po.mu);
-
- /* Note: There is no need to reset worker.is_kicked to 0 since we are no
- longer going to use this worker */
- remove_worker(pollset, &worker);
- }
-
- /* If we are the last worker on the pollset (i.e pollset_has_workers() is
- false at this point) and the pollset is shutting down, we may have to
- finish the shutdown process by calling finish_shutdown_locked().
- See pollset_shutdown() for more details.
-
- Note: Continuing to access pollset here is safe; it is the caller's
- responsibility to not destroy a pollset when it has outstanding calls to
- pollset_work() */
- if (pollset->shutting_down && !pollset_has_workers(pollset) &&
- !pollset->finish_shutdown_called) {
- GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
-
- gpr_mu_unlock(&pollset->po.mu);
- grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->po.mu);
- }
-
- if (worker_hdl) *worker_hdl = NULL;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)0);
-
- GPR_TIMER_END("pollset_work", 0);
-
- GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
- return error;
-}
-
-static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag,
- poll_obj_type bag_type, poll_obj *item,
- poll_obj_type item_type) {
- GPR_TIMER_BEGIN("add_poll_object", 0);
-
-#ifndef NDEBUG
- GPR_ASSERT(item->obj_type == item_type);
- GPR_ASSERT(bag->obj_type == bag_type);
-#endif
-
- grpc_error *error = GRPC_ERROR_NONE;
- polling_island *pi_new = NULL;
-
- gpr_mu_lock(&bag->mu);
- gpr_mu_lock(&item->mu);
-
-retry:
- /*
- * 1) If item->pi and bag->pi are both non-NULL and equal, do nothing
- * 2) If item->pi and bag->pi are both NULL, create a new polling island (with
- * a refcount of 2) and point item->pi and bag->pi to the new island
- * 3) If exactly one of item->pi or bag->pi is NULL, update it to point to
- * the other's non-NULL pi
-   * 4) Finally if item->pi and bag->pi are non-NULL and not equal, merge the
- * polling islands and update item->pi and bag->pi to point to the new
- * island
- */
-
- /* Early out if we are trying to add an 'fd' to a 'bag' but the fd is already
- * orphaned */
- if (item_type == POLL_OBJ_FD && (FD_FROM_PO(item))->orphaned) {
- gpr_mu_unlock(&item->mu);
- gpr_mu_unlock(&bag->mu);
- return;
- }
-
- if (item->pi == bag->pi) {
- pi_new = item->pi;
- if (pi_new == NULL) {
-      /* GPR_ASSERT(item->pi == NULL && bag->pi == NULL) */
-
- /* If we are adding an fd to a bag (i.e pollset or pollset_set), then
- * we need to do some extra work to make TSAN happy */
- if (item_type == POLL_OBJ_FD) {
- /* Unlock before creating a new polling island: the polling island will
- create a workqueue which creates a file descriptor, and holding an fd
- lock here can eventually cause a loop to appear to TSAN (making it
- unhappy). We don't think it's a real loop (there's an epoch point
- where that loop possibility disappears), but the advantages of
- keeping TSAN happy outweigh any performance advantage we might have
- by keeping the lock held. */
- gpr_mu_unlock(&item->mu);
- pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error);
- gpr_mu_lock(&item->mu);
-
- /* Need to reverify any assumptions made between the initial lock and
- getting to this branch: if they've changed, we need to throw away our
- work and figure things out again. */
- if (item->pi != NULL) {
- GRPC_POLLING_TRACE(
- "add_poll_object: Raced creating new polling island. pi_new: %p "
- "(fd: %d, %s: %p)",
- (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
- (void *)bag);
- /* No need to lock 'pi_new' here since this is a new polling island
- and no one has a reference to it yet */
- polling_island_remove_all_fds_locked(pi_new, true, &error);
-
- /* Ref and unref so that the polling island gets deleted during unref
- */
- PI_ADD_REF(pi_new, "dance_of_destruction");
- PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
- goto retry;
- }
- } else {
- pi_new = polling_island_create(exec_ctx, NULL, &error);
- }
-
- GRPC_POLLING_TRACE(
- "add_poll_object: Created new polling island. pi_new: %p (%s: %p, "
- "%s: %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- } else {
- GRPC_POLLING_TRACE(
- "add_poll_object: Same polling island. pi: %p (%s, %s)",
- (void *)pi_new, poll_obj_string(item_type),
- poll_obj_string(bag_type));
- }
- } else if (item->pi == NULL) {
- /* GPR_ASSERT(bag->pi != NULL) */
-    /* Make pi_new point to the latest pi */
- pi_new = polling_island_lock(bag->pi);
-
- if (item_type == POLL_OBJ_FD) {
- grpc_fd *fd = FD_FROM_PO(item);
- polling_island_add_fds_locked(pi_new, &fd, 1, true, &error);
- }
-
- gpr_mu_unlock(&pi_new->mu);
- GRPC_POLLING_TRACE(
- "add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, "
- "bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- } else if (bag->pi == NULL) {
- /* GPR_ASSERT(item->pi != NULL) */
-    /* Make pi_new point to the latest pi */
- pi_new = polling_island_lock(item->pi);
- gpr_mu_unlock(&pi_new->mu);
- GRPC_POLLING_TRACE(
- "add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, "
- "bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- } else {
- pi_new = polling_island_merge(item->pi, bag->pi, &error);
- GRPC_POLLING_TRACE(
- "add_poll_obj: polling islands merged. pi_new: %p (item(%s): %p, "
- "bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- }
-
- /* At this point, pi_new is the polling island that both item->pi and bag->pi
- MUST be pointing to */
-
- if (item->pi != pi_new) {
- PI_ADD_REF(pi_new, poll_obj_string(item_type));
- if (item->pi != NULL) {
- PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type));
- }
- item->pi = pi_new;
- }
-
- if (bag->pi != pi_new) {
- PI_ADD_REF(pi_new, poll_obj_string(bag_type));
- if (bag->pi != NULL) {
- PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type));
- }
- bag->pi = pi_new;
- }
-
- gpr_mu_unlock(&item->mu);
- gpr_mu_unlock(&bag->mu);
-
- GRPC_LOG_IF_ERROR("add_poll_object", error);
- GPR_TIMER_END("add_poll_object", 0);
-}
-
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po,
- POLL_OBJ_FD);
-}
-
-/*******************************************************************************
- * Pollset-set Definitions
- */
-
-static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
- gpr_mu_init(&pss->po.mu);
- pss->po.pi = NULL;
-#ifndef NDEBUG
- pss->po.obj_type = POLL_OBJ_POLLSET_SET;
-#endif
- return pss;
-}
-
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss) {
- gpr_mu_destroy(&pss->po.mu);
-
- if (pss->po.pi != NULL) {
- PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy");
- }
-
- gpr_free(pss);
-}
-
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po,
- POLL_OBJ_FD);
-}
-
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po,
- POLL_OBJ_POLLSET);
-}
-
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po,
- POLL_OBJ_POLLSET_SET);
-}
-
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- /* Nothing to do */
-}
-
-/*******************************************************************************
- * Event engine binding
- */
-
-static void shutdown_engine(void) {
- fd_global_shutdown();
- pollset_global_shutdown();
- polling_island_global_shutdown();
-}
-
-static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
-};
-
-/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
- * Create a dummy epoll_fd to make sure epoll support is available */
-static bool is_epoll_available() {
- int fd = epoll_create1(EPOLL_CLOEXEC);
- if (fd < 0) {
- gpr_log(
- GPR_ERROR,
- "epoll_create1 failed with error: %d. Not using epoll polling engine",
-        errno);
- return false;
- }
- close(fd);
- return true;
-}
-
-/* This is mainly for testing purposes. Checks to see if environment variable
- * GRPC_MAX_POLLERS_PER_PI is set and if so, assigns that value to
- * g_max_pollers_per_pi (any negative value is considered INT_MAX) */
-static void set_max_pollers_per_island() {
- char *s = gpr_getenv("GRPC_MAX_POLLERS_PER_PI");
-  if (s) {
-    g_max_pollers_per_pi = (int)strtol(s, NULL, 10);
-    if (g_max_pollers_per_pi < 0) {
-      g_max_pollers_per_pi = INT_MAX;
-    }
-    gpr_free(s); /* gpr_getenv returns an allocated copy; release it */
- } else {
- g_max_pollers_per_pi = INT_MAX;
- }
-
- gpr_log(GPR_INFO, "Max number of pollers per polling island: %d",
- g_max_pollers_per_pi);
-}
-
-const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
- bool explicitly_requested) {
- if (!explicitly_requested) {
- return NULL;
- }
-
-  /* If use of signals is disabled, we cannot use the epoll engine */
- if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
- return NULL;
- }
-
- if (!grpc_has_wakeup_fd()) {
- return NULL;
- }
-
- if (!is_epoll_available()) {
- return NULL;
- }
-
- if (!is_grpc_wakeup_signal_initialized) {
- grpc_use_signal(SIGRTMIN + 6);
- }
-
- set_max_pollers_per_island();
-
- fd_global_init();
-
- if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
- return NULL;
- }
-
- if (!GRPC_LOG_IF_ERROR("polling_island_global_init",
- polling_island_global_init())) {
- return NULL;
- }
-
- return &vtable;
-}
-
-#else /* defined(GRPC_LINUX_EPOLL) */
-#if defined(GRPC_POSIX_SOCKET)
-#include "src/core/lib/iomgr/ev_posix.h"
-/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
- * NULL */
-const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
- bool explicitly_requested) {
- return NULL;
-}
-#endif /* defined(GRPC_POSIX_SOCKET) */
-#endif /* !defined(GRPC_LINUX_EPOLL) */
diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h
deleted file mode 100644
index 1d6af5f52c..0000000000
--- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H
-#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H
-
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/port.h"
-
-const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
- bool explicitly_requested);
-
-#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H */
diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
deleted file mode 100644
index ddb8c74d2d..0000000000
--- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-/* This polling engine is only relevant on linux kernels supporting epoll() */
-#ifdef GRPC_LINUX_EPOLL
-
-#include "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h"
-
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-#include <poll.h>
-#include <pthread.h>
-#include <string.h>
-#include <sys/epoll.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/tls.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/debug/stats.h"
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/lockfree_event.h"
-#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
-
-/* TODO: sreek - Move this to init.c and initialize this like other tracers. */
-#define GRPC_POLLING_TRACE(fmt, ...) \
- if (GRPC_TRACER_ON(grpc_polling_trace)) { \
- gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
- }
-
-/* The alarm system needs to be able to wake up 'some poller' sometimes
- * (specifically when a new alarm needs to be triggered earlier than the next
- * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
- * case occurs. */
-
-struct epoll_set;
-
-#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
-
-/*******************************************************************************
- * Fd Declarations
- */
-struct grpc_fd {
- gpr_mu mu;
- struct epoll_set *eps;
-
- int fd;
-
-  /* The fd is either closed or we relinquished control of it. In either case,
- this indicates that the 'fd' on this structure is no longer valid */
- bool orphaned;
-
- gpr_atm read_closure;
- gpr_atm write_closure;
-
- struct grpc_fd *freelist_next;
- grpc_closure *on_done_closure;
-
- grpc_iomgr_object iomgr_object;
-};
-
-static void fd_global_init(void);
-static void fd_global_shutdown(void);
-
-/*******************************************************************************
- * epoll set Declarations
- */
-
-#ifndef NDEBUG
-
-#define EPS_ADD_REF(p, r) eps_add_ref_dbg((p), (r), __FILE__, __LINE__)
-#define EPS_UNREF(exec_ctx, p, r) \
- eps_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-
-#else
-
-#define EPS_ADD_REF(p, r) eps_add_ref((p))
-#define EPS_UNREF(exec_ctx, p, r) eps_unref((exec_ctx), (p))
-
-#endif
-
-typedef struct epoll_set {
-  /* Mutex a poller should acquire to poll this. This enforces that only one
-   * poller can be polling on the epoll set at any time */
- gpr_mu mu;
-
- /* Ref count. Use EPS_ADD_REF() and EPS_UNREF() macros to increment/decrement
- the refcount. Once the ref count becomes zero, this structure is destroyed
-     which means we should ensure that there is never a scenario where an
-     EPS_ADD_REF() is racing with an EPS_UNREF() that just made the ref_count
-     zero. */
- gpr_atm ref_count;
-
-  /* Number of threads currently polling on this epoll set */
- gpr_atm poller_count;
-
-  /* Is the epoll set shut down? */
- gpr_atm is_shutdown;
-
- /* The fd of the underlying epoll set */
- int epoll_fd;
-} epoll_set;
-
-/*******************************************************************************
- * Pollset Declarations
- */
-struct grpc_pollset_worker {
- gpr_cv kick_cv;
-
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
-};
-
-struct grpc_pollset {
- gpr_mu mu;
- struct epoll_set *eps;
-
- grpc_pollset_worker root_worker;
- bool kicked_without_pollers;
-
-  bool shutting_down; /* Is the pollset shutting down? */
-  bool finish_shutdown_called; /* Has 'finish_shutdown_locked()' been called? */
-  grpc_closure *shutdown_done; /* Called after shutdown is complete */
-};
-
-/*******************************************************************************
- * Pollset-set Declarations
- */
-struct grpc_pollset_set {
- char unused;
-};
-
-/*****************************************************************************
- * Dedicated polling threads and pollsets - Declarations
- */
-
-size_t g_num_eps = 1;
-struct epoll_set **g_epoll_sets = NULL;
-gpr_atm g_next_eps;
-size_t g_num_threads_per_eps = 1;
-gpr_thd_id *g_poller_threads = NULL;
-
-/* Used as the read-notifier pollset for fds. We won't be using read-notifier
- * pollsets with this polling engine, so it does not matter which pollset we
- * return */
-grpc_pollset g_read_notifier;
-
-static void add_fd_to_eps(grpc_fd *fd);
-static bool init_epoll_sets();
-static void shutdown_epoll_sets();
-static void poller_thread_loop(void *arg);
-static void start_poller_threads();
-static void shutdown_poller_threads();
-
-/*******************************************************************************
- * Common helpers
- */
-
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
- if (error == GRPC_ERROR_NONE) return true;
- if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
- }
- *composite = grpc_error_add_child(*composite, error);
- return false;
-}
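-
-/* Editorial illustration (hypothetical call sites, not from the original
-   file): append_error() folds several failures into one composite error and
-   leaves *composite untouched on success, so checks can simply be chained:
-
-     grpc_error *error = GRPC_ERROR_NONE;
-     append_error(&error, step_one(), "init failure");  // step_one: made up
-     append_error(&error, step_two(), "init failure");  // step_two: made up
-     // error == GRPC_ERROR_NONE iff every step succeeded
-*/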
-
-/*******************************************************************************
- * epoll set Definitions
- */
-
-/* The wakeup fd that is used to wake up all threads in an epoll_set, informing
-   them that the epoll set is shut down. This wakeup fd is initialized to be
-   readable and MUST NOT be consumed, i.e. the threads that woke up MUST NOT
-   call grpc_wakeup_fd_consume_wakeup() */
-static grpc_wakeup_fd epoll_set_wakeup_fd;
-
-/* The epoll set being polled right now.
- See comments in workqueue_maybe_wakeup for why this is tracked. */
-static __thread epoll_set *g_current_thread_epoll_set;
-
-/* Forward declaration */
-static void epoll_set_delete(epoll_set *eps);
-
-#ifdef GRPC_TSAN
-/* Currently TSAN may incorrectly flag data races between epoll_ctl and
- epoll_wait for any grpc_fd structs that are added to the epoll set via
- epoll_ctl and are returned (within a very short window) via epoll_wait().
-
-   To work around this race, we establish a happens-before relation between
- the code just-before epoll_ctl() and the code after epoll_wait() by using
- this atomic */
-gpr_atm g_epoll_sync;
-#endif /* defined(GRPC_TSAN) */
-
-static void eps_add_ref(epoll_set *eps);
-static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps);
-
-#ifndef NDEBUG
-static void eps_add_ref_dbg(epoll_set *eps, const char *reason,
- const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
- gpr_log(GPR_DEBUG, "Add ref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- eps, old_cnt, old_cnt + 1, reason, file, line);
- }
- eps_add_ref(eps);
-}
-
-static void eps_unref_dbg(grpc_exec_ctx *exec_ctx, epoll_set *eps,
- const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
- gpr_log(GPR_DEBUG, "Unref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- eps, old_cnt, (old_cnt - 1), reason, file, line);
- }
- eps_unref(exec_ctx, eps);
-}
-#endif
-
-static void eps_add_ref(epoll_set *eps) {
- gpr_atm_no_barrier_fetch_add(&eps->ref_count, 1);
-}
-
-static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps) {
- /* If ref count went to zero, delete the epoll set. This deletion is
- not done under a lock since once the ref count goes to zero, we are
- guaranteed that no one else holds a reference to the epoll set (and
- that there is no racing eps_add_ref() call either).*/
- if (1 == gpr_atm_full_fetch_add(&eps->ref_count, -1)) {
- epoll_set_delete(eps);
- }
-}
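-
-/* Editorial note (illustration, not from the original source): the full
-   barrier on the fetch_add above is what makes this lock-free delete safe.
-   It guarantees that all of a thread's prior accesses to the epoll set are
-   visible before the count can drop, so whichever thread observes the 1 -> 0
-   transition sees a fully quiesced object and may free it immediately. */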
-
-static void epoll_set_add_fd_locked(epoll_set *eps, grpc_fd *fd,
- grpc_error **error) {
- int err;
- struct epoll_event ev;
- char *err_msg;
- const char *err_desc = "epoll_set_add_fd_locked";
-
-#ifdef GRPC_TSAN
- /* See the definition of g_epoll_sync for more context */
- gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0);
-#endif /* defined(GRPC_TSAN) */
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
- ev.data.ptr = fd;
- err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
- if (err < 0 && errno != EEXIST) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)",
- eps->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-}
-
-static void epoll_set_add_wakeup_fd_locked(epoll_set *eps,
- grpc_wakeup_fd *wakeup_fd,
- grpc_error **error) {
- struct epoll_event ev;
- int err;
- char *err_msg;
- const char *err_desc = "epoll_set_add_wakeup_fd";
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
- ev.data.ptr = wakeup_fd;
- err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_ADD,
- GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
- if (err < 0 && errno != EEXIST) {
- gpr_asprintf(&err_msg,
- "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
- "error: %d (%s)",
- eps->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno,
- strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-}
-
-static void epoll_set_remove_fd(epoll_set *eps, grpc_fd *fd, bool is_fd_closed,
- grpc_error **error) {
- int err;
- char *err_msg;
- const char *err_desc = "epoll_set_remove_fd";
-
-  /* If the fd is already closed, then it would have automatically been removed
-     from the epoll set */
- if (!is_fd_closed) {
- err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
- eps->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
- }
-}
-
-/* Might return NULL in case of an error */
-static epoll_set *epoll_set_create(grpc_error **error) {
- epoll_set *eps = NULL;
- const char *err_desc = "epoll_set_create";
-
- *error = GRPC_ERROR_NONE;
-
- eps = gpr_malloc(sizeof(*eps));
- eps->epoll_fd = -1;
-
- gpr_mu_init(&eps->mu);
-
- gpr_atm_rel_store(&eps->ref_count, 0);
- gpr_atm_rel_store(&eps->poller_count, 0);
-
- gpr_atm_rel_store(&eps->is_shutdown, false);
-
- eps->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
-
- if (eps->epoll_fd < 0) {
- append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc);
- goto done;
- }
-
-done:
- if (*error != GRPC_ERROR_NONE) {
- epoll_set_delete(eps);
- eps = NULL;
- }
- return eps;
-}
-
-static void epoll_set_delete(epoll_set *eps) {
- if (eps->epoll_fd >= 0) {
- close(eps->epoll_fd);
- }
-
- gpr_mu_destroy(&eps->mu);
-
- gpr_free(eps);
-}
-
-static grpc_error *epoll_set_global_init() {
- grpc_error *error = GRPC_ERROR_NONE;
-
- error = grpc_wakeup_fd_init(&epoll_set_wakeup_fd);
- if (error == GRPC_ERROR_NONE) {
- error = grpc_wakeup_fd_wakeup(&epoll_set_wakeup_fd);
- }
-
- return error;
-}
-
-static void epoll_set_global_shutdown() {
- grpc_wakeup_fd_destroy(&epoll_set_wakeup_fd);
-}
-
-/*******************************************************************************
- * Fd Definitions
- */
-
-/* We need to keep a freelist not because of any concern about malloc
- * performance but so that implementations with multiple threads in (for
- * example) epoll_wait can deal with the race between pollset removal and
- * incoming poll notifications.
- *
- * The problem is that the poller ultimately holds a reference to this
- * object, so it is very difficult to know when it is safe to free it, at
- * least without some expensive synchronization.
- *
- * If we keep the object freelisted, in the worst case losing this race just
- * becomes a spurious read notification on a reused fd.
- */
-
-static grpc_fd *fd_freelist = NULL;
-static gpr_mu fd_freelist_mu;
-
-static grpc_fd *get_fd_from_freelist() {
- grpc_fd *new_fd = NULL;
-
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist != NULL) {
- new_fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- }
- gpr_mu_unlock(&fd_freelist_mu);
- return new_fd;
-}
-
-static void add_fd_to_freelist(grpc_fd *fd) {
- gpr_mu_lock(&fd_freelist_mu);
- fd->freelist_next = fd_freelist;
- fd_freelist = fd;
- grpc_iomgr_unregister_object(&fd->iomgr_object);
-
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
-
- gpr_mu_unlock(&fd_freelist_mu);
-}
-
-static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
-
-static void fd_global_shutdown(void) {
- gpr_mu_lock(&fd_freelist_mu);
- gpr_mu_unlock(&fd_freelist_mu);
- while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- gpr_mu_destroy(&fd->mu);
- gpr_free(fd);
- }
- gpr_mu_destroy(&fd_freelist_mu);
-}
-
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = get_fd_from_freelist();
- if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&new_fd->mu);
- }
-
-  /* Note: It is not strictly necessary to take the new_fd->mu lock here. If
-   * this is a newly created fd (or an fd we got from the freelist), no one
-   * else could be holding a lock on it anyway. */
- gpr_mu_lock(&new_fd->mu);
- new_fd->eps = NULL;
-
- new_fd->fd = fd;
- new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
-
- new_fd->freelist_next = NULL;
- new_fd->on_done_closure = NULL;
-
- gpr_mu_unlock(&new_fd->mu);
-
- char *fd_name;
- gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
- grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
- gpr_free(fd_name);
-
- /* Associate the fd with one of the eps */
- add_fd_to_eps(new_fd);
- return new_fd;
-}
-
-static int fd_wrapped_fd(grpc_fd *fd) {
- int ret_fd = -1;
- gpr_mu_lock(&fd->mu);
- if (!fd->orphaned) {
- ret_fd = fd->fd;
- }
- gpr_mu_unlock(&fd->mu);
-
- return ret_fd;
-}
-
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
- bool is_fd_closed = already_closed;
- grpc_error *error = GRPC_ERROR_NONE;
- epoll_set *unref_eps = NULL;
-
- gpr_mu_lock(&fd->mu);
- fd->on_done_closure = on_done;
-
- /* If release_fd is not NULL, we should be relinquishing control of the file
- descriptor fd->fd (but we still own the grpc_fd structure). */
- if (release_fd != NULL) {
- *release_fd = fd->fd;
- } else if (!is_fd_closed) {
- close(fd->fd);
- is_fd_closed = true;
- }
-
- fd->orphaned = true;
-
- /* Remove the fd from the epoll set */
- if (fd->eps != NULL) {
- epoll_set_remove_fd(fd->eps, fd, is_fd_closed, &error);
- unref_eps = fd->eps;
- fd->eps = NULL;
- }
-
- GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
-
- gpr_mu_unlock(&fd->mu);
-
-  /* We are done with this fd. Release it (i.e. add it back to the freelist) */
- add_fd_to_freelist(fd);
-
- if (unref_eps != NULL) {
- /* Unref stale epoll set here, outside the fd lock above.
- The epoll set owns a workqueue which owns an fd, and unreffing
- inside the lock can cause an eventual lock loop that makes TSAN very
- unhappy. */
- EPS_UNREF(exec_ctx, unref_eps, "fd_orphan");
- }
- GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
- GRPC_ERROR_UNREF(error);
-}
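The comment inside fd_orphan() describes a lock-ordering discipline worth spelling out: while the fd lock is held, the code only records what must be released, and the actual unref happens after the unlock. A minimal sketch of that pattern, using plain pthreads in place of the gpr wrappers; the obj/holder types and obj_unref() are hypothetical names for illustration:

    #include <pthread.h>
    #include <stddef.h>

    typedef struct obj obj;
    extern void obj_unref(obj *o); /* hypothetical: may take other locks */

    typedef struct holder {
      pthread_mutex_t mu;
      obj *target;
    } holder;

    void holder_detach(holder *h) {
      obj *to_unref = NULL;
      pthread_mutex_lock(&h->mu);
      /* Only record what must be released; do not call out while locked. */
      to_unref = h->target;
      h->target = NULL;
      pthread_mutex_unlock(&h->mu);
      /* Unref outside the lock so any locks taken inside obj_unref() can
         never nest under h->mu, which would create a lock-order cycle. */
      if (to_unref != NULL) obj_unref(to_unref);
    }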
-
-/* This polling engine doesn't really need the read notifier functionality. So
- * it just returns a dummy read notifier pollset */
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
- return &g_read_notifier;
-}
-
-static bool fd_is_shutdown(grpc_fd *fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
-}
-
-/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
- shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
- }
- GRPC_ERROR_UNREF(why);
-}
-
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
-}
-
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
-}
-
-/*******************************************************************************
- * Pollset Definitions
- */
-/* TODO: sreek - Not needed anymore */
-GPR_TLS_DECL(g_current_thread_pollset);
-GPR_TLS_DECL(g_current_thread_worker);
-
-static void pollset_worker_init(grpc_pollset_worker *worker) {
- worker->next = worker->prev = NULL;
- gpr_cv_init(&worker->kick_cv);
-}
-
-/* Global state management */
-static grpc_error *pollset_global_init(void) {
- gpr_tls_init(&g_current_thread_pollset);
- gpr_tls_init(&g_current_thread_worker);
- return GRPC_ERROR_NONE;
-}
-
-static void pollset_global_shutdown(void) {
- gpr_tls_destroy(&g_current_thread_pollset);
- gpr_tls_destroy(&g_current_thread_worker);
-}
-
-static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
- gpr_cv_signal(&worker->kick_cv);
- return GRPC_ERROR_NONE;
-}
-
-/* Return 1 if the pollset has active threads in pollset_work (pollset must
- * be locked) */
-static int pollset_has_workers(grpc_pollset *p) {
- return p->root_worker.next != &p->root_worker;
-}
-
-static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev->next = worker->next;
- worker->next->prev = worker->prev;
-}
-
-static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
- if (pollset_has_workers(p)) {
- grpc_pollset_worker *w = p->root_worker.next;
- remove_worker(p, w);
- return w;
- } else {
- return NULL;
- }
-}
-
-static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->next = &p->root_worker;
- worker->prev = worker->next->prev;
- worker->prev->next = worker->next->prev = worker;
-}
-
-static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev = &p->root_worker;
- worker->next = worker->prev->next;
- worker->prev->next = worker->next->prev = worker;
-}
-
-/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
- GPR_TIMER_BEGIN("pollset_kick", 0);
- grpc_error *error = GRPC_ERROR_NONE;
- const char *err_desc = "Kick Failure";
- grpc_pollset_worker *worker = specific_worker;
- if (worker != NULL) {
- if (worker == GRPC_POLLSET_KICK_BROADCAST) {
- if (pollset_has_workers(p)) {
- GPR_TIMER_BEGIN("pollset_kick.broadcast", 0);
- for (worker = p->root_worker.next; worker != &p->root_worker;
- worker = worker->next) {
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- GPR_TIMER_END("pollset_kick.broadcast", 0);
- } else {
- p->kicked_without_pollers = true;
- }
- } else {
- GPR_TIMER_MARK("kicked_specifically", 0);
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) {
- /* Since worker == NULL, we can kick "any" worker on this pollset 'p'.
- If 'p' happens to be the same pollset this thread is currently polling
- (i.e., inside pollset_work()), there is no need to kick any other
- worker: the current thread can simply absorb the kick. That is why we
- enter this case only when g_current_thread_pollset != p */
-
- GPR_TIMER_MARK("kick_anonymous", 0);
- worker = pop_front_worker(p);
- if (worker != NULL) {
- GPR_TIMER_MARK("finally_kick", 0);
- push_back_worker(p, worker);
- append_error(&error, pollset_worker_kick(worker), err_desc);
- } else {
- GPR_TIMER_MARK("kicked_no_pollers", 0);
- p->kicked_without_pollers = true;
- }
- }
-
- GPR_TIMER_END("pollset_kick", 0);
- GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error));
- return error;
-}
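The kick mechanism above is condition-variable based: each parked worker waits on its own kick_cv, and a kick is just a signal on that cv. A stripped-down sketch of the same wait/kick pair with plain pthreads (the engine above uses gpr_mu/gpr_cv, a doubly linked worker list, and a timed wait on the deadline rather than an explicit flag; the kicked flag here is an illustrative simplification):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct worker {
      pthread_cond_t kick_cv;
      bool kicked;
    } worker;

    /* Both functions are called with the pollset mutex 'mu' held,
       mirroring the "p->mu must be held" contract of pollset_kick(). */
    void worker_park(worker *w, pthread_mutex_t *mu) {
      while (!w->kicked) pthread_cond_wait(&w->kick_cv, mu);
      w->kicked = false; /* consume the kick */
    }

    void worker_kick(worker *w) {
      w->kicked = true;
      pthread_cond_signal(&w->kick_cv);
    }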
-
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- gpr_mu_init(&pollset->mu);
- *mu = &pollset->mu;
- pollset->eps = NULL;
-
- pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
- pollset->kicked_without_pollers = false;
-
- pollset->shutting_down = false;
- pollset->finish_shutdown_called = false;
- pollset->shutdown_done = NULL;
-}
-
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
-}
-
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
-}
-
-static void pollset_release_epoll_set(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
- char *reason) {
- if (ps->eps != NULL) {
- EPS_UNREF(exec_ctx, ps->eps, reason);
- }
- ps->eps = NULL;
-}
-
-static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- /* The pollset cannot have any workers if we are at this stage */
- GPR_ASSERT(!pollset_has_workers(pollset));
-
- pollset->finish_shutdown_called = true;
-
- /* Release the ref and set pollset->eps to NULL */
- pollset_release_epoll_set(exec_ctx, pollset, "ps_shutdown");
- GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
-}
-
-/* pollset->mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
- GPR_TIMER_BEGIN("pollset_shutdown", 0);
- GPR_ASSERT(!pollset->shutting_down);
- pollset->shutting_down = true;
- pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
-
- /* If the pollset has any workers, we cannot call finish_shutdown_locked()
- because it would release the underlying epoll set. In such a case, we
- let the last worker call finish_shutdown_locked() from pollset_work() */
- if (!pollset_has_workers(pollset)) {
- GPR_ASSERT(!pollset->finish_shutdown_called);
- GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
- }
- GPR_TIMER_END("pollset_shutdown", 0);
-}
-
-/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
- * than destroying the mutexes, there is nothing special that needs to be done
- * here */
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- GPR_ASSERT(!pollset_has_workers(pollset));
- gpr_mu_destroy(&pollset->mu);
-}
-
-/* Blocking call */
-static void acquire_epoll_lease(epoll_set *eps) {
- if (g_num_threads_per_eps > 1) {
- gpr_mu_lock(&eps->mu);
- }
-}
-
-static void release_epoll_lease(epoll_set *eps) {
- if (g_num_threads_per_eps > 1) {
- gpr_mu_unlock(&eps->mu);
- }
-}
-
-#define GRPC_EPOLL_MAX_EVENTS 100
-static void do_epoll_wait(grpc_exec_ctx *exec_ctx, int epoll_fd, epoll_set *eps,
- grpc_error **error) {
- struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
- int ep_rv;
- char *err_msg;
- const char *err_desc = "do_epoll_wait";
-
- int timeout_ms = -1;
-
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- acquire_epoll_lease(eps);
- GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
- ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms);
- release_epoll_lease(eps);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
-
- if (ep_rv < 0) {
- gpr_asprintf(&err_msg,
- "epoll_wait() epoll fd: %d failed with error: %d (%s)",
- epoll_fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- }
-
-#ifdef GRPC_TSAN
- /* See the definition of g_poll_sync for more details */
- gpr_atm_acq_load(&g_epoll_sync);
-#endif /* defined(GRPC_TSAN) */
-
- for (int i = 0; i < ep_rv; ++i) {
- void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &epoll_set_wakeup_fd) {
- gpr_atm_rel_store(&eps->is_shutdown, 1);
- gpr_log(GPR_INFO, "pollset poller: shutdown set");
- } else {
- grpc_fd *fd = data_ptr;
- int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
- int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
- int write_ev = ep_ev[i].events & EPOLLOUT;
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
- }
- }
-}
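The EPOLLERR/EPOLLHUP handling in do_epoll_wait() is the standard trick: error and hang-up conditions are surfaced as both readable and writable, so pending read and write closures fire and observe the failure on their next syscall. A self-contained sketch of the same decoding, assuming Linux and the <sys/epoll.h> API; poll_once() and its callbacks are illustrative names:

    #include <sys/epoll.h>

    #define MAX_EVENTS 100

    /* Returns the number of events decoded, or -1 if epoll_wait fails. */
    int poll_once(int epfd, void (*on_read)(void *), void (*on_write)(void *)) {
      struct epoll_event evs[MAX_EVENTS];
      int n = epoll_wait(epfd, evs, MAX_EVENTS, -1 /* block indefinitely */);
      if (n < 0) return -1;
      for (int i = 0; i < n; i++) {
        int cancel = evs[i].events & (EPOLLERR | EPOLLHUP);
        if ((evs[i].events & (EPOLLIN | EPOLLPRI)) || cancel) {
          on_read(evs[i].data.ptr); /* readable, or failed: reader checks */
        }
        if ((evs[i].events & EPOLLOUT) || cancel) {
          on_write(evs[i].data.ptr); /* writable, or failed: writer checks */
        }
      }
      return n;
    }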
-
-static void epoll_set_work(grpc_exec_ctx *exec_ctx, epoll_set *eps,
- grpc_error **error) {
- int epoll_fd = -1;
- GPR_TIMER_BEGIN("epoll_set_work", 0);
-
- /* Since epoll_fd is immutable, it is safe to read it without a lock on the
- epoll set. */
- epoll_fd = eps->epoll_fd;
-
- gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1);
- g_current_thread_epoll_set = eps;
-
- do_epoll_wait(exec_ctx, epoll_fd, eps, error);
-
- g_current_thread_epoll_set = NULL;
- gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1);
-
- GPR_TIMER_END("epoll_set_work", 0);
-}
-
-/* pollset->mu lock must be held by the caller before calling this.
- The function pollset_work() may temporarily release the lock (pollset->mu)
- during the course of its execution but it will always re-acquire the lock and
- ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
- GPR_TIMER_BEGIN("pollset_work", 0);
- grpc_error *error = GRPC_ERROR_NONE;
-
- grpc_pollset_worker worker;
- pollset_worker_init(&worker);
-
- if (worker_hdl) *worker_hdl = &worker;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
-
- if (pollset->kicked_without_pollers) {
- /* If the pollset was kicked without pollers, pretend that the current
- worker got the kick and skip polling. A kick indicates that there is
- some work that needs attention, such as an event on the completion
- queue or an alarm */
- GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
- pollset->kicked_without_pollers = 0;
- } else if (!pollset->shutting_down) {
- push_front_worker(pollset, &worker);
-
- gpr_cv_wait(&worker.kick_cv, &pollset->mu,
- gpr_convert_clock_type(deadline, GPR_CLOCK_REALTIME));
- /* pollset->mu locked here */
-
- remove_worker(pollset, &worker);
- }
-
- /* If we are the last worker on the pollset (i.e., pollset_has_workers() is
- false at this point) and the pollset is shutting down, we may have to
- finish the shutdown process by calling finish_shutdown_locked().
- See pollset_shutdown() for more details.
-
- Note: Continuing to access pollset here is safe; it is the caller's
- responsibility to not destroy a pollset when it has outstanding calls to
- pollset_work() */
- if (pollset->shutting_down && !pollset_has_workers(pollset) &&
- !pollset->finish_shutdown_called) {
- GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
-
- gpr_mu_unlock(&pollset->mu);
- grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->mu);
- }
-
- if (worker_hdl) *worker_hdl = NULL;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)0);
-
- GPR_TIMER_END("pollset_work", 0);
-
- GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
- return error;
-}
-
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-/*******************************************************************************
- * Pollset-set Definitions
- */
-grpc_pollset_set g_dummy_pollset_set;
-static grpc_pollset_set *pollset_set_create(void) {
- return &g_dummy_pollset_set;
-}
-
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- /* Nothing to do */
-}
-
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- /* Nothing to do */
-}
-
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- /* Nothing to do */
-}
-
-/*******************************************************************************
- * Event engine binding
- */
-
-static void shutdown_engine(void) {
- shutdown_poller_threads();
- shutdown_epoll_sets();
- fd_global_shutdown();
- pollset_global_shutdown();
- epoll_set_global_shutdown();
- gpr_log(GPR_INFO, "ev-epoll-threadpool engine shutdown complete");
-}
-
-static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
-};
-
-/*****************************************************************************
- * Dedicated polling threads and pollsets - Definitions
- */
-static void add_fd_to_eps(grpc_fd *fd) {
- GPR_ASSERT(fd->eps == NULL);
- GPR_TIMER_BEGIN("add_fd_to_eps", 0);
-
- grpc_error *error = GRPC_ERROR_NONE;
- size_t idx = (size_t)gpr_atm_no_barrier_fetch_add(&g_next_eps, 1) % g_num_eps;
- epoll_set *eps = g_epoll_sets[idx];
-
- gpr_mu_lock(&fd->mu);
-
- if (fd->orphaned) {
- gpr_mu_unlock(&fd->mu);
- return; /* Early out */
- }
-
- epoll_set_add_fd_locked(eps, fd, &error);
- EPS_ADD_REF(eps, "fd");
- fd->eps = eps;
-
- GRPC_POLLING_TRACE("add_fd_to_eps (fd: %d, eps idx = %" PRIdPTR ")", fd->fd,
- idx);
- gpr_mu_unlock(&fd->mu);
-
- GRPC_LOG_IF_ERROR("add_fd_to_eps", error);
- GPR_TIMER_END("add_fd_to_eps", 0);
-}
-
-static bool init_epoll_sets() {
- grpc_error *error = GRPC_ERROR_NONE;
- bool is_success = true;
-
- g_epoll_sets = (epoll_set **)gpr_malloc(g_num_eps * sizeof(epoll_set *));
-
- for (size_t i = 0; i < g_num_eps; i++) {
- g_epoll_sets[i] = epoll_set_create(&error);
- if (g_epoll_sets[i] == NULL) {
- gpr_log(GPR_ERROR, "Error in creating a epoll set");
- g_num_eps = i; /* Helps cleanup */
- shutdown_epoll_sets();
- is_success = false;
- goto done;
- }
-
- EPS_ADD_REF(g_epoll_sets[i], "init_epoll_sets");
- }
-
- gpr_atm_no_barrier_store(&g_next_eps, 0);
- gpr_mu *mu;
- pollset_init(&g_read_notifier, &mu);
-
-done:
- GRPC_LOG_IF_ERROR("init_epoll_sets", error);
- return is_success;
-}
-
-static void shutdown_epoll_sets() {
- if (!g_epoll_sets) {
- return;
- }
-
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- for (size_t i = 0; i < g_num_eps; i++) {
- EPS_UNREF(&exec_ctx, g_epoll_sets[i], "shutdown_epoll_sets");
- }
- grpc_exec_ctx_flush(&exec_ctx);
-
- gpr_free(g_epoll_sets);
- g_epoll_sets = NULL;
- pollset_destroy(&exec_ctx, &g_read_notifier);
- grpc_exec_ctx_finish(&exec_ctx);
-}
-
-static void poller_thread_loop(void *arg) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_error *error = GRPC_ERROR_NONE;
- epoll_set *eps = (epoll_set *)arg;
-
- while (!gpr_atm_acq_load(&eps->is_shutdown)) {
- epoll_set_work(&exec_ctx, eps, &error);
- grpc_exec_ctx_flush(&exec_ctx);
- }
-
- grpc_exec_ctx_finish(&exec_ctx);
- GRPC_LOG_IF_ERROR("poller_thread_loop", error);
-}
-
-/* g_epoll_sets MUST be initialized before calling this */
-static void start_poller_threads() {
- GPR_ASSERT(g_epoll_sets);
-
- gpr_log(GPR_INFO, "Starting poller threads");
-
- size_t num_threads = g_num_eps * g_num_threads_per_eps;
- g_poller_threads = (gpr_thd_id *)gpr_malloc(num_threads * sizeof(gpr_thd_id));
- gpr_thd_options options = gpr_thd_options_default();
- gpr_thd_options_set_joinable(&options);
-
- for (size_t i = 0; i < num_threads; i++) {
- gpr_thd_new(&g_poller_threads[i], poller_thread_loop,
- (void *)g_epoll_sets[i % g_num_eps], &options);
- }
-}
-
-static void shutdown_poller_threads() {
- GPR_ASSERT(g_poller_threads);
- GPR_ASSERT(g_epoll_sets);
- grpc_error *error = GRPC_ERROR_NONE;
-
- gpr_log(GPR_INFO, "Shutting down pollers");
-
- epoll_set *eps = NULL;
- size_t num_threads = g_num_eps * g_num_threads_per_eps;
- for (size_t i = 0; i < num_threads; i++) {
- eps = g_epoll_sets[i];
- epoll_set_add_wakeup_fd_locked(eps, &epoll_set_wakeup_fd, &error);
- }
-
- for (size_t i = 0; i < g_num_eps; i++) {
- gpr_thd_join(g_poller_threads[i]);
- }
-
- GRPC_LOG_IF_ERROR("shutdown_poller_threads", error);
- gpr_free(g_poller_threads);
- g_poller_threads = NULL;
-}
-
-/****************************************************************************/
-
-/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
- * Create a dummy epoll_fd to make sure epoll support is available */
-static bool is_epoll_available() {
- int fd = epoll_create1(EPOLL_CLOEXEC);
- if (fd < 0) {
- gpr_log(GPR_ERROR,
- "epoll_create1 failed with error %d (%s). Not using epoll polling "
- "engine",
- errno, strerror(errno));
- return false;
- }
- close(fd);
- return true;
-}
-
-const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
- bool requested_explicitly) {
- if (!requested_explicitly) return NULL;
-
- if (!grpc_has_wakeup_fd()) {
- return NULL;
- }
-
- if (!is_epoll_available()) {
- return NULL;
- }
-
- fd_global_init();
-
- if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
- return NULL;
- }
-
- if (!GRPC_LOG_IF_ERROR("epoll_set_global_init", epoll_set_global_init())) {
- return NULL;
- }
-
- if (!init_epoll_sets()) {
- return NULL;
- }
-
- /* TODO (sreek): May not be a good idea to start threads here (especially
- * if this engine doesn't get picked). Consider introducing an engine_init
- * function in the vtable. */
- start_poller_threads();
- return &vtable;
-}
-
-#else /* defined(GRPC_LINUX_EPOLL) */
-#if defined(GRPC_POSIX_SOCKET)
-#include "src/core/lib/iomgr/ev_posix.h"
-/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
- * NULL */
-const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
- bool requested_explicitly) {
- return NULL;
-}
-#endif /* defined(GRPC_POSIX_SOCKET) */
-#endif /* !defined(GRPC_LINUX_EPOLL) */
diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h
deleted file mode 100644
index 2ca68cc9d1..0000000000
--- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H
-#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H
-
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/port.h"
-
-const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
- bool requested_explicitly);
-
-#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H */
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 424b40e425..bb43061ff5 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -31,8 +31,6 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
-#include "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h"
-#include "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h"
#include "src/core/lib/iomgr/ev_epollex_linux.h"
#include "src/core/lib/iomgr/ev_epollsig_linux.h"
#include "src/core/lib/iomgr/ev_poll_posix.h"
@@ -66,8 +64,6 @@ typedef struct {
static const event_engine_factory g_factories[] = {
{"epoll1", grpc_init_epoll1_linux},
{"epollsig", grpc_init_epollsig_linux},
- {"epoll-threadpool", grpc_init_epoll_thread_pool_linux},
- {"epoll-limited", grpc_init_epoll_limited_pollers_linux},
{"poll", grpc_init_poll_posix},
{"poll-cv", grpc_init_poll_cv_posix},
{"epollex", grpc_init_epollex_linux},
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c
index 3d19953eeb..1feea6d628 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.c
@@ -164,13 +164,7 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) {
bool grpc_iomgr_abort_on_leaks(void) {
char *env = gpr_getenv("GRPC_ABORT_ON_LEAKS");
- if (env == NULL) return false;
- static const char *truthy[] = {"yes", "Yes", "YES", "true",
- "True", "TRUE", "1"};
- bool should_we = false;
- for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {
- if (0 == strcmp(env, truthy[i])) should_we = true;
- }
+ bool should_we = gpr_is_true(env);
gpr_free(env);
return should_we;
}
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 793147f30a..98a2afd78b 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -250,7 +250,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_iov = iov;
- msg.msg_iovlen = tcp->incoming_buffer->count;
+ msg.msg_iovlen = (msg_iovlen_type)tcp->incoming_buffer->count;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index 531a88434f..dd7dd44e79 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -39,6 +39,8 @@
/* We can have per-call credentials. */
typedef struct {
+ grpc_call_stack *owning_call;
+ grpc_call_combiner *call_combiner;
grpc_call_credentials *creds;
bool have_host;
bool have_method;
@@ -49,17 +51,12 @@ typedef struct {
pollset_set so that work can progress when this call wants work to progress
*/
grpc_polling_entity *pollent;
- gpr_atm security_context_set;
- gpr_mu security_context_mu;
grpc_credentials_mdelem_array md_array;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
- grpc_closure closure;
- // Either 0 (no cancellation and no async operation in flight),
- // a grpc_closure* (if the lowest bit is 0),
- // or a grpc_error* (if the lowest bit is 1).
- gpr_atm cancellation_state;
- grpc_closure cancel_closure;
+ grpc_closure async_result_closure;
+ grpc_closure check_call_host_cancel_closure;
+ grpc_closure get_request_metadata_cancel_closure;
} call_data;
/* We can have per-channel credentials. */
@@ -68,43 +65,6 @@ typedef struct {
grpc_auth_context *auth_context;
} channel_data;
-static void decode_cancel_state(gpr_atm cancel_state, grpc_closure **func,
- grpc_error **error) {
- // If the lowest bit is 1, the value is a grpc_error*.
- * Otherwise, if non-zero, the value is a grpc_closure*.
- if (cancel_state & 1) {
- *error = (grpc_error *)(cancel_state & ~(gpr_atm)1);
- } else if (cancel_state != 0) {
- *func = (grpc_closure *)cancel_state;
- }
-}
-
-static gpr_atm encode_cancel_state_error(grpc_error *error) {
- // Set the lowest bit to 1 to indicate that it's an error.
- return (gpr_atm)1 | (gpr_atm)error;
-}
-
-// Returns an error if the call has been cancelled. Otherwise, sets the
-// cancellation function to be called upon cancellation.
-static grpc_error *set_cancel_func(grpc_call_element *elem,
- grpc_iomgr_cb_func func) {
- call_data *calld = (call_data *)elem->call_data;
- // Decode original state.
- gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *original_error = GRPC_ERROR_NONE;
- grpc_closure *original_func = NULL;
- decode_cancel_state(original_state, &original_func, &original_error);
- // If error is set, return it.
- if (original_error != GRPC_ERROR_NONE) return GRPC_ERROR_REF(original_error);
- // Otherwise, store func.
- GRPC_CLOSURE_INIT(&calld->cancel_closure, func, elem,
- grpc_schedule_on_exec_ctx);
- GPR_ASSERT(((gpr_atm)&calld->cancel_closure & (gpr_atm)1) == 0);
- gpr_atm_rel_store(&calld->cancellation_state,
- (gpr_atm)&calld->cancel_closure);
- return GRPC_ERROR_NONE;
-}
-
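The deleted cancellation state packed either a grpc_closure* or a grpc_error* into a single atomic word, using the low bit as a type tag. That is safe because both objects are at-least-word-aligned heap allocations, so their addresses always have a zero low bit. The trick in isolation (uintptr_t stands in for gpr_atm; the closure/error structs are placeholders):

    #include <assert.h>
    #include <stdint.h>

    typedef struct closure closure; /* placeholder, assumed aligned */
    typedef struct error error;     /* placeholder, assumed aligned */

    static uintptr_t encode_error(error *e) {
      assert(((uintptr_t)e & 1) == 0); /* low bit must be free for the tag */
      return (uintptr_t)e | 1;         /* tag bit 1 => error pointer */
    }

    static uintptr_t encode_closure(closure *c) {
      assert(((uintptr_t)c & 1) == 0);
      return (uintptr_t)c;             /* tag bit 0 => closure pointer, or 0 */
    }

    static void decode(uintptr_t state, closure **c, error **e) {
      if (state & 1) {
        *e = (error *)(state & ~(uintptr_t)1);
      } else if (state != 0) {
        *c = (closure *)state;
      }
    }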
static void reset_auth_metadata_context(
grpc_auth_metadata_context *auth_md_context) {
if (auth_md_context->service_url != NULL) {
@@ -153,7 +113,8 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
} else {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAUTHENTICATED);
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error);
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+ calld->call_combiner);
}
}
@@ -191,8 +152,12 @@ static void cancel_get_request_metadata(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- grpc_call_credentials_cancel_get_request_metadata(
- exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
+ if (error != GRPC_ERROR_NONE) {
+ grpc_call_credentials_cancel_get_request_metadata(
+ exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+ "cancel_get_request_metadata");
}
static void send_security_metadata(grpc_exec_ctx *exec_ctx,
@@ -223,7 +188,8 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Incompatible credentials set on channel and call."),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED));
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED),
+ calld->call_combiner);
return;
}
} else {
@@ -234,22 +200,25 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
build_auth_metadata_context(&chand->security_connector->base,
chand->auth_context, calld);
- grpc_error *cancel_error = set_cancel_func(elem, cancel_get_request_metadata);
- if (cancel_error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- cancel_error);
- return;
- }
GPR_ASSERT(calld->pollent != NULL);
- GRPC_CLOSURE_INIT(&calld->closure, on_credentials_metadata, batch,
- grpc_schedule_on_exec_ctx);
+
+ GRPC_CLOSURE_INIT(&calld->async_result_closure, on_credentials_metadata,
+ batch, grpc_schedule_on_exec_ctx);
grpc_error *error = GRPC_ERROR_NONE;
if (grpc_call_credentials_get_request_metadata(
exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
- &calld->md_array, &calld->closure, &error)) {
+ &calld->md_array, &calld->async_result_closure, &error)) {
// Synchronous return; invoke on_credentials_metadata() directly.
on_credentials_metadata(exec_ctx, batch, error);
GRPC_ERROR_UNREF(error);
+ } else {
+ // Async return; register cancellation closure with call combiner.
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata");
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure,
+ cancel_get_request_metadata, elem,
+ grpc_schedule_on_exec_ctx));
}
}
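The async branch above is the new call-combiner cancellation idiom that replaces the tagged-pointer scheme: take a call-stack ref, register a cancellation closure, and rely on the combiner to run that closure exactly once, either with the cancellation error or with GRPC_ERROR_NONE when the closure is later unset or replaced, so it can always drop the ref. A schematic version in plain C; every name here is hypothetical:

    typedef struct pending_op pending_op;

    extern void op_ref(pending_op *op);              /* hypothetical */
    extern void op_unref(pending_op *op);            /* hypothetical */
    extern void abort_pending_work(pending_op *op);  /* hypothetical */
    extern void combiner_set_notify_on_cancel(       /* hypothetical */
        pending_op *op, void (*cb)(pending_op *, int cancelled));

    /* Runs exactly once: with cancelled != 0 on cancellation, or with
       cancelled == 0 when the closure is unset/replaced. Either way it
       must drop the ref taken in start_async_op(). */
    static void on_cancel(pending_op *op, int cancelled) {
      if (cancelled) abort_pending_work(op);
      op_unref(op);
    }

    static void start_async_op(pending_op *op) {
      op_ref(op); /* keep the op alive until on_cancel fires */
      combiner_set_notify_on_cancel(op, on_cancel);
    }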
@@ -258,7 +227,6 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
grpc_call_element *elem = batch->handler_private.extra_arg;
call_data *calld = elem->call_data;
-
if (error == GRPC_ERROR_NONE) {
send_security_metadata(exec_ctx, elem, batch);
} else {
@@ -271,7 +239,8 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, batch,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg),
GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAUTHENTICATED));
+ GRPC_STATUS_UNAUTHENTICATED),
+ calld->call_combiner);
gpr_free(error_msg);
}
}
@@ -281,9 +250,12 @@ static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
- grpc_channel_security_connector_cancel_check_call_host(
- exec_ctx, chand->security_connector, &calld->closure,
- GRPC_ERROR_REF(error));
+ if (error != GRPC_ERROR_NONE) {
+ grpc_channel_security_connector_cancel_check_call_host(
+ exec_ctx, chand->security_connector, &calld->async_result_closure,
+ GRPC_ERROR_REF(error));
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_check_call_host");
}
static void auth_start_transport_stream_op_batch(
@@ -295,52 +267,19 @@ static void auth_start_transport_stream_op_batch(
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- if (batch->cancel_stream) {
- while (true) {
- // Decode the original cancellation state.
- gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *cancel_error = GRPC_ERROR_NONE;
- grpc_closure *func = NULL;
- decode_cancel_state(original_state, &func, &cancel_error);
- // If we had already set a cancellation error, there's nothing
- // more to do.
- if (cancel_error != GRPC_ERROR_NONE) break;
- // If there's a cancel func, call it.
- // Note that even if the cancel func has been changed by some
- // other thread between when we decoded it and now, it will just
- // be a no-op.
- cancel_error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
- if (func != NULL) {
- GRPC_CLOSURE_SCHED(exec_ctx, func, GRPC_ERROR_REF(cancel_error));
- }
- // Encode the new error into cancellation state.
- if (gpr_atm_full_cas(&calld->cancellation_state, original_state,
- encode_cancel_state_error(cancel_error))) {
- break; // Success.
- }
- // The cas failed, so try again.
- }
- } else {
- /* double checked lock over security context to ensure it's set once */
- if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
- gpr_mu_lock(&calld->security_context_mu);
- if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
- GPR_ASSERT(batch->payload->context != NULL);
- if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
- batch->payload->context[GRPC_CONTEXT_SECURITY].value =
- grpc_client_security_context_create();
- batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
- grpc_client_security_context_destroy;
- }
- grpc_client_security_context *sec_ctx =
- batch->payload->context[GRPC_CONTEXT_SECURITY].value;
- GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
- sec_ctx->auth_context =
- GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
- gpr_atm_rel_store(&calld->security_context_set, 1);
- }
- gpr_mu_unlock(&calld->security_context_mu);
+ if (!batch->cancel_stream) {
+ GPR_ASSERT(batch->payload->context != NULL);
+ if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
+ batch->payload->context[GRPC_CONTEXT_SECURITY].value =
+ grpc_client_security_context_create();
+ batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
+ grpc_client_security_context_destroy;
}
+ grpc_client_security_context *sec_ctx =
+ batch->payload->context[GRPC_CONTEXT_SECURITY].value;
+ GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
+ sec_ctx->auth_context =
+ GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
}
if (batch->send_initial_metadata) {
@@ -365,26 +304,27 @@ static void auth_start_transport_stream_op_batch(
}
}
if (calld->have_host) {
- grpc_error *cancel_error = set_cancel_func(elem, cancel_check_call_host);
- if (cancel_error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- cancel_error);
+ batch->handler_private.extra_arg = elem;
+ GRPC_CLOSURE_INIT(&calld->async_result_closure, on_host_checked, batch,
+ grpc_schedule_on_exec_ctx);
+ char *call_host = grpc_slice_to_c_string(calld->host);
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (grpc_channel_security_connector_check_call_host(
+ exec_ctx, chand->security_connector, call_host,
+ chand->auth_context, &calld->async_result_closure, &error)) {
+ // Synchronous return; invoke on_host_checked() directly.
+ on_host_checked(exec_ctx, batch, error);
+ GRPC_ERROR_UNREF(error);
} else {
- char *call_host = grpc_slice_to_c_string(calld->host);
- batch->handler_private.extra_arg = elem;
- grpc_error *error = GRPC_ERROR_NONE;
- if (grpc_channel_security_connector_check_call_host(
- exec_ctx, chand->security_connector, call_host,
- chand->auth_context,
- GRPC_CLOSURE_INIT(&calld->closure, on_host_checked, batch,
- grpc_schedule_on_exec_ctx),
- &error)) {
- // Synchronous return; invoke on_host_checked() directly.
- on_host_checked(exec_ctx, batch, error);
- GRPC_ERROR_UNREF(error);
- }
- gpr_free(call_host);
+ // Async return; register cancellation closure with call combiner.
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host");
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure,
+ cancel_check_call_host, elem,
+ grpc_schedule_on_exec_ctx));
}
+ gpr_free(call_host);
GPR_TIMER_END("auth_start_transport_stream_op_batch", 0);
return; /* early exit */
}
@@ -400,8 +340,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
- memset(calld, 0, sizeof(*calld));
- gpr_mu_init(&calld->security_context_mu);
+ calld->owning_call = args->call_stack;
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
@@ -426,12 +366,6 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_unref_internal(exec_ctx, calld->method);
}
reset_auth_metadata_context(&calld->auth_md_context);
- gpr_mu_destroy(&calld->security_context_mu);
- gpr_atm cancel_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *cancel_error = GRPC_ERROR_NONE;
- grpc_closure *cancel_func = NULL;
- decode_cancel_state(cancel_state, &cancel_func, &cancel_error);
- GRPC_ERROR_UNREF(cancel_error);
}
/* Constructor for channel_data */
@@ -490,6 +424,5 @@ const grpc_channel_filter grpc_client_auth_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client-auth"};
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index 9bf3f0ca0f..7f523c0883 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -26,7 +26,15 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
+typedef enum {
+ STATE_INIT = 0,
+ STATE_DONE,
+ STATE_CANCELLED,
+} async_state;
+
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
+ grpc_call_stack *owning_call;
grpc_transport_stream_op_batch *recv_initial_metadata_batch;
grpc_closure *original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
@@ -34,6 +42,8 @@ typedef struct call_data {
const grpc_metadata *consumed_md;
size_t num_consumed_md;
grpc_auth_context *auth_context;
+ grpc_closure cancel_closure;
+ gpr_atm state; // async_state
} call_data;
typedef struct channel_data {
@@ -78,54 +88,94 @@ static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
return GRPC_FILTERED_MDELEM(md);
}
-/* called from application code */
-static void on_md_processing_done(
- void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
- const grpc_metadata *response_md, size_t num_response_md,
- grpc_status_code status, const char *error_details) {
- grpc_call_element *elem = user_data;
+static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const grpc_metadata *consumed_md,
+ size_t num_consumed_md,
+ const grpc_metadata *response_md,
+ size_t num_response_md,
+ grpc_error *error) {
call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
/* TODO(jboeuf): Implement support for response_md. */
if (response_md != NULL && num_response_md > 0) {
gpr_log(GPR_INFO,
"response_md in auth metadata processing not supported for now. "
"Ignoring...");
}
- grpc_error *error = GRPC_ERROR_NONE;
- if (status == GRPC_STATUS_OK) {
+ if (error == GRPC_ERROR_NONE) {
calld->consumed_md = consumed_md;
calld->num_consumed_md = num_consumed_md;
error = grpc_metadata_batch_filter(
- &exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
+ exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
remove_consumed_md, elem, "Response metadata filtering error");
- } else {
- if (error_details == NULL) {
- error_details = "Authentication metadata processing failed.";
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, calld->original_recv_initial_metadata_ready,
+ error);
+}
+
+// Called from application code.
+static void on_md_processing_done(
+ void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
+ const grpc_metadata *response_md, size_t num_response_md,
+ grpc_status_code status, const char *error_details) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ // If the call was not cancelled while we were in flight, process the result.
+ if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
+ (gpr_atm)STATE_DONE)) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (status != GRPC_STATUS_OK) {
+ if (error_details == NULL) {
+ error_details = "Authentication metadata processing failed.";
+ }
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
+ GRPC_ERROR_INT_GRPC_STATUS, status);
}
- error =
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
- GRPC_ERROR_INT_GRPC_STATUS, status);
+ on_md_processing_done_inner(&exec_ctx, elem, consumed_md, num_consumed_md,
+ response_md, num_response_md, error);
}
+ // Clean up.
for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
}
grpc_metadata_array_destroy(&calld->md);
- GRPC_CLOSURE_SCHED(&exec_ctx, calld->original_recv_initial_metadata_ready,
- error);
+ GRPC_CALL_STACK_UNREF(&exec_ctx, calld->owning_call, "server_auth_metadata");
grpc_exec_ctx_finish(&exec_ctx);
}
+static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = elem->call_data;
+ // If the result was not already processed, invoke the callback now.
+ if (error != GRPC_ERROR_NONE &&
+ gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
+ (gpr_atm)STATE_CANCELLED)) {
+ on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0,
+ GRPC_ERROR_REF(error));
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_call");
+}
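The STATE_INIT/STATE_DONE/STATE_CANCELLED enum above resolves the race between the application's metadata callback and cancellation: both sides try to CAS the state out of STATE_INIT, and only the winner is allowed to complete the batch. The core of that pattern in isolation, written with C11 atomics rather than gpr_atm:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef enum { ST_INIT = 0, ST_DONE, ST_CANCELLED } state;

    /* Exactly one of the two racers wins the CAS from ST_INIT and is
       therefore the only one permitted to complete the operation. */
    bool try_finish(atomic_int *st) { /* application callback path */
      int expected = ST_INIT;
      return atomic_compare_exchange_strong(st, &expected, ST_DONE);
    }

    bool try_cancel(atomic_int *st) { /* cancellation path */
      int expected = ST_INIT;
      return atomic_compare_exchange_strong(st, &expected, ST_CANCELLED);
    }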
+
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *elem = arg;
+ grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
if (error == GRPC_ERROR_NONE) {
if (chand->creds != NULL && chand->creds->processor.process != NULL) {
+ // We're calling out to the application, so we need to make sure
+ // to drop the call combiner early if we get cancelled.
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_call");
+ GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
+ &calld->cancel_closure);
+ GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata");
calld->md = metadata_batch_to_md_array(
batch->payload->recv_initial_metadata.recv_initial_metadata);
chand->creds->processor.process(
@@ -159,6 +209,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
+ calld->call_combiner = args->call_combiner;
+ calld->owning_call = args->call_stack;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -218,6 +270,5 @@ const grpc_channel_filter grpc_server_auth_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server-auth"};
diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c
index b65009754a..ec93303024 100644
--- a/src/core/lib/support/string.c
+++ b/src/core/lib/support/string.c
@@ -298,3 +298,16 @@ void *gpr_memrchr(const void *s, int c, size_t n) {
}
return NULL;
}
+
+bool gpr_is_true(const char *s) {
+ if (s == NULL) {
+ return false;
+ }
+ static const char *truthy[] = {"yes", "true", "1"};
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {
+ if (0 == gpr_stricmp(s, truthy[i])) {
+ return true;
+ }
+ }
+ return false;
+}
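With gpr_is_true() in place, env-var-driven booleans collapse to a one-liner, which is exactly what the iomgr.c hunk above does for GRPC_ABORT_ON_LEAKS. A tiny usage sketch, assuming linking against gpr and that gpr_getenv() (from src/core/lib/support/env.h) returns a heap copy the caller frees; the GRPC_EXAMPLE_TRACE variable name is hypothetical:

    #include <grpc/support/alloc.h>
    #include "src/core/lib/support/env.h"
    #include "src/core/lib/support/string.h"

    static bool tracing_enabled(void) {
      char *env = gpr_getenv("GRPC_EXAMPLE_TRACE"); /* hypothetical var */
      bool on = gpr_is_true(env); /* NULL-safe: returns false for NULL */
      gpr_free(env);
      return on;
    }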
diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h
index e11df8439d..5a56fa3a0a 100644
--- a/src/core/lib/support/string.h
+++ b/src/core/lib/support/string.h
@@ -19,6 +19,7 @@
#ifndef GRPC_CORE_LIB_SUPPORT_STRING_H
#define GRPC_CORE_LIB_SUPPORT_STRING_H
+#include <stdbool.h>
#include <stddef.h>
#include <grpc/support/port_platform.h>
@@ -106,6 +107,8 @@ int gpr_stricmp(const char *a, const char *b);
void *gpr_memrchr(const void *s, int c, size_t n);
+/** Returns true if s case-insensitively equals "true", "yes" or "1";
+    false otherwise (including when s is NULL). */
+bool gpr_is_true(const char *s);
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index e68c201134..3aa20ffcd7 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -122,6 +122,7 @@ typedef struct batch_control {
bool is_closure;
} notify_tag;
} completion_data;
+ grpc_closure start_batch;
grpc_closure finish_batch;
gpr_refcount steps_to_complete;
@@ -151,6 +152,7 @@ typedef struct {
struct grpc_call {
gpr_refcount ext_ref;
gpr_arena *arena;
+ grpc_call_combiner call_combiner;
grpc_completion_queue *cq;
grpc_polling_entity pollent;
grpc_channel *channel;
@@ -184,6 +186,11 @@ struct grpc_call {
Element 0 is initial metadata, element 1 is trailing metadata. */
grpc_metadata_array *buffered_metadata[2];
+ grpc_metadata compression_md;
+
+ // A char* indicating the peer name.
+ gpr_atm peer_string;
+
/* Packed received call statuses from various sources */
gpr_atm status[STATUS_SOURCE_COUNT];
@@ -262,8 +269,9 @@ grpc_tracer_flag grpc_compression_trace =
#define CALL_FROM_TOP_ELEM(top_elem) \
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
-static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *op);
+static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op_batch *op,
+ grpc_closure *start_batch_closure);
static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_status_code status,
const char *description);
@@ -328,6 +336,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
call->arena = arena;
+ grpc_call_combiner_init(&call->call_combiner);
*out_call = call;
call->channel = args->channel;
call->cq = args->cq;
@@ -436,7 +445,8 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
.path = path,
.start_time = call->start_time,
.deadline = send_deadline,
- .arena = call->arena};
+ .arena = call->arena,
+ .call_combiner = &call->call_combiner};
add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1,
destroy_call, call, &call_args));
if (error != GRPC_ERROR_NONE) {
@@ -503,6 +513,8 @@ static void release_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
grpc_call *c = call;
grpc_channel *channel = c->channel;
+ grpc_call_combiner_destroy(&c->call_combiner);
+ gpr_free((char *)c->peer_string);
grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
}
@@ -586,6 +598,12 @@ void grpc_call_unref(grpc_call *c) {
if (cancel) {
cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE,
GRPC_ERROR_CANCELLED);
+ } else {
+ // Unset the call combiner cancellation closure. This has the
+ // effect of scheduling the previously set cancellation closure, if
+ // any, so that it can release any internal references it may be
+ // holding to the call stack.
+ grpc_call_combiner_set_notify_on_cancel(&exec_ctx, &c->call_combiner, NULL);
}
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
grpc_exec_ctx_finish(&exec_ctx);
@@ -602,30 +620,37 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
return GRPC_CALL_OK;
}
-static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *op) {
- grpc_call_element *elem;
-
- GPR_TIMER_BEGIN("execute_op", 0);
- elem = CALL_ELEM_FROM_CALL(call, 0);
- elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
- GPR_TIMER_END("execute_op", 0);
+// This is called via the call combiner to start sending a batch down
+// the filter stack.
+static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *ignored) {
+ grpc_transport_stream_op_batch *batch = arg;
+ grpc_call *call = batch->handler_private.extra_arg;
+ GPR_TIMER_BEGIN("execute_batch", 0);
+ grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+ GPR_TIMER_END("execute_batch", 0);
+}
+
+// start_batch_closure points to a caller-allocated closure to be used
+// for entering the call combiner.
+static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op_batch *batch,
+ grpc_closure *start_batch_closure) {
+ batch->handler_private.extra_arg = call;
+ GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner, start_batch_closure,
+ GRPC_ERROR_NONE, "executing batch");
}
char *grpc_call_get_peer(grpc_call *call) {
- grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- char *result;
- GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
- result = elem->filter->get_peer(&exec_ctx, elem);
- if (result == NULL) {
- result = grpc_channel_get_target(call->channel);
- }
- if (result == NULL) {
- result = gpr_strdup("unknown");
- }
- grpc_exec_ctx_finish(&exec_ctx);
- return result;
+ char *peer_string = (char *)gpr_atm_acq_load(&call->peer_string);
+ if (peer_string != NULL) return gpr_strdup(peer_string);
+ peer_string = grpc_channel_get_target(call->channel);
+ if (peer_string != NULL) return peer_string;
+ return gpr_strdup("unknown");
}
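grpc_call_get_peer() now reads a pointer published once via the peer_string atomic: a release-store on the writer side pairs with the acquire-load here, so the string's bytes are guaranteed visible before the pointer is. A minimal sketch of that publish/read pair with C11 atomics instead of gpr_atm (publish_peer/get_peer are illustrative names, not the transport's actual code path):

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <string.h>

    static _Atomic(char *) g_peer; /* NULL until the peer is known */

    /* Writer (transport side): publish a heap copy exactly once. */
    void publish_peer(const char *name) {
      char *copy = strdup(name);
      char *expected = NULL;
      /* release: the copy's contents happen-before any acquire load */
      if (!atomic_compare_exchange_strong_explicit(
              &g_peer, &expected, copy, memory_order_release,
              memory_order_relaxed)) {
        free(copy); /* someone else already published */
      }
    }

    /* Reader (API surface): never blocks, falls back when unset. */
    char *get_peer(void) {
      char *p = atomic_load_explicit(&g_peer, memory_order_acquire);
      return strdup(p != NULL ? p : "unknown");
    }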
grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
@@ -652,20 +677,41 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
return GRPC_CALL_OK;
}
-static void done_termination(grpc_exec_ctx *exec_ctx, void *call,
+typedef struct {
+ grpc_call *call;
+ grpc_closure start_batch;
+ grpc_closure finish_batch;
+} cancel_state;
+
+// The on_complete callback used when sending a cancel_stream batch down
+// the filter stack. Yields the call combiner when the batch is done.
+static void done_termination(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "termination");
+ cancel_state *state = (cancel_state *)arg;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner,
+ "on_complete for cancel_stream op");
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination");
+ gpr_free(state);
}
static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_error *error) {
GRPC_CALL_INTERNAL_REF(c, "termination");
+ // Inform the call combiner of the cancellation, so that it can cancel
+ // any in-flight asynchronous actions that may be holding the call
+ // combiner. This ensures that the cancel_stream batch can be sent
+ // down the filter stack in a timely manner.
+ grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error));
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
- grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(
- GRPC_CLOSURE_CREATE(done_termination, c, grpc_schedule_on_exec_ctx));
+ cancel_state *state = (cancel_state *)gpr_malloc(sizeof(*state));
+ state->call = c;
+ GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state,
+ grpc_schedule_on_exec_ctx);
+ grpc_transport_stream_op_batch *op =
+ grpc_make_transport_stream_op(&state->finish_batch);
op->cancel_stream = true;
op->payload->cancel_stream.cancel_error = error;
- execute_op(exec_ctx, c, op);
+ execute_batch(exec_ctx, c, op, &state->start_batch);
}
static grpc_error *error_from_status(grpc_status_code status,
@@ -1431,6 +1477,18 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
}
}
+// The recv_message_ready callback used when sending a batch containing
+// a recv_message op down the filter stack. Yields the call combiner
+// before processing the received message.
+static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *bctlp,
+ grpc_error *error) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready");
+ receiving_stream_ready(exec_ctx, bctlp, error);
+}
+
static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
@@ -1537,6 +1595,9 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner,
+ "recv_initial_metadata_ready");
+
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
@@ -1590,7 +1651,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
batch_control *bctl = bctlp;
-
+ grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete");
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
finish_batch_step(exec_ctx, bctl);
}
@@ -1610,9 +1672,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
int num_completion_callbacks_needed = 1;
grpc_call_error error = GRPC_CALL_OK;
- // sent_initial_metadata guards against variable reuse.
- grpc_metadata compression_md;
-
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
@@ -1660,7 +1719,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error;
}
/* process compression level */
- memset(&compression_md, 0, sizeof(compression_md));
+ memset(&call->compression_md, 0, sizeof(call->compression_md));
size_t additional_metadata_count = 0;
grpc_compression_level effective_compression_level =
GRPC_COMPRESS_LEVEL_NONE;
@@ -1698,9 +1757,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
const grpc_stream_compression_algorithm calgo =
stream_compression_algorithm_for_level_locked(
call, effective_stream_compression_level);
- compression_md.key =
+ call->compression_md.key =
GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST;
- compression_md.value =
+ call->compression_md.value =
grpc_stream_compression_algorithm_slice(calgo);
} else {
const grpc_compression_algorithm calgo =
@@ -1708,8 +1767,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call, effective_compression_level);
/* the following will be picked up by the compress filter and used
* as the call's compression algorithm. */
- compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
- compression_md.value = grpc_compression_algorithm_slice(calgo);
+ call->compression_md.key =
+ GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
+ call->compression_md.value =
+ grpc_compression_algorithm_slice(calgo);
additional_metadata_count++;
}
}
@@ -1724,7 +1785,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (!prepare_application_metadata(
exec_ctx, call, (int)op->data.send_initial_metadata.count,
op->data.send_initial_metadata.metadata, 0, call->is_client,
- &compression_md, (int)additional_metadata_count)) {
+ &call->compression_md, (int)additional_metadata_count)) {
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
@@ -1736,6 +1797,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->send_initial_metadata.send_initial_metadata_flags =
op->flags;
+ if (call->is_client) {
+ stream_op_payload->send_initial_metadata.peer_string =
+ &call->peer_string;
+ }
break;
case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)) {
@@ -1868,6 +1933,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->recv_initial_metadata.recv_initial_metadata_ready =
&call->receiving_initial_metadata_ready;
+ if (!call->is_client) {
+ stream_op_payload->recv_initial_metadata.peer_string =
+ &call->peer_string;
+ }
num_completion_callbacks_needed++;
break;
case GRPC_OP_RECV_MESSAGE:
@@ -1884,8 +1953,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->recv_message = true;
call->receiving_buffer = op->data.recv_message.recv_message;
stream_op_payload->recv_message.recv_message = &call->receiving_stream;
- GRPC_CLOSURE_INIT(&call->receiving_stream_ready, receiving_stream_ready,
- bctl, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&call->receiving_stream_ready,
+ receiving_stream_ready_in_call_combiner, bctl,
+ grpc_schedule_on_exec_ctx);
stream_op_payload->recv_message.recv_message_ready =
&call->receiving_stream_ready;
num_completion_callbacks_needed++;
@@ -1955,7 +2025,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->on_complete = &bctl->finish_batch;
gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
- execute_op(exec_ctx, call, stream_op);
+ execute_batch(exec_ctx, call, stream_op, &bctl->start_batch);
done:
GPR_TIMER_END("grpc_call_start_batch", 0);
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 185bfccb77..d537637cbb 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -19,6 +19,10 @@
#ifndef GRPC_CORE_LIB_SURFACE_CALL_H
#define GRPC_CORE_LIB_SURFACE_CALL_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/context.h"
#include "src/core/lib/surface/api_trace.h"
@@ -26,10 +30,6 @@
#include <grpc/grpc.h>
#include <grpc/impl/codegen/compression_types.h>
-#ifdef __cplusplus
-extern "C" {
-#endif
-
typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *user_data);
@@ -89,7 +89,7 @@ grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
/* Given the top call_element, get the call object. */
grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element);
-void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity,
grpc_call *call, const grpc_op *ops, size_t nops,
void *tag);
diff --git a/src/core/lib/surface/call_log_batch.c b/src/core/lib/surface/call_log_batch.c
index 4443aba58a..4a1c265817 100644
--- a/src/core/lib/surface/call_log_batch.c
+++ b/src/core/lib/surface/call_log_batch.c
@@ -103,7 +103,7 @@ char *grpc_op_string(const grpc_op *op) {
return out;
}
-void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity,
grpc_call *call, const grpc_op *ops, size_t nops,
void *tag) {
char *tmp;
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index 898476daee..280315036f 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -31,6 +31,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/http/parser.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
@@ -129,6 +130,7 @@ void grpc_init(void) {
grpc_register_tracer(&grpc_trace_channel_stack_builder);
grpc_register_tracer(&grpc_http1_trace);
grpc_register_tracer(&grpc_cq_pluck_trace); // default on
+ grpc_register_tracer(&grpc_call_combiner_trace);
grpc_register_tracer(&grpc_combiner_trace);
grpc_register_tracer(&grpc_server_channel_trace);
grpc_register_tracer(&grpc_bdp_estimator_trace);
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index a0791080a9..6286f9159d 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -40,6 +40,7 @@ namespace grpc_core {
namespace {
struct CallData {
+ grpc_call_combiner *call_combiner;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
grpc_core::atomic<bool> filled_metadata;
@@ -52,14 +53,14 @@ struct ChannelData {
static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *mdb) {
- CallData *calld = static_cast<CallData *>(elem->call_data);
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
bool expected = false;
if (!calld->filled_metadata.compare_exchange_strong(
expected, true, grpc_core::memory_order_relaxed,
grpc_core::memory_order_relaxed)) {
return;
}
- ChannelData *chand = static_cast<ChannelData *>(elem->channel_data);
+ ChannelData *chand = reinterpret_cast<ChannelData *>(elem->channel_data);
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_slices(
@@ -79,6 +80,7 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void lame_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
if (op->recv_initial_metadata) {
fill_metadata(exec_ctx, elem,
op->payload->recv_initial_metadata.recv_initial_metadata);
@@ -87,12 +89,8 @@ static void lame_start_transport_stream_op_batch(
op->payload->recv_trailing_metadata.recv_trailing_metadata);
}
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, op,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
-}
-
-static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return NULL;
+ exec_ctx, op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"),
+ calld->call_combiner);
}
static void lame_get_channel_info(grpc_exec_ctx *exec_ctx,
@@ -122,6 +120,8 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
@@ -156,7 +156,6 @@ extern "C" const grpc_channel_filter grpc_lame_filter = {
sizeof(grpc_core::ChannelData),
grpc_core::init_channel_elem,
grpc_core::destroy_channel_elem,
- grpc_core::lame_get_peer,
grpc_core::lame_get_channel_info,
"lame-client",
};
@@ -176,7 +175,7 @@ grpc_channel *grpc_lame_client_channel_create(const char *target,
"error_message=%s)",
3, (target, (int)error_code, error_message));
GPR_ASSERT(elem->filter == &grpc_lame_filter);
- auto chand = static_cast<grpc_core::ChannelData *>(elem->channel_data);
+ auto chand = reinterpret_cast<grpc_core::ChannelData *>(elem->channel_data);
chand->error_code = error_code;
chand->error_message = error_message;
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 786d754b49..df21467545 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -792,7 +792,6 @@ static void server_mutate_op(grpc_call_element *elem,
static void server_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
@@ -965,7 +964,6 @@ const grpc_channel_filter grpc_server_top_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server",
};
diff --git a/src/core/lib/transport/metadata_batch.c b/src/core/lib/transport/metadata_batch.c
index 8f24b8527c..a077052561 100644
--- a/src/core/lib/transport/metadata_batch.c
+++ b/src/core/lib/transport/metadata_batch.c
@@ -105,6 +105,7 @@ static grpc_error *maybe_link_callout(grpc_metadata_batch *batch,
return GRPC_ERROR_NONE;
}
if (batch->idx.array[idx] == NULL) {
+ if (grpc_static_callout_is_default[idx]) ++batch->list.default_count;
batch->idx.array[idx] = storage;
return GRPC_ERROR_NONE;
}
@@ -120,6 +121,7 @@ static void maybe_unlink_callout(grpc_metadata_batch *batch,
if (idx == GRPC_BATCH_CALLOUTS_COUNT) {
return;
}
+ if (grpc_static_callout_is_default[idx]) --batch->list.default_count;
GPR_ASSERT(batch->idx.array[idx] != NULL);
batch->idx.array[idx] = NULL;
}
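The default_count bookkeeping above pairs with the grpc_static_callout_is_default table added later in this diff: whenever a linked element occupies a callout slot whose key is marked default, the counter moves with it. One payoff, shown as a hedged sketch (the helper name is hypothetical, not part of this change), is an O(1) test for whether a batch consists solely of such default keys:

    #include <stdbool.h>
    #include "src/core/lib/transport/metadata_batch.h"

    /* Hypothetical helper: true iff every element in the batch landed in a
       callout slot flagged in grpc_static_callout_is_default. Both counters
       are maintained by maybe_link_callout()/maybe_unlink_callout() above. */
    static bool batch_is_all_default(const grpc_metadata_batch *batch) {
      return batch->list.count == batch->list.default_count;
    }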
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 1b11a3e252..57d298c75c 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -41,6 +41,7 @@ typedef struct grpc_linked_mdelem {
typedef struct grpc_mdelem_list {
size_t count;
+ size_t default_count; // Number of default keys.
grpc_linked_mdelem *head;
grpc_linked_mdelem *tail;
} grpc_mdelem_list;
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 28f05d5c44..b20d94aeac 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -823,6 +823,31 @@ grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
{.refcount = &grpc_static_metadata_refcounts[97],
.data.refcounted = {g_bytes + 1040, 13}}},
};
+bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {
+ true, // :path
+ true, // :method
+ true, // :status
+ true, // :authority
+ true, // :scheme
+ true, // te
+ true, // grpc-message
+ true, // grpc-status
+ true, // grpc-payload-bin
+ true, // grpc-encoding
+ true, // grpc-accept-encoding
+ true, // grpc-server-stats-bin
+ true, // grpc-tags-bin
+ true, // grpc-trace-bin
+ true, // content-type
+ true, // content-encoding
+ true, // accept-encoding
+ true, // grpc-internal-encoding-request
+ true, // grpc-internal-stream-encoding-request
+ true, // user-agent
+ true, // host
+ true, // lb-token
+};
+
const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78,
79, 80, 81, 82};
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 93ab90dff8..f03a9d23b1 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -571,6 +571,8 @@ typedef union {
GRPC_BATCH_CALLOUTS_COUNT) \
: GRPC_BATCH_CALLOUTS_COUNT)
+extern bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT];
+
extern const uint8_t grpc_static_accept_encoding_metadata[8];
#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) \
(GRPC_MAKE_MDELEM( \
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 6c61f4b8d9..650b0559aa 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -197,11 +197,6 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
then_schedule_closure);
}
-char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport) {
- return transport->vtable->get_peer(exec_ctx, transport);
-}
-
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport) {
return transport->vtable->get_endpoint(exec_ctx, transport);
@@ -214,24 +209,24 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
// is a function that must always unref cancel_error
// though it lives in lib, it handles transport stream ops sure
// it's grpc_transport_stream_op_batch_finish_with_failure
-
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch,
- grpc_error *error) {
+ grpc_error *error, grpc_call_combiner *call_combiner) {
if (batch->send_message) {
grpc_byte_stream_destroy(exec_ctx,
batch->payload->send_message.send_message);
}
if (batch->recv_message) {
- GRPC_CLOSURE_SCHED(exec_ctx,
- batch->payload->recv_message.recv_message_ready,
- GRPC_ERROR_REF(error));
+ GRPC_CALL_COMBINER_START(exec_ctx, call_combiner,
+ batch->payload->recv_message.recv_message_ready,
+ GRPC_ERROR_REF(error),
+ "failing recv_message_ready");
}
if (batch->recv_initial_metadata) {
- GRPC_CLOSURE_SCHED(
- exec_ctx,
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, call_combiner,
batch->payload->recv_initial_metadata.recv_initial_metadata_ready,
- GRPC_ERROR_REF(error));
+ GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready");
}
GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error);
if (batch->cancel_stream) {
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 099138ea14..fbf5dcb8b5 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -22,6 +22,7 @@
#include <stddef.h>
#include "src/core/lib/channel/context.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset.h"
@@ -152,6 +153,9 @@ struct grpc_transport_stream_op_batch_payload {
/** Iff send_initial_metadata != NULL, flags associated with
send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
uint32_t send_initial_metadata_flags;
+ // If non-NULL, will be set by the transport to the peer string
+ // (a char*, which the caller takes ownership of).
+ gpr_atm *peer_string;
} send_initial_metadata;
struct {
@@ -176,6 +180,9 @@ struct grpc_transport_stream_op_batch_payload {
// immediately available. This may be a signal that we received a
// Trailers-Only response.
bool *trailing_metadata_available;
+ // If non-NULL, will be set by the transport to the peer string
+ // (a char*, which the caller takes ownership of).
+ gpr_atm *peer_string;
} recv_initial_metadata;
struct {
@@ -293,7 +300,7 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
- grpc_error *error);
+ grpc_error *error, grpc_call_combiner *call_combiner);
char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op);
char *grpc_transport_op_string(grpc_transport_op *op);
@@ -332,10 +339,6 @@ void grpc_transport_close(grpc_transport *transport);
/* Destroy the transport */
void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
-/* Get the transports peer */
-char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport);
-
/* Get the endpoint used by \a transport */
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport);
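Together with the get_peer removals below, these payload fields change how the peer string flows: instead of a synchronous vtable query, the caller plants a gpr_atm slot in the batch payload and the transport publishes into it. A hedged sketch of the consumer side (the helper is illustrative; the surface layer's call object owns the real slot):

    #include <grpc/support/atm.h>

    /* 'peer_slot' is the gpr_atm wired up through
       send_initial_metadata.peer_string or recv_initial_metadata.peer_string. */
    static char *take_peer_string(gpr_atm *peer_slot) {
      char *peer = (char *)gpr_atm_acq_load(peer_slot);
      /* Per the payload comments above, a non-NULL result is owned by the
         caller, which must eventually gpr_free() it; NULL means the transport
         has not published a peer string yet. */
      return peer;
    }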
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index fc772c6dd1..bbae69c223 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -59,9 +59,6 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_destroy */
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
- /* implementation of grpc_transport_get_peer */
- char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
-
/* implementation of grpc_transport_get_endpoint */
grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
} grpc_transport_vtable;
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index 7b18229ba6..409a6c4103 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -112,6 +112,13 @@ char *grpc_transport_stream_op_batch_string(
gpr_strvec_add(&b, tmp);
}
+ if (op->collect_stats) {
+ gpr_strvec_add(&b, gpr_strdup(" "));
+ gpr_asprintf(&tmp, "COLLECT_STATS:%p",
+ op->payload->collect_stats.collect_stats);
+ gpr_strvec_add(&b, tmp);
+ }
+
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
diff --git a/src/core/tsi/test_creds/BUILD b/src/core/tsi/test_creds/BUILD
index 4b0786d7b8..732f6d91b2 100644
--- a/src/core/tsi/test_creds/BUILD
+++ b/src/core/tsi/test_creds/BUILD
@@ -15,7 +15,15 @@
licenses(["notice"]) # Apache v2
exports_files([
- "ca.pem",
- "server1.key",
- "server1.pem",
+ "ca.pem",
+ "server1.key",
+ "server1.pem",
+ "server0.key",
+ "server0.pem",
+ "client.key",
+ "client.pem",
+ "badserver.key",
+ "badserver.pem",
+ "badclient.key",
+ "badclient.pem",
])
diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc
index f2d9bb07c9..19a25c838f 100644
--- a/src/cpp/client/channel_cc.cc
+++ b/src/cpp/client/channel_cc.cc
@@ -18,7 +18,10 @@
#include <grpc++/channel.h>
+#include <chrono>
+#include <condition_variable>
#include <memory>
+#include <mutex>
#include <grpc++/client_context.h>
#include <grpc++/completion_queue.h>
@@ -35,17 +38,197 @@
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/env.h"
+#include "src/core/lib/support/string.h"
namespace grpc {
+namespace {
+int kConnectivityCheckIntervalMsec = 500;
+void WatchStateChange(void* arg);
+
+class TagSaver final : public CompletionQueueTag {
+ public:
+ explicit TagSaver(void* tag) : tag_(tag) {}
+ ~TagSaver() override {}
+ bool FinalizeResult(void** tag, bool* status) override {
+ *tag = tag_;
+ delete this;
+ return true;
+ }
+
+ private:
+ void* tag_;
+};
+
+// Constantly watches channel connectivity status to reconnect a transiently
+// disconnected channel. This is a temporary work-around before we have retry
+// support.
+class ChannelConnectivityWatcher : private GrpcLibraryCodegen {
+ public:
+ static void StartWatching(grpc_channel* channel) {
+ if (!IsDisabled()) {
+ std::unique_lock<std::mutex> lock(g_watcher_mu_);
+ if (g_watcher_ == nullptr) {
+ g_watcher_ = new ChannelConnectivityWatcher();
+ }
+ g_watcher_->StartWatchingLocked(channel);
+ }
+ }
+
+ static void StopWatching() {
+ if (!IsDisabled()) {
+ std::unique_lock<std::mutex> lock(g_watcher_mu_);
+ if (g_watcher_->StopWatchingLocked()) {
+ delete g_watcher_;
+ g_watcher_ = nullptr;
+ }
+ }
+ }
+
+ private:
+ ChannelConnectivityWatcher() : channel_count_(0), shutdown_(false) {
+ gpr_ref_init(&ref_, 0);
+ gpr_thd_options options = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&options);
+ gpr_thd_new(&thd_id_, &WatchStateChange, this, &options);
+ }
+
+ static bool IsDisabled() {
+ char* env = gpr_getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER");
+ bool disabled = gpr_is_true(env);
+ gpr_free(env);
+ return disabled;
+ }
+
+ void WatchStateChangeImpl() {
+ bool ok = false;
+ void* tag = NULL;
+ CompletionQueue::NextStatus status = CompletionQueue::GOT_EVENT;
+ while (true) {
+ {
+ std::unique_lock<std::mutex> lock(shutdown_mu_);
+ if (shutdown_) {
+ // Drain cq_ if the watcher is shutting down
+ status = cq_.AsyncNext(&tag, &ok, gpr_inf_future(GPR_CLOCK_REALTIME));
+ } else {
+ status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME));
+ // Make sure we've seen 2 TIMEOUTs before going to sleep
+ if (status == CompletionQueue::TIMEOUT) {
+ status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME));
+ if (status == CompletionQueue::TIMEOUT) {
+ shutdown_cv_.wait_for(lock, std::chrono::milliseconds(
+ kConnectivityCheckIntervalMsec));
+ continue;
+ }
+ }
+ }
+ }
+ ChannelState* channel_state = static_cast<ChannelState*>(tag);
+ channel_state->state =
+ grpc_channel_check_connectivity_state(channel_state->channel, false);
+ if (channel_state->state == GRPC_CHANNEL_SHUTDOWN) {
+ void* shutdown_tag = NULL;
+ channel_state->shutdown_cq.Next(&shutdown_tag, &ok);
+ delete channel_state;
+ if (gpr_unref(&ref_)) {
+ break;
+ }
+ } else {
+ TagSaver* tag_saver = new TagSaver(channel_state);
+ grpc_channel_watch_connectivity_state(
+ channel_state->channel, channel_state->state,
+ gpr_inf_future(GPR_CLOCK_REALTIME), cq_.cq(), tag_saver);
+ }
+ }
+ }
+
+ void StartWatchingLocked(grpc_channel* channel) {
+ if (thd_id_ != 0) {
+ gpr_ref(&ref_);
+ ++channel_count_;
+ ChannelState* channel_state = new ChannelState(channel);
+ // The first grpc_channel_watch_connectivity_state() is not used to
+ // monitor channel state changes, but to hold a reference to the C
+ // channel so that WatchStateChangeImpl() can observe state ==
+ // GRPC_CHANNEL_SHUTDOWN before the channel gets destroyed.
+ grpc_channel_watch_connectivity_state(
+ channel_state->channel, channel_state->state,
+ gpr_inf_future(GPR_CLOCK_REALTIME), channel_state->shutdown_cq.cq(),
+ new TagSaver(nullptr));
+ grpc_channel_watch_connectivity_state(
+ channel_state->channel, channel_state->state,
+ gpr_inf_future(GPR_CLOCK_REALTIME), cq_.cq(),
+ new TagSaver(channel_state));
+ }
+ }
+
+ bool StopWatchingLocked() {
+ if (--channel_count_ == 0) {
+ {
+ std::unique_lock<std::mutex> lock(shutdown_mu_);
+ shutdown_ = true;
+ shutdown_cv_.notify_one();
+ }
+ gpr_thd_join(thd_id_);
+ return true;
+ }
+ return false;
+ }
+
+ friend void WatchStateChange(void* arg);
+ struct ChannelState {
+ explicit ChannelState(grpc_channel* channel)
+ : channel(channel), state(GRPC_CHANNEL_IDLE) {}
+ grpc_channel* channel;
+ grpc_connectivity_state state;
+ CompletionQueue shutdown_cq;
+ };
+ gpr_thd_id thd_id_;
+ CompletionQueue cq_;
+ gpr_refcount ref_;
+ int channel_count_;
+
+ std::mutex shutdown_mu_;
+ std::condition_variable shutdown_cv_; // protected by shutdown_mu_
+ bool shutdown_; // protected by shutdown_mu_
+
+ static std::mutex g_watcher_mu_;
+ static ChannelConnectivityWatcher* g_watcher_; // protected by g_watcher_mu_
+};
+
+std::mutex ChannelConnectivityWatcher::g_watcher_mu_;
+ChannelConnectivityWatcher* ChannelConnectivityWatcher::g_watcher_ = nullptr;
+
+void WatchStateChange(void* arg) {
+ ChannelConnectivityWatcher* watcher =
+ static_cast<ChannelConnectivityWatcher*>(arg);
+ watcher->WatchStateChangeImpl();
+}
+} // namespace
+
static internal::GrpcLibraryInitializer g_gli_initializer;
Channel::Channel(const grpc::string& host, grpc_channel* channel)
: host_(host), c_channel_(channel) {
g_gli_initializer.summon();
+ if (grpc_channel_support_connectivity_watcher(channel)) {
+ ChannelConnectivityWatcher::StartWatching(channel);
+ }
}
-Channel::~Channel() { grpc_channel_destroy(c_channel_); }
+Channel::~Channel() {
+ const bool stop_watching =
+ grpc_channel_support_connectivity_watcher(c_channel_);
+ grpc_channel_destroy(c_channel_);
+ if (stop_watching) {
+ ChannelConnectivityWatcher::StopWatching();
+ }
+}
namespace {
@@ -130,23 +313,6 @@ grpc_connectivity_state Channel::GetState(bool try_to_connect) {
return grpc_channel_check_connectivity_state(c_channel_, try_to_connect);
}
-namespace {
-class TagSaver final : public CompletionQueueTag {
- public:
- explicit TagSaver(void* tag) : tag_(tag) {}
- ~TagSaver() override {}
- bool FinalizeResult(void** tag, bool* status) override {
- *tag = tag_;
- delete this;
- return true;
- }
-
- private:
- void* tag_;
-};
-
-} // namespace
-
void Channel::NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline,
CompletionQueue* cq, void* tag) {
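As IsDisabled() above shows, the whole watcher is a process-wide work-around guarded by an environment variable. A one-line opt-out before any channels are created, using the portable setter from src/core/lib/support/env.h (any value accepted by gpr_is_true works):

    #include "src/core/lib/support/env.h"

    /* Turn the connectivity-watcher work-around off for this process. */
    gpr_setenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER", "true");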
diff --git a/src/cpp/common/channel_filter.cc b/src/cpp/common/channel_filter.cc
index 448d9fbcf2..ea44cff832 100644
--- a/src/cpp/common/channel_filter.cc
+++ b/src/cpp/common/channel_filter.cc
@@ -68,10 +68,6 @@ void CallData::SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx,
grpc_call_stack_ignore_set_pollset_or_pollset_set(exec_ctx, elem, pollent);
}
-char *CallData::GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return grpc_call_next_get_peer(exec_ctx, elem);
-}
-
// internal code used by RegisterChannelFilter()
namespace internal {
diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h
index c3d187d7e1..c1aeb3f724 100644
--- a/src/cpp/common/channel_filter.h
+++ b/src/cpp/common/channel_filter.h
@@ -268,9 +268,6 @@ class CallData {
virtual void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent);
-
- /// Gets the peer name.
- virtual char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
};
namespace internal {
@@ -349,11 +346,6 @@ class ChannelFilter final {
CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
call_data->SetPollsetOrPollsetSet(exec_ctx, elem, pollent);
}
-
- static char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
- return call_data->GetPeer(exec_ctx, elem);
- }
};
struct FilterRecord {
@@ -396,8 +388,7 @@ void RegisterChannelFilter(
FilterType::call_data_size, FilterType::InitCallElement,
FilterType::SetPollsetOrPollsetSet, FilterType::DestroyCallElement,
FilterType::channel_data_size, FilterType::InitChannelElement,
- FilterType::DestroyChannelElement, FilterType::GetPeer,
- FilterType::GetChannelInfo, name}};
+ FilterType::DestroyChannelElement, FilterType::GetChannelInfo, name}};
internal::channel_filters->push_back(filter_record);
}
diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc
index 2483300cb1..6bd3ecda32 100644
--- a/src/cpp/server/server_cc.cc
+++ b/src/cpp/server/server_cc.cc
@@ -17,6 +17,7 @@
#include <grpc++/server.h>
+#include <cstdlib>
#include <sstream>
#include <utility>
@@ -38,6 +39,7 @@
#include "src/core/ext/transport/inproc/inproc_transport.h"
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/surface/call.h"
#include "src/cpp/client/create_channel_internal.h"
#include "src/cpp/server/health/default_health_check_service.h"
#include "src/cpp/thread_manager/thread_manager.h"
@@ -607,7 +609,12 @@ void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
grpc_op cops[MAX_OPS];
ops->FillOps(call->call(), cops, &nops);
auto result = grpc_call_start_batch(call->call(), cops, nops, ops, nullptr);
- GPR_ASSERT(GRPC_CALL_OK == result);
+ if (result != GRPC_CALL_OK) {
+ gpr_log(GPR_ERROR, "Fatal: grpc_call_start_batch returned %d", result);
+ grpc_call_log_batch(__FILE__, __LINE__, GPR_LOG_SEVERITY_ERROR,
+ call->call(), cops, nops, ops);
+ abort();
+ }
}
ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
index 3a282b0526..7d073c9a84 100644
--- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
+++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
@@ -101,7 +101,7 @@ Pod::Spec.new do |s|
s.preserve_paths = plugin
# Restrict the protoc version to the one supported by this plugin.
- s.dependency '!ProtoCompiler', '3.3.0'
+ s.dependency '!ProtoCompiler', '3.4.0'
# For the Protobuf dependency not to complain:
s.ios.deployment_target = '7.0'
s.osx.deployment_target = '10.9'
diff --git a/src/objective-c/!ProtoCompiler.podspec b/src/objective-c/!ProtoCompiler.podspec
index c3f95f9f42..25c437911f 100644
--- a/src/objective-c/!ProtoCompiler.podspec
+++ b/src/objective-c/!ProtoCompiler.podspec
@@ -36,7 +36,7 @@ Pod::Spec.new do |s|
# exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
# before them.
s.name = '!ProtoCompiler'
- v = '3.3.0'
+ v = '3.4.0'
s.version = v
s.summary = 'The Protobuf Compiler (protoc) generates Objective-C files from .proto files'
s.description = <<-DESC
diff --git a/src/proto/grpc/core/BUILD b/src/proto/grpc/core/BUILD
new file mode 100644
index 0000000000..46de9fae18
--- /dev/null
+++ b/src/proto/grpc/core/BUILD
@@ -0,0 +1,24 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+licenses(["notice"]) # Apache v2
+
+load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package")
+
+grpc_package(name = "core", visibility = "public")
+
+grpc_proto_library(
+ name = "stats_proto",
+ srcs = ["stats.proto"],
+)
diff --git a/src/proto/grpc/testing/BUILD b/src/proto/grpc/testing/BUILD
index 07e08117f0..36d3782262 100644
--- a/src/proto/grpc/testing/BUILD
+++ b/src/proto/grpc/testing/BUILD
@@ -84,6 +84,9 @@ grpc_proto_library(
name = "stats_proto",
srcs = ["stats.proto"],
has_services = False,
+ deps = [
+ "//src/proto/grpc/core:stats_proto",
+ ]
)
grpc_proto_library(
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 1cbf345ab6..2071827b64 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -77,6 +77,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c',
+ 'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c',
@@ -85,8 +86,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll1_linux.c',
- 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c',
- 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c',
'src/core/lib/iomgr/ev_epollex_linux.c',
'src/core/lib/iomgr/ev_epollsig_linux.c',
'src/core/lib/iomgr/ev_poll_posix.c',
diff --git a/src/python/grpcio_testing/grpc_testing/__init__.py b/src/python/grpcio_testing/grpc_testing/__init__.py
index 14e25f09e2..917e11808e 100644
--- a/src/python/grpcio_testing/grpc_testing/__init__.py
+++ b/src/python/grpcio_testing/grpc_testing/__init__.py
@@ -293,6 +293,278 @@ class Channel(six.with_metaclass(abc.ABCMeta), grpc.Channel):
raise NotImplementedError()
+class UnaryUnaryServerRpc(six.with_metaclass(abc.ABCMeta)):
+ """Fixture for a unary-unary RPC serviced by a system under test.
+
+ Enables users to "play client" for the RPC.
+ """
+
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata emitted by the system under test.
+
+ This method blocks until the system under test has added initial
+ metadata to the RPC (or has provided one or more response messages or
+ has terminated the RPC, either of which will cause gRPC Python to
+ synthesize initial metadata for the RPC).
+
+ Returns:
+ The initial metadata for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the RPC."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def termination(self):
+ """Blocks until the system under test has terminated the RPC.
+
+ Returns:
+ A (response, trailing_metadata, code, details) sequence with the RPC's
+ response, trailing metadata, code, and details.
+ """
+ raise NotImplementedError()
+
+
+class UnaryStreamServerRpc(six.with_metaclass(abc.ABCMeta)):
+ """Fixture for a unary-stream RPC serviced by a system under test.
+
+ Enables users to "play client" for the RPC.
+ """
+
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata emitted by the system under test.
+
+ This method blocks until the system under test has added initial
+ metadata to the RPC (or has provided one or more response messages or
+ has terminated the RPC, either of which will cause gRPC Python to
+ synthesize initial metadata for the RPC).
+
+ Returns:
+ The initial metadata for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def take_response(self):
+ """Draws one of the responses added to the RPC by the system under test.
+
+ Successive calls to this method return responses in the same order in
+ which the system under test added them to the RPC.
+
+ Returns:
+ A response message added to the RPC by the system under test.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the RPC."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def termination(self):
+ """Blocks until the system under test has terminated the RPC.
+
+ Returns:
+ A (trailing_metadata, code, details) sequence with the RPC's trailing
+ metadata, code, and details.
+ """
+ raise NotImplementedError()
+
+
+class StreamUnaryServerRpc(six.with_metaclass(abc.ABCMeta)):
+ """Fixture for a stream-unary RPC serviced by a system under test.
+
+ Enables users to "play client" for the RPC.
+ """
+
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata emitted by the system under test.
+
+ This method blocks until the system under test has added initial
+ metadata to the RPC (or has provided one or more response messages or
+ has terminated the RPC, either of which will cause gRPC Python to
+ synthesize initial metadata for the RPC).
+
+ Returns:
+ The initial metadata for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def send_request(self, request):
+ """Sends a request to the system under test.
+
+ Args:
+ request: A request message for the RPC to be "sent" to the system
+ under test.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def requests_closed(self):
+ """Indicates the end of the RPC's request stream."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the RPC."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def termination(self):
+ """Blocks until the system under test has terminated the RPC.
+
+ Returns:
+ A (response, trailing_metadata, code, details) sequence with the RPC's
+ response, trailing metadata, code, and details.
+ """
+ raise NotImplementedError()
+
+
+class StreamStreamServerRpc(six.with_metaclass(abc.ABCMeta)):
+ """Fixture for a stream-stream RPC serviced by a system under test.
+
+ Enables users to "play client" for the RPC.
+ """
+
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata emitted by the system under test.
+
+ This method blocks until the system under test has added initial
+ metadata to the RPC (or has provided one or more response messages or
+ has terminated the RPC, either of which will cause gRPC Python to
+ synthesize initial metadata for the RPC).
+
+ Returns:
+ The initial metadata for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def send_request(self, request):
+ """Sends a request to the system under test.
+
+ Args:
+ request: A request message for the RPC to be "sent" to the system
+ under test.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def requests_closed(self):
+ """Indicates the end of the RPC's request stream."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def take_response(self):
+ """Draws one of the responses added to the RPC by the system under test.
+
+ Successive calls to this method return responses in the same order in
+ which the system under test added them to the RPC.
+
+ Returns:
+ A response message added to the RPC by the system under test.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the RPC."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def termination(self):
+ """Blocks until the system under test has terminated the RPC.
+
+ Returns:
+ A (trailing_metadata, code, details) sequence with the RPC's trailing
+ metadata, code, and details.
+ """
+ raise NotImplementedError()
+
+
+class Server(six.with_metaclass(abc.ABCMeta)):
+ """A server with which to test a system that services RPCs."""
+
+ @abc.abstractmethod
+ def invoke_unary_unary(
+ self, method_descriptor, invocation_metadata, request, timeout):
+ """Invokes an RPC to be serviced by the system under test.
+
+ Args:
+ method_descriptor: A descriptor.MethodDescriptor describing a unary-unary
+ RPC method.
+ invocation_metadata: The RPC's invocation metadata.
+ request: The RPC's request.
+ timeout: A duration of time in seconds for the RPC or None to
+ indicate that the RPC has no time limit.
+
+ Returns:
+ A UnaryUnaryServerRpc with which to "play client" for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def invoke_unary_stream(
+ self, method_descriptor, invocation_metadata, request, timeout):
+ """Invokes an RPC to be serviced by the system under test.
+
+ Args:
+ method_descriptor: A descriptor.MethodDescriptor describing a unary-stream
+ RPC method.
+ invocation_metadata: The RPC's invocation metadata.
+ request: The RPC's request.
+ timeout: A duration of time in seconds for the RPC or None to
+ indicate that the RPC has no time limit.
+
+ Returns:
+ A UnaryStreamServerRpc with which to "play client" for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def invoke_stream_unary(
+ self, method_descriptor, invocation_metadata, timeout):
+ """Invokes an RPC to be serviced by the system under test.
+
+ Args:
+ method_descriptor: A descriptor.MethodDescriptor describing a stream-unary
+ RPC method.
+ invocation_metadata: The RPC's invocation metadata.
+ timeout: A duration of time in seconds for the RPC or None to
+ indicate that the RPC has no time limit.
+
+ Returns:
+ A StreamUnaryServerRpc with which to "play client" for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def invoke_stream_stream(
+ self, method_descriptor, invocation_metadata, timeout):
+ """Invokes an RPC to be serviced by the system under test.
+
+ Args:
+ method_descriptor: A descriptor.MethodDescriptor describing a stream-stream
+ RPC method.
+ invocation_metadata: The RPC's invocation metadata.
+ timeout: A duration of time in seconds for the RPC or None to
+ indicate that the RPC has no time limit.
+
+ Returns:
+ A StreamStreamServerRpc with which to "play client" for the RPC.
+ """
+ raise NotImplementedError()
+
+
class Time(six.with_metaclass(abc.ABCMeta)):
"""A simulation of time.
@@ -406,3 +678,20 @@ def channel(service_descriptors, time):
"""
from grpc_testing import _channel
return _channel.testing_channel(service_descriptors, time)
+
+
+def server_from_dictionary(descriptors_to_servicers, time):
+ """Creates a Server for use in tests of a gRPC Python-using system.
+
+ Args:
+ descriptors_to_servicers: A dictionary from descriptor.ServiceDescriptors
+ defining RPC services to servicer objects (usually instances of classes
+ that implement "Servicer" interfaces defined in generated "_pb2_grpc"
+ modules) implementing those services.
+ time: A Time to be used for tests.
+
+ Returns:
+ A Server for use in tests.
+ """
+ from grpc_testing import _server
+ return _server.server_from_dictionary(descriptors_to_servicers, time)
diff --git a/src/python/grpcio_testing/grpc_testing/_common.py b/src/python/grpcio_testing/grpc_testing/_common.py
index cb4a7f5fa2..1517434ca7 100644
--- a/src/python/grpcio_testing/grpc_testing/_common.py
+++ b/src/python/grpcio_testing/grpc_testing/_common.py
@@ -37,6 +37,16 @@ def fuss_with_metadata(metadata):
return _fuss(tuple(metadata))
+def rpc_names(service_descriptors):
+ rpc_names_to_descriptors = {}
+ for service_descriptor in service_descriptors:
+ for method_descriptor in service_descriptor.methods_by_name.values():
+ rpc_name = '/{}/{}'.format(
+ service_descriptor.full_name, method_descriptor.name)
+ rpc_names_to_descriptors[rpc_name] = method_descriptor
+ return rpc_names_to_descriptors
+
+
class ChannelRpcRead(
collections.namedtuple(
'ChannelRpcRead',
@@ -90,3 +100,61 @@ class ChannelHandler(six.with_metaclass(abc.ABCMeta)):
self, method_full_rpc_name, invocation_metadata, requests,
requests_closed, timeout):
raise NotImplementedError()
+
+
+class ServerRpcRead(
+ collections.namedtuple('ServerRpcRead',
+ ('request', 'requests_closed', 'terminated',))):
+ pass
+
+
+REQUESTS_CLOSED = ServerRpcRead(None, True, False)
+TERMINATED = ServerRpcRead(None, False, True)
+
+
+class ServerRpcHandler(six.with_metaclass(abc.ABCMeta)):
+
+ @abc.abstractmethod
+ def send_initial_metadata(self, initial_metadata):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def take_request(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_response(self, response):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def send_termination(self, trailing_metadata, code, details):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_termination_callback(self, callback):
+ raise NotImplementedError()
+
+
+class Serverish(six.with_metaclass(abc.ABCMeta)):
+
+ @abc.abstractmethod
+ def invoke_unary_unary(
+ self, method_descriptor, handler, invocation_metadata, request,
+ deadline):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def invoke_unary_stream(
+ self, method_descriptor, handler, invocation_metadata, request,
+ deadline):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def invoke_stream_unary(
+ self, method_descriptor, handler, invocation_metadata, deadline):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def invoke_stream_stream(
+ self, method_descriptor, handler, invocation_metadata, deadline):
+ raise NotImplementedError()
diff --git a/src/python/grpcio_testing/grpc_testing/_server/__init__.py b/src/python/grpcio_testing/grpc_testing/_server/__init__.py
new file mode 100644
index 0000000000..759512949a
--- /dev/null
+++ b/src/python/grpcio_testing/grpc_testing/_server/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from grpc_testing._server import _server
+
+
+def server_from_dictionary(descriptors_to_servicers, time):
+ return _server.server_from_descriptor_to_servicers(
+ descriptors_to_servicers, time)
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_handler.py b/src/python/grpcio_testing/grpc_testing/_server/_handler.py
new file mode 100644
index 0000000000..b47e04c718
--- /dev/null
+++ b/src/python/grpcio_testing/grpc_testing/_server/_handler.py
@@ -0,0 +1,215 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import threading
+
+import grpc
+from grpc_testing import _common
+
+_CLIENT_INACTIVE = object()
+
+
+class Handler(_common.ServerRpcHandler):
+
+ @abc.abstractmethod
+ def initial_metadata(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_request(self, request):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def take_response(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def requests_closed(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_response_termination(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_response_termination(self):
+ raise NotImplementedError()
+
+
+class _Handler(Handler):
+
+ def __init__(self, requests_closed):
+ self._condition = threading.Condition()
+ self._requests = []
+ self._requests_closed = requests_closed
+ self._initial_metadata = None
+ self._responses = []
+ self._trailing_metadata = None
+ self._code = None
+ self._details = None
+ self._unary_response = None
+ self._expiration_future = None
+ self._termination_callbacks = []
+
+ def send_initial_metadata(self, initial_metadata):
+ with self._condition:
+ self._initial_metadata = initial_metadata
+ self._condition.notify_all()
+
+ def take_request(self):
+ with self._condition:
+ while True:
+ if self._code is None:
+ if self._requests:
+ request = self._requests.pop(0)
+ self._condition.notify_all()
+ return _common.ServerRpcRead(request, False, False)
+ elif self._requests_closed:
+ return _common.REQUESTS_CLOSED
+ else:
+ self._condition.wait()
+ else:
+ return _common.TERMINATED
+
+ def is_active(self):
+ with self._condition:
+ return self._code is None
+
+ def add_response(self, response):
+ with self._condition:
+ self._responses.append(response)
+ self._condition.notify_all()
+
+ def send_termination(self, trailing_metadata, code, details):
+ with self._condition:
+ self._trailing_metadata = trailing_metadata
+ self._code = code
+ self._details = details
+ if self._expiration_future is not None:
+ self._expiration_future.cancel()
+ self._condition.notify_all()
+
+ def add_termination_callback(self, termination_callback):
+ with self._condition:
+ if self._code is None:
+ self._termination_callbacks.append(termination_callback)
+ return True
+ else:
+ return False
+
+ def initial_metadata(self):
+ with self._condition:
+ while True:
+ if self._initial_metadata is None:
+ if self._code is None:
+ self._condition.wait()
+ else:
+ raise ValueError(
+ 'No initial metadata despite status code!')
+ else:
+ return self._initial_metadata
+
+ def add_request(self, request):
+ with self._condition:
+ self._requests.append(request)
+ self._condition.notify_all()
+
+ def take_response(self):
+ with self._condition:
+ while True:
+ if self._responses:
+ response = self._responses.pop(0)
+ self._condition.notify_all()
+ return response
+ elif self._code is None:
+ self._condition.wait()
+ else:
+ raise ValueError('No more responses!')
+
+ def requests_closed(self):
+ with self._condition:
+ self._requests_closed = True
+ self._condition.notify_all()
+
+ def cancel(self):
+ with self._condition:
+ if self._code is None:
+ self._code = _CLIENT_INACTIVE
+ termination_callbacks = self._termination_callbacks
+ self._termination_callbacks = None
+ if self._expiration_future is not None:
+ self._expiration_future.cancel()
+ self._condition.notify_all()
+ for termination_callback in termination_callbacks:
+ termination_callback()
+
+ def unary_response_termination(self):
+ with self._condition:
+ while True:
+ if self._code is _CLIENT_INACTIVE:
+ raise ValueError('Huh? Cancelled but wanting status?')
+ elif self._code is None:
+ self._condition.wait()
+ else:
+ if self._unary_response is None:
+ if self._responses:
+ self._unary_response = self._responses.pop(0)
+ return (
+ self._unary_response, self._trailing_metadata,
+ self._code, self._details,)
+
+ def stream_response_termination(self):
+ with self._condition:
+ while True:
+ if self._code is _CLIENT_INACTIVE:
+ raise ValueError('Huh? Cancelled but wanting status?')
+ elif self._code is None:
+ self._condition.wait()
+ else:
+ return self._trailing_metadata, self._code, self._details,
+
+ def expire(self):
+ with self._condition:
+ if self._code is None:
+ if self._initial_metadata is None:
+ self._initial_metadata = _common.FUSSED_EMPTY_METADATA
+ self._trailing_metadata = _common.FUSSED_EMPTY_METADATA
+ self._code = grpc.StatusCode.DEADLINE_EXCEEDED
+ self._details = 'Took too much time!'
+ termination_callbacks = self._termination_callbacks
+ self._termination_callbacks = None
+ self._condition.notify_all()
+ for termination_callback in termination_callbacks:
+ termination_callback()
+
+ def set_expiration_future(self, expiration_future):
+ with self._condition:
+ self._expiration_future = expiration_future
+
+
+def handler_without_deadline(requests_closed):
+ return _Handler(requests_closed)
+
+
+def handler_with_deadline(requests_closed, time, deadline):
+ handler = _Handler(requests_closed)
+ expiration_future = time.call_at(handler.expire, deadline)
+ handler.set_expiration_future(expiration_future)
+ return handler
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_rpc.py b/src/python/grpcio_testing/grpc_testing/_server/_rpc.py
new file mode 100644
index 0000000000..f81876f4b2
--- /dev/null
+++ b/src/python/grpcio_testing/grpc_testing/_server/_rpc.py
@@ -0,0 +1,153 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import threading
+
+import grpc
+from grpc_testing import _common
+
+
+class Rpc(object):
+
+ def __init__(self, handler, invocation_metadata):
+ self._condition = threading.Condition()
+ self._handler = handler
+ self._invocation_metadata = invocation_metadata
+ self._initial_metadata_sent = False
+ self._pending_trailing_metadata = None
+ self._pending_code = None
+ self._pending_details = None
+ self._callbacks = []
+ self._active = True
+ self._rpc_errors = []
+
+ def _ensure_initial_metadata_sent(self):
+ if not self._initial_metadata_sent:
+ self._handler.send_initial_metadata(_common.FUSSED_EMPTY_METADATA)
+ self._initial_metadata_sent = True
+
+ def _call_back(self):
+ callbacks = tuple(self._callbacks)
+ self._callbacks = None
+
+ def call_back():
+ for callback in callbacks:
+ try:
+ callback()
+ except Exception: # pylint: disable=broad-except
+ logging.exception('Exception calling server-side callback!')
+
+ callback_calling_thread = threading.Thread(target=call_back)
+ callback_calling_thread.start()
+
+ def _terminate(self, trailing_metadata, code, details):
+ if self._active:
+ self._active = False
+ self._handler.send_termination(trailing_metadata, code, details)
+ self._call_back()
+ self._condition.notify_all()
+
+ def _complete(self):
+ if self._pending_trailing_metadata is None:
+ trailing_metadata = _common.FUSSED_EMPTY_METADATA
+ else:
+ trailing_metadata = self._pending_trailing_metadata
+ if self._pending_code is None:
+ code = grpc.StatusCode.OK
+ else:
+ code = self._pending_code
+ details = '' if self._pending_details is None else self._pending_details
+ self._terminate(trailing_metadata, code, details)
+
+ def _abort(self, code, details):
+ self._terminate(_common.FUSSED_EMPTY_METADATA, code, details)
+
+ def add_rpc_error(self, rpc_error):
+ with self._condition:
+ self._rpc_errors.append(rpc_error)
+
+ def application_cancel(self):
+ with self._condition:
+ self._abort(
+ grpc.StatusCode.CANCELLED,
+ 'Cancelled by server-side application!')
+
+ def application_exception_abort(self, exception):
+ with self._condition:
+ if exception not in self._rpc_errors:
+ logging.exception('Exception calling application!')
+ self._abort(
+ grpc.StatusCode.UNKNOWN,
+ 'Exception calling application: {}'.format(exception))
+
+ def extrinsic_abort(self):
+ with self._condition:
+ if self._active:
+ self._active = False
+ self._call_back()
+ self._condition.notify_all()
+
+ def unary_response_complete(self, response):
+ with self._condition:
+ self._ensure_initial_metadata_sent()
+ self._handler.add_response(response)
+ self._complete()
+
+ def stream_response(self, response):
+ with self._condition:
+ self._ensure_initial_metadata_sent()
+ self._handler.add_response(response)
+
+ def stream_response_complete(self):
+ with self._condition:
+ self._ensure_initial_metadata_sent()
+ self._complete()
+
+ def send_initial_metadata(self, initial_metadata):
+ with self._condition:
+ if self._initial_metadata_sent:
+ return False
+ else:
+ self._handler.send_initial_metadata(initial_metadata)
+ self._initial_metadata_sent = True
+ return True
+
+ def is_active(self):
+ with self._condition:
+ return self._active
+
+ def add_callback(self, callback):
+ with self._condition:
+ if self._callbacks is None:
+ return False
+ else:
+ self._callbacks.append(callback)
+ return True
+
+ def invocation_metadata(self):
+ with self._condition:
+ return self._invocation_metadata
+
+ def set_trailing_metadata(self, trailing_metadata):
+ with self._condition:
+ self._pending_trailing_metadata = trailing_metadata
+
+ def set_code(self, code):
+ with self._condition:
+ self._pending_code = code
+
+ def set_details(self, details):
+ with self._condition:
+ self._pending_details = details
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_server.py b/src/python/grpcio_testing/grpc_testing/_server/_server.py
new file mode 100644
index 0000000000..66bcfc13c0
--- /dev/null
+++ b/src/python/grpcio_testing/grpc_testing/_server/_server.py
@@ -0,0 +1,149 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import threading
+
+import grpc_testing
+from grpc_testing import _common
+from grpc_testing._server import _handler
+from grpc_testing._server import _rpc
+from grpc_testing._server import _server_rpc
+from grpc_testing._server import _service
+from grpc_testing._server import _servicer_context
+
+
+def _implementation(descriptors_to_servicers, method_descriptor):
+ servicer = descriptors_to_servicers[method_descriptor.containing_service]
+ return getattr(servicer, method_descriptor.name)
+
+
+def _unary_unary_service(request):
+ def service(implementation, rpc, servicer_context):
+ _service.unary_unary(
+ implementation, rpc, request, servicer_context)
+ return service
+
+
+def _unary_stream_service(request):
+ def service(implementation, rpc, servicer_context):
+ _service.unary_stream(
+ implementation, rpc, request, servicer_context)
+ return service
+
+
+def _stream_unary_service(handler):
+ def service(implementation, rpc, servicer_context):
+ _service.stream_unary(implementation, rpc, handler, servicer_context)
+ return service
+
+
+def _stream_stream_service(handler):
+ def service(implementation, rpc, servicer_context):
+ _service.stream_stream(implementation, rpc, handler, servicer_context)
+ return service
+
+
+class _Serverish(_common.Serverish):
+
+ def __init__(self, descriptors_to_servicers, time):
+ self._descriptors_to_servicers = descriptors_to_servicers
+ self._time = time
+
+ def _invoke(
+ self, service_behavior, method_descriptor, handler,
+ invocation_metadata, deadline):
+ implementation = _implementation(
+ self._descriptors_to_servicers, method_descriptor)
+ rpc = _rpc.Rpc(handler, invocation_metadata)
+ if handler.add_termination_callback(rpc.extrinsic_abort):
+ servicer_context = _servicer_context.ServicerContext(
+ rpc, self._time, deadline)
+ service_thread = threading.Thread(
+ target=service_behavior,
+ args=(implementation, rpc, servicer_context,))
+ service_thread.start()
+
+ def invoke_unary_unary(
+ self, method_descriptor, handler, invocation_metadata, request,
+ deadline):
+ self._invoke(
+ _unary_unary_service(request), method_descriptor, handler,
+ invocation_metadata, deadline)
+
+ def invoke_unary_stream(
+ self, method_descriptor, handler, invocation_metadata, request,
+ deadline):
+ self._invoke(
+ _unary_stream_service(request), method_descriptor, handler,
+ invocation_metadata, deadline)
+
+ def invoke_stream_unary(
+ self, method_descriptor, handler, invocation_metadata, deadline):
+ self._invoke(
+ _stream_unary_service(handler), method_descriptor, handler,
+ invocation_metadata, deadline)
+
+ def invoke_stream_stream(
+ self, method_descriptor, handler, invocation_metadata, deadline):
+ self._invoke(
+ _stream_stream_service(handler), method_descriptor, handler,
+ invocation_metadata, deadline)
+
+
+def _deadline_and_handler(requests_closed, time, timeout):
+ if timeout is None:
+ return None, _handler.handler_without_deadline(requests_closed)
+ else:
+ deadline = time.time() + timeout
+ handler = _handler.handler_with_deadline(requests_closed, time, deadline)
+ return deadline, handler
+
+
+class _Server(grpc_testing.Server):
+
+ def __init__(self, serverish, time):
+ self._serverish = serverish
+ self._time = time
+
+ def invoke_unary_unary(
+ self, method_descriptor, invocation_metadata, request, timeout):
+ deadline, handler = _deadline_and_handler(True, self._time, timeout)
+ self._serverish.invoke_unary_unary(
+ method_descriptor, handler, invocation_metadata, request, deadline)
+ return _server_rpc.UnaryUnaryServerRpc(handler)
+
+ def invoke_unary_stream(
+ self, method_descriptor, invocation_metadata, request, timeout):
+ deadline, handler = _deadline_and_handler(True, self._time, timeout)
+ self._serverish.invoke_unary_stream(
+ method_descriptor, handler, invocation_metadata, request, deadline)
+ return _server_rpc.UnaryStreamServerRpc(handler)
+
+ def invoke_stream_unary(
+ self, method_descriptor, invocation_metadata, timeout):
+ deadline, handler = _deadline_and_handler(False, self._time, timeout)
+ self._serverish.invoke_stream_unary(
+ method_descriptor, handler, invocation_metadata, deadline)
+ return _server_rpc.StreamUnaryServerRpc(handler)
+
+ def invoke_stream_stream(
+ self, method_descriptor, invocation_metadata, timeout):
+ deadline, handler = _deadline_and_handler(False, self._time, timeout)
+ self._serverish.invoke_stream_stream(
+ method_descriptor, handler, invocation_metadata, deadline)
+ return _server_rpc.StreamStreamServerRpc(handler)
+
+
+def server_from_descriptor_to_servicers(descriptors_to_servicers, time):
+ return _Server(_Serverish(descriptors_to_servicers, time), time)
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_server_rpc.py b/src/python/grpcio_testing/grpc_testing/_server/_server_rpc.py
new file mode 100644
index 0000000000..30de8ff0e2
--- /dev/null
+++ b/src/python/grpcio_testing/grpc_testing/_server/_server_rpc.py
@@ -0,0 +1,93 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc_testing
+
+
+class UnaryUnaryServerRpc(grpc_testing.UnaryUnaryServerRpc):
+
+ def __init__(self, handler):
+ self._handler = handler
+
+ def initial_metadata(self):
+ return self._handler.initial_metadata()
+
+ def cancel(self):
+ self._handler.cancel()
+
+ def termination(self):
+ return self._handler.unary_response_termination()
+
+
+class UnaryStreamServerRpc(grpc_testing.UnaryStreamServerRpc):
+
+ def __init__(self, handler):
+ self._handler = handler
+
+ def initial_metadata(self):
+ return self._handler.initial_metadata()
+
+ def take_response(self):
+ return self._handler.take_response()
+
+ def cancel(self):
+ self._handler.cancel()
+
+ def termination(self):
+ return self._handler.stream_response_termination()
+
+
+class StreamUnaryServerRpc(grpc_testing.StreamUnaryServerRpc):
+
+ def __init__(self, handler):
+ self._handler = handler
+
+ def initial_metadata(self):
+ return self._handler.initial_metadata()
+
+ def send_request(self, request):
+ self._handler.add_request(request)
+
+ def requests_closed(self):
+ self._handler.requests_closed()
+
+ def cancel(self):
+ self._handler.cancel()
+
+ def termination(self):
+ return self._handler.unary_response_termination()
+
+
+class StreamStreamServerRpc(grpc_testing.StreamStreamServerRpc):
+
+ def __init__(self, handler):
+ self._handler = handler
+
+ def initial_metadata(self):
+ return self._handler.initial_metadata()
+
+ def send_request(self, request):
+ self._handler.add_request(request)
+
+ def requests_closed(self):
+ self._handler.requests_closed()
+
+ def take_response(self):
+ return self._handler.take_response()
+
+ def cancel(self):
+ self._handler.cancel()
+
+ def termination(self):
+ return self._handler.stream_response_termination()
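
These fixture-side handles mirror the four RPC arities: the stream-request variants add send_request/requests_closed, the stream-response variants add take_response, and termination blocks until the servicer finishes. A short sketch of driving a stream-stream RPC through this surface (descriptor and request names are placeholders):

    rpc = server.invoke_stream_stream(STREAM_STREAM_METHOD, (), None)
    rpc.send_request(REQUEST)          # feed one request to the servicer
    response = rpc.take_response()     # blocks until the servicer yields
    rpc.requests_closed()              # signal the end of the request stream
    trailing_metadata, code, details = rpc.termination()
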
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_service.py b/src/python/grpcio_testing/grpc_testing/_server/_service.py
new file mode 100644
index 0000000000..36b0a2f7ff
--- /dev/null
+++ b/src/python/grpcio_testing/grpc_testing/_server/_service.py
@@ -0,0 +1,88 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc
+
+
+class _RequestIterator(object):
+
+ def __init__(self, rpc, handler):
+ self._rpc = rpc
+ self._handler = handler
+
+ def _next(self):
+ read = self._handler.take_request()
+ if read.requests_closed:
+ raise StopIteration()
+ elif read.terminated:
+ rpc_error = grpc.RpcError()
+ self._rpc.add_rpc_error(rpc_error)
+ raise rpc_error
+ else:
+ return read.request
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next()
+
+ def next(self):
+ return self._next()
+
+
+def _unary_response(argument, implementation, rpc, servicer_context):
+ try:
+ response = implementation(argument, servicer_context)
+ except Exception as exception: # pylint: disable=broad-except
+ rpc.application_exception_abort(exception)
+ else:
+ rpc.unary_response_complete(response)
+
+
+def _stream_response(argument, implementation, rpc, servicer_context):
+ try:
+ response_iterator = implementation(argument, servicer_context)
+ except Exception as exception: # pylint: disable=broad-except
+ rpc.application_exception_abort(exception)
+ else:
+ while True:
+ try:
+ response = next(response_iterator)
+ except StopIteration:
+ rpc.stream_response_complete()
+ break
+ except Exception as exception: # pylint: disable=broad-except
+ rpc.application_exception_abort(exception)
+ break
+ else:
+ rpc.stream_response(response)
+
+
+def unary_unary(implementation, rpc, request, servicer_context):
+ _unary_response(request, implementation, rpc, servicer_context)
+
+
+def unary_stream(implementation, rpc, request, servicer_context):
+ _stream_response(request, implementation, rpc, servicer_context)
+
+
+def stream_unary(implementation, rpc, handler, servicer_context):
+ _unary_response(
+ _RequestIterator(rpc, handler), implementation, rpc, servicer_context)
+
+
+def stream_stream(implementation, rpc, handler, servicer_context):
+ _stream_response(
+ _RequestIterator(rpc, handler), implementation, rpc, servicer_context)
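
_RequestIterator implements the iterator protocol for both Python lines at once: Python 3 calls __next__ while Python 2 calls next, so both are thin wrappers over a single _next. The same compatibility pattern in isolation (a standalone sketch, independent of this patch):

    class BothProtocols(object):

        def __init__(self, items):
            self._iterator = iter(items)

        def _next(self):
            return next(self._iterator)

        def __iter__(self):
            return self

        def __next__(self):  # Python 3 iteration protocol
            return self._next()

        def next(self):  # Python 2 iteration protocol
            return self._next()
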
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py b/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py
new file mode 100644
index 0000000000..496689ded0
--- /dev/null
+++ b/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py
@@ -0,0 +1,74 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc
+from grpc_testing import _common
+
+
+class ServicerContext(grpc.ServicerContext):
+
+ def __init__(self, rpc, time, deadline):
+ self._rpc = rpc
+ self._time = time
+ self._deadline = deadline
+
+ def is_active(self):
+ return self._rpc.is_active()
+
+ def time_remaining(self):
+ if self._rpc.is_active():
+ if self._deadline is None:
+ return None
+ else:
+ return max(0.0, self._deadline - self._time.time())
+ else:
+ return 0.0
+
+ def cancel(self):
+ self._rpc.application_cancel()
+
+ def add_callback(self, callback):
+ return self._rpc.add_callback(callback)
+
+ def invocation_metadata(self):
+ return self._rpc.invocation_metadata()
+
+ def peer(self):
+ raise NotImplementedError()
+
+ def peer_identities(self):
+ raise NotImplementedError()
+
+ def peer_identity_key(self):
+ raise NotImplementedError()
+
+ def auth_context(self):
+ raise NotImplementedError()
+
+ def send_initial_metadata(self, initial_metadata):
+ initial_metadata_sent = self._rpc.send_initial_metadata(
+ _common.fuss_with_metadata(initial_metadata))
+ if not initial_metadata_sent:
+ raise ValueError(
+ 'ServicerContext.send_initial_metadata called too late!')
+
+ def set_trailing_metadata(self, trailing_metadata):
+ self._rpc.set_trailing_metadata(
+ _common.fuss_with_metadata(trailing_metadata))
+
+ def set_code(self, code):
+ self._rpc.set_code(code)
+
+ def set_details(self, details):
+ self._rpc.set_details(details)
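
time_remaining above reduces to a small piece of deadline arithmetic: an inactive RPC has nothing left (0.0), no deadline means "block indefinitely" (None), and otherwise the remainder is clamped at zero. The same logic in isolation (a sketch, not part of this patch):

    def remaining(now, deadline, active):
        if not active:
            return 0.0            # RPC already terminated
        if deadline is None:
            return None           # no deadline was set
        return max(0.0, deadline - now)

    assert remaining(10.0, 12.5, True) == 2.5
    assert remaining(13.0, 12.5, True) == 0.0
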
diff --git a/src/python/grpcio_tests/tests/testing/_server_application.py b/src/python/grpcio_tests/tests/testing/_server_application.py
new file mode 100644
index 0000000000..06f09c8cb4
--- /dev/null
+++ b/src/python/grpcio_tests/tests/testing/_server_application.py
@@ -0,0 +1,66 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""An example gRPC Python-using server-side application."""
+
+import grpc
+
+# requests_pb2 is a semantic dependency of this module: it is imported only for the side effect of registering its message types.
+from tests.testing import _application_common
+from tests.testing.proto import requests_pb2 # pylint: disable=unused-import
+from tests.testing.proto import services_pb2
+from tests.testing.proto import services_pb2_grpc
+
+
+class FirstServiceServicer(services_pb2_grpc.FirstServiceServicer):
+ """Services RPCs."""
+
+ def UnUn(self, request, context):
+ if _application_common.UNARY_UNARY_REQUEST == request:
+ return _application_common.UNARY_UNARY_RESPONSE
+ else:
+ context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
+ context.set_details('Something is wrong with your request!')
+ return services_pb2.Down()
+
+ def UnStre(self, request, context):
+ if _application_common.UNARY_STREAM_REQUEST != request:
+ context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
+ context.set_details('Something is wrong with your request!')
+ return
+ yield services_pb2.Strange()
+
+ def StreUn(self, request_iterator, context):
+ context.send_initial_metadata((
+ ('server_application_metadata_key', 'Hi there!',),))
+ for request in request_iterator:
+ if request != _application_common.STREAM_UNARY_REQUEST:
+ context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
+ context.set_details('Something is wrong with your request!')
+ return services_pb2.Strange()
+ elif not context.is_active():
+ return services_pb2.Strange()
+ else:
+ return _application_common.STREAM_UNARY_RESPONSE
+
+ def StreStre(self, request_iterator, context):
+ for request in request_iterator:
+ if request != _application_common.STREAM_STREAM_REQUEST:
+ context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
+ context.set_details('Something is wrong with your request!')
+ return
+ elif not context.is_active():
+ return
+ else:
+ yield _application_common.STREAM_STREAM_RESPONSE
+ yield _application_common.STREAM_STREAM_RESPONSE
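
Both streaming handlers above poll context.is_active() between requests, so a cancellation or expired deadline observed by the fixture stops the servicer promptly instead of letting it run to completion. The shape of that pattern in a generic servicer (make_response is hypothetical):

    def StreamingMethod(self, request_iterator, context):
        for request in request_iterator:
            if not context.is_active():  # cancelled or deadline exceeded
                return
            yield make_response(request)
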
diff --git a/src/python/grpcio_tests/tests/testing/_server_test.py b/src/python/grpcio_tests/tests/testing/_server_test.py
new file mode 100644
index 0000000000..7897bcce01
--- /dev/null
+++ b/src/python/grpcio_tests/tests/testing/_server_test.py
@@ -0,0 +1,169 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import unittest
+
+import grpc
+import grpc_testing
+
+from tests.testing import _application_common
+from tests.testing import _application_testing_common
+from tests.testing import _server_application
+from tests.testing.proto import services_pb2
+
+
+# TODO(https://github.com/google/protobuf/issues/3452): Drop this skip.
+@unittest.skipIf(
+ services_pb2.DESCRIPTOR.services_by_name.get('FirstService') is None,
+ 'Fix protobuf issue 3452!')
+class FirstServiceServicerTest(unittest.TestCase):
+
+ def setUp(self):
+ self._real_time = grpc_testing.strict_real_time()
+ self._fake_time = grpc_testing.strict_fake_time(time.time())
+ servicer = _server_application.FirstServiceServicer()
+ descriptors_to_servicers = {
+ _application_testing_common.FIRST_SERVICE: servicer
+ }
+ self._real_time_server = grpc_testing.server_from_dictionary(
+ descriptors_to_servicers, self._real_time)
+ self._fake_time_server = grpc_testing.server_from_dictionary(
+ descriptors_to_servicers, self._fake_time)
+
+ def test_successful_unary_unary(self):
+ rpc = self._real_time_server.invoke_unary_unary(
+ _application_testing_common.FIRST_SERVICE_UNUN, (),
+ _application_common.UNARY_UNARY_REQUEST, None)
+ initial_metadata = rpc.initial_metadata()
+ response, trailing_metadata, code, details = rpc.termination()
+
+ self.assertEqual(_application_common.UNARY_UNARY_RESPONSE, response)
+ self.assertIs(code, grpc.StatusCode.OK)
+
+ def test_successful_unary_stream(self):
+ rpc = self._real_time_server.invoke_unary_stream(
+ _application_testing_common.FIRST_SERVICE_UNSTRE, (),
+ _application_common.UNARY_STREAM_REQUEST, None)
+ initial_metadata = rpc.initial_metadata()
+ trailing_metadata, code, details = rpc.termination()
+
+ self.assertIs(code, grpc.StatusCode.OK)
+
+ def test_successful_stream_unary(self):
+ rpc = self._real_time_server.invoke_stream_unary(
+ _application_testing_common.FIRST_SERVICE_STREUN, (), None)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ rpc.requests_closed()
+ initial_metadata = rpc.initial_metadata()
+ response, trailing_metadata, code, details = rpc.termination()
+
+ self.assertEqual(_application_common.STREAM_UNARY_RESPONSE, response)
+ self.assertIs(code, grpc.StatusCode.OK)
+
+ def test_successful_stream_stream(self):
+ rpc = self._real_time_server.invoke_stream_stream(
+ _application_testing_common.FIRST_SERVICE_STRESTRE, (), None)
+ rpc.send_request(_application_common.STREAM_STREAM_REQUEST)
+ initial_metadata = rpc.initial_metadata()
+ responses = [
+ rpc.take_response(),
+ rpc.take_response(),
+ ]
+ rpc.send_request(_application_common.STREAM_STREAM_REQUEST)
+ rpc.send_request(_application_common.STREAM_STREAM_REQUEST)
+ responses.extend([
+ rpc.take_response(),
+ rpc.take_response(),
+ rpc.take_response(),
+ rpc.take_response(),
+ ])
+ rpc.requests_closed()
+ trailing_metadata, code, details = rpc.termination()
+
+ for response in responses:
+ self.assertEqual(_application_common.STREAM_STREAM_RESPONSE,
+ response)
+ self.assertIs(code, grpc.StatusCode.OK)
+
+ def test_server_rpc_idempotence(self):
+ rpc = self._real_time_server.invoke_unary_unary(
+ _application_testing_common.FIRST_SERVICE_UNUN, (),
+ _application_common.UNARY_UNARY_REQUEST, None)
+ first_initial_metadata = rpc.initial_metadata()
+ second_initial_metadata = rpc.initial_metadata()
+ third_initial_metadata = rpc.initial_metadata()
+ first_termination = rpc.termination()
+ second_termination = rpc.termination()
+ third_termination = rpc.termination()
+
+ for later_initial_metadata in (second_initial_metadata,
+ third_initial_metadata,):
+ self.assertEqual(first_initial_metadata, later_initial_metadata)
+ response = first_termination[0]
+ terminal_metadata = first_termination[1]
+ code = first_termination[2]
+ details = first_termination[3]
+ for later_termination in (second_termination, third_termination,):
+ self.assertEqual(response, later_termination[0])
+ self.assertEqual(terminal_metadata, later_termination[1])
+ self.assertIs(code, later_termination[2])
+ self.assertEqual(details, later_termination[3])
+ self.assertEqual(_application_common.UNARY_UNARY_RESPONSE, response)
+ self.assertIs(code, grpc.StatusCode.OK)
+
+ def test_misbehaving_client_unary_unary(self):
+ rpc = self._real_time_server.invoke_unary_unary(
+ _application_testing_common.FIRST_SERVICE_UNUN, (),
+ _application_common.ERRONEOUS_UNARY_UNARY_REQUEST, None)
+ initial_metadata = rpc.initial_metadata()
+ response, trailing_metadata, code, details = rpc.termination()
+
+ self.assertIsNot(code, grpc.StatusCode.OK)
+
+ def test_infinite_request_stream_real_time(self):
+ rpc = self._real_time_server.invoke_stream_unary(
+ _application_testing_common.FIRST_SERVICE_STREUN, (),
+ _application_common.INFINITE_REQUEST_STREAM_TIMEOUT)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ initial_metadata = rpc.initial_metadata()
+ self._real_time.sleep_for(
+ _application_common.INFINITE_REQUEST_STREAM_TIMEOUT * 2)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ response, trailing_metadata, code, details = rpc.termination()
+
+ self.assertIs(code, grpc.StatusCode.DEADLINE_EXCEEDED)
+
+ def test_infinite_request_stream_fake_time(self):
+ rpc = self._fake_time_server.invoke_stream_unary(
+ _application_testing_common.FIRST_SERVICE_STREUN, (),
+ _application_common.INFINITE_REQUEST_STREAM_TIMEOUT)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ initial_metadata = rpc.initial_metadata()
+ self._fake_time.sleep_for(
+ _application_common.INFINITE_REQUEST_STREAM_TIMEOUT * 2)
+ rpc.send_request(_application_common.STREAM_UNARY_REQUEST)
+ response, trailing_metadata, code, details = rpc.termination()
+
+ self.assertIs(code, grpc.StatusCode.DEADLINE_EXCEEDED)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
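
The test class runs every scenario against both clocks: strict_real_time actually sleeps, while strict_fake_time advances a synthetic clock so deadline expiry can be exercised without real waiting. A sketch of the fake-time variant in isolation (the method descriptor is a placeholder):

    fake_time = grpc_testing.strict_fake_time(time.time())
    server = grpc_testing.server_from_dictionary(
        descriptors_to_servicers, fake_time)
    rpc = server.invoke_stream_unary(STREAM_UNARY_METHOD, (), 1.0)  # 1s timeout
    fake_time.sleep_for(2.0)  # pushes the fake clock past the deadline
    response, trailing_metadata, code, details = rpc.termination()
    assert code is grpc.StatusCode.DEADLINE_EXCEEDED
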
diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json
index c10719b86f..d61297b918 100644
--- a/src/python/grpcio_tests/tests/tests.json
+++ b/src/python/grpcio_tests/tests/tests.json
@@ -10,6 +10,7 @@
"protoc_plugin.beta_python_plugin_test.PythonPluginTest",
"reflection._reflection_servicer_test.ReflectionServicerTest",
"testing._client_test.ClientTest",
+ "testing._server_test.FirstServiceServicerTest",
"testing._time_test.StrictFakeTimeTest",
"testing._time_test.StrictRealTimeTest",
"unit._api_test.AllTest",
diff --git a/src/ruby/.rubocop_todo.yml b/src/ruby/.rubocop_todo.yml
index 05db404582..32b84b8de0 100644
--- a/src/ruby/.rubocop_todo.yml
+++ b/src/ruby/.rubocop_todo.yml
@@ -1,44 +1,569 @@
-# This configuration was generated by `rubocop --auto-gen-config`
-# on 2015-05-22 13:23:34 -0700 using RuboCop version 0.30.1.
+# This configuration was generated by
+# `rubocop --auto-gen-config`
+# on 2017-09-04 17:00:36 +0200 using RuboCop version 0.49.1.
# The point is for the user to remove these configuration records
# one by one as the offenses are removed from the code base.
# Note that changes in the inspected code, or installation of new
# versions of RuboCop, may require this file to be generated again.
-# Offense count: 30
-Metrics/AbcSize:
- Max: 38
+# Offense count: 3
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles, IndentOneStep, IndentationWidth.
+# SupportedStyles: case, end
+Layout/CaseIndentation:
+ Exclude:
+ - 'tools/platform_check.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+Layout/CommentIndentation:
+ Exclude:
+ - 'qps/client.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+Layout/EmptyLineAfterMagicComment:
+ Exclude:
+ - 'tools/grpc-tools.gemspec'
+
+# Offense count: 33
+# Cop supports --auto-correct.
+# Configuration parameters: AllowAdjacentOneLineDefs, NumberOfEmptyLines.
+Layout/EmptyLineBetweenDefs:
+ Exclude:
+ - 'qps/client.rb'
+ - 'qps/histogram.rb'
+ - 'qps/proxy-worker.rb'
+ - 'qps/server.rb'
+ - 'qps/worker.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+Layout/EmptyLines:
+ Exclude:
+ - 'qps/qps-common.rb'
+
+# Offense count: 8
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: empty_lines, empty_lines_except_namespace, empty_lines_special, no_empty_lines
+Layout/EmptyLinesAroundClassBody:
+ Exclude:
+ - 'pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb'
+ - 'pb/grpc/testing/metrics_services_pb.rb'
+ - 'pb/src/proto/grpc/testing/test_services_pb.rb'
+ - 'qps/src/proto/grpc/testing/proxy-service_services_pb.rb'
+ - 'qps/src/proto/grpc/testing/services_services_pb.rb'
+
+# Offense count: 28
+# Cop supports --auto-correct.
+# Configuration parameters: AllowForAlignment, ForceEqualSignAlignment.
+Layout/ExtraSpacing:
+ Enabled: false
+
+# Offense count: 1
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: normal, rails
+Layout/IndentationConsistency:
+ Exclude:
+ - 'pb/grpc/health/checker.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+# Configuration parameters: Width, IgnoredPatterns.
+Layout/IndentationWidth:
+ Exclude:
+ - 'pb/grpc/health/checker.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: symmetrical, new_line, same_line
+Layout/MultilineHashBraceLayout:
+ Exclude:
+ - 'spec/generic/active_call_spec.rb'
+
+# Offense count: 70
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: symmetrical, new_line, same_line
+Layout/MultilineMethodCallBraceLayout:
+ Enabled: false
+
+# Offense count: 2
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles, IndentationWidth.
+# SupportedStyles: aligned, indented, indented_relative_to_receiver
+Layout/MultilineMethodCallIndentation:
+ Exclude:
+ - 'spec/generic/rpc_desc_spec.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: symmetrical, new_line, same_line
+Layout/MultilineMethodDefinitionBraceLayout:
+ Exclude:
+ - 'spec/generic/client_stub_spec.rb'
+
+# Offense count: 5
+# Cop supports --auto-correct.
+Layout/SpaceAfterColon:
+ Exclude:
+ - 'lib/grpc/generic/rpc_server.rb'
+
+# Offense count: 7
+# Cop supports --auto-correct.
+Layout/SpaceAfterComma:
+ Exclude:
+ - 'qps/client.rb'
+
+# Offense count: 27
+# Cop supports --auto-correct.
+# Configuration parameters: AllowForAlignment.
+Layout/SpaceAroundOperators:
+ Exclude:
+ - 'qps/client.rb'
+ - 'qps/histogram.rb'
+ - 'qps/proxy-worker.rb'
+ - 'qps/server.rb'
+ - 'spec/generic/active_call_spec.rb'
+ - 'spec/generic/rpc_server_spec.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles, EnforcedStyleForEmptyBraces, SupportedStylesForEmptyBraces, SpaceBeforeBlockParameters.
+# SupportedStyles: space, no_space
+# SupportedStylesForEmptyBraces: space, no_space
+Layout/SpaceInsideBlockBraces:
+ Exclude:
+ - 'stress/stress_client.rb'
+
+# Offense count: 4
+# Cop supports --auto-correct.
+Layout/SpaceInsideBrackets:
+ Exclude:
+ - 'tools/bin/grpc_tools_ruby_protoc'
+ - 'tools/bin/grpc_tools_ruby_protoc_plugin'
+
+# Offense count: 2
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles, EnforcedStyleForEmptyBraces, SupportedStylesForEmptyBraces.
+# SupportedStyles: space, no_space, compact
+# SupportedStylesForEmptyBraces: space, no_space
+Layout/SpaceInsideHashLiteralBraces:
+ Exclude:
+ - 'qps/server.rb'
+
+# Offense count: 6
+# Cop supports --auto-correct.
+Layout/SpaceInsidePercentLiteralDelimiters:
+ Exclude:
+ - 'spec/generic/client_stub_spec.rb'
+ - 'tools/grpc-tools.gemspec'
# Offense count: 3
-# Configuration parameters: CountComments.
-Metrics/ClassLength:
- Max: 200
+# Cop supports --auto-correct.
+Layout/Tab:
+ Exclude:
+ - 'pb/grpc/health/checker.rb'
+ - 'qps/client.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+Layout/TrailingWhitespace:
+ Exclude:
+ - 'qps/worker.rb'
+
+# Offense count: 1
+Lint/IneffectiveAccessModifier:
+ Exclude:
+ - 'lib/grpc/generic/active_call.rb'
+
+# Offense count: 4
+# Cop supports --auto-correct.
+Lint/PercentStringArray:
+ Exclude:
+ - 'spec/client_server_spec.rb'
+ - 'spec/generic/active_call_spec.rb'
+ - 'spec/generic/client_stub_spec.rb'
+
+# Offense count: 4
+Lint/ScriptPermission:
+ Exclude:
+ - 'qps/client.rb'
+ - 'qps/histogram.rb'
+ - 'qps/qps-common.rb'
+ - 'qps/server.rb'
+
+# Offense count: 2
+# Cop supports --auto-correct.
+# Configuration parameters: IgnoreEmptyBlocks, AllowUnusedKeywordArguments.
+Lint/UnusedBlockArgument:
+ Exclude:
+ - 'qps/client.rb'
+
+# Offense count: 2
+# Cop supports --auto-correct.
+# Configuration parameters: AllowUnusedKeywordArguments, IgnoreEmptyMethods.
+Lint/UnusedMethodArgument:
+ Exclude:
+ - 'qps/client.rb'
+
+# Offense count: 1
+# Configuration parameters: ContextCreatingMethods, MethodCreatingMethods.
+Lint/UselessAccessModifier:
+ Exclude:
+ - 'lib/grpc/logconfig.rb'
+
+# Offense count: 1
+Lint/UselessAssignment:
+ Exclude:
+ - 'qps/client.rb'
-# Offense count: 35
+# Offense count: 4
+Lint/Void:
+ Exclude:
+ - 'stress/metrics_server.rb'
+ - 'stress/stress_client.rb'
+
+# Offense count: 53
+Metrics/AbcSize:
+ Max: 57
+
+# Offense count: 81
+# Configuration parameters: CountComments, ExcludedMethods.
+Metrics/BlockLength:
+ Max: 715
+
+# Offense count: 82
+# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns.
+# URISchemes: http, https
+Metrics/LineLength:
+ Max: 141
+
+# Offense count: 82
# Configuration parameters: CountComments.
Metrics/MethodLength:
- Max: 36
+ Max: 54
-# Offense count: 7
+# Offense count: 5
# Configuration parameters: CountKeywordArgs.
Metrics/ParameterLists:
- Max: 8
+ Max: 7
+
+# Offense count: 1
+# Cop supports --auto-correct.
+Performance/RedundantBlockCall:
+ Exclude:
+ - 'spec/generic/client_stub_spec.rb'
+
+# Offense count: 5
+# Cop supports --auto-correct.
+# Configuration parameters: MaxKeyValuePairs.
+Performance/RedundantMerge:
+ Exclude:
+ - 'spec/generic/active_call_spec.rb'
+ - 'spec/generic/client_stub_spec.rb'
+
+# Offense count: 8
+# Cop supports --auto-correct.
+Performance/TimesMap:
+ Exclude:
+ - 'spec/channel_spec.rb'
+ - 'spec/client_server_spec.rb'
+ - 'spec/server_spec.rb'
+
+# Offense count: 7
+Style/AccessorMethodName:
+ Exclude:
+ - 'qps/server.rb'
+ - 'stress/metrics_server.rb'
+ - 'stress/stress_client.rb'
+
+# Offense count: 2
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: prefer_alias, prefer_alias_method
+Style/Alias:
+ Exclude:
+ - 'lib/grpc/generic/rpc_server.rb'
+ - 'lib/grpc/notifier.rb'
+
+# Offense count: 7
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles, ProceduralMethods, FunctionalMethods, IgnoredMethods.
+# SupportedStyles: line_count_based, semantic, braces_for_chaining
+# ProceduralMethods: benchmark, bm, bmbm, create, each_with_object, measure, new, realtime, tap, with_object
+# FunctionalMethods: let, let!, subject, watch
+# IgnoredMethods: lambda, proc, it
+Style/BlockDelimiters:
+ Exclude:
+ - 'qps/client.rb'
+ - 'qps/proxy-worker.rb'
+ - 'qps/server.rb'
+ - 'qps/worker.rb'
+
+# Offense count: 2
+# Cop supports --auto-correct.
+Style/ClassMethods:
+ Exclude:
+ - 'tools/platform_check.rb'
+
+# Offense count: 2
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles, SingleLineConditionsOnly, IncludeTernaryExpressions.
+# SupportedStyles: assign_to_condition, assign_inside_condition
+Style/ConditionalAssignment:
+ Exclude:
+ - 'lib/grpc/generic/rpc_server.rb'
+ - 'lib/grpc/generic/service.rb'
+
+# Offense count: 19
+Style/Documentation:
+ Exclude:
+ - 'spec/**/*'
+ - 'test/**/*'
+ - 'pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb'
+ - 'pb/grpc/testing/metrics_services_pb.rb'
+ - 'pb/src/proto/grpc/testing/test_pb.rb'
+ - 'qps/client.rb'
+ - 'qps/histogram.rb'
+ - 'qps/proxy-worker.rb'
+ - 'qps/server.rb'
+ - 'qps/src/proto/grpc/testing/proxy-service_services_pb.rb'
+ - 'qps/src/proto/grpc/testing/services_pb.rb'
+ - 'qps/src/proto/grpc/testing/services_services_pb.rb'
+ - 'qps/worker.rb'
+ - 'stress/metrics_server.rb'
+ - 'stress/stress_client.rb'
+ - 'tools/platform_check.rb'
-# Offense count: 9
+# Offense count: 8
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: compact, expanded
+Style/EmptyMethod:
+ Exclude:
+ - 'bin/noproto_server.rb'
+ - 'lib/grpc/logconfig.rb'
+ - 'spec/generic/rpc_desc_spec.rb'
+
+# Offense count: 2
+# Configuration parameters: ExpectMatchingDefinition, Regex, IgnoreExecutableScripts, AllowedAcronyms.
+# AllowedAcronyms: CLI, DSL, ACL, API, ASCII, CPU, CSS, DNS, EOF, GUID, HTML, HTTP, HTTPS, ID, IP, JSON, LHS, QPS, RAM, RHS, RPC, SLA, SMTP, SQL, SSH, TCP, TLS, TTL, UDP, UI, UID, UUID, URI, URL, UTF8, VM, XML, XMPP, XSRF, XSS
+Style/FileName:
+ Exclude:
+ - 'qps/src/proto/grpc/testing/proxy-service_pb.rb'
+ - 'qps/src/proto/grpc/testing/proxy-service_services_pb.rb'
+
+# Offense count: 12
# Configuration parameters: AllowedVariables.
Style/GlobalVars:
- Enabled: false
+ Exclude:
+ - 'ext/grpc/extconf.rb'
+
+# Offense count: 3
+# Configuration parameters: MinBodyLength.
+Style/GuardClause:
+ Exclude:
+ - 'lib/grpc/generic/bidi_call.rb'
+ - 'lib/grpc/generic/rpc_server.rb'
+ - 'qps/client.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles, UseHashRocketsWithSymbolValues, PreferHashRocketsForNonAlnumEndingSymbols.
+# SupportedStyles: ruby19, hash_rockets, no_mixed_keys, ruby19_no_mixed_keys
+Style/HashSyntax:
+ Exclude:
+ - 'stress/metrics_server.rb'
+
+# Offense count: 1
+Style/IfInsideElse:
+ Exclude:
+ - 'lib/grpc/generic/rpc_desc.rb'
+
+# Offense count: 4
+# Cop supports --auto-correct.
+# Configuration parameters: MaxLineLength.
+Style/IfUnlessModifier:
+ Exclude:
+ - 'ext/grpc/extconf.rb'
+ - 'qps/histogram.rb'
+ - 'stress/stress_client.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+Style/MethodCallWithoutArgsParentheses:
+ Exclude:
+ - 'qps/client.rb'
+
+# Offense count: 3
+# Cop supports --auto-correct.
+Style/MultilineIfModifier:
+ Exclude:
+ - 'lib/grpc/generic/bidi_call.rb'
+ - 'lib/grpc/generic/client_stub.rb'
+ - 'spec/spec_helper.rb'
+
+# Offense count: 7
+# Cop supports --auto-correct.
+Style/MutableConstant:
+ Exclude:
+ - 'ext/grpc/extconf.rb'
+ - 'lib/grpc/version.rb'
+ - 'spec/compression_options_spec.rb'
+ - 'spec/generic/active_call_spec.rb'
+ - 'tools/version.rb'
# Offense count: 1
-# Configuration parameters: EnforcedStyle, MinBodyLength, SupportedStyles.
-Style/Next:
+# Cop supports --auto-correct.
+Style/NegatedWhile:
+ Exclude:
+ - 'qps/client.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+# Configuration parameters: AutoCorrect, EnforcedStyle, SupportedStyles.
+# SupportedStyles: predicate, comparison
+Style/NumericPredicate:
+ Exclude:
+ - 'spec/**/*'
+ - 'ext/grpc/extconf.rb'
+
+# Offense count: 7
+# Cop supports --auto-correct.
+Style/ParallelAssignment:
+ Exclude:
+ - 'bin/math_server.rb'
+ - 'lib/grpc/generic/rpc_server.rb'
+ - 'spec/generic/client_stub_spec.rb'
+ - 'spec/generic/rpc_desc_spec.rb'
+ - 'spec/generic/rpc_server_pool_spec.rb'
+ - 'spec/generic/rpc_server_spec.rb'
+
+# Offense count: 8
+# Cop supports --auto-correct.
+# Configuration parameters: PreferredDelimiters.
+Style/PercentLiteralDelimiters:
+ Exclude:
+ - 'end2end/grpc_class_init_driver.rb'
+ - 'spec/client_server_spec.rb'
+ - 'spec/generic/active_call_spec.rb'
+ - 'spec/generic/client_stub_spec.rb'
+ - 'tools/grpc-tools.gemspec'
+
+# Offense count: 3
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: compact, exploded
+Style/RaiseArgs:
+ Exclude:
+ - 'stress/metrics_server.rb'
+
+# Offense count: 4
+# Cop supports --auto-correct.
+Style/RedundantParentheses:
+ Exclude:
+ - 'lib/grpc/generic/rpc_server.rb'
+ - 'qps/client.rb'
+ - 'qps/proxy-worker.rb'
+ - 'spec/generic/rpc_desc_spec.rb'
+
+# Offense count: 5
+# Cop supports --auto-correct.
+# Configuration parameters: AllowMultipleReturnValues.
+Style/RedundantReturn:
+ Exclude:
+ - 'end2end/grpc_class_init_client.rb'
+
+# Offense count: 77
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: only_raise, only_fail, semantic
+Style/SignalException:
Enabled: false
# Offense count: 2
-# Configuration parameters: Methods.
-Style/SingleLineBlockParams:
- Enabled: false
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles.
+# SupportedStyles: use_perl_names, use_english_names
+Style/SpecialGlobalVars:
+ Exclude:
+ - 'ext/grpc/extconf.rb'
+ - 'stress/stress_client.rb'
+
+# Offense count: 189
+# Cop supports --auto-correct.
+# Configuration parameters: EnforcedStyle, SupportedStyles, ConsistentQuotesInMultiline.
+# SupportedStyles: single_quotes, double_quotes
+Style/StringLiterals:
+ Exclude:
+ - 'pb/grpc/testing/metrics_pb.rb'
+ - 'pb/src/proto/grpc/testing/empty_pb.rb'
+ - 'pb/src/proto/grpc/testing/messages_pb.rb'
+ - 'qps/proxy-worker.rb'
+ - 'qps/server.rb'
+ - 'qps/src/proto/grpc/testing/control_pb.rb'
+ - 'qps/src/proto/grpc/testing/messages_pb.rb'
+ - 'qps/src/proto/grpc/testing/payloads_pb.rb'
+ - 'qps/src/proto/grpc/testing/proxy-service_pb.rb'
+ - 'qps/src/proto/grpc/testing/stats_pb.rb'
+ - 'qps/worker.rb'
# Offense count: 1
Style/StructInheritance:
- Enabled: false
+ Exclude:
+ - 'lib/grpc/generic/rpc_desc.rb'
+
+# Offense count: 10
+# Cop supports --auto-correct.
+# Configuration parameters: MinSize, SupportedStyles.
+# SupportedStyles: percent, brackets
+Style/SymbolArray:
+ EnforcedStyle: brackets
+
+# Offense count: 2
+# Cop supports --auto-correct.
+# Configuration parameters: IgnoredMethods.
+# IgnoredMethods: respond_to, define_method
+Style/SymbolProc:
+ Exclude:
+ - 'qps/client.rb'
+ - 'stress/stress_client.rb'
+
+# Offense count: 6
+# Cop supports --auto-correct.
+# Configuration parameters: AllowNamedUnderscoreVariables.
+Style/TrailingUnderscoreVariable:
+ Exclude:
+ - 'spec/channel_credentials_spec.rb'
+ - 'spec/server_credentials_spec.rb'
+
+# Offense count: 3
+# Cop supports --auto-correct.
+# Configuration parameters: ExactNameMatch, AllowPredicates, AllowDSLWriters, IgnoreClassMethods, Whitelist.
+# Whitelist: to_ary, to_a, to_c, to_enum, to_h, to_hash, to_i, to_int, to_io, to_open, to_path, to_proc, to_r, to_regexp, to_str, to_s, to_sym
+Style/TrivialAccessors:
+ Exclude:
+ - 'qps/histogram.rb'
+
+# Offense count: 3
+# Cop supports --auto-correct.
+Style/UnneededInterpolation:
+ Exclude:
+ - 'pb/grpc/health/checker.rb'
+
+# Offense count: 1
+# Cop supports --auto-correct.
+Style/YodaCondition:
+ Exclude:
+ - 'stress/stress_client.rb'
+
+# Offense count: 2
+# Cop supports --auto-correct.
+Style/ZeroLengthPredicate:
+ Exclude:
+ - 'lib/grpc/generic/rpc_server.rb'
diff --git a/src/ruby/end2end/grpc_class_init_client.rb b/src/ruby/end2end/grpc_class_init_client.rb
index b9e3d6054f..c35719a71f 100755
--- a/src/ruby/end2end/grpc_class_init_client.rb
+++ b/src/ruby/end2end/grpc_class_init_client.rb
@@ -41,7 +41,7 @@ def run_gc_stress_test(test_proc)
GC.enable
construct_many(test_proc)
- GC.start(full_mark: true, immediate_sweep: true)
+ GC.start
construct_many(test_proc)
end
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 9671d794c5..0402ce34fb 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -93,6 +93,7 @@ grpc_alarm_destroy_type grpc_alarm_destroy_import;
grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import;
grpc_channel_num_external_connectivity_watchers_type grpc_channel_num_external_connectivity_watchers_import;
grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import;
+grpc_channel_support_connectivity_watcher_type grpc_channel_support_connectivity_watcher_import;
grpc_channel_create_call_type grpc_channel_create_call_import;
grpc_channel_ping_type grpc_channel_ping_import;
grpc_channel_register_call_type grpc_channel_register_call_import;
@@ -399,6 +400,7 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_channel_check_connectivity_state_import = (grpc_channel_check_connectivity_state_type) GetProcAddress(library, "grpc_channel_check_connectivity_state");
grpc_channel_num_external_connectivity_watchers_import = (grpc_channel_num_external_connectivity_watchers_type) GetProcAddress(library, "grpc_channel_num_external_connectivity_watchers");
grpc_channel_watch_connectivity_state_import = (grpc_channel_watch_connectivity_state_type) GetProcAddress(library, "grpc_channel_watch_connectivity_state");
+ grpc_channel_support_connectivity_watcher_import = (grpc_channel_support_connectivity_watcher_type) GetProcAddress(library, "grpc_channel_support_connectivity_watcher");
grpc_channel_create_call_import = (grpc_channel_create_call_type) GetProcAddress(library, "grpc_channel_create_call");
grpc_channel_ping_import = (grpc_channel_ping_type) GetProcAddress(library, "grpc_channel_ping");
grpc_channel_register_call_import = (grpc_channel_register_call_type) GetProcAddress(library, "grpc_channel_register_call");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index b64199be8e..e3704e592b 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -260,6 +260,9 @@ extern grpc_channel_num_external_connectivity_watchers_type grpc_channel_num_ext
typedef void(*grpc_channel_watch_connectivity_state_type)(grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag);
extern grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import;
#define grpc_channel_watch_connectivity_state grpc_channel_watch_connectivity_state_import
+typedef int(*grpc_channel_support_connectivity_watcher_type)(grpc_channel *channel);
+extern grpc_channel_support_connectivity_watcher_type grpc_channel_support_connectivity_watcher_import;
+#define grpc_channel_support_connectivity_watcher grpc_channel_support_connectivity_watcher_import
typedef grpc_call *(*grpc_channel_create_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, grpc_slice method, const grpc_slice *host, gpr_timespec deadline, void *reserved);
extern grpc_channel_create_call_type grpc_channel_create_call_import;
#define grpc_channel_create_call grpc_channel_create_call_import
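
The generated Ruby extension binds each gRPC core symbol at load time: a typedef for the function's signature, a function-pointer global filled in with GetProcAddress, and a #define that redirects call sites to the pointer. A rough Python analogue of that runtime-resolution pattern using ctypes (library path is illustrative; this is not part of the patch):

    import ctypes

    def load_connectivity_watcher(library_path):
        library = ctypes.CDLL(library_path)  # dlsym/CDLL lookup plays the GetProcAddress role
        fn = library.grpc_channel_support_connectivity_watcher
        fn.restype = ctypes.c_int            # int (*)(grpc_channel *channel)
        fn.argtypes = [ctypes.c_void_p]
        return fn
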