author     Mark D. Roth <roth@google.com>  2016-10-14 13:10:54 -0700
committer  Mark D. Roth <roth@google.com>  2016-10-14 13:10:54 -0700
commit     d7ec5e18cebc3d7fcfbc1a187b7407bf9f4ba38a (patch)
tree       2a261657f3bf88314e00978fb6a42b94b1a7c32a /src/core/ext
parent     ff08f33e094640caf8e8ce5d8d2a2f82c3cc4f3b (diff)
parent     f3d71d21b9d115d8e4fdbe29f019918af09c7b13 (diff)
Merge remote-tracking branch 'upstream/master' into service_config
Diffstat (limited to 'src/core/ext')
 src/core/ext/client_config/client_channel.c                        |  22
 src/core/ext/client_config/lb_policy.h                             |   6
 src/core/ext/lb_policy/grpclb/grpclb.c                             |  29
 src/core/ext/lb_policy/pick_first/pick_first.c                     |  12
 src/core/ext/lb_policy/round_robin/round_robin.c                   |  12
 src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c | 196
 6 files changed, 139 insertions(+), 138 deletions(-)
diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_config/client_channel.c
index 0ad9278d4f..beaa9637c3 100644
--- a/src/core/ext/client_config/client_channel.c
+++ b/src/core/ext/client_config/client_channel.c
@@ -542,10 +542,14 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
 
 static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_error *error) {
-  call_data *calld = arg;
+  grpc_call_element *elem = arg;
+  call_data *calld = elem->call_data;
+  channel_data *chand = elem->channel_data;
   gpr_mu_lock(&calld->mu);
   GPR_ASSERT(calld->creation_phase ==
              GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
+  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
+                                           chand->interested_parties);
   calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
   if (calld->connected_subchannel == NULL) {
     gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
@@ -594,6 +598,9 @@ typedef struct {
   grpc_closure closure;
 } continue_picking_args;
 
+/** Return true if subchannel is available immediately (in which case on_ready
+    should not be called), or false otherwise (in which case on_ready should be
+    called when the subchannel is available). */
 static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                             grpc_metadata_batch *initial_metadata,
                             uint32_t initial_metadata_flags,
@@ -678,8 +685,8 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     }
     // TODO(dgq): make this deadline configurable somehow.
     const grpc_lb_policy_pick_args inputs = {
-        calld->pollent, initial_metadata, initial_metadata_flags,
-        &calld->lb_token_mdelem, gpr_inf_future(GPR_CLOCK_MONOTONIC)};
+        initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
+        gpr_inf_future(GPR_CLOCK_MONOTONIC)};
     const bool result = grpc_lb_policy_pick(
         exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
     GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
@@ -721,6 +728,7 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
                                          grpc_call_element *elem,
                                          grpc_transport_stream_op *op) {
   call_data *calld = elem->call_data;
+  channel_data *chand = elem->channel_data;
   GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
   grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
   /* try to (atomically) get the call */
@@ -788,14 +796,20 @@ retry:
       calld->connected_subchannel == NULL &&
       op->send_initial_metadata != NULL) {
     calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
-    grpc_closure_init(&calld->next_step, subchannel_ready, calld);
+    grpc_closure_init(&calld->next_step, subchannel_ready, elem);
     GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
+    /* If a subchannel is not available immediately, the polling entity from
+       call_data should be provided to channel_data's interested_parties, so
+       that IO of the lb_policy and resolver could be done under it. */
     if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
                         op->send_initial_metadata_flags,
                         &calld->connected_subchannel, &calld->next_step,
                         GRPC_ERROR_NONE)) {
       calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
       GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+    } else {
+      grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+                                             chand->interested_parties);
     }
   }
   /* if we've got a subchannel, then let's ask it to create a call */
diff --git a/src/core/ext/client_config/lb_policy.h b/src/core/ext/client_config/lb_policy.h
index 110d08fcac..de424cd105 100644
--- a/src/core/ext/client_config/lb_policy.h
+++ b/src/core/ext/client_config/lb_policy.h
@@ -35,7 +35,6 @@
 #define GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_H
 
 #include "src/core/ext/client_config/subchannel.h"
-#include "src/core/lib/iomgr/polling_entity.h"
 #include "src/core/lib/transport/connectivity_state.h"
 
 /** A load balancing policy: specified by a vtable and a struct (which
@@ -55,8 +54,6 @@ struct grpc_lb_policy {
 
 /** Extra arguments for an LB pick */
 typedef struct grpc_lb_policy_pick_args {
-  /** Parties interested in the pick's progress */
-  grpc_polling_entity *pollent;
   /** Initial metadata associated with the picking call. */
   grpc_metadata_batch *initial_metadata;
   /** Bitmask used for selective cancelling. See \a
@@ -153,7 +150,8 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
     once the pick is complete with its error argument set to indicate
     success or failure.
 
-   Any I/O should be done under \a pick_args->pollent. */
+   Any IO should be done under the \a interested_parties \a grpc_pollset_set
+   in the \a grpc_lb_policy struct. */
 int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                         const grpc_lb_policy_pick_args *pick_args,
                         grpc_connected_subchannel **target, void **user_data,
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index a50c632bd7..ed6ab3ce49 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -331,8 +331,8 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
   if (server->port >> 16 != 0) {
     if (log) {
       gpr_log(GPR_ERROR,
-              "Invalid port '%d' at index %zu of serverlist. Ignoring.",
-              server->port, idx);
+              "Invalid port '%d' at index %lu of serverlist. Ignoring.",
+              server->port, (unsigned long)idx);
     }
     return false;
   }
@@ -340,9 +340,9 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
   if (ip->size != 4 && ip->size != 16) {
     if (log) {
       gpr_log(GPR_ERROR,
-              "Expected IP to be 4 or 16 bytes, got %d at index %zu of "
+              "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
               "serverlist. Ignoring",
-              ip->size, idx);
+              ip->size, (unsigned long)idx);
     }
     return false;
   }
@@ -461,6 +461,9 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
             (intptr_t)glb_policy->rr_policy);
   }
   GPR_ASSERT(glb_policy->rr_policy != NULL);
+  grpc_pollset_set_add_pollset_set(exec_ctx,
+                                   glb_policy->rr_policy->interested_parties,
+                                   glb_policy->base.interested_parties);
   glb_policy->rr_connectivity->state = grpc_lb_policy_check_connectivity(
       exec_ctx, glb_policy->rr_policy, &error);
   grpc_lb_policy_notify_on_state_change(
@@ -691,8 +694,6 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   while (pp != NULL) {
     pending_pick *next = pp->next;
     if (pp->target == target) {
-      grpc_polling_entity_del_from_pollset_set(
-          exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
       *target = NULL;
       grpc_exec_ctx_sched(
           exec_ctx, &pp->wrapped_on_complete,
@@ -724,8 +725,6 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_polling_entity_del_from_pollset_set(
-          exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
       grpc_exec_ctx_sched(
           exec_ctx, &pp->wrapped_on_complete,
           GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
@@ -811,8 +810,6 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   } else {
     /* else, the pending pick will be registered and taken care of by the
      * pending pick list inside the RR policy (glb_policy->rr_policy) */
-    grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
-                                           glb_policy->base.interested_parties);
     add_pending_pick(&glb_policy->pending_picks, pick_args, target,
                      on_complete);
 
@@ -936,7 +933,7 @@ static lb_client_data *lb_client_data_create(glb_lb_policy *glb_policy) {
 
   /* Note the following LB call progresses every time there's activity in \a
    * glb_policy->base.interested_parties, which is comprised of the polling
-   * entities passed to glb_pick(). */
+   * entities from \a client_channel. */
   lb_client->lb_call = grpc_channel_create_pollset_set_call(
       glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
       glb_policy->base.interested_parties,
@@ -1075,8 +1072,8 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
     if (serverlist != NULL) {
       gpr_slice_unref(response_slice);
       if (grpc_lb_glb_trace) {
-        gpr_log(GPR_INFO, "Serverlist with %zu servers received",
-                serverlist->num_servers);
+        gpr_log(GPR_INFO, "Serverlist with %lu servers received",
+                (unsigned long)serverlist->num_servers);
       }
 
       /* update serverlist */
@@ -1160,10 +1157,10 @@ static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
   if (grpc_lb_glb_trace) {
     gpr_log(GPR_INFO,
             "status from lb server received. Status = %d, Details = '%s', "
-            "Capaticy "
-            "= %zu",
+            "Capacity "
+            "= %lu",
             lb_client->status, lb_client->status_details,
-            lb_client->status_details_capacity);
+            (unsigned long)lb_client->status_details_capacity);
   }
   /* TODO(dgq): deal with stream termination properly (fire up another one?
    * fail the original call?) */
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index c0efbe511b..da20b9281b 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -39,7 +39,6 @@
 
 typedef struct pending_pick {
   struct pending_pick *next;
-  grpc_polling_entity *pollent;
   uint32_t initial_metadata_flags;
   grpc_connected_subchannel **target;
   grpc_closure *on_complete;
@@ -119,8 +118,6 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   while (pp != NULL) {
     pending_pick *next = pp->next;
     *pp->target = NULL;
-    grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                             p->base.interested_parties);
     grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
     gpr_free(pp);
     pp = next;
@@ -138,8 +135,6 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   while (pp != NULL) {
     pending_pick *next = pp->next;
     if (pp->target == target) {
-      grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                               p->base.interested_parties);
       *target = NULL;
       grpc_exec_ctx_sched(
           exec_ctx, pp->on_complete,
@@ -168,8 +163,6 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                               p->base.interested_parties);
       grpc_exec_ctx_sched(
           exec_ctx, pp->on_complete,
           GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
@@ -229,11 +222,8 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     if (!p->started_picking) {
       start_picking(exec_ctx, p);
     }
-    grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
-                                           p->base.interested_parties);
     pp = gpr_malloc(sizeof(*pp));
     pp->next = p->pending_picks;
-    pp->pollent = pick_args->pollent;
     pp->target = target;
     pp->initial_metadata_flags = pick_args->initial_metadata_flags;
     pp->on_complete = on_complete;
@@ -319,8 +309,6 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
         while ((pp = p->pending_picks)) {
           p->pending_picks = pp->next;
           *pp->target = selected;
-          grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                                   p->base.interested_parties);
           grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
           gpr_free(pp);
         }
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index 37856c228d..e238dc2922 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -78,9 +78,6 @@ int grpc_lb_round_robin_trace = 0;
 typedef struct pending_pick {
   struct pending_pick *next;
 
-  /* polling entity for the pick()'s async notification */
-  grpc_polling_entity *pollent;
-
   /* output argument where to store the pick()ed user_data. It'll be NULL if no
    * such data is present or there's an error (the definite test for errors is
    * \a target being NULL). */
@@ -318,8 +315,6 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   while (pp != NULL) {
     pending_pick *next = pp->next;
     if (pp->target == target) {
-      grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                               p->base.interested_parties);
       *target = NULL;
       grpc_exec_ctx_sched(
           exec_ctx, pp->on_complete,
@@ -348,8 +343,6 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                               p->base.interested_parties);
       *pp->target = NULL;
       grpc_exec_ctx_sched(
           exec_ctx, pp->on_complete,
@@ -422,11 +415,8 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     if (!p->started_picking) {
       start_picking(exec_ctx, p);
     }
-    grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
-                                           p->base.interested_parties);
     pp = gpr_malloc(sizeof(*pp));
     pp->next = p->pending_picks;
-    pp->pollent = pick_args->pollent;
     pp->target = target;
     pp->on_complete = on_complete;
     pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -482,8 +472,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
                 "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
                 (void *)selected->subchannel, (void *)selected);
       }
-      grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                               p->base.interested_parties);
       grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
       gpr_free(pp);
     }
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index da3e284fcf..563271f4f8 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -61,13 +61,12 @@ typedef struct server_secure_state {
   grpc_server_credentials *creds;
   bool is_shutdown;
   gpr_mu mu;
-  gpr_refcount refcount;
-  grpc_closure destroy_closure;
-  grpc_closure *destroy_callback;
+  grpc_closure tcp_server_shutdown_complete;
+  grpc_closure *server_destroy_listener_done;
 } server_secure_state;
 
 typedef struct server_secure_connect {
-  server_secure_state *state;
+  server_secure_state *server_state;
   grpc_pollset *accepting_pollset;
   grpc_tcp_server_acceptor *acceptor;
   grpc_handshake_manager *handshake_mgr;
@@ -77,39 +76,28 @@ typedef struct server_secure_connect {
   grpc_channel_args *args;
 } server_secure_connect;
 
-static void state_ref(server_secure_state *state) { gpr_ref(&state->refcount); }
-
-static void state_unref(server_secure_state *state) {
-  if (gpr_unref(&state->refcount)) {
-    /* ensure all threads have unlocked */
-    gpr_mu_lock(&state->mu);
-    gpr_mu_unlock(&state->mu);
-    /* clean up */
-    GRPC_SECURITY_CONNECTOR_UNREF(&state->sc->base, "server");
-    grpc_server_credentials_unref(state->creds);
-    gpr_free(state);
-  }
-}
-
 static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
                                      grpc_security_status status,
                                      grpc_endpoint *secure_endpoint,
                                      grpc_auth_context *auth_context) {
-  server_secure_connect *state = statep;
+  server_secure_connect *connection_state = statep;
   if (status == GRPC_SECURITY_OK) {
     if (secure_endpoint) {
-      gpr_mu_lock(&state->state->mu);
-      if (!state->state->is_shutdown) {
+      gpr_mu_lock(&connection_state->server_state->mu);
+      if (!connection_state->server_state->is_shutdown) {
         grpc_transport *transport = grpc_create_chttp2_transport(
-            exec_ctx, grpc_server_get_channel_args(state->state->server),
+            exec_ctx, grpc_server_get_channel_args(
+                          connection_state->server_state->server),
             secure_endpoint, 0);
         grpc_arg args_to_add[2];
-        args_to_add[0] = grpc_server_credentials_to_arg(state->state->creds);
+        args_to_add[0] = grpc_server_credentials_to_arg(
+            connection_state->server_state->creds);
         args_to_add[1] = grpc_auth_context_to_arg(auth_context);
         grpc_channel_args *args_copy = grpc_channel_args_copy_and_add(
-            state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
-        grpc_server_setup_transport(exec_ctx, state->state->server, transport,
-                                    state->accepting_pollset, args_copy);
+            connection_state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
+        grpc_server_setup_transport(
+            exec_ctx, connection_state->server_state->server, transport,
+            connection_state->accepting_pollset, args_copy);
         grpc_channel_args_destroy(args_copy);
         grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL);
       } else {
@@ -117,21 +105,21 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
          * gone away. */
         grpc_endpoint_destroy(exec_ctx, secure_endpoint);
       }
-      gpr_mu_unlock(&state->state->mu);
+      gpr_mu_unlock(&connection_state->server_state->mu);
     }
   } else {
     gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
   }
-  grpc_channel_args_destroy(state->args);
-  state_unref(state->state);
-  gpr_free(state);
+  grpc_channel_args_destroy(connection_state->args);
+  grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
+  gpr_free(connection_state);
 }
 
 static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
                               grpc_channel_args *args,
                               gpr_slice_buffer *read_buffer, void *user_data,
                               grpc_error *error) {
-  server_secure_connect *state = user_data;
+  server_secure_connect *connection_state = user_data;
   if (error != GRPC_ERROR_NONE) {
     const char *error_str = grpc_error_string(error);
     gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str);
@@ -139,81 +127,107 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
     GRPC_ERROR_UNREF(error);
     grpc_channel_args_destroy(args);
     gpr_free(read_buffer);
-    grpc_handshake_manager_shutdown(exec_ctx, state->handshake_mgr);
-    grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr);
-    state_unref(state->state);
-    gpr_free(state);
+    grpc_handshake_manager_shutdown(exec_ctx, connection_state->handshake_mgr);
+    grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
+    grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
+    gpr_free(connection_state);
     return;
   }
-  grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr);
-  state->handshake_mgr = NULL;
+  grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
+  connection_state->handshake_mgr = NULL;
   // TODO(roth, jboeuf): Convert security connector handshaking to use new
   // handshake API, and then move the code from on_secure_handshake_done()
   // into this function.
-  state->args = args;
+  connection_state->args = args;
   grpc_server_security_connector_do_handshake(
-      exec_ctx, state->state->sc, state->acceptor, endpoint, read_buffer,
-      state->deadline, on_secure_handshake_done, state);
+      exec_ctx, connection_state->server_state->sc, connection_state->acceptor,
+      endpoint, read_buffer, connection_state->deadline,
+      on_secure_handshake_done, connection_state);
 }
 
 static void on_accept(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *tcp,
                       grpc_pollset *accepting_pollset,
                       grpc_tcp_server_acceptor *acceptor) {
-  server_secure_connect *state = gpr_malloc(sizeof(*state));
-  state->state = statep;
-  state_ref(state->state);
-  state->accepting_pollset = accepting_pollset;
-  state->acceptor = acceptor;
-  state->handshake_mgr = grpc_handshake_manager_create();
+  server_secure_state *server_state = statep;
+  server_secure_connect *connection_state = NULL;
+  gpr_mu_lock(&server_state->mu);
+  if (server_state->is_shutdown) {
+    gpr_mu_unlock(&server_state->mu);
+    grpc_endpoint_destroy(exec_ctx, tcp);
+    return;
+  }
+  gpr_mu_unlock(&server_state->mu);
+  grpc_tcp_server_ref(server_state->tcp);
+  connection_state = gpr_malloc(sizeof(*connection_state));
+  connection_state->server_state = server_state;
+  connection_state->accepting_pollset = accepting_pollset;
+  connection_state->acceptor = acceptor;
+  connection_state->handshake_mgr = grpc_handshake_manager_create();
   // TODO(roth): We should really get this timeout value from channel
   // args instead of hard-coding it.
-  state->deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
-                                 gpr_time_from_seconds(120, GPR_TIMESPAN));
+  connection_state->deadline = gpr_time_add(
+      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
   grpc_handshake_manager_do_handshake(
-      exec_ctx, state->handshake_mgr, tcp,
-      grpc_server_get_channel_args(state->state->server), state->deadline,
-      acceptor, on_handshake_done, state);
+      exec_ctx, connection_state->handshake_mgr, tcp,
+      grpc_server_get_channel_args(connection_state->server_state->server),
+      connection_state->deadline, acceptor, on_handshake_done,
+      connection_state);
 }
 
 /* Server callback: start listening on our ports */
-static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
-                  grpc_pollset **pollsets, size_t pollset_count) {
-  server_secure_state *state = statep;
-  grpc_tcp_server_start(exec_ctx, state->tcp, pollsets, pollset_count,
-                        on_accept, state);
+static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
+                                  void *statep, grpc_pollset **pollsets,
+                                  size_t pollset_count) {
+  server_secure_state *server_state = statep;
+  gpr_mu_lock(&server_state->mu);
+  server_state->is_shutdown = false;
+  gpr_mu_unlock(&server_state->mu);
+  grpc_tcp_server_start(exec_ctx, server_state->tcp, pollsets, pollset_count,
+                        on_accept, server_state);
 }
 
-static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep,
-                         grpc_error *error) {
-  server_secure_state *state = statep;
-  if (state->destroy_callback != NULL) {
-    state->destroy_callback->cb(exec_ctx, state->destroy_callback->cb_arg,
-                                GRPC_ERROR_REF(error));
+static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *statep,
+                                         grpc_error *error) {
+  server_secure_state *server_state = statep;
+  /* ensure all threads have unlocked */
+  gpr_mu_lock(&server_state->mu);
+  grpc_closure *destroy_done = server_state->server_destroy_listener_done;
+  GPR_ASSERT(server_state->is_shutdown);
+  gpr_mu_unlock(&server_state->mu);
+  /* clean up */
+  grpc_server_security_connector_shutdown(exec_ctx, server_state->sc);
+
+  /* Flush queued work before a synchronous unref. */
+  grpc_exec_ctx_flush(exec_ctx);
+  GRPC_SECURITY_CONNECTOR_UNREF(&server_state->sc->base, "server");
+  grpc_server_credentials_unref(server_state->creds);
+
+  if (destroy_done != NULL) {
+    destroy_done->cb(exec_ctx, destroy_done->cb_arg, GRPC_ERROR_REF(error));
+    grpc_exec_ctx_flush(exec_ctx);
   }
-  grpc_server_security_connector_shutdown(exec_ctx, state->sc);
-  state_unref(state);
+  gpr_free(server_state);
 }
 
-/* Server callback: destroy the tcp listener (so we don't generate further
-   callbacks) */
-static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
-                    grpc_closure *callback) {
-  server_secure_state *state = statep;
+static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
+                                    grpc_server *server, void *statep,
+                                    grpc_closure *callback) {
+  server_secure_state *server_state = statep;
   grpc_tcp_server *tcp;
-  gpr_mu_lock(&state->mu);
-  state->is_shutdown = true;
-  state->destroy_callback = callback;
-  tcp = state->tcp;
-  gpr_mu_unlock(&state->mu);
+  gpr_mu_lock(&server_state->mu);
+  server_state->is_shutdown = true;
+  server_state->server_destroy_listener_done = callback;
+  tcp = server_state->tcp;
+  gpr_mu_unlock(&server_state->mu);
   grpc_tcp_server_shutdown_listeners(exec_ctx, tcp);
-  grpc_tcp_server_unref(exec_ctx, tcp);
+  grpc_tcp_server_unref(exec_ctx, server_state->tcp);
 }
 
 int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
                                       grpc_server_credentials *creds) {
   grpc_resolved_addresses *resolved = NULL;
   grpc_tcp_server *tcp = NULL;
-  server_secure_state *state = NULL;
+  server_secure_state *server_state = NULL;
   size_t i;
   size_t count = 0;
   int port_num = -1;
@@ -253,22 +267,22 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
   if (err != GRPC_ERROR_NONE) {
     goto error;
   }
-  state = gpr_malloc(sizeof(*state));
-  memset(state, 0, sizeof(*state));
-  grpc_closure_init(&state->destroy_closure, destroy_done, state);
-  err = grpc_tcp_server_create(&state->destroy_closure,
+  server_state = gpr_malloc(sizeof(*server_state));
+  memset(server_state, 0, sizeof(*server_state));
+  grpc_closure_init(&server_state->tcp_server_shutdown_complete,
+                    tcp_server_shutdown_complete, server_state);
+  err = grpc_tcp_server_create(&server_state->tcp_server_shutdown_complete,
                                grpc_server_get_channel_args(server), &tcp);
   if (err != GRPC_ERROR_NONE) {
     goto error;
   }
-  state->server = server;
-  state->tcp = tcp;
-  state->sc = sc;
-  state->creds = grpc_server_credentials_ref(creds);
-  state->is_shutdown = false;
-  gpr_mu_init(&state->mu);
-  gpr_ref_init(&state->refcount, 1);
+  server_state->server = server;
+  server_state->tcp = tcp;
+  server_state->sc = sc;
+  server_state->creds = grpc_server_credentials_ref(creds);
+  server_state->is_shutdown = true;
+  gpr_mu_init(&server_state->mu);
 
   errors = gpr_malloc(sizeof(*errors) * resolved->naddrs);
   for (i = 0; i < resolved->naddrs; i++) {
@@ -313,7 +327,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
   grpc_resolved_addresses_destroy(resolved);
 
   /* Register with the server only upon success */
-  grpc_server_add_listener(&exec_ctx, server, state, start, destroy);
+  grpc_server_add_listener(&exec_ctx, server, server_state,
+                           server_start_listener, server_destroy_listener);
 
   grpc_exec_ctx_finish(&exec_ctx);
   return port_num;
@@ -334,10 +349,11 @@ error:
     grpc_tcp_server_unref(&exec_ctx, tcp);
   } else {
     if (sc) {
+      grpc_exec_ctx_flush(&exec_ctx);
      GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "server");
     }
-    if (state) {
-      gpr_free(state);
+    if (server_state) {
+      gpr_free(server_state);
     }
   }
   grpc_exec_ctx_finish(&exec_ctx);
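
The client_channel.c, lb_policy.h, pick_first.c, round_robin.c, and grpclb.c hunks all implement one refactor: per-pick polling bookkeeping moves out of the individual LB policies (which previously stored a pollent in every pending_pick and added/removed it in each pick, cancel, and shutdown path) into the client channel, which parks the call's polling entity on the channel-wide interested_parties pollset_set only while a pick is outstanding. A minimal sketch of that ownership pattern, using hypothetical call_state/channel_state structs standing in for gRPC's call_data/channel_data (the grpc_polling_entity_* calls and their signatures are the real ones used in the diff):

#include <stdbool.h>

#include "src/core/lib/iomgr/polling_entity.h"

/* Hypothetical stand-ins for gRPC's call_data / channel_data. */
typedef struct {
  grpc_polling_entity *pollent; /* poller that drives this call's I/O */
} call_state;

typedef struct {
  grpc_pollset_set *interested_parties; /* polled for LB/resolver I/O */
} channel_state;

/* Called when an LB pick does not complete synchronously: the call's
 * poller must be visible to the LB policy and resolver so their I/O can
 * make progress even when no other thread is polling for them. */
static void pick_pending(grpc_exec_ctx *exec_ctx, call_state *call,
                         channel_state *chan) {
  grpc_polling_entity_add_to_pollset_set(exec_ctx, call->pollent,
                                         chan->interested_parties);
}

/* Called from the pick-completion closure (cf. subchannel_ready above):
 * the entity is removed exactly once, in one place. */
static void pick_resolved(grpc_exec_ctx *exec_ctx, call_state *call,
                          channel_state *chan) {
  grpc_polling_entity_del_from_pollset_set(exec_ctx, call->pollent,
                                           chan->interested_parties);
}

Centralizing this add/del pair is what lets every grpc_polling_entity_del_from_pollset_set call disappear from the pick_first and round_robin pick/cancel/shutdown paths above.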
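The gpr_log hunks in grpclb.c replace "%zu" with "%lu" plus an explicit (unsigned long) cast, presumably to keep the format strings portable to printf implementations without C99's "%zu" (older MSVC runtimes are the usual culprit). A self-contained illustration of the same idiom, with a hypothetical log_serverlist_size helper:

#include <stdio.h>

/* Portable size_t formatting without "%zu": cast to the widest
 * widely-available unsigned type and format with "%lu". */
static void log_serverlist_size(size_t num_servers) {
  printf("Serverlist with %lu servers received\n",
         (unsigned long)num_servers);
}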
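The server_secure_chttp2.c rewrite drops the hand-rolled gpr_refcount on server_secure_state and keys the state's lifetime to the grpc_tcp_server instead: each accepted connection takes a TCP-server ref before handshaking and releases it when the handshake path finishes (success or failure), and all cleanup runs in the tcp_server_shutdown_complete closure passed to grpc_tcp_server_create, which fires only after the listeners are shut down and the last ref is dropped. A condensed sketch of that lifecycle; the real code threads this through on_accept/on_handshake_done/on_secure_handshake_done, and handshake_finished here is a hypothetical completion hook:

/* Condensed lifecycle sketch (not the actual callback wiring). */
static void accept_connection(grpc_exec_ctx *exec_ctx,
                              server_secure_state *server_state) {
  /* Pin the tcp server, and with it server_secure_state, for the
   * duration of the handshake. */
  grpc_tcp_server_ref(server_state->tcp);
  /* ... start handshaking on the new endpoint ... */
}

static void handshake_finished(grpc_exec_ctx *exec_ctx,
                               server_secure_state *server_state) {
  /* Success or failure, drop the ref; once listeners are shut down and
   * this was the last ref, grpc_tcp_server runs the shutdown-complete
   * closure (tcp_server_shutdown_complete above), which frees state. */
  grpc_tcp_server_unref(exec_ctx, server_state->tcp);
}

The apparent benefit of this design is that destruction is serialized behind TCP-server shutdown, so the state cannot be freed while an accepted connection is still handshaking.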