Diffstat (limited to 'src/core')
20 files changed, 318 insertions, 281 deletions
diff --git a/src/core/README.md b/src/core/README.md
index 0d8c0d5bd9..44c6f24772 100644
--- a/src/core/README.md
+++ b/src/core/README.md
@@ -1,8 +1,4 @@
 #Overview
-This directory contains source code for shared C library. Libraries in other languages in this repository (C++, Ruby,
+This directory contains source code for C library (a.k.a the *gRPC C core*) that provides all gRPC's core functionality through a low level API. Libraries in other languages in this repository (C++, Ruby,
 Python, PHP, NodeJS, Objective-C) are layered on top of this library.
-
-#Status
-
-Beta
diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_config/client_channel.c
index a6056c3e8d..cbf79afa17 100644
--- a/src/core/ext/client_config/client_channel.c
+++ b/src/core/ext/client_config/client_channel.c
@@ -513,10 +513,14 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
 
 static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_error *error) {
-  call_data *calld = arg;
+  grpc_call_element *elem = arg;
+  call_data *calld = elem->call_data;
+  channel_data *chand = elem->channel_data;
   gpr_mu_lock(&calld->mu);
   GPR_ASSERT(calld->creation_phase ==
              GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
+  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
+                                           chand->interested_parties);
   calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
   if (calld->connected_subchannel == NULL) {
     gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
@@ -564,6 +568,9 @@ typedef struct {
   grpc_closure closure;
 } continue_picking_args;
 
+/** Return true if subchannel is available immediately (in which case on_ready
+    should not be called), or false otherwise (in which case on_ready should be
+    called when the subchannel is available). */
 static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                             grpc_metadata_batch *initial_metadata,
                             uint32_t initial_metadata_flags,
@@ -629,8 +636,8 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     gpr_mu_unlock(&chand->mu);
     // TODO(dgq): make this deadline configurable somehow.
     const grpc_lb_policy_pick_args inputs = {
-        calld->pollent, initial_metadata, initial_metadata_flags,
-        &calld->lb_token_mdelem, gpr_inf_future(GPR_CLOCK_MONOTONIC)};
+        initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
+        gpr_inf_future(GPR_CLOCK_MONOTONIC)};
     r = grpc_lb_policy_pick(exec_ctx, lb_policy, &inputs, connected_subchannel,
                             NULL, on_ready);
     GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
@@ -672,6 +679,7 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
                                          grpc_call_element *elem,
                                          grpc_transport_stream_op *op) {
   call_data *calld = elem->call_data;
+  channel_data *chand = elem->channel_data;
   GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
   grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
   /* try to (atomically) get the call */
@@ -739,14 +747,20 @@ retry:
       calld->connected_subchannel == NULL &&
       op->send_initial_metadata != NULL) {
     calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
-    grpc_closure_init(&calld->next_step, subchannel_ready, calld);
+    grpc_closure_init(&calld->next_step, subchannel_ready, elem);
     GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
+    /* If a subchannel is not available immediately, the polling entity from
+       call_data should be provided to channel_data's interested_parties, so
+       that IO of the lb_policy and resolver could be done under it. */
     if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
                         op->send_initial_metadata_flags,
                         &calld->connected_subchannel, &calld->next_step,
                         GRPC_ERROR_NONE)) {
       calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
       GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+    } else {
+      grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+                                             chand->interested_parties);
     }
   }
   /* if we've got a subchannel, then let's ask it to create a call */
diff --git a/src/core/ext/client_config/lb_policy.h b/src/core/ext/client_config/lb_policy.h
index 110d08fcac..de424cd105 100644
--- a/src/core/ext/client_config/lb_policy.h
+++ b/src/core/ext/client_config/lb_policy.h
@@ -35,7 +35,6 @@
 #define GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_H
 
 #include "src/core/ext/client_config/subchannel.h"
-#include "src/core/lib/iomgr/polling_entity.h"
 #include "src/core/lib/transport/connectivity_state.h"
 
 /** A load balancing policy: specified by a vtable and a struct (which
@@ -55,8 +54,6 @@ struct grpc_lb_policy {
 
 /** Extra arguments for an LB pick */
 typedef struct grpc_lb_policy_pick_args {
-  /** Parties interested in the pick's progress */
-  grpc_polling_entity *pollent;
   /** Initial metadata associated with the picking call. */
   grpc_metadata_batch *initial_metadata;
   /** Bitmask used for selective cancelling. See \a
@@ -153,7 +150,8 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
    once the pick is complete with its error argument set to indicate
    success or failure.
 
-   Any I/O should be done under \a pick_args->pollent. */
+   Any IO should be done under the \a interested_parties \a grpc_pollset_set
+   in the \a grpc_lb_policy struct. */
 int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                         const grpc_lb_policy_pick_args *pick_args,
                         grpc_connected_subchannel **target, void **user_data,
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index ae1f2a3b4c..9af92b787d 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -69,8 +69,8 @@
  * possible scenarios:
  *
  * 1. This is the first server list received. There was no previous instance of
- *    the Round Robin policy. \a rr_handover() will instantiate the RR policy
- *    and perform all the pending operations over it.
+ *    the Round Robin policy. \a rr_handover_locked() will instantiate the RR
+ *    policy and perform all the pending operations over it.
  * 2. There's already a RR policy instance active. We need to introduce the new
  *    one build from the new serverlist, but taking care not to disrupt the
  *    operations in progress over the old RR instance. This is done by
@@ -78,7 +78,7 @@
  *    references are held on the old RR policy, it'll be destroyed and \a
  *    glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
  *    state. At this point we can transition to a new RR instance safely, which
- *    is done once again via \a rr_handover().
+ *    is done once again via \a rr_handover_locked().
  *
  *
  * Once a RR policy instance is in place (and getting updated as described),
@@ -86,8 +86,8 @@
 * forwarding them to the RR instance. Any time there's no RR policy available
 * (ie, right after the creation of the gRPCLB policy, if an empty serverlist
 * is received, etc), pick/ping requests are added to a list of pending
- * picks/pings to be flushed and serviced as part of \a rr_handover() the moment
- * the RR policy instance becomes available.
+ * picks/pings to be flushed and serviced as part of \a rr_handover_locked() the
+ * moment the RR policy instance becomes available.
 *
 * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
 * high level design and details. */
@@ -134,6 +134,9 @@ static void initial_metadata_add_lb_token(
 }
 
 typedef struct wrapped_rr_closure_arg {
+  /* the closure instance using this struct as argument */
+  grpc_closure wrapper_closure;
+
   /* the original closure. Usually a on_complete/notify cb for pick() and ping()
    * calls against the internal RR instance, respectively. */
   grpc_closure *wrapped_closure;
@@ -155,9 +158,8 @@ typedef struct wrapped_rr_closure_arg {
   /* The RR instance related to the closure */
   grpc_lb_policy *rr_policy;
 
-  /* when not NULL, represents a pending_{pick,ping} node to be freed upon
-   * closure execution */
-  void *owning_pending_node; /* to be freed if not NULL */
+  /* heap memory to be freed upon closure execution. */
+  void *free_when_done;
 } wrapped_rr_closure_arg;
 
 /* The \a on_complete closure passed as part of the pick requires keeping a
@@ -183,10 +185,10 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
     }
   }
   GPR_ASSERT(wc_arg->wrapped_closure != NULL);
-  grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
-                      NULL);
-  gpr_free(wc_arg->owning_pending_node);
+  GPR_ASSERT(wc_arg->free_when_done != NULL);
+  gpr_free(wc_arg->free_when_done);
 }
 
 /* Linked list of pending pick requests. It stores all information needed to
@@ -207,10 +209,6 @@ typedef struct pending_pick {
    * upon error. */
   grpc_connected_subchannel **target;
 
-  /* a closure wrapping the original on_complete one to be invoked once the
-   * pick() has completed (regardless of success) */
-  grpc_closure wrapped_on_complete;
-
   /* args for wrapped_on_complete */
   wrapped_rr_closure_arg wrapped_on_complete_arg;
 } pending_pick;
@@ -230,8 +228,9 @@ static void add_pending_pick(pending_pick **root,
   pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
   pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
       pick_args->lb_token_mdelem_storage;
-  grpc_closure_init(&pp->wrapped_on_complete, wrapped_rr_closure,
-                    &pp->wrapped_on_complete_arg);
+  pp->wrapped_on_complete_arg.free_when_done = pp;
+  grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
+                    wrapped_rr_closure, &pp->wrapped_on_complete_arg);
   *root = pp;
 }
 
@@ -239,10 +238,6 @@ typedef struct pending_ping {
   struct pending_ping *next;
 
-  /* a closure wrapping the original on_complete one to be invoked once the
-   * ping() has completed (regardless of success) */
-  grpc_closure wrapped_notify;
-
   /* args for wrapped_notify */
   wrapped_rr_closure_arg wrapped_notify_arg;
 } pending_ping;
@@ -251,10 +246,11 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
   pending_ping *pping = gpr_malloc(sizeof(*pping));
   memset(pping, 0, sizeof(pending_ping));
   memset(&pping->wrapped_notify_arg, 0, sizeof(wrapped_rr_closure_arg));
-  pping->next = *root;
-  grpc_closure_init(&pping->wrapped_notify, wrapped_rr_closure,
-                    &pping->wrapped_notify_arg);
   pping->wrapped_notify_arg.wrapped_closure = notify;
+  pping->wrapped_notify_arg.free_when_done = pping;
+  pping->next = *root;
+  grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
+                    wrapped_rr_closure, &pping->wrapped_notify_arg);
   *root = pping;
 }
 
@@ -307,13 +303,6 @@ typedef struct glb_lb_policy {
 
   /** for tracking of the RR connectivity */
   rr_connectivity_data *rr_connectivity;
-
-  /* a wrapped (see \a wrapped_rr_closure) on-complete closure for readily
-   * available RR picks */
-  grpc_closure wrapped_on_complete;
-
-  /* arguments for the wrapped_on_complete closure */
-  wrapped_rr_closure_arg wc_arg;
 } glb_lb_policy;
 
 /* Keeps track and reacts to changes in connectivity of the RR instance */
@@ -329,8 +318,8 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
   if (server->port >> 16 != 0) {
     if (log) {
       gpr_log(GPR_ERROR,
-              "Invalid port '%d' at index %zu of serverlist. Ignoring.",
-              server->port, idx);
+              "Invalid port '%d' at index %lu of serverlist. Ignoring.",
+              server->port, (unsigned long)idx);
     }
     return false;
   }
@@ -338,9 +327,9 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
   if (ip->size != 4 && ip->size != 16) {
     if (log) {
       gpr_log(GPR_ERROR,
-              "Expected IP to be 4 or 16 bytes, got %d at index %zu of "
+              "Expected IP to be 4 or 16 bytes, got %d at index %lu of "
               "serverlist. Ignoring",
-              ip->size, idx);
+              ip->size, (unsigned long)idx);
     }
     return false;
   }
@@ -399,14 +388,14 @@ static grpc_lb_addresses *process_serverlist(
           GPR_ARRAY_SIZE(server->load_balance_token) - 1;
       grpc_mdstr *lb_token_mdstr = grpc_mdstr_from_buffer(
           (uint8_t *)server->load_balance_token, lb_token_size);
-      user_data = grpc_mdelem_from_metadata_strings(
-          GRPC_MDSTR_LOAD_REPORTING_INITIAL, lb_token_mdstr);
+      user_data = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_LB_TOKEN,
+                                                    lb_token_mdstr);
     } else {
       gpr_log(GPR_ERROR,
               "Missing LB token for backend address '%s'. The empty token will "
               "be used instead",
               grpc_sockaddr_to_uri((struct sockaddr *)&addr.addr));
-      user_data = GRPC_MDELEM_LOAD_REPORTING_INITIAL_EMPTY;
+      user_data = GRPC_MDELEM_LB_TOKEN_EMPTY;
     }
 
     grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
@@ -424,9 +413,43 @@ static void lb_token_destroy(void *token) {
   if (token != NULL) GRPC_MDELEM_UNREF(token);
 }
 
-static grpc_lb_policy *create_rr(grpc_exec_ctx *exec_ctx,
-                                 const grpc_grpclb_serverlist *serverlist,
-                                 glb_lb_policy *glb_policy) {
+/* perform a pick over \a rr_policy. Given that a pick can return immediately
+ * (ignoring its completion callback) we need to perform the cleanups this
+ * callback would be otherwise resposible for */
+static bool pick_from_internal_rr_locked(
+    grpc_exec_ctx *exec_ctx, grpc_lb_policy *rr_policy,
+    const grpc_lb_policy_pick_args *pick_args,
+    grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
+  GPR_ASSERT(rr_policy != NULL);
+  const bool pick_done =
+      grpc_lb_policy_pick(exec_ctx, rr_policy, pick_args, target,
+                          (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+  if (pick_done) {
+    /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
+    if (grpc_lb_glb_trace) {
+      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
+              (intptr_t)wc_arg->rr_policy);
+    }
+    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick");
+
+    /* add the load reporting initial metadata */
+    initial_metadata_add_lb_token(pick_args->initial_metadata,
+                                  pick_args->lb_token_mdelem_storage,
+                                  GRPC_MDELEM_REF(wc_arg->lb_token));
+
+    gpr_free(wc_arg);
+  }
+  /* else, the pending pick will be registered and taken care of by the
+   * pending pick list inside the RR policy (glb_policy->rr_policy).
+   * Eventually, wrapped_on_complete will be called, which will -among other
+   * things- add the LB token to the call's initial metadata */
+
+  return pick_done;
+}
+
+static grpc_lb_policy *create_rr_locked(
+    grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist,
+    glb_lb_policy *glb_policy) {
   GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
 
   grpc_lb_policy_args args;
@@ -446,18 +469,21 @@ static grpc_lb_policy *create_rr(grpc_exec_ctx *exec_ctx,
   return rr;
 }
 
-static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
-                        grpc_error *error) {
+static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
+                               glb_lb_policy *glb_policy, grpc_error *error) {
   GPR_ASSERT(glb_policy->serverlist != NULL &&
              glb_policy->serverlist->num_servers > 0);
   glb_policy->rr_policy =
-      create_rr(exec_ctx, glb_policy->serverlist, glb_policy);
+      create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
 
   if (grpc_lb_glb_trace) {
     gpr_log(GPR_INFO, "Created RR policy (0x%" PRIxPTR ")",
             (intptr_t)glb_policy->rr_policy);
   }
   GPR_ASSERT(glb_policy->rr_policy != NULL);
+  grpc_pollset_set_add_pollset_set(exec_ctx,
+                                   glb_policy->rr_policy->interested_parties,
+                                   glb_policy->base.interested_parties);
   glb_policy->rr_connectivity->state = grpc_lb_policy_check_connectivity(
       exec_ctx, glb_policy->rr_policy, &error);
   grpc_lb_policy_notify_on_state_change(
@@ -478,11 +504,9 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
       gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
               (intptr_t)glb_policy->rr_policy);
     }
-    grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, &pp->pick_args,
-                        pp->target,
-                        (void **)&pp->wrapped_on_complete_arg.lb_token,
-                        &pp->wrapped_on_complete);
-    pp->wrapped_on_complete_arg.owning_pending_node = pp;
+    pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
+                                 &pp->pick_args, pp->target,
+                                 &pp->wrapped_on_complete_arg);
   }
 
   pending_ping *pping;
@@ -495,8 +519,7 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
               (intptr_t)glb_policy->rr_policy);
     }
     grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy,
-                            &pping->wrapped_notify);
-    pping->wrapped_notify_arg.owning_pending_node = pping;
+                            &pping->wrapped_notify_arg.wrapper_closure);
   }
 }
 
@@ -509,13 +532,16 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
     if (glb_policy->serverlist != NULL) {
       /* a RR policy is shutting down but there's a serverlist available ->
        * perform a handover */
-      rr_handover(exec_ctx, glb_policy, error);
+      gpr_mu_lock(&glb_policy->mu);
+      rr_handover_locked(exec_ctx, glb_policy, error);
+      gpr_mu_unlock(&glb_policy->mu);
     } else {
       /* shutting down and no new serverlist available. Bail out. */
       gpr_free(rr_conn_data);
     }
   } else {
     if (error == GRPC_ERROR_NONE) {
+      gpr_mu_lock(&glb_policy->mu);
      /* RR not shutting down. Mimic the RR's policy state */
       grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
                                   rr_conn_data->state, GRPC_ERROR_REF(error),
@@ -524,6 +550,7 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
       grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
                                             &rr_conn_data->state,
                                             &rr_conn_data->on_change);
+      gpr_mu_unlock(&glb_policy->mu);
     } else { /* error */
       gpr_free(rr_conn_data);
     }
@@ -648,15 +675,15 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   while (pp != NULL) {
     pending_pick *next = pp->next;
     *pp->target = NULL;
-    grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete, GRPC_ERROR_NONE,
-                        NULL);
+    grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+                        GRPC_ERROR_NONE, NULL);
     pp = next;
   }
 
   while (pping != NULL) {
     pending_ping *next = pping->next;
-    grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify, GRPC_ERROR_NONE,
-                        NULL);
+    grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+                        GRPC_ERROR_NONE, NULL);
     pping = next;
   }
@@ -686,11 +713,9 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   while (pp != NULL) {
     pending_pick *next = pp->next;
     if (pp->target == target) {
-      grpc_polling_entity_del_from_pollset_set(
-          exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
       *target = NULL;
       grpc_exec_ctx_sched(
-          exec_ctx, &pp->wrapped_on_complete,
+          exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
           GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
     } else {
       pp->next = glb_policy->pending_picks;
@@ -719,10 +744,8 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_polling_entity_del_from_pollset_set(
-          exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
       grpc_exec_ctx_sched(
-          exec_ctx, &pp->wrapped_on_complete,
+          exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
           GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
     } else {
       pp->next = glb_policy->pending_picks;
@@ -775,39 +798,20 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
               (intptr_t)glb_policy->rr_policy);
     }
     GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
-    memset(&glb_policy->wc_arg, 0, sizeof(wrapped_rr_closure_arg));
-    glb_policy->wc_arg.rr_policy = glb_policy->rr_policy;
-    glb_policy->wc_arg.target = target;
-    glb_policy->wc_arg.wrapped_closure = on_complete;
-    glb_policy->wc_arg.lb_token_mdelem_storage =
-        pick_args->lb_token_mdelem_storage;
-    glb_policy->wc_arg.initial_metadata = pick_args->initial_metadata;
-    glb_policy->wc_arg.owning_pending_node = NULL;
-    grpc_closure_init(&glb_policy->wrapped_on_complete, wrapped_rr_closure,
-                      &glb_policy->wc_arg);
-
-    pick_done =
-        grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, pick_args, target,
-                            (void **)&glb_policy->wc_arg.lb_token,
-                            &glb_policy->wrapped_on_complete);
-    if (pick_done) {
-      /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
-      if (grpc_lb_glb_trace) {
-        gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
-                (intptr_t)glb_policy->wc_arg.rr_policy);
-      }
-      GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->wc_arg.rr_policy, "glb_pick");
-      /* add the load reporting initial metadata */
-      initial_metadata_add_lb_token(
-          pick_args->initial_metadata, pick_args->lb_token_mdelem_storage,
-          GRPC_MDELEM_REF(glb_policy->wc_arg.lb_token));
-    }
+    wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg));
+    memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg));
+
+    grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg);
+    wc_arg->rr_policy = glb_policy->rr_policy;
+    wc_arg->target = target;
+    wc_arg->wrapped_closure = on_complete;
+    wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
+    wc_arg->initial_metadata = pick_args->initial_metadata;
+    wc_arg->free_when_done = wc_arg;
+    pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
+                                             pick_args, target, wc_arg);
   } else {
-    /* else, the pending pick will be registered and taken care of by the
-     * pending pick list inside the RR policy (glb_policy->rr_policy) */
-    grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
-                                           glb_policy->base.interested_parties);
     add_pending_pick(&glb_policy->pending_picks, pick_args, target,
                      on_complete);
@@ -931,7 +935,7 @@ static lb_client_data *lb_client_data_create(glb_lb_policy *glb_policy) {
 
   /* Note the following LB call progresses every time there's activity in \a
    * glb_policy->base.interested_parties, which is comprised of the polling
-   * entities passed to glb_pick(). */
+   * entities from \a client_channel. */
   lb_client->lb_call = grpc_channel_create_pollset_set_call(
       glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
       glb_policy->base.interested_parties,
@@ -1070,12 +1074,13 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
   if (serverlist != NULL) {
     gpr_slice_unref(response_slice);
     if (grpc_lb_glb_trace) {
-      gpr_log(GPR_INFO, "Serverlist with %zu servers received",
-              serverlist->num_servers);
+      gpr_log(GPR_INFO, "Serverlist with %lu servers received",
+              (unsigned long)serverlist->num_servers);
     }
 
     /* update serverlist */
     if (serverlist->num_servers > 0) {
+      gpr_mu_lock(&lb_client->glb_policy->mu);
       if (grpc_grpclb_serverlist_equals(lb_client->glb_policy->serverlist,
                                         serverlist)) {
         if (grpc_lb_glb_trace) {
@@ -1093,7 +1098,7 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
       if (lb_client->glb_policy->rr_policy == NULL) {
         /* initial "handover", in this case from a null RR policy, meaning
          * it'll just create the first RR policy instance */
-        rr_handover(exec_ctx, lb_client->glb_policy, error);
+        rr_handover_locked(exec_ctx, lb_client->glb_policy, error);
       } else {
         /* unref the RR policy, eventually leading to its substitution with a
          * new one constructed from the received serverlist (see
@@ -1101,6 +1106,7 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
         GRPC_LB_POLICY_UNREF(exec_ctx, lb_client->glb_policy->rr_policy,
                              "serverlist_received");
       }
+      gpr_mu_unlock(&lb_client->glb_policy->mu);
     } else {
       if (grpc_lb_glb_trace) {
         gpr_log(GPR_INFO,
@@ -1155,10 +1161,10 @@ static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
   if (grpc_lb_glb_trace) {
     gpr_log(GPR_INFO,
             "status from lb server received. Status = %d, Details = '%s', "
-            "Capaticy "
-            "= %zu",
+            "Capacity "
+            "= %lu",
             lb_client->status, lb_client->status_details,
-            lb_client->status_details_capacity);
+            (unsigned long)lb_client->status_details_capacity);
   }
   /* TODO(dgq): deal with stream termination properly (fire up another one?
    * fail the original call?) */
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index 961a0c9b19..6533327343 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -39,7 +39,6 @@
 typedef struct pending_pick {
   struct pending_pick *next;
-  grpc_polling_entity *pollent;
   uint32_t initial_metadata_flags;
   grpc_connected_subchannel **target;
   grpc_closure *on_complete;
 }
@@ -119,8 +118,6 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   while (pp != NULL) {
     pending_pick *next = pp->next;
     *pp->target = NULL;
-    grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                             p->base.interested_parties);
     grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
     gpr_free(pp);
     pp = next;
@@ -138,8 +135,6 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   while (pp != NULL) {
     pending_pick *next = pp->next;
     if (pp->target == target) {
-      grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                               p->base.interested_parties);
       *target = NULL;
       grpc_exec_ctx_sched(
           exec_ctx, pp->on_complete,
@@ -168,8 +163,6 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                               p->base.interested_parties);
       grpc_exec_ctx_sched(
           exec_ctx, pp->on_complete,
           GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
@@ -229,11 +222,8 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     if (!p->started_picking) {
       start_picking(exec_ctx, p);
     }
-    grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
-                                           p->base.interested_parties);
     pp = gpr_malloc(sizeof(*pp));
     pp->next = p->pending_picks;
-    pp->pollent = pick_args->pollent;
     pp->target = target;
     pp->initial_metadata_flags = pick_args->initial_metadata_flags;
     pp->on_complete = on_complete;
@@ -319,8 +309,6 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
       while ((pp = p->pending_picks)) {
         p->pending_picks = pp->next;
         *pp->target = selected;
-        grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                                 p->base.interested_parties);
         grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
         gpr_free(pp);
       }
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index 930fa86aca..9bd3f9da24 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -78,9 +78,6 @@ int grpc_lb_round_robin_trace = 0;
 typedef struct pending_pick {
   struct pending_pick *next;
 
-  /* polling entity for the pick()'s async notification */
-  grpc_polling_entity *pollent;
-
   /* output argument where to store the pick()ed user_data. It'll be NULL if no
    * such data is present or there's an error (the definite test for errors is
    * \a target being NULL). */
@@ -318,8 +315,6 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   while (pp != NULL) {
     pending_pick *next = pp->next;
     if (pp->target == target) {
-      grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                               p->base.interested_parties);
       *target = NULL;
       grpc_exec_ctx_sched(
           exec_ctx, pp->on_complete,
@@ -348,8 +343,6 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                               p->base.interested_parties);
       *pp->target = NULL;
       grpc_exec_ctx_sched(
           exec_ctx, pp->on_complete,
@@ -403,7 +396,6 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   gpr_mu_lock(&p->mu);
   if ((selected = peek_next_connected_locked(p))) {
     /* readily available, report right away */
-    gpr_mu_unlock(&p->mu);
     *target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
 
     if (user_data != NULL) {
@@ -416,17 +408,15 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     }
     /* only advance the last picked pointer if the selection was used */
     advance_last_picked_locked(p);
+    gpr_mu_unlock(&p->mu);
     return 1;
   } else {
     /* no pick currently available. Save for later in list of pending picks */
     if (!p->started_picking) {
       start_picking(exec_ctx, p);
     }
-    grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
-                                           p->base.interested_parties);
     pp = gpr_malloc(sizeof(*pp));
     pp->next = p->pending_picks;
-    pp->pollent = pick_args->pollent;
     pp->target = target;
     pp->on_complete = on_complete;
     pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -482,8 +472,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
                   "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
                   (void *)selected->subchannel, (void *)selected);
         }
-        grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
-                                                 p->base.interested_parties);
         grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
         gpr_free(pp);
       }
diff --git a/src/core/ext/load_reporting/load_reporting.h b/src/core/ext/load_reporting/load_reporting.h
index e37817d8c2..e13097654d 100644
--- a/src/core/ext/load_reporting/load_reporting.h
+++ b/src/core/ext/load_reporting/load_reporting.h
@@ -37,13 +37,21 @@
 #include <grpc/impl/codegen/grpc_types.h>
 #include "src/core/lib/channel/channel_stack.h"
 
-/** Metadata key for initial metadata coming from clients */
-/* TODO(dgq): change to the final value TBD */
-#define GRPC_LOAD_REPORTING_INITIAL_MD_KEY "load-reporting-initial"
+/** Metadata key for the gRPC LB load balancer token.
+ *
+ * The value corresponding to this key is an opaque token that is given to the
+ * frontend as part of each pick; the frontend sends this token to the backend
+ * in each request it sends when using that pick. The token is used by the
+ * backend to verify the request and to allow the backend to report load to the
+ * gRPC LB system. */
+#define GRPC_LB_TOKEN_MD_KEY "lb-token"
 
-/** Metadata key for trailing metadata from servers */
-/* TODO(dgq): change to the final value TBD */
-#define GRPC_LOAD_REPORTING_TRAILING_MD_KEY "load-reporting-trailing"
+/** Metadata key for gRPC LB cost reporting.
+ *
+ * The value corresponding to this key is an opaque binary blob reported by the
+ * backend as part of its trailing metadata containing cost information for the
+ * call. */
+#define GRPC_LB_COST_MD_KEY "lb-cost"
 
 /** Identifiers for the invocation point of the users LR callback */
 typedef enum grpc_load_reporting_source {
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index 394f0cb832..22bf36367f 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -75,7 +75,7 @@ static grpc_mdelem *recv_md_filter(void *user_data, grpc_mdelem *md) {
 
   if (md->key == GRPC_MDSTR_PATH) {
     calld->service_method = grpc_mdstr_as_c_string(md->value);
-  } else if (md->key == GRPC_MDSTR_LOAD_REPORTING_INITIAL) {
+  } else if (md->key == GRPC_MDSTR_LB_TOKEN) {
     calld->initial_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
     return NULL;
   }
@@ -193,7 +193,7 @@ static grpc_mdelem *lr_trailing_md_filter(void *user_data, grpc_mdelem *md) {
   grpc_call_element *elem = user_data;
   call_data *calld = elem->call_data;
 
-  if (md->key == GRPC_MDSTR_LOAD_REPORTING_TRAILING) {
+  if (md->key == GRPC_MDSTR_LB_COST) {
     calld->trailing_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
     return NULL;
   }
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index da3e284fcf..563271f4f8 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -61,13 +61,12 @@ typedef struct server_secure_state {
   grpc_server_credentials *creds;
   bool is_shutdown;
   gpr_mu mu;
-  gpr_refcount refcount;
-  grpc_closure destroy_closure;
-  grpc_closure *destroy_callback;
+  grpc_closure tcp_server_shutdown_complete;
+  grpc_closure *server_destroy_listener_done;
 } server_secure_state;
 
 typedef struct server_secure_connect {
-  server_secure_state *state;
+  server_secure_state *server_state;
   grpc_pollset *accepting_pollset;
   grpc_tcp_server_acceptor *acceptor;
   grpc_handshake_manager *handshake_mgr;
@@ -77,39 +76,28 @@ typedef struct server_secure_connect {
   grpc_channel_args *args;
 } server_secure_connect;
 
-static void state_ref(server_secure_state *state) { gpr_ref(&state->refcount); }
-
-static void state_unref(server_secure_state *state) {
-  if (gpr_unref(&state->refcount)) {
-    /* ensure all threads have unlocked */
-    gpr_mu_lock(&state->mu);
-    gpr_mu_unlock(&state->mu);
-    /* clean up */
-    GRPC_SECURITY_CONNECTOR_UNREF(&state->sc->base, "server");
-    grpc_server_credentials_unref(state->creds);
-    gpr_free(state);
-  }
-}
-
 static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
                                      grpc_security_status status,
                                      grpc_endpoint *secure_endpoint,
                                      grpc_auth_context *auth_context) {
-  server_secure_connect *state = statep;
+  server_secure_connect *connection_state = statep;
   if (status == GRPC_SECURITY_OK) {
     if (secure_endpoint) {
-      gpr_mu_lock(&state->state->mu);
-      if (!state->state->is_shutdown) {
+      gpr_mu_lock(&connection_state->server_state->mu);
+      if (!connection_state->server_state->is_shutdown) {
         grpc_transport *transport = grpc_create_chttp2_transport(
-            exec_ctx, grpc_server_get_channel_args(state->state->server),
+            exec_ctx, grpc_server_get_channel_args(
+                          connection_state->server_state->server),
             secure_endpoint, 0);
         grpc_arg args_to_add[2];
-        args_to_add[0] = grpc_server_credentials_to_arg(state->state->creds);
+        args_to_add[0] = grpc_server_credentials_to_arg(
+            connection_state->server_state->creds);
         args_to_add[1] = grpc_auth_context_to_arg(auth_context);
         grpc_channel_args *args_copy = grpc_channel_args_copy_and_add(
-            state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
-        grpc_server_setup_transport(exec_ctx, state->state->server, transport,
-                                    state->accepting_pollset, args_copy);
+            connection_state->args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
+        grpc_server_setup_transport(
+            exec_ctx, connection_state->server_state->server, transport,
+            connection_state->accepting_pollset, args_copy);
         grpc_channel_args_destroy(args_copy);
         grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL);
       } else {
@@ -117,21 +105,21 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
          * gone away. */
         grpc_endpoint_destroy(exec_ctx, secure_endpoint);
       }
-      gpr_mu_unlock(&state->state->mu);
+      gpr_mu_unlock(&connection_state->server_state->mu);
     }
   } else {
     gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
  }
-  grpc_channel_args_destroy(state->args);
-  state_unref(state->state);
-  gpr_free(state);
+  grpc_channel_args_destroy(connection_state->args);
+  grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
+  gpr_free(connection_state);
 }
 
 static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
                               grpc_channel_args *args,
                               gpr_slice_buffer *read_buffer, void *user_data,
                               grpc_error *error) {
-  server_secure_connect *state = user_data;
+  server_secure_connect *connection_state = user_data;
   if (error != GRPC_ERROR_NONE) {
     const char *error_str = grpc_error_string(error);
     gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str);
@@ -139,81 +127,107 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, grpc_endpoint *endpoint,
     GRPC_ERROR_UNREF(error);
     grpc_channel_args_destroy(args);
     gpr_free(read_buffer);
-    grpc_handshake_manager_shutdown(exec_ctx, state->handshake_mgr);
-    grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr);
-    state_unref(state->state);
-    gpr_free(state);
+    grpc_handshake_manager_shutdown(exec_ctx, connection_state->handshake_mgr);
+    grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
+    grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp);
+    gpr_free(connection_state);
     return;
   }
-  grpc_handshake_manager_destroy(exec_ctx, state->handshake_mgr);
-  state->handshake_mgr = NULL;
+  grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
+  connection_state->handshake_mgr = NULL;
   // TODO(roth, jboeuf): Convert security connector handshaking to use new
   // handshake API, and then move the code from on_secure_handshake_done()
   // into this function.
-  state->args = args;
+  connection_state->args = args;
   grpc_server_security_connector_do_handshake(
-      exec_ctx, state->state->sc, state->acceptor, endpoint, read_buffer,
-      state->deadline, on_secure_handshake_done, state);
+      exec_ctx, connection_state->server_state->sc, connection_state->acceptor,
+      endpoint, read_buffer, connection_state->deadline,
+      on_secure_handshake_done, connection_state);
 }
 
 static void on_accept(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *tcp,
                       grpc_pollset *accepting_pollset,
                       grpc_tcp_server_acceptor *acceptor) {
-  server_secure_connect *state = gpr_malloc(sizeof(*state));
-  state->state = statep;
-  state_ref(state->state);
-  state->accepting_pollset = accepting_pollset;
-  state->acceptor = acceptor;
-  state->handshake_mgr = grpc_handshake_manager_create();
+  server_secure_state *server_state = statep;
+  server_secure_connect *connection_state = NULL;
+  gpr_mu_lock(&server_state->mu);
+  if (server_state->is_shutdown) {
+    gpr_mu_unlock(&server_state->mu);
+    grpc_endpoint_destroy(exec_ctx, tcp);
+    return;
+  }
+  gpr_mu_unlock(&server_state->mu);
+  grpc_tcp_server_ref(server_state->tcp);
+  connection_state = gpr_malloc(sizeof(*connection_state));
+  connection_state->server_state = server_state;
+  connection_state->accepting_pollset = accepting_pollset;
+  connection_state->acceptor = acceptor;
+  connection_state->handshake_mgr = grpc_handshake_manager_create();
   // TODO(roth): We should really get this timeout value from channel
   // args instead of hard-coding it.
-  state->deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
-                                 gpr_time_from_seconds(120, GPR_TIMESPAN));
+  connection_state->deadline = gpr_time_add(
+      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
   grpc_handshake_manager_do_handshake(
-      exec_ctx, state->handshake_mgr, tcp,
-      grpc_server_get_channel_args(state->state->server), state->deadline,
-      acceptor, on_handshake_done, state);
+      exec_ctx, connection_state->handshake_mgr, tcp,
+      grpc_server_get_channel_args(connection_state->server_state->server),
+      connection_state->deadline, acceptor, on_handshake_done,
+      connection_state);
 }
 
 /* Server callback: start listening on our ports */
-static void start(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
-                  grpc_pollset **pollsets, size_t pollset_count) {
-  server_secure_state *state = statep;
-  grpc_tcp_server_start(exec_ctx, state->tcp, pollsets, pollset_count,
-                        on_accept, state);
+static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
                                  void *statep, grpc_pollset **pollsets,
+                                  size_t pollset_count) {
+  server_secure_state *server_state = statep;
+  gpr_mu_lock(&server_state->mu);
+  server_state->is_shutdown = false;
+  gpr_mu_unlock(&server_state->mu);
+  grpc_tcp_server_start(exec_ctx, server_state->tcp, pollsets, pollset_count,
+                        on_accept, server_state);
 }
 
-static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep,
-                         grpc_error *error) {
-  server_secure_state *state = statep;
-  if (state->destroy_callback != NULL) {
-    state->destroy_callback->cb(exec_ctx, state->destroy_callback->cb_arg,
-                                GRPC_ERROR_REF(error));
+static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *statep,
+                                         grpc_error *error) {
+  server_secure_state *server_state = statep;
+  /* ensure all threads have unlocked */
+  gpr_mu_lock(&server_state->mu);
+  grpc_closure *destroy_done = server_state->server_destroy_listener_done;
+  GPR_ASSERT(server_state->is_shutdown);
+  gpr_mu_unlock(&server_state->mu);
+  /* clean up */
+  grpc_server_security_connector_shutdown(exec_ctx, server_state->sc);
+
+  /* Flush queued work before a synchronous unref. */
+  grpc_exec_ctx_flush(exec_ctx);
+  GRPC_SECURITY_CONNECTOR_UNREF(&server_state->sc->base, "server");
+  grpc_server_credentials_unref(server_state->creds);
+
+  if (destroy_done != NULL) {
+    destroy_done->cb(exec_ctx, destroy_done->cb_arg, GRPC_ERROR_REF(error));
+    grpc_exec_ctx_flush(exec_ctx);
   }
-  grpc_server_security_connector_shutdown(exec_ctx, state->sc);
-  state_unref(state);
+  gpr_free(server_state);
 }
 
-/* Server callback: destroy the tcp listener (so we don't generate further
-   callbacks) */
-static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *statep,
-                    grpc_closure *callback) {
-  server_secure_state *state = statep;
+static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
+                                    grpc_server *server, void *statep,
+                                    grpc_closure *callback) {
+  server_secure_state *server_state = statep;
   grpc_tcp_server *tcp;
-  gpr_mu_lock(&state->mu);
-  state->is_shutdown = true;
-  state->destroy_callback = callback;
-  tcp = state->tcp;
-  gpr_mu_unlock(&state->mu);
+  gpr_mu_lock(&server_state->mu);
+  server_state->is_shutdown = true;
+  server_state->server_destroy_listener_done = callback;
+  tcp = server_state->tcp;
+  gpr_mu_unlock(&server_state->mu);
   grpc_tcp_server_shutdown_listeners(exec_ctx, tcp);
-  grpc_tcp_server_unref(exec_ctx, tcp);
+  grpc_tcp_server_unref(exec_ctx, server_state->tcp);
 }
 
 int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
                                       grpc_server_credentials *creds) {
   grpc_resolved_addresses *resolved = NULL;
   grpc_tcp_server *tcp = NULL;
-  server_secure_state *state = NULL;
+  server_secure_state *server_state = NULL;
   size_t i;
   size_t count = 0;
   int port_num = -1;
@@ -253,22 +267,22 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
   if (err != GRPC_ERROR_NONE) {
     goto error;
   }
-  state = gpr_malloc(sizeof(*state));
-  memset(state, 0, sizeof(*state));
-  grpc_closure_init(&state->destroy_closure, destroy_done, state);
-  err = grpc_tcp_server_create(&state->destroy_closure,
+  server_state = gpr_malloc(sizeof(*server_state));
+  memset(server_state, 0, sizeof(*server_state));
+  grpc_closure_init(&server_state->tcp_server_shutdown_complete,
+                    tcp_server_shutdown_complete, server_state);
+  err = grpc_tcp_server_create(&server_state->tcp_server_shutdown_complete,
                                grpc_server_get_channel_args(server), &tcp);
   if (err != GRPC_ERROR_NONE) {
     goto error;
   }
-  state->server = server;
-  state->tcp = tcp;
-  state->sc = sc;
-  state->creds = grpc_server_credentials_ref(creds);
-  state->is_shutdown = false;
-  gpr_mu_init(&state->mu);
-  gpr_ref_init(&state->refcount, 1);
+  server_state->server = server;
+  server_state->tcp = tcp;
+  server_state->sc = sc;
+  server_state->creds = grpc_server_credentials_ref(creds);
+  server_state->is_shutdown = true;
+  gpr_mu_init(&server_state->mu);
 
   errors = gpr_malloc(sizeof(*errors) * resolved->naddrs);
   for (i = 0; i < resolved->naddrs; i++) {
@@ -313,7 +327,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
   grpc_resolved_addresses_destroy(resolved);
 
   /* Register with the server only upon success */
-  grpc_server_add_listener(&exec_ctx, server, state, start, destroy);
+  grpc_server_add_listener(&exec_ctx, server, server_state,
+                           server_start_listener, server_destroy_listener);
   grpc_exec_ctx_finish(&exec_ctx);
   return port_num;
 
@@ -334,10 +349,11 @@ error:
     grpc_tcp_server_unref(&exec_ctx, tcp);
   } else {
     if (sc) {
+      grpc_exec_ctx_flush(&exec_ctx);
       GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "server");
     }
-    if (state) {
-      gpr_free(state);
+    if (server_state) {
+      gpr_free(server_state);
    }
   }
   grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index 5a25d39a0c..9a390699b4 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -101,8 +101,8 @@ grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s);
 void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
                                            grpc_closure *shutdown_starting);
 
-/* If the refcount drops to zero, delete s, and call (exec_ctx==NULL) or enqueue
-   a call (exec_ctx!=NULL) to shutdown_complete. */
+/* If the refcount drops to zero, enqueue calls on exec_ctx to
+   shutdown_listeners and delete s. */
 void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s);
 
 /* Shutdown the fds of listeners. */
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 2d3f6cf9a7..73df5477e6 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -191,6 +191,9 @@ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
 }
 
 static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+  gpr_mu_lock(&s->mu);
+  GPR_ASSERT(s->shutdown);
+  gpr_mu_unlock(&s->mu);
   if (s->shutdown_complete != NULL) {
     grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
   }
@@ -652,6 +655,7 @@ unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
                                        unsigned port_index) {
   unsigned num_fds = 0;
   grpc_tcp_listener *sp;
+  gpr_mu_lock(&s->mu);
   for (sp = s->head; sp && port_index != 0; sp = sp->next) {
     if (!sp->is_sibling) {
       --port_index;
@@ -659,12 +663,15 @@ unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
   }
   for (; sp; sp = sp->sibling, ++num_fds)
     ;
+  gpr_mu_unlock(&s->mu);
   return num_fds;
 }
 
 int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
                             unsigned fd_index) {
   grpc_tcp_listener *sp;
+  int fd;
+  gpr_mu_lock(&s->mu);
   for (sp = s->head; sp && port_index != 0; sp = sp->next) {
     if (!sp->is_sibling) {
       --port_index;
@@ -673,10 +680,12 @@ int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
   for (; sp && fd_index != 0; sp = sp->sibling, --fd_index)
     ;
   if (sp) {
-    return sp->fd;
+    fd = sp->fd;
   } else {
-    return -1;
+    fd = -1;
   }
+  gpr_mu_unlock(&s->mu);
+  return fd;
 }
 
 void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
@@ -722,7 +731,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
 }
 
 grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
-  gpr_ref(&s->refs);
+  gpr_ref_non_zero(&s->refs);
   return s;
 }
 
@@ -736,19 +745,11 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
 
 void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
   if (gpr_unref(&s->refs)) {
-    /* Complete shutdown_starting work before destroying. */
-    grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_tcp_server_shutdown_listeners(exec_ctx, s);
     gpr_mu_lock(&s->mu);
-    grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+    grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
     gpr_mu_unlock(&s->mu);
-    if (exec_ctx == NULL) {
-      grpc_exec_ctx_flush(&local_exec_ctx);
-      tcp_server_destroy(&local_exec_ctx, s);
-      grpc_exec_ctx_finish(&local_exec_ctx);
-    } else {
-      grpc_exec_ctx_finish(&local_exec_ctx);
-      tcp_server_destroy(exec_ctx, s);
-    }
+    tcp_server_destroy(exec_ctx, s);
   }
 }
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index 1b125e7005..4ff05601fa 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -139,7 +139,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 }
 
 grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
-  gpr_ref(&s->refs);
+  gpr_ref_non_zero(&s->refs);
   return s;
 }
 
@@ -174,19 +174,11 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
 
 void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
   if (gpr_unref(&s->refs)) {
-    /* Complete shutdown_starting work before destroying. */
-    grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_tcp_server_shutdown_listeners(exec_ctx, s);
     gpr_mu_lock(&s->mu);
-    grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+    grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
     gpr_mu_unlock(&s->mu);
-    if (exec_ctx == NULL) {
-      grpc_exec_ctx_flush(&local_exec_ctx);
-      tcp_server_destroy(&local_exec_ctx, s);
-      grpc_exec_ctx_finish(&local_exec_ctx);
-    } else {
-      grpc_exec_ctx_finish(&local_exec_ctx);
-      tcp_server_destroy(exec_ctx, s);
-    }
+    tcp_server_destroy(exec_ctx, s);
   }
 }
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.c b/src/core/lib/security/credentials/composite/composite_credentials.c
index 850e41e646..d55d00b7b6 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.c
+++ b/src/core/lib/security/credentials/composite/composite_credentials.c
@@ -242,8 +242,17 @@ static grpc_security_status composite_channel_create_security_connector(
   return status;
 }
 
+static grpc_channel_credentials *
+composite_channel_duplicate_without_call_credentials(
+    grpc_channel_credentials *creds) {
+  grpc_composite_channel_credentials *c =
+      (grpc_composite_channel_credentials *)creds;
+  return grpc_channel_credentials_ref(c->inner_creds);
+}
+
 static grpc_channel_credentials_vtable composite_channel_credentials_vtable = {
-    composite_channel_destruct, composite_channel_create_security_connector};
+    composite_channel_destruct, composite_channel_create_security_connector,
+    composite_channel_duplicate_without_call_credentials};
 
 grpc_channel_credentials *grpc_composite_channel_credentials_create(
     grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds,
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.h b/src/core/lib/security/credentials/composite/composite_credentials.h
index 0d8966f464..f8425c2b76 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.h
+++ b/src/core/lib/security/credentials/composite/composite_credentials.h
@@ -53,7 +53,7 @@ grpc_call_credentials *grpc_credentials_contains_type(
     grpc_call_credentials *creds, const char *type,
     grpc_call_credentials **composite_creds);
 
-/* -- Channel composite credentials. -- */
+/* -- Composite channel credentials. -- */
 
 typedef struct {
   grpc_channel_credentials base;
@@ -61,7 +61,7 @@ typedef struct {
   grpc_call_credentials *call_creds;
 } grpc_composite_channel_credentials;
 
-/* -- Composite credentials. -- */
+/* -- Composite call credentials. -- */
 
 typedef struct {
   grpc_call_credentials base;
diff --git a/src/core/lib/security/credentials/credentials.c b/src/core/lib/security/credentials/credentials.c
index 029a357261..1149e5c2ed 100644
--- a/src/core/lib/security/credentials/credentials.c
+++ b/src/core/lib/security/credentials/credentials.c
@@ -138,6 +138,18 @@ grpc_security_status grpc_channel_credentials_create_security_connector(
       channel_creds, NULL, target, args, sc, new_args);
 }
 
+grpc_channel_credentials *
+grpc_channel_credentials_duplicate_without_call_credentials(
+    grpc_channel_credentials *channel_creds) {
+  if (channel_creds != NULL && channel_creds->vtable != NULL &&
+      channel_creds->vtable->duplicate_without_call_credentials != NULL) {
+    return channel_creds->vtable->duplicate_without_call_credentials(
+        channel_creds);
+  } else {
+    return grpc_channel_credentials_ref(channel_creds);
+  }
+}
+
 grpc_server_credentials *grpc_server_credentials_ref(
     grpc_server_credentials *creds) {
   if (creds == NULL) return NULL;
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index 8e9d842ead..6fb5b5b15a 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -107,6 +107,9 @@ typedef struct {
       grpc_channel_credentials *c, grpc_call_credentials *call_creds,
       const char *target, const grpc_channel_args *args,
       grpc_channel_security_connector **sc, grpc_channel_args **new_args);
+
+  grpc_channel_credentials *(*duplicate_without_call_credentials)(
+      grpc_channel_credentials *c);
 } grpc_channel_credentials_vtable;
 
 struct grpc_channel_credentials {
@@ -128,6 +131,13 @@ grpc_security_status grpc_channel_credentials_create_security_connector(
     const grpc_channel_args *args, grpc_channel_security_connector **sc,
     grpc_channel_args **new_args);
 
+/* Creates a version of the channel credentials without any attached call
+   credentials. This can be used in order to open a channel to a non-trusted
+   gRPC load balancer. */
+grpc_channel_credentials *
+grpc_channel_credentials_duplicate_without_call_credentials(
+    grpc_channel_credentials *creds);
+
 /* --- grpc_credentials_md. --- */
 
 typedef struct {
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.c
index 51cafd986f..ea4cb76fb9 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.c
+++ b/src/core/lib/security/credentials/fake/fake_credentials.c
@@ -61,7 +61,7 @@ fake_transport_security_server_create_security_connector(
 
 static grpc_channel_credentials_vtable
     fake_transport_security_credentials_vtable = {
-        NULL, fake_transport_security_create_security_connector};
+        NULL, fake_transport_security_create_security_connector, NULL};
 
 static grpc_server_credentials_vtable
     fake_transport_security_server_credentials_vtable = {
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.c b/src/core/lib/security/credentials/ssl/ssl_credentials.c
index 545bca9d98..0dc1fccec4 100644
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.c
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.c
@@ -95,7 +95,7 @@ static grpc_security_status ssl_create_security_connector(
 }
 
 static grpc_channel_credentials_vtable ssl_vtable = {
-    ssl_destruct, ssl_create_security_connector};
+    ssl_destruct, ssl_create_security_connector, NULL};
 
 static void ssl_build_config(const char *pem_root_certs,
                              grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 5e0352a467..f019ef156a 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -126,9 +126,9 @@ const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
     "if-range",
     "if-unmodified-since",
     "last-modified",
+    "lb-cost",
+    "lb-token",
     "link",
-    "load-reporting-initial",
-    "load-reporting-trailing",
     "location",
     "max-forwards",
     ":method",
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 5b9ee1a60a..e0a8196419 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -175,12 +175,12 @@ extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
 #define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[62])
 /* "last-modified" */
 #define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[63])
+/* "lb-cost" */
+#define GRPC_MDSTR_LB_COST (&grpc_static_mdstr_table[64])
+/* "lb-token" */
+#define GRPC_MDSTR_LB_TOKEN (&grpc_static_mdstr_table[65])
 /* "link" */
-#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[64])
-/* "load-reporting-initial" */
-#define GRPC_MDSTR_LOAD_REPORTING_INITIAL (&grpc_static_mdstr_table[65])
-/* "load-reporting-trailing" */
-#define GRPC_MDSTR_LOAD_REPORTING_TRAILING (&grpc_static_mdstr_table[66])
+#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[66])
 /* "location" */
 #define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[67])
 /* "max-forwards" */
@@ -337,13 +337,12 @@ extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
 #define GRPC_MDELEM_IF_UNMODIFIED_SINCE_EMPTY (&grpc_static_mdelem_table[44])
 /* "last-modified": "" */
 #define GRPC_MDELEM_LAST_MODIFIED_EMPTY (&grpc_static_mdelem_table[45])
+/* "lb-cost": "" */
+#define GRPC_MDELEM_LB_COST_EMPTY (&grpc_static_mdelem_table[46])
+/* "lb-token": "" */
+#define GRPC_MDELEM_LB_TOKEN_EMPTY (&grpc_static_mdelem_table[47])
 /* "link": "" */
-#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[46])
-/* "load-reporting-initial": "" */
-#define GRPC_MDELEM_LOAD_REPORTING_INITIAL_EMPTY (&grpc_static_mdelem_table[47])
-/* "load-reporting-trailing": "" */
-#define GRPC_MDELEM_LOAD_REPORTING_TRAILING_EMPTY \
-  (&grpc_static_mdelem_table[48])
+#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[48])
/* "location": "" */
 #define GRPC_MDELEM_LOCATION_EMPTY (&grpc_static_mdelem_table[49])
 /* "max-forwards": "" */
 #define GRPC_MDELEM_MAX_FORWARDS_EMPTY (&grpc_static_mdelem_table[50])