author     Mark D. Roth <roth@google.com>    2016-10-04 07:24:47 -0700
committer  Mark D. Roth <roth@google.com>    2016-10-04 07:24:47 -0700
commit     0b259a8354df4c70c9152e886c71c32e4c7e4e34
tree       85c826256691ccb99a2e337f5c334a2a42e2785b /src/core
parent     c532d83ea00ec271aac48527f664b19ede4042e4
parent     9ab8ba73d6887ea902528426480dafb14b1c8214
Merge remote-tracking branch 'upstream/master' into service_config
Diffstat (limited to 'src/core')
 src/core/ext/client_config/client_channel.c |  7
 src/core/ext/client_config/lb_policy.h      |  6
 src/core/ext/lb_policy/grpclb/grpclb.c      | 61
 3 files changed, 34 insertions(+), 40 deletions(-)
diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_config/client_channel.c
index 19b621296e..cb91869526 100644
--- a/src/core/ext/client_config/client_channel.c
+++ b/src/core/ext/client_config/client_channel.c
@@ -656,9 +656,10 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     int r;
     GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
     gpr_mu_unlock(&chand->mu);
-    const grpc_lb_policy_pick_args inputs = {calld->pollent, initial_metadata,
-                                             initial_metadata_flags,
-                                             &calld->lb_token_mdelem};
+    // TODO(dgq): make this deadline configurable somehow.
+    const grpc_lb_policy_pick_args inputs = {
+        calld->pollent, initial_metadata, initial_metadata_flags,
+        &calld->lb_token_mdelem, gpr_inf_future(GPR_CLOCK_MONOTONIC)};
     r = grpc_lb_policy_pick(exec_ctx, lb_policy, &inputs, connected_subchannel,
                             NULL, on_ready);
     GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
diff --git a/src/core/ext/client_config/lb_policy.h b/src/core/ext/client_config/lb_policy.h
index 7fb3e08cb3..6cc3e1ebd3 100644
--- a/src/core/ext/client_config/lb_policy.h
+++ b/src/core/ext/client_config/lb_policy.h
@@ -59,10 +59,14 @@ typedef struct grpc_lb_policy_pick_args {
   grpc_polling_entity *pollent;
   /** Initial metadata associated with the picking call. */
   grpc_metadata_batch *initial_metadata;
-  /** See \a GRPC_INITIAL_METADATA_* in grpc_types.h */
+  /** Bitmask used for selective cancelling. See \a
+   * grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
+   * grpc_types.h */
   uint32_t initial_metadata_flags;
   /** Storage for LB token in \a initial_metadata, or NULL if not used */
   grpc_linked_mdelem *lb_token_mdelem_storage;
+  /** Deadline for the call to the LB server */
+  gpr_timespec deadline;
 } grpc_lb_policy_pick_args;
 
 struct grpc_lb_policy_vtable {
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index db425f817b..0acc9c6058 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -105,6 +105,7 @@
 #include <grpc/support/alloc.h>
 #include <grpc/support/host_port.h>
 #include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
 
 #include "src/core/ext/client_config/client_channel_factory.h"
 #include "src/core/ext/client_config/lb_policy_factory.h"
@@ -200,18 +201,8 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
 
 typedef struct pending_pick {
   struct pending_pick *next;
-  /* polling entity for the pick()'s async notification */
-  grpc_polling_entity *pollent;
-
-  /* the initial metadata for the pick. See grpc_lb_policy_pick() */
-  grpc_metadata_batch *initial_metadata;
-
-  /* storage for the lb token initial metadata mdelem */
-  grpc_linked_mdelem *lb_token_mdelem_storage;
-
-  /* bitmask passed to pick() and used for selective cancelling. See
-   * grpc_lb_policy_cancel_picks() */
-  uint32_t initial_metadata_flags;
+  /* original pick()'s arguments */
+  grpc_lb_policy_pick_args pick_args;
 
   /* output argument where to store the pick()ed connected subchannel, or NULL
    * upon error. */
@@ -233,11 +224,8 @@ static void add_pending_pick(pending_pick **root,
   memset(pp, 0, sizeof(pending_pick));
   memset(&pp->wrapped_on_complete_arg, 0, sizeof(wrapped_rr_closure_arg));
   pp->next = *root;
-  pp->pollent = pick_args->pollent;
+  pp->pick_args = *pick_args;
   pp->target = target;
-  pp->initial_metadata = pick_args->initial_metadata;
-  pp->initial_metadata_flags = pick_args->initial_metadata_flags;
-  pp->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
   pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
   pp->wrapped_on_complete_arg.target = target;
   pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
@@ -284,10 +272,14 @@ typedef struct glb_lb_policy {
   /** mutex protecting remaining members */
   gpr_mu mu;
 
+  /** who the client is trying to communicate with */
   const char *server_name;
   grpc_client_channel_factory *cc_factory;
   grpc_channel_args *args;
 
+  /** deadline for the LB's call */
+  gpr_timespec deadline;
+
   /** for communicating with the LB server */
   grpc_channel *lb_channel;
 
@@ -489,10 +481,8 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
       gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
               (intptr_t)glb_policy->rr_policy);
     }
-    const grpc_lb_policy_pick_args pick_args = {
-        pp->pollent, pp->initial_metadata, pp->initial_metadata_flags,
-        pp->lb_token_mdelem_storage};
-    grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, &pick_args, pp->target,
+    grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, &pp->pick_args,
+                        pp->target,
                         (void **)&pp->wrapped_on_complete_arg.lb_token,
                         &pp->wrapped_on_complete);
     pp->wrapped_on_complete_arg.owning_pending_node = pp;
@@ -593,7 +583,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
               &addr_strs[addr_index++],
               (const struct sockaddr *)&args->addresses->addresses[i]
                   .address.addr,
-              true) == 0);
+              true) > 0);
     }
   }
 }
@@ -665,7 +655,6 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
     *pp->target = NULL;
     grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete, GRPC_ERROR_NONE,
                         NULL);
-    gpr_free(pp);
     pp = next;
   }
 
@@ -703,12 +692,11 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pending_pick *next = pp->next;
     if (pp->target == target) {
       grpc_polling_entity_del_from_pollset_set(
-          exec_ctx, pp->pollent, glb_policy->base.interested_parties);
+          exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
       *target = NULL;
       grpc_exec_ctx_sched(
           exec_ctx, &pp->wrapped_on_complete,
           GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
-      gpr_free(pp);
     } else {
       pp->next = glb_policy->pending_picks;
       glb_policy->pending_picks = pp;
@@ -734,14 +722,13 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   glb_policy->pending_picks = NULL;
   while (pp != NULL) {
     pending_pick *next = pp->next;
-    if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
+    if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
       grpc_polling_entity_del_from_pollset_set(
-          exec_ctx, pp->pollent, glb_policy->base.interested_parties);
+          exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
       grpc_exec_ctx_sched(
           exec_ctx, &pp->wrapped_on_complete,
           GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
-      gpr_free(pp);
     } else {
       pp->next = glb_policy->pending_picks;
       glb_policy->pending_picks = pp;
@@ -772,8 +759,6 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                     const grpc_lb_policy_pick_args *pick_args,
                     grpc_connected_subchannel **target, void **user_data,
                     grpc_closure *on_complete) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
-
   if (pick_args->lb_token_mdelem_storage == NULL) {
     *target = NULL;
     grpc_exec_ctx_sched(
@@ -784,8 +769,10 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     return 1;
   }
 
+  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
   gpr_mu_lock(&glb_policy->mu);
-  int r;
+  glb_policy->deadline = pick_args->deadline;
+  bool pick_done;
 
   if (glb_policy->rr_policy != NULL) {
     if (grpc_lb_glb_trace) {
@@ -804,10 +791,11 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     grpc_closure_init(&glb_policy->wrapped_on_complete, wrapped_rr_closure,
                       &glb_policy->wc_arg);
 
-    r = grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, pick_args, target,
+    pick_done =
+        grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, pick_args, target,
                             (void **)&glb_policy->wc_arg.lb_token,
                             &glb_policy->wrapped_on_complete);
-    if (r != 0) {
+    if (pick_done) {
       /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
       if (grpc_lb_glb_trace) {
         gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
@@ -821,6 +809,8 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                          GRPC_MDELEM_REF(glb_policy->wc_arg.lb_token));
     }
   } else {
+    /* else, the pending pick will be registered and taken care of by the
+     * pending pick list inside the RR policy (glb_policy->rr_policy) */
    grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
                                           glb_policy->base.interested_parties);
     add_pending_pick(&glb_policy->pending_picks, pick_args, target,
@@ -829,10 +819,10 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     if (!glb_policy->started_picking) {
       start_picking(exec_ctx, glb_policy);
     }
-    r = 0;
+    pick_done = false;
  }
  gpr_mu_unlock(&glb_policy->mu);
-  return r;
+  return pick_done;
 }
 
 static grpc_connectivity_state glb_check_connectivity(
@@ -942,8 +932,7 @@ static lb_client_data *lb_client_data_create(glb_lb_policy *glb_policy) {
   grpc_closure_init(&lb_client->close_sent, close_sent_cb, lb_client);
   grpc_closure_init(&lb_client->srv_status_rcvd, srv_status_rcvd_cb, lb_client);
 
-  /* TODO(dgq): get the deadline from the parent channel. */
-  lb_client->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+  lb_client->deadline = glb_policy->deadline;
 
   /* Note the following LB call progresses every time there's activity in \a
    * glb_policy->base.interested_parties, which is comprised of the polling
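
The substantive change in this merge plumbs the pick's deadline from the client channel through grpc_lb_policy_pick_args into the grpclb policy, which caches it and later applies it to the balancer call, replacing the previously hard-coded gpr_inf_future() deadline. The following minimal, self-contained C sketch illustrates that plumbing pattern under simplified assumptions: every name in it (fake_timespec, pick_args, lb_policy, policy_pick, lb_call_deadline) is a hypothetical stand-in for the real gRPC-core types and functions (gpr_timespec, grpc_lb_policy_pick_args, glb_lb_policy, glb_pick, lb_client_data_create), not the actual API.

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-in for gpr_timespec. */
typedef struct {
  long tv_sec; /* seconds; LONG_MAX models an infinite deadline */
} fake_timespec;

/* Models gpr_inf_future(GPR_CLOCK_MONOTONIC). */
static fake_timespec fake_inf_future(void) {
  fake_timespec t = {LONG_MAX};
  return t;
}

/* Stand-in for grpc_lb_policy_pick_args: after this merge, each pick
 * carries the call's deadline alongside its metadata. */
typedef struct {
  const char *initial_metadata; /* stand-in for grpc_metadata_batch* */
  fake_timespec deadline;       /* mirrors the field added in lb_policy.h */
} pick_args;

/* Stand-in for glb_lb_policy: the policy caches the deadline so the
 * balancer call created later can reuse it. */
typedef struct {
  fake_timespec deadline;
} lb_policy;

/* Mirrors glb_pick(): save the per-pick deadline on the policy. */
static void policy_pick(lb_policy *p, const pick_args *args) {
  p->deadline = args->deadline;
}

/* Mirrors lb_client_data_create(): the LB call uses the cached deadline
 * instead of the old hard-coded gpr_inf_future(). */
static fake_timespec lb_call_deadline(const lb_policy *p) {
  return p->deadline;
}

int main(void) {
  lb_policy p;
  /* Mirrors pick_subchannel(): callers currently still pass an infinite
   * deadline (see the TODO(dgq) in the diff), but the plumbing is in
   * place for a real per-call value. */
  pick_args args = {"initial-metadata", fake_inf_future()};
  policy_pick(&p, &args);
  printf("LB call deadline: %ld sec\n", lb_call_deadline(&p).tv_sec);
  return 0;
}

The design point this mirrors is that the deadline is captured at pick time and consumed lazily when the balancer call is created, which is why the diff adds a deadline field to glb_lb_policy rather than passing a deadline parameter to lb_client_data_create().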