Diffstat (limited to 'src/core/ext/filters/client_channel')
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc                     37
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc            17
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc    31
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc  31
4 files changed, 72 insertions, 44 deletions
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 22c2bc8880..ea5e076c3b 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -1205,6 +1205,9 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
"Pick cancelled", &error, 1));
}
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem);
+
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
@@ -1228,7 +1231,7 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
chand, calld);
}
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- } else {
+ } else if (chand->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
@@ -1242,6 +1245,30 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
}
+ // TODO(roth): It should be impossible for chand->lb_policy to be NULL
+ // here, so the rest of this code should never actually be executed.
+ // However, we have reports of a crash on iOS that triggers this case,
+ // so we are temporarily adding this to restore branches that were
+ // removed in https://github.com/grpc/grpc/pull/12297. Need to figure
+ // out what is actually causing this to occur and then figure out the
+ // right way to deal with it.
+ else if (chand->resolver != NULL) {
+ // No LB policy, so try again.
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: resolver returned but no LB policy, "
+ "trying again",
+ chand, calld);
+ }
+ pick_after_resolver_result_start_locked(exec_ctx, elem);
+ } else {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
+ calld);
+ }
+ async_pick_done_locked(
+ exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+ }
}
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
@@ -1599,8 +1626,8 @@ int grpc_client_channel_num_external_connectivity_watchers(
return count;
}
-static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
grpc_closure *follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
@@ -1619,8 +1646,8 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
- GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete, w,
- grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
+ grpc_combiner_scheduler(w->chand->combiner));
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
} else {
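The client_channel.cc hunks above restore two branches in pick_after_resolver_result_done_locked for the case where the resolver returns but no LB policy has been installed yet. The following is a minimal, self-contained sketch of that dispatch only; Channel, Resolver, LbPolicy, PickOutcome, and on_resolver_result are illustrative stand-ins, not the real gRPC core types.

```cpp
// Sketch of the branch structure added back by this patch in
// pick_after_resolver_result_done_locked(). All names here are stand-ins.
struct LbPolicy {};
struct Resolver {};

struct Channel {
  LbPolicy* lb_policy = nullptr;  // set once the resolver has produced a config
  Resolver* resolver = nullptr;   // cleared when the channel shuts down
};

enum class PickOutcome { kStarted, kRetried, kFailed };

// Prefer the LB policy; fall back to waiting for another resolver result while
// the resolver is still alive; otherwise fail the pick as "Disconnected".
PickOutcome on_resolver_result(Channel* chand, bool cancelled) {
  if (cancelled) {
    return PickOutcome::kFailed;       // pick was cancelled while waiting
  } else if (chand->lb_policy != nullptr) {
    return PickOutcome::kStarted;      // normal case: hand the pick to the LB policy
  } else if (chand->resolver != nullptr) {
    // Defensive branch restored by this patch: no LB policy yet, but the
    // resolver is still running, so queue another wait for its next result.
    return PickOutcome::kRetried;
  } else {
    return PickOutcome::kFailed;       // resolver gone: fail the pick
  }
}
```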
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index ffd58129c6..d6bdc13ba9 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -1027,15 +1027,19 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+ gpr_free(pp);
pp = next;
}
while (pping != NULL) {
pending_ping *next = pping->next;
- GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+ gpr_free(pping);
pping = next;
}
}
@@ -1803,9 +1807,8 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
break;
}
case GRPC_CHANNEL_IDLE:
- // lb channel inactive (probably shutdown prior to update). Restart lb
- // call to kick the lb channel into gear.
- GPR_ASSERT(glb_policy->lb_call == NULL);
+ // lb channel inactive (probably shutdown prior to update). Restart lb
+ // call to kick the lb channel into gear.
/* fallthrough */
case GRPC_CHANNEL_READY:
if (glb_policy->lb_call != NULL) {
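The grpclb.cc hunk changes glb_shutdown_locked so that pending picks and pending pings are completed with a "Channel Shutdown" error rather than with success, and their list nodes are freed. The sketch below restates that drain loop in a self-contained form; PendingPick, PendingPing, Error, and the std::function callbacks are illustrative stand-ins for the wrapped closures in the real grpclb code.

```cpp
#include <functional>
#include <string>

struct Error { std::string message; };

struct PendingPick {
  PendingPick* next = nullptr;
  void** target = nullptr;                        // where the pick result would be written
  std::function<void(const Error&)> on_complete;  // stand-in for the wrapped_on_complete closure
};

struct PendingPing {
  PendingPing* next = nullptr;
  std::function<void(const Error&)> notify;       // stand-in for the wrapped_notify closure
};

// Drain both pending lists on shutdown, failing each entry and freeing it.
void glb_shutdown_pending(PendingPick* pick, PendingPing* ping) {
  const Error shutdown{"Channel Shutdown"};
  while (pick != nullptr) {
    PendingPick* next = pick->next;
    *pick->target = nullptr;       // no subchannel is handed out
    pick->on_complete(shutdown);   // previously completed with "no error"
    delete pick;                   // the patch adds the matching gpr_free()
    pick = next;
  }
  while (ping != nullptr) {
    PendingPing* next = ping->next;
    ping->notify(shutdown);        // pings are failed the same way
    delete ping;
    ping = next;
  }
}
```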
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index b07fc3b720..56c261ba34 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -102,12 +102,23 @@ static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
}
+static void fail_pending_picks_for_shutdown(grpc_exec_ctx *exec_ctx,
+ pick_first_lb_policy *p) {
+ pending_pick *pp;
+ while ((pp = p->pending_picks) != NULL) {
+ p->pending_picks = pp->next;
+ *pp->target = NULL;
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+ gpr_free(pp);
+ }
+}
+
static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp;
p->shutdown = true;
- pp = p->pending_picks;
- p->pending_picks = NULL;
+ fail_pending_picks_for_shutdown(exec_ctx, p);
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"), "shutdown");
@@ -120,13 +131,6 @@ static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
}
- while (pp != NULL) {
- pending_pick *next = pp->next;
- *pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- pp = next;
- }
}
static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
@@ -637,12 +641,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick first exhausted channels", &error, 1),
"no_more_channels");
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- }
+ fail_pending_picks_for_shutdown(exec_ctx, p);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
"pick_first_connectivity");
} else {
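The pick_first.cc hunks collect the duplicated "drain the pending picks" loops into one helper, fail_pending_picks_for_shutdown, called from both the shutdown path and the "exhausted channels" branch. The sketch below shows that shape with stand-in types; Policy, PendingPick, and the two caller functions are illustrative, not the real gRPC structures.

```cpp
#include <functional>
#include <string>

struct PendingPick {
  PendingPick* next = nullptr;
  void** target = nullptr;
  std::function<void(const std::string& error)> on_complete;
};

struct Policy {
  PendingPick* pending_picks = nullptr;
  bool shutdown = false;
};

// Single helper: every caller fails pending picks with the same error.
static void fail_pending_picks_for_shutdown(Policy* p) {
  PendingPick* pp;
  while ((pp = p->pending_picks) != nullptr) {
    p->pending_picks = pp->next;
    *pp->target = nullptr;
    pp->on_complete("Channel Shutdown");  // fail instead of reporting success
    delete pp;                            // assumes picks were allocated with new
  }
}

static void policy_shutdown(Policy* p) {
  p->shutdown = true;
  fail_pending_picks_for_shutdown(p);     // call site 1: explicit shutdown
}

static void on_all_subchannels_failed(Policy* p) {
  // call site 2: pick_first has run out of subchannels to try
  fail_pending_picks_for_shutdown(p);
}
```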
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 6812bb50cd..de163f6300 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -315,15 +315,10 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
}
-static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[RR %p] Shutting down Round Robin policy at %p",
- (void *)pol, (void *)pol);
- }
- p->shutdown = true;
+static void fail_pending_picks_for_shutdown(grpc_exec_ctx *exec_ctx,
+ round_robin_lb_policy *p) {
pending_pick *pp;
- while ((pp = p->pending_picks)) {
+ while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
GRPC_CLOSURE_SCHED(
@@ -331,6 +326,16 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
gpr_free(pp);
}
+}
+
+static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ gpr_log(GPR_DEBUG, "[RR %p] Shutting down Round Robin policy at %p",
+ (void *)pol, (void *)pol);
+ }
+ p->shutdown = true;
+ fail_pending_picks_for_shutdown(exec_ctx, p);
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
@@ -621,14 +626,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
sd->user_data = NULL;
}
if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
- // the policy is shutting down. Flush all the pending picks...
- pending_pick *pp;
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- }
+ // The policy is shutting down. Fail all of the pending picks.
+ fail_pending_picks_for_shutdown(exec_ctx, p);
}
rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
"sd_shutdown+started_picking");