path: root/src/core/ext
author    Yang Gao <yangg@google.com>  2017-05-22 10:41:17 -0700
committer Yang Gao <yangg@google.com>  2017-05-22 10:41:17 -0700
commit    725d82274385217b5f0345e834f7fae7e76fbd84 (patch)
tree      310cf74a491e11efd86e16ebfd879f9a83e5e648 /src/core/ext
parent    21035da1c9c20b5204ad4e9f2339490b3a3b6c0f (diff)
parent    495cf83c86e04567f8b907a51cc0f86f5068fab6 (diff)
merge with head
Diffstat (limited to 'src/core/ext')
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.c                        |  18
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h                              |   4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c                |  81
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c    |  89
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h    |   2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c     | 457
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.c                         | 129
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.h                         |   6
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c |  18
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c |  73
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h |   6
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.c                             |   2
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.c                      |  12
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c                  |  41
14 files changed, 489 insertions(+), 449 deletions(-)
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index 24843d52e9..f2f27b9175 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -914,10 +914,13 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
if (calld->connected_subchannel == NULL) {
- gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
+ gpr_atm_no_barrier_store(&calld->subchannel_call, (gpr_atm)CANCELLED_CALL);
fail_locked(exec_ctx, calld,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Failed to create subchannel", &error, 1));
+ error == GRPC_ERROR_NONE
+ ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy")
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to create subchannel", &error, 1));
} else if (GET_CALL(calld) == CANCELLED_CALL) {
/* already cancelled before subchannel became ready */
grpc_error *cancellation_error =
@@ -1180,6 +1183,15 @@ static void start_transport_stream_op_batch_locked_inner(
&calld->next_step)) {
calld->pick_pending = false;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+ if (calld->connected_subchannel == NULL) {
+ gpr_atm_no_barrier_store(&calld->subchannel_call,
+ (gpr_atm)CANCELLED_CALL);
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy");
+ fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
+ return; // Early out.
+ }
} else {
grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
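The hunk above distinguishes two reasons for ending up without a connected subchannel: the pick genuinely failed (error != GRPC_ERROR_NONE), or the LB policy deliberately dropped the call. A minimal standalone sketch of that error-selection pattern (hypothetical names, not the gRPC API):

/* Standalone sketch (hypothetical names): selecting the error to surface
 * when a pick completes without a connected subchannel. A clean pick
 * result means the LB policy intentionally dropped the call. */
#include <stdbool.h>
#include <stdio.h>

static const char *pick_failure_message(bool pick_had_error) {
  return pick_had_error ? "Failed to create subchannel"
                        : "Call dropped by load balancing policy";
}

int main(void) {
  printf("%s\n", pick_failure_message(false));
  printf("%s\n", pick_failure_message(true));
  return 0;
}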
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 184b2ef720..fb4aa084a6 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -149,7 +149,9 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
/** Finds an appropriate subchannel for a call, based on \a pick_args.
- \a target will be set to the selected subchannel, or NULL on failure.
+ \a target will be set to the selected subchannel, or NULL on failure
+ or when the LB policy decides to drop the call.
+
Upon success, \a user_data will be set to whatever opaque information
may need to be propagated from the LB policy, or NULL if not needed.
\a context will be populated with context to pass to the subchannel
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
index b7c0e929b7..d2a2856a18 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -327,6 +327,11 @@ typedef struct glb_lb_policy {
* response has arrived. */
grpc_grpclb_serverlist *serverlist;
+ /** Index into serverlist for next pick.
+ * If the server at this index is a drop, we return a drop.
+ * Otherwise, we delegate to the RR policy. */
+ size_t serverlist_index;
+
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
@@ -402,6 +407,9 @@ struct rr_connectivity_data {
static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
bool log) {
+ if (server->drop_for_rate_limiting || server->drop_for_load_balancing) {
+ return false;
+ }
const grpc_grpclb_ip_address *ip = &server->ip_address;
if (server->port >> 16 != 0) {
if (log) {
@@ -411,7 +419,6 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
}
return false;
}
-
if (ip->size != 4 && ip->size != 16) {
if (log) {
gpr_log(GPR_ERROR,
@@ -445,11 +452,12 @@ static const grpc_lb_user_data_vtable lb_token_vtable = {
static void parse_server(const grpc_grpclb_server *server,
grpc_resolved_address *addr) {
+ memset(addr, 0, sizeof(*addr));
+ if (server->drop_for_rate_limiting || server->drop_for_load_balancing) return;
const uint16_t netorder_port = htons((uint16_t)server->port);
/* the addresses are given in binary format (an in(6)_addr struct) in
* server->ip_address.bytes. */
const grpc_grpclb_ip_address *ip = &server->ip_address;
- memset(addr, 0, sizeof(*addr));
if (ip->size == 4) {
addr->len = sizeof(struct sockaddr_in);
struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
@@ -586,16 +594,51 @@ static bool update_lb_connectivity_status_locked(
return true;
}
-/* perform a pick over \a rr_policy. Given that a pick can return immediately
- * (ignoring its completion callback) we need to perform the cleanups this
- * callback would be otherwise resposible for */
+/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
+ * immediately (ignoring its completion callback), we need to perform the
+ * cleanups this callback would otherwise be responsible for.
+ * If \a force_async is true, then we will manually schedule the
+ * completion callback even if the pick is available immediately. */
static bool pick_from_internal_rr_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *rr_policy,
- const grpc_lb_policy_pick_args *pick_args,
+ grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
+ const grpc_lb_policy_pick_args *pick_args, bool force_async,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
- GPR_ASSERT(rr_policy != NULL);
+ // Look at the index into the serverlist to see if we should drop this call.
+ grpc_grpclb_server *server =
+ glb_policy->serverlist->servers[glb_policy->serverlist_index++];
+ if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
+ glb_policy->serverlist_index = 0; // Wrap-around.
+ }
+ if (server->drop_for_rate_limiting || server->drop_for_load_balancing) {
+ // Not using the RR policy, so unref it.
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
+ (intptr_t)wc_arg->rr_policy);
+ }
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+ // Update client load reporting stats to indicate the number of
+ // dropped calls. Note that we have to do this here instead of in
+ // the client_load_reporting filter, because we do not create a
+ // subchannel call (and therefore no client_load_reporting filter)
+ // for dropped calls.
+ grpc_grpclb_client_stats_add_call_started(wc_arg->client_stats);
+ grpc_grpclb_client_stats_add_call_finished(
+ server->drop_for_rate_limiting, server->drop_for_load_balancing,
+ false /* failed_to_send */, false /* known_received */,
+ wc_arg->client_stats);
+ grpc_grpclb_client_stats_unref(wc_arg->client_stats);
+ if (force_async) {
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ gpr_free(wc_arg->free_when_done);
+ return false;
+ }
+ gpr_free(wc_arg->free_when_done);
+ return true;
+ }
+ // Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
- exec_ctx, rr_policy, pick_args, target, wc_arg->context,
+ exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
(void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
@@ -604,17 +647,20 @@ static bool pick_from_internal_rr_locked(
(intptr_t)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
-
/* add the load reporting initial metadata */
initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
pick_args->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
-
// Pass on client stats via context. Passes ownership of the reference.
GPR_ASSERT(wc_arg->client_stats != NULL);
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
-
+ if (force_async) {
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ gpr_free(wc_arg->free_when_done);
+ return false;
+ }
gpr_free(wc_arg->free_when_done);
}
/* else, the pending pick will be registered and taken care of by the
@@ -744,8 +790,8 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
- pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
- &pp->pick_args, pp->target,
+ pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
+ true /* force_async */, pp->target,
&pp->wrapped_on_complete_arg);
}
@@ -1115,8 +1161,9 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
wc_arg->initial_metadata = pick_args->initial_metadata;
wc_arg->free_when_done = wc_arg;
- pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
- pick_args, target, wc_arg);
+ pick_done =
+ pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
+ false /* force_async */, target, wc_arg);
} else {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG,
@@ -1517,7 +1564,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
* serverlist instance will be destroyed either upon the next
* update or in glb_destroy() */
glb_policy->serverlist = serverlist;
-
+ glb_policy->serverlist_index = 0;
rr_handover_locked(exec_ctx, glb_policy);
}
} else {
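Taken together, the grpclb hunks above make each pick walk the balancer-provided serverlist via serverlist_index, wrapping at the end; entries flagged drop_for_rate_limiting or drop_for_load_balancing complete the pick as a drop (and are counted in the client load report) instead of being delegated to the round_robin child policy. A standalone sketch of the wrap-around and drop decision (hypothetical types, not the gRPC API):

#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool drop_for_rate_limiting;
  bool drop_for_load_balancing;
  int port;
} server_entry;

typedef struct {
  server_entry *servers;
  size_t num_servers;
  size_t serverlist_index; /* next pick position; wraps around */
} serverlist;

/* Returns true when the call must be dropped; otherwise sets *picked. */
static bool pick_or_drop(serverlist *sl, server_entry **picked) {
  server_entry *server = &sl->servers[sl->serverlist_index++];
  if (sl->serverlist_index == sl->num_servers) {
    sl->serverlist_index = 0; /* wrap-around */
  }
  if (server->drop_for_rate_limiting || server->drop_for_load_balancing) {
    return true; /* no subchannel call is created for drops */
  }
  *picked = server;
  return false;
}

int main(void) {
  server_entry servers[] = {
      {false, false, 1001}, {true, false, 0}, {false, false, 1003}};
  serverlist sl = {servers, 3, 0};
  for (int i = 0; i < 6; ++i) {
    server_entry *picked = NULL;
    if (pick_or_drop(&sl, &picked)) {
      printf("pick %d: dropped\n", i);
    } else {
      printf("pick %d: server on port %d\n", i, picked->port);
    }
  }
  return 0;
}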
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
index 81b6932fae..90e7c2efe5 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
@@ -37,44 +37,39 @@
#include <grpc/support/alloc.h>
+/* invoked once for every Server in ServerList */
+static bool count_serverlist(pb_istream_t *stream, const pb_field_t *field,
+ void **arg) {
+ grpc_grpclb_serverlist *sl = *arg;
+ grpc_grpclb_server server;
+ if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
+ gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
+ return false;
+ }
+ ++sl->num_servers;
+ return true;
+}
+
typedef struct decode_serverlist_arg {
- /* The first pass counts the number of servers in the server list. The second
- * one allocates and decodes. */
- bool first_pass;
/* The decoding callback is invoked once per server in serverlist. Remember
* which index of the serverlist we are currently decoding */
size_t decoding_idx;
- /* Populated after the first pass. Number of server in the input serverlist */
- size_t num_servers;
/* The decoded serverlist */
- grpc_grpclb_server **servers;
+ grpc_grpclb_serverlist *serverlist;
} decode_serverlist_arg;
/* invoked once for every Server in ServerList */
static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
decode_serverlist_arg *dec_arg = *arg;
- if (dec_arg->first_pass) { /* count how many server do we have */
- grpc_grpclb_server server;
- if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
- gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
- return false;
- }
- dec_arg->num_servers++;
- } else { /* second pass. Actually decode. */
- grpc_grpclb_server *server = gpr_zalloc(sizeof(grpc_grpclb_server));
- GPR_ASSERT(dec_arg->num_servers > 0);
- if (dec_arg->decoding_idx == 0) { /* first iteration of second pass */
- dec_arg->servers =
- gpr_malloc(sizeof(grpc_grpclb_server *) * dec_arg->num_servers);
- }
- if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
- gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
- return false;
- }
- dec_arg->servers[dec_arg->decoding_idx++] = server;
+ GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
+ grpc_grpclb_server *server = gpr_zalloc(sizeof(grpc_grpclb_server));
+ if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
+ gpr_free(server);
+ gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
+ return false;
}
-
+ dec_arg->serverlist->servers[dec_arg->decoding_idx++] = server;
return true;
}
@@ -165,36 +160,38 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
grpc_slice encoded_grpc_grpclb_response) {
- bool status;
- decode_serverlist_arg arg;
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
+ grpc_grpclb_serverlist *sl = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
- memset(&arg, 0, sizeof(decode_serverlist_arg));
-
- res.server_list.servers.funcs.decode = decode_serverlist;
- res.server_list.servers.arg = &arg;
- arg.first_pass = true;
- status = pb_decode(&stream, grpc_lb_v1_LoadBalanceResponse_fields, &res);
+ // First pass: count number of servers.
+ res.server_list.servers.funcs.decode = count_serverlist;
+ res.server_list.servers.arg = sl;
+ bool status = pb_decode(&stream, grpc_lb_v1_LoadBalanceResponse_fields, &res);
if (!status) {
+ gpr_free(sl);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream));
return NULL;
}
-
- arg.first_pass = false;
- status =
- pb_decode(&stream_at_start, grpc_lb_v1_LoadBalanceResponse_fields, &res);
- if (!status) {
- gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream));
- return NULL;
+ // Second pass: populate servers.
+ if (sl->num_servers > 0) {
+ sl->servers = gpr_zalloc(sizeof(grpc_grpclb_server *) * sl->num_servers);
+ decode_serverlist_arg decode_arg;
+ memset(&decode_arg, 0, sizeof(decode_arg));
+ decode_arg.serverlist = sl;
+ res.server_list.servers.funcs.decode = decode_serverlist;
+ res.server_list.servers.arg = &decode_arg;
+ status = pb_decode(&stream_at_start, grpc_lb_v1_LoadBalanceResponse_fields,
+ &res);
+ if (!status) {
+ grpc_grpclb_destroy_serverlist(sl);
+ gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream));
+ return NULL;
+ }
}
-
- grpc_grpclb_serverlist *sl = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
- sl->num_servers = arg.num_servers;
- sl->servers = arg.servers;
if (res.server_list.has_expiration_interval) {
sl->expiration_interval = res.server_list.expiration_interval;
}
@@ -228,7 +225,7 @@ grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
const grpc_grpclb_serverlist *rhs) {
- if ((lhs == NULL) || (rhs == NULL)) {
+ if (lhs == NULL || rhs == NULL) {
return false;
}
if (lhs->num_servers != rhs->num_servers) {
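The rewritten parser keeps the two-pass shape but gives each pass its own callback: count_serverlist only counts repeated Server messages, then an array of the right size is allocated and decode_serverlist fills it during a second decode from stream_at_start. A standalone sketch of the same count-then-fill pattern over a toy stream (hypothetical decoder API, not nanopb itself):

#include <stdio.h>
#include <stdlib.h>

typedef struct {
  const int *data; /* stand-in for the encoded buffer */
  size_t len;
  size_t pos;
} istream;

typedef int (*element_cb)(istream *s, void **arg);

/* Drives the callback once per element, like a repeated-field decoder.
 * Returns 0 on failure. */
static int decode_all(istream *s, element_cb cb, void *arg) {
  while (s->pos < s->len) {
    if (!cb(s, &arg)) return 0;
  }
  return 1;
}

/* Pass 1: count elements without storing them. */
static int count_cb(istream *s, void **arg) {
  size_t *count = *arg;
  s->pos++; /* consume one element */
  ++*count;
  return 1;
}

typedef struct {
  int *out;
  size_t idx;
} fill_arg;

/* Pass 2: populate the pre-sized array. */
static int fill_cb(istream *s, void **arg) {
  fill_arg *fa = *arg;
  fa->out[fa->idx++] = s->data[s->pos++];
  return 1;
}

int main(void) {
  const int encoded[] = {10, 20, 30};
  istream stream = {encoded, 3, 0};
  istream stream_at_start = stream; /* rewound copy for the second pass */
  size_t num_servers = 0;
  if (!decode_all(&stream, count_cb, &num_servers)) return 1;
  int *servers = calloc(num_servers, sizeof(*servers));
  fill_arg fa = {servers, 0};
  if (!decode_all(&stream_at_start, fill_cb, &fa)) return 1;
  for (size_t i = 0; i < num_servers; ++i)
    printf("server %zu: %d\n", i, servers[i]);
  free(servers);
  return 0;
}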
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
index 06873821bd..7f596ce1f1 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@@ -51,7 +51,7 @@ typedef grpc_lb_v1_LoadBalanceRequest grpc_grpclb_request;
typedef grpc_lb_v1_InitialLoadBalanceResponse grpc_grpclb_initial_response;
typedef grpc_lb_v1_Server grpc_grpclb_server;
typedef grpc_lb_v1_Duration grpc_grpclb_duration;
-typedef struct grpc_grpclb_serverlist {
+typedef struct {
grpc_grpclb_server **servers;
size_t num_servers;
grpc_grpclb_duration expiration_interval;
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
index 6e7f410635..2acde70f4c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
@@ -99,26 +99,13 @@ typedef struct pending_pick {
grpc_closure *on_complete;
} pending_pick;
-/** List of subchannels in a connectivity READY state */
-typedef struct ready_list {
- grpc_subchannel *subchannel;
- /* references namesake entry in subchannel_data */
- void *user_data;
- struct ready_list *next;
- struct ready_list *prev;
-} ready_list;
-
typedef struct {
- /** index within policy->subchannels */
- size_t index;
/** backpointer to owning policy */
round_robin_lb_policy *policy;
/** subchannel itself */
grpc_subchannel *subchannel;
/** notification that connectivity has changed on subchannel */
grpc_closure connectivity_changed_closure;
- /** this subchannels current position in subchannel->ready_list */
- ready_list *ready_list_node;
/** last observed connectivity. Not updated by
* \a grpc_subchannel_notify_on_state_change. Used to determine the previous
* state while processing the new state in \a rr_connectivity_changed */
@@ -126,6 +113,10 @@ typedef struct {
/** current connectivity state. Updated by \a
* grpc_subchannel_notify_on_state_change */
grpc_connectivity_state curr_connectivity_state;
+ /** connectivity state to be updated by the watcher, not guarded by
+ * the combiner. Will be moved to curr_connectivity_state inside of
+ * the combiner by rr_connectivity_changed_locked(). */
+ grpc_connectivity_state pending_connectivity_state_unsafe;
/** the subchannel's target user data */
void *user_data;
/** vtable to operate over \a user_data */
@@ -141,182 +132,105 @@ struct round_robin_lb_policy {
/** all our subchannels */
size_t num_subchannels;
- subchannel_data **subchannels;
+ subchannel_data *subchannels;
- /** how many subchannels are in TRANSIENT_FAILURE */
+ /** how many subchannels are in state READY */
+ size_t num_ready;
+ /** how many subchannels are in state TRANSIENT_FAILURE */
size_t num_transient_failures;
- /** how many subchannels are IDLE */
+ /** how many subchannels are in state IDLE */
size_t num_idle;
/** have we started picking? */
- int started_picking;
+ bool started_picking;
/** are we shutting down? */
- int shutdown;
+ bool shutdown;
/** List of picks that are waiting on connectivity */
pending_pick *pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
- /** (Dummy) root of the doubly linked list containing READY subchannels */
- ready_list ready_list;
- /** Last pick from the ready list. */
- ready_list *ready_list_last_pick;
+ // Index into subchannels for last pick.
+ size_t last_ready_subchannel_index;
};
-/** Returns the next subchannel from the connected list or NULL if the list is
- * empty.
+/** Returns the index into p->subchannels of the next subchannel in
+ * READY state, or p->num_subchannels if no subchannel is READY.
*
- * Note that this function does *not* advance p->ready_list_last_pick. Use \a
- * advance_last_picked_locked() for that. */
-static ready_list *peek_next_connected_locked(const round_robin_lb_policy *p) {
- ready_list *selected;
- selected = p->ready_list_last_pick->next;
-
- while (selected != NULL) {
- if (selected == &p->ready_list) {
- GPR_ASSERT(selected->subchannel == NULL);
- /* skip dummy root */
- selected = selected->next;
- } else {
- GPR_ASSERT(selected->subchannel != NULL);
- return selected;
- }
+ * Note that this function does *not* update p->last_ready_subchannel_index.
+ * The caller must do that if it returns a pick. */
+static size_t get_next_ready_subchannel_index_locked(
+ const round_robin_lb_policy *p) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ gpr_log(GPR_INFO,
+ "[RR: %p] getting next ready subchannel, "
+ "last_ready_subchannel_index=%zu",
+ p, p->last_ready_subchannel_index);
}
- return NULL;
-}
-
-/** Advance the \a ready_list picking head. */
-static void advance_last_picked_locked(round_robin_lb_policy *p) {
- if (p->ready_list_last_pick->next != NULL) { /* non-empty list */
- p->ready_list_last_pick = p->ready_list_last_pick->next;
- if (p->ready_list_last_pick == &p->ready_list) {
- /* skip dummy root */
- p->ready_list_last_pick = p->ready_list_last_pick->next;
+ for (size_t i = 0; i < p->num_subchannels; ++i) {
+ const size_t index =
+ (i + p->last_ready_subchannel_index + 1) % p->num_subchannels;
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ gpr_log(GPR_DEBUG, "[RR %p] checking index %zu: state=%d", p, index,
+ p->subchannels[index].curr_connectivity_state);
+ }
+ if (p->subchannels[index].curr_connectivity_state == GRPC_CHANNEL_READY) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ gpr_log(GPR_DEBUG, "[RR %p] found next ready subchannel at index %zu",
+ p, index);
+ }
+ return index;
}
- } else { /* should be an empty list */
- GPR_ASSERT(p->ready_list_last_pick == &p->ready_list);
}
-
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG,
- "[READYLIST, RR: %p] ADVANCED LAST PICK. NOW AT NODE %p (SC %p, "
- "CSC %p)",
- (void *)p, (void *)p->ready_list_last_pick,
- (void *)p->ready_list_last_pick->subchannel,
- (void *)grpc_subchannel_get_connected_subchannel(
- p->ready_list_last_pick->subchannel));
+ gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", p);
}
+ return p->num_subchannels;
}
-/** Prepends (relative to the root at p->ready_list) the connected subchannel \a
- * csc to the list of ready subchannels. */
-static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
- subchannel_data *sd) {
- ready_list *new_elem = gpr_zalloc(sizeof(ready_list));
- new_elem->subchannel = sd->subchannel;
- new_elem->user_data = sd->user_data;
- if (p->ready_list.prev == NULL) {
- /* first element */
- new_elem->next = &p->ready_list;
- new_elem->prev = &p->ready_list;
- p->ready_list.next = new_elem;
- p->ready_list.prev = new_elem;
- } else {
- new_elem->next = &p->ready_list;
- new_elem->prev = p->ready_list.prev;
- p->ready_list.prev->next = new_elem;
- p->ready_list.prev = new_elem;
- }
+// Sets p->last_ready_subchannel_index to last_ready_index.
+static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
+ size_t last_ready_index) {
+ GPR_ASSERT(last_ready_index < p->num_subchannels);
+ p->last_ready_subchannel_index = last_ready_index;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (Conn. SC %p)",
- (void *)new_elem, (void *)sd->subchannel);
- }
- return new_elem;
-}
-
-/** Removes \a node from the list of connected subchannels */
-static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
- ready_list *node) {
- if (node == NULL) {
- return;
- }
- if (node == p->ready_list_last_pick) {
- p->ready_list_last_pick = p->ready_list_last_pick->prev;
- }
-
- /* removing last item */
- if (node->next == &p->ready_list && node->prev == &p->ready_list) {
- GPR_ASSERT(p->ready_list.next == node);
- GPR_ASSERT(p->ready_list.prev == node);
- p->ready_list.next = NULL;
- p->ready_list.prev = NULL;
- } else {
- node->prev->next = node->next;
- node->next->prev = node->prev;
- }
-
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[READYLIST] REMOVED NODE %p (SC %p)", (void *)node,
- (void *)node->subchannel);
+ gpr_log(GPR_DEBUG,
+ "[RR: %p] setting last_ready_subchannel_index=%zu (SC %p, CSC %p)",
+ (void *)p, last_ready_index,
+ (void *)p->subchannels[last_ready_index].subchannel,
+ (void *)grpc_subchannel_get_connected_subchannel(
+ p->subchannels[last_ready_index].subchannel));
}
-
- node->next = NULL;
- node->prev = NULL;
- node->subchannel = NULL;
-
- gpr_free(node);
-}
-
-static bool is_ready_list_empty(round_robin_lb_policy *p) {
- return p->ready_list.prev == NULL;
}
static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- ready_list *elem;
-
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Destroying Round Robin policy at %p", (void *)pol);
}
-
for (size_t i = 0; i < p->num_subchannels; i++) {
- subchannel_data *sd = p->subchannels[i];
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_destroy");
- if (sd->user_data != NULL) {
- GPR_ASSERT(sd->user_data_vtable != NULL);
- sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
+ subchannel_data *sd = &p->subchannels[i];
+ if (sd->subchannel != NULL) {
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_destroy");
+ if (sd->user_data != NULL) {
+ GPR_ASSERT(sd->user_data_vtable != NULL);
+ sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
+ }
}
- gpr_free(sd);
}
-
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
-
- elem = p->ready_list.next;
- while (elem != NULL && elem != &p->ready_list) {
- ready_list *tmp;
- tmp = elem->next;
- elem->next = NULL;
- elem->prev = NULL;
- elem->subchannel = NULL;
- gpr_free(elem);
- elem = tmp;
- }
-
gpr_free(p);
}
static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- pending_pick *pp;
- size_t i;
-
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
}
-
- p->shutdown = 1;
+ p->shutdown = true;
+ pending_pick *pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
@@ -328,10 +242,13 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
- for (i = 0; i < p->num_subchannels; i++) {
- subchannel_data *sd = p->subchannels[i];
- grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
- &sd->connectivity_changed_closure);
+ for (size_t i = 0; i < p->num_subchannels; i++) {
+ subchannel_data *sd = &p->subchannels[i];
+ if (sd->subchannel != NULL) {
+ grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL,
+ NULL,
+ &sd->connectivity_changed_closure);
+ }
}
}
@@ -339,8 +256,7 @@ static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel **target,
grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- pending_pick *pp;
- pp = p->pending_picks;
+ pending_pick *pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
@@ -364,8 +280,7 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- pending_pick *pp;
- pp = p->pending_picks;
+ pending_pick *pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
@@ -387,21 +302,16 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
round_robin_lb_policy *p) {
- size_t i;
- p->started_picking = 1;
-
- for (i = 0; i < p->num_subchannels; i++) {
- subchannel_data *sd = p->subchannels[i];
- /* use some sentinel value outside of the range of grpc_connectivity_state
- * to signal an undefined previous state. We won't be referring to this
- * value again and it'll be overwritten after the first call to
- * rr_connectivity_changed */
- sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
- sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
- GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity");
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
+ p->started_picking = true;
+ for (size_t i = 0; i < p->num_subchannels; i++) {
+ subchannel_data *sd = &p->subchannels[i];
+ if (sd->subchannel != NULL) {
+ GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity");
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->pending_connectivity_state_unsafe,
+ &sd->connectivity_changed_closure);
+ }
}
}
@@ -418,36 +328,32 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_call_context_element *context, void **user_data,
grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- pending_pick *pp;
- ready_list *selected;
-
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
}
-
- if ((selected = peek_next_connected_locked(p))) {
+ const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
+ if (next_ready_index < p->num_subchannels) {
/* readily available, report right away */
+ subchannel_data *sd = &p->subchannels[next_ready_index];
*target = GRPC_CONNECTED_SUBCHANNEL_REF(
- grpc_subchannel_get_connected_subchannel(selected->subchannel),
- "rr_picked");
-
+ grpc_subchannel_get_connected_subchannel(sd->subchannel), "rr_picked");
if (user_data != NULL) {
- *user_data = selected->user_data;
+ *user_data = sd->user_data;
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
- "[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
- (void *)*target, (void *)selected);
+ "[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (INDEX %zu)",
+ (void *)*target, next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
- advance_last_picked_locked(p);
+ update_last_ready_subchannel_index_locked(p, next_ready_index);
return 1;
} else {
/* no pick currently available. Save for later in list of pending picks */
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pp = gpr_malloc(sizeof(*pp));
+ pending_pick *pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
@@ -458,25 +364,31 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
-static void update_state_counters(subchannel_data *sd) {
+static void update_state_counters_locked(subchannel_data *sd) {
round_robin_lb_policy *p = sd->policy;
-
- /* update p->num_transient_failures (resp. p->num_idle): if the previous
- * state was TRANSIENT_FAILURE (resp. IDLE), decrement
- * p->num_transient_failures (resp. p->num_idle). */
- if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
+ GPR_ASSERT(p->num_ready > 0);
+ --p->num_ready;
+ } else if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
GPR_ASSERT(p->num_transient_failures > 0);
--p->num_transient_failures;
} else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) {
GPR_ASSERT(p->num_idle > 0);
--p->num_idle;
}
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
+ ++p->num_ready;
+ } else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ ++p->num_transient_failures;
+ } else if (sd->curr_connectivity_state == GRPC_CHANNEL_IDLE) {
+ ++p->num_idle;
+ }
}
/* sd is the subchannel_data associated with the updated subchannel.
* shutdown_error will only be used upon policy transition to TRANSIENT_FAILURE
* or SHUTDOWN */
-static grpc_connectivity_state update_lb_connectivity_status(
+static grpc_connectivity_state update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, subchannel_data *sd, grpc_error *error) {
/* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled).
@@ -498,7 +410,7 @@ static grpc_connectivity_state update_lb_connectivity_status(
* CHECK: p->num_idle == p->num_subchannels.
*/
round_robin_lb_policy *p = sd->policy;
- if (!is_ready_list_empty(p)) { /* 1) READY */
+ if (p->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
return GRPC_CHANNEL_READY;
@@ -532,32 +444,62 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
- pending_pick *pp;
-
- GRPC_ERROR_REF(error);
-
+ // Now that we're inside the combiner, copy the pending connectivity
+ // state (which was set by the connectivity state watcher) to
+ // curr_connectivity_state, which is what we use inside of the combiner.
+ sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ gpr_log(GPR_DEBUG,
+ "[RR %p] connectivity changed for subchannel %p: "
+ "prev_state=%d new_state=%d",
+ p, sd->subchannel, sd->prev_connectivity_state,
+ sd->curr_connectivity_state);
+ }
+ // If we're shutting down, unref and return.
if (p->shutdown) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
- GRPC_ERROR_UNREF(error);
return;
}
- switch (sd->curr_connectivity_state) {
- case GRPC_CHANNEL_INIT:
- GPR_UNREACHABLE_CODE(return );
- case GRPC_CHANNEL_READY:
- /* add the newly connected subchannel to the list of connected ones.
- * Note that it goes to the "end of the line". */
- sd->ready_list_node = add_connected_sc_locked(p, sd);
+ // Update state counters and determine new overall state.
+ update_state_counters_locked(sd);
+ sd->prev_connectivity_state = sd->curr_connectivity_state;
+ grpc_connectivity_state new_connectivity_state =
+ update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error));
+ // If the new state is SHUTDOWN, unref the subchannel, and if the new
+ // overall state is SHUTDOWN, clean up.
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown");
+ sd->subchannel = NULL;
+ if (sd->user_data != NULL) {
+ GPR_ASSERT(sd->user_data_vtable != NULL);
+ sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
+ }
+ if (new_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+ /* the policy is shutting down. Flush all the pending picks... */
+ pending_pick *pp;
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = NULL;
+ grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+ gpr_free(pp);
+ }
+ }
+ /* unref the "rr_connectivity" weak ref from start_picking */
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
+ } else {
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
* p->pending_picks. This preemptively replicates rr_pick()'s actions. */
- ready_list *selected = peek_next_connected_locked(p);
- GPR_ASSERT(selected != NULL);
+ const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
+ GPR_ASSERT(next_ready_index < p->num_subchannels);
+ subchannel_data *selected = &p->subchannels[next_ready_index];
if (p->pending_picks != NULL) {
/* if the selected subchannel is going to be used for the pending
* picks, update the last picked pointer */
- advance_last_picked_locked(p);
+ update_last_ready_subchannel_index_locked(p, next_ready_index);
}
+ pending_pick *pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
@@ -568,72 +510,19 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
- "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
- (void *)selected->subchannel, (void *)selected);
+ "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (INDEX %zu)",
+ (void *)selected->subchannel, next_ready_index);
}
grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
- update_lb_connectivity_status(exec_ctx, sd, error);
- sd->prev_connectivity_state = sd->curr_connectivity_state;
- /* renew notification: reuses the "rr_connectivity" weak ref */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
- break;
- case GRPC_CHANNEL_IDLE:
- ++p->num_idle;
- /* fallthrough */
- case GRPC_CHANNEL_CONNECTING:
- update_state_counters(sd);
- update_lb_connectivity_status(exec_ctx, sd, error);
- sd->prev_connectivity_state = sd->curr_connectivity_state;
- /* renew notification: reuses the "rr_connectivity" weak ref */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
- break;
- case GRPC_CHANNEL_TRANSIENT_FAILURE:
- ++p->num_transient_failures;
- /* remove from ready list if still present */
- if (sd->ready_list_node != NULL) {
- remove_disconnected_sc_locked(p, sd->ready_list_node);
- sd->ready_list_node = NULL;
- }
- update_lb_connectivity_status(exec_ctx, sd, error);
- sd->prev_connectivity_state = sd->curr_connectivity_state;
- /* renew notification: reuses the "rr_connectivity" weak ref */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
- break;
- case GRPC_CHANNEL_SHUTDOWN:
- update_state_counters(sd);
- if (sd->ready_list_node != NULL) {
- remove_disconnected_sc_locked(p, sd->ready_list_node);
- sd->ready_list_node = NULL;
- }
- --p->num_subchannels;
- GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
- p->subchannels[p->num_subchannels]);
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown");
- p->subchannels[sd->index]->index = sd->index;
- if (update_lb_connectivity_status(exec_ctx, sd, error) ==
- GRPC_CHANNEL_SHUTDOWN) {
- /* the policy is shutting down. Flush all the pending picks... */
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = NULL;
- grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- }
- }
- gpr_free(sd);
- /* unref the "rr_connectivity" weak ref from start_picking */
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
- break;
+ }
+ /* renew notification: reuses the "rr_connectivity" weak ref */
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, p->base.interested_parties,
+ &sd->pending_connectivity_state_unsafe,
+ &sd->connectivity_changed_closure);
}
- GRPC_ERROR_UNREF(error);
}
static grpc_connectivity_state rr_check_connectivity_locked(
@@ -654,10 +543,10 @@ static void rr_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- ready_list *selected;
- grpc_connected_subchannel *target;
- if ((selected = peek_next_connected_locked(p))) {
- target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
+ if (next_ready_index < p->num_subchannels) {
+ subchannel_data *selected = &p->subchannels[next_ready_index];
+ grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"rr_picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
@@ -708,7 +597,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
p->subchannels = gpr_zalloc(sizeof(*p->subchannels) * num_addrs);
grpc_subchannel_args sc_args;
- size_t subchannel_idx = 0;
+ size_t subchannel_index = 0;
for (size_t i = 0; i < addresses->num_addresses; i++) {
/* Skip balancer addresses, since we only know how to handle backends. */
if (addresses->addresses[i].is_balancer) continue;
@@ -727,42 +616,44 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
- gpr_log(GPR_DEBUG, "Created subchannel %p for address uri %s",
- (void *)subchannel, address_uri);
+ gpr_log(GPR_DEBUG, "index %zu: Created subchannel %p for address uri %s",
+ subchannel_index, (void *)subchannel, address_uri);
gpr_free(address_uri);
}
grpc_channel_args_destroy(exec_ctx, new_args);
if (subchannel != NULL) {
- subchannel_data *sd = gpr_zalloc(sizeof(*sd));
- p->subchannels[subchannel_idx] = sd;
+ subchannel_data *sd = &p->subchannels[subchannel_index];
sd->policy = p;
- sd->index = subchannel_idx;
sd->subchannel = subchannel;
+ /* use some sentinel value outside of the range of grpc_connectivity_state
+ * to signal an undefined previous state. We won't be referring to this
+ * value again and it'll be overwritten after the first call to
+ * rr_connectivity_changed */
+ sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
+ sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
sd->user_data_vtable = addresses->user_data_vtable;
if (sd->user_data_vtable != NULL) {
sd->user_data =
sd->user_data_vtable->copy(addresses->addresses[i].user_data);
}
- ++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed_locked, sd,
grpc_combiner_scheduler(args->combiner, false));
+ ++subchannel_index;
}
}
- if (subchannel_idx == 0) {
+ if (subchannel_index == 0) {
/* couldn't create any subchannel. Bail out */
gpr_free(p->subchannels);
gpr_free(p);
return NULL;
}
- p->num_subchannels = subchannel_idx;
+ p->num_subchannels = subchannel_index;
- /* The (dummy node) root of the ready list */
- p->ready_list.subchannel = NULL;
- p->ready_list.prev = NULL;
- p->ready_list.next = NULL;
- p->ready_list_last_pick = &p->ready_list;
+ // Initialize the last pick index to the last subchannel, so that the
+ // first pick will start at the beginning of the list.
+ p->last_ready_subchannel_index = subchannel_index - 1;
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
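The round_robin rewrite replaces the doubly linked ready_list with a flat subchannel array plus last_ready_subchannel_index: a pick scans the array modulo num_subchannels starting one past the last pick, and an index equal to num_subchannels means nothing is READY. A standalone sketch of that scan (simplified state enum, not the gRPC types):

#include <stdio.h>

typedef enum { IDLE, CONNECTING, READY, TRANSIENT_FAILURE } conn_state;

/* Returns the index of the next READY subchannel after last_ready_index,
 * or num_subchannels if none is READY. Does not advance the index. */
static size_t next_ready_index(const conn_state *subchannels,
                               size_t num_subchannels,
                               size_t last_ready_index) {
  for (size_t i = 0; i < num_subchannels; ++i) {
    const size_t index = (i + last_ready_index + 1) % num_subchannels;
    if (subchannels[index] == READY) return index;
  }
  return num_subchannels; /* nothing READY */
}

int main(void) {
  conn_state subchannels[] = {READY, TRANSIENT_FAILURE, READY, IDLE};
  /* start at the last index so the first pick lands on index 0 */
  size_t last = 3;
  for (int pick = 0; pick < 4; ++pick) {
    size_t idx = next_ready_index(subchannels, 4, last);
    if (idx == 4) {
      printf("no READY subchannel\n");
      break;
    }
    printf("pick %d -> index %zu\n", pick, idx);
    last = idx; /* only advance when the selection is actually used */
  }
  return 0;
}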
diff --git a/src/core/ext/filters/client_channel/parse_address.c b/src/core/ext/filters/client_channel/parse_address.c
index edc6ce697d..18381eec55 100644
--- a/src/core/ext/filters/client_channel/parse_address.c
+++ b/src/core/ext/filters/client_channel/parse_address.c
@@ -57,11 +57,11 @@ bool grpc_parse_unix(const grpc_uri *uri,
struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
const size_t maxlen = sizeof(un->sun_path);
const size_t path_len = strnlen(uri->path, maxlen);
- if (path_len == maxlen) return 0;
+ if (path_len == maxlen) return false;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, uri->path);
resolved_addr->len = sizeof(*un);
- return 1;
+ return true;
}
#else /* GRPC_HAVE_UNIX_SOCKET */
@@ -73,74 +73,65 @@ bool grpc_parse_unix(const grpc_uri *uri,
#endif /* GRPC_HAVE_UNIX_SOCKET */
-bool grpc_parse_ipv4(const grpc_uri *uri,
- grpc_resolved_address *resolved_addr) {
- if (strcmp("ipv4", uri->scheme) != 0) {
- gpr_log(GPR_ERROR, "Expected 'ipv4' scheme, got '%s'", uri->scheme);
- return false;
- }
- const char *host_port = uri->path;
+bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
+ bool log_errors) {
+ bool success = false;
+ // Split host and port.
char *host;
char *port;
- int port_num;
- bool result = false;
- struct sockaddr_in *in = (struct sockaddr_in *)resolved_addr->addr;
-
- if (*host_port == '/') ++host_port;
- if (!gpr_split_host_port(host_port, &host, &port)) {
- return false;
- }
-
- memset(resolved_addr, 0, sizeof(grpc_resolved_address));
- resolved_addr->len = sizeof(struct sockaddr_in);
+ if (!gpr_split_host_port(hostport, &host, &port)) return false;
+ // Parse IP address.
+ memset(addr, 0, sizeof(*addr));
+ addr->len = sizeof(struct sockaddr_in);
+ struct sockaddr_in *in = (struct sockaddr_in *)addr->addr;
in->sin_family = AF_INET;
if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
- gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
+ if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
goto done;
}
-
- if (port != NULL) {
- if (sscanf(port, "%d", &port_num) != 1 || port_num < 0 ||
- port_num > 65535) {
- gpr_log(GPR_ERROR, "invalid ipv4 port: '%s'", port);
- goto done;
- }
- in->sin_port = htons((uint16_t)port_num);
- } else {
- gpr_log(GPR_ERROR, "no port given for ipv4 scheme");
+ // Parse port.
+ if (port == NULL) {
+ if (log_errors) gpr_log(GPR_ERROR, "no port given for ipv4 scheme");
goto done;
}
-
- result = true;
+ int port_num;
+ if (sscanf(port, "%d", &port_num) != 1 || port_num < 0 || port_num > 65535) {
+ if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 port: '%s'", port);
+ goto done;
+ }
+ in->sin_port = htons((uint16_t)port_num);
+ success = true;
done:
gpr_free(host);
gpr_free(port);
- return result;
+ return success;
}
-bool grpc_parse_ipv6(const grpc_uri *uri,
+bool grpc_parse_ipv4(const grpc_uri *uri,
grpc_resolved_address *resolved_addr) {
- if (strcmp("ipv6", uri->scheme) != 0) {
- gpr_log(GPR_ERROR, "Expected 'ipv6' scheme, got '%s'", uri->scheme);
+ if (strcmp("ipv4", uri->scheme) != 0) {
+ gpr_log(GPR_ERROR, "Expected 'ipv4' scheme, got '%s'", uri->scheme);
return false;
}
const char *host_port = uri->path;
- char *host;
- char *port;
- int port_num;
- int result = 0;
- struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)resolved_addr->addr;
-
if (*host_port == '/') ++host_port;
- if (!gpr_split_host_port(host_port, &host, &port)) {
- return 0;
- }
+ return grpc_parse_ipv4_hostport(host_port, resolved_addr,
+ true /* log_errors */);
+}
- memset(in6, 0, sizeof(*in6));
- resolved_addr->len = sizeof(*in6);
+bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
+ bool log_errors) {
+ bool success = false;
+ // Split host and port.
+ char *host;
+ char *port;
+ if (!gpr_split_host_port(hostport, &host, &port)) return false;
+ // Parse IP address.
+ memset(addr, 0, sizeof(*addr));
+ addr->len = sizeof(struct sockaddr_in6);
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr->addr;
in6->sin6_family = AF_INET6;
-
- /* Handle the RFC6874 syntax for IPv6 zone identifiers. */
+ // Handle the RFC6874 syntax for IPv6 zone identifiers.
char *host_end = (char *)gpr_memrchr(host, '%', strlen(host));
if (host_end != NULL) {
GPR_ASSERT(host_end >= host);
@@ -159,7 +150,7 @@ bool grpc_parse_ipv6(const grpc_uri *uri,
gpr_log(GPR_ERROR, "invalid ipv6 scope id: '%s'", host_end + 1);
goto done;
}
- // Handle "sin6_scope_id" being type "u_long". See grpc issue ##10027.
+ // Handle "sin6_scope_id" being type "u_long". See grpc issue #10027.
in6->sin6_scope_id = sin6_scope_id;
} else {
if (inet_pton(AF_INET6, host, &in6->sin6_addr) == 0) {
@@ -167,24 +158,34 @@ bool grpc_parse_ipv6(const grpc_uri *uri,
goto done;
}
}
-
- if (port != NULL) {
- if (sscanf(port, "%d", &port_num) != 1 || port_num < 0 ||
- port_num > 65535) {
- gpr_log(GPR_ERROR, "invalid ipv6 port: '%s'", port);
- goto done;
- }
- in6->sin6_port = htons((uint16_t)port_num);
- } else {
- gpr_log(GPR_ERROR, "no port given for ipv6 scheme");
+ // Parse port.
+ if (port == NULL) {
+ if (log_errors) gpr_log(GPR_ERROR, "no port given for ipv6 scheme");
goto done;
}
-
- result = 1;
+ int port_num;
+ if (sscanf(port, "%d", &port_num) != 1 || port_num < 0 || port_num > 65535) {
+ if (log_errors) gpr_log(GPR_ERROR, "invalid ipv6 port: '%s'", port);
+ goto done;
+ }
+ in6->sin6_port = htons((uint16_t)port_num);
+ success = true;
done:
gpr_free(host);
gpr_free(port);
- return result;
+ return success;
+}
+
+bool grpc_parse_ipv6(const grpc_uri *uri,
+ grpc_resolved_address *resolved_addr) {
+ if (strcmp("ipv6", uri->scheme) != 0) {
+ gpr_log(GPR_ERROR, "Expected 'ipv6' scheme, got '%s'", uri->scheme);
+ return false;
+ }
+ const char *host_port = uri->path;
+ if (*host_port == '/') ++host_port;
+ return grpc_parse_ipv6_hostport(host_port, resolved_addr,
+ true /* log_errors */);
}
bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr) {
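The refactoring splits URI-scheme validation from raw host:port parsing so that callers without a grpc_uri (such as the c-ares wrapper below) can reuse the parsers. A usage sketch of the new entry points (assumes the gRPC core source tree; the addresses are illustrative):

#include <stdbool.h>
#include <stdio.h>

#include "src/core/ext/filters/client_channel/parse_address.h"

int main(void) {
  grpc_resolved_address addr;
  if (grpc_parse_ipv4_hostport("127.0.0.1:53", &addr, true /* log_errors */)) {
    printf("parsed ipv4 host:port, len=%zu\n", (size_t)addr.len);
  }
  if (grpc_parse_ipv6_hostport("[::1]:53", &addr, true /* log_errors */)) {
    printf("parsed ipv6 host:port, len=%zu\n", (size_t)addr.len);
  }
  return 0;
}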
diff --git a/src/core/ext/filters/client_channel/parse_address.h b/src/core/ext/filters/client_channel/parse_address.h
index fa7ea33a00..1a203a3b26 100644
--- a/src/core/ext/filters/client_channel/parse_address.h
+++ b/src/core/ext/filters/client_channel/parse_address.h
@@ -54,4 +54,10 @@ bool grpc_parse_ipv6(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
/** Populate \a resolved_addr from \a uri. Returns true upon success. */
bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
+/** Parse bare IPv4 or IPv6 "IP:port" strings. */
+bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
+ bool log_errors);
+bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
+ bool log_errors);
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H */
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
index ffaeeed324..578e8d697f 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
@@ -61,6 +61,8 @@
typedef struct {
/** base class: must be first */
grpc_resolver base;
+ /** DNS server to use (if not system default) */
+ char *dns_server;
/** name to resolve (usually the same as target_name) */
char *name_to_resolve;
/** default port to use */
@@ -172,6 +174,8 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resolved_addresses_destroy(r->addresses);
grpc_lb_addresses_destroy(exec_ctx, addresses);
} else {
+ const char *msg = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
gpr_timespec timeout = gpr_time_sub(next_try, now);
@@ -221,9 +225,9 @@ static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(!r->resolving);
r->resolving = true;
r->addresses = NULL;
- grpc_resolve_address(exec_ctx, r->name_to_resolve, r->default_port,
- r->interested_parties, &r->dns_ares_on_resolved_locked,
- &r->addresses);
+ grpc_dns_lookup_ares(exec_ctx, r->dns_server, r->name_to_resolve,
+ r->default_port, r->interested_parties,
+ &r->dns_ares_on_resolved_locked, &r->addresses);
}
static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
@@ -246,6 +250,7 @@ static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
}
grpc_pollset_set_destroy(exec_ctx, r->interested_parties);
+ gpr_free(r->dns_server);
gpr_free(r->name_to_resolve);
gpr_free(r->default_port);
grpc_channel_args_destroy(exec_ctx, r->channel_args);
@@ -257,14 +262,13 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
const char *default_port) {
// Get name from args.
const char *path = args->uri->path;
- if (0 != strcmp(args->uri->authority, "")) {
- gpr_log(GPR_ERROR, "authority based dns uri's not supported");
- return NULL;
- }
if (path[0] == '/') ++path;
// Create resolver.
ares_dns_resolver *r = gpr_zalloc(sizeof(ares_dns_resolver));
grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
+ if (0 != strcmp(args->uri->authority, "")) {
+ r->dns_server = gpr_strdup(args->uri->authority);
+ }
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
r->channel_args = grpc_channel_args_copy(args->args);
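With this change a dns:// URI's authority, previously rejected, names the DNS server to query, while the path still carries the name to resolve. An illustrative decomposition of such a target (example string is hypothetical):

#include <stdio.h>
#include <string.h>

int main(void) {
  /* dns://<authority = DNS server>/<path = name to resolve> */
  const char *uri = "dns://8.8.8.8:53/grpc.io:443";
  const char *authority = uri + strlen("dns://");
  const char *path = strchr(authority, '/');
  printf("dns_server      = %.*s\n", (int)(path - authority), authority);
  printf("name_to_resolve = %s\n", path + 1);
  return 0;
}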
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
index 09c46a66e0..e0cfd8b629 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
@@ -48,7 +48,10 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
+
+#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
+#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -58,6 +61,8 @@ static gpr_once g_basic_init = GPR_ONCE_INIT;
static gpr_mu g_init_mu;
typedef struct grpc_ares_request {
+ /** indicates the DNS server to use, if specified */
+ struct ares_addr_port_node dns_server_addr;
/** following members are set in grpc_resolve_address_ares_impl */
/** host to resolve, parsed from the name to resolve */
char *host;
@@ -192,11 +197,12 @@ static void on_done_cb(void *arg, int status, int timeouts,
grpc_ares_request_unref(NULL, r);
}
-void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addrs) {
+void grpc_dns_lookup_ares(grpc_exec_ctx *exec_ctx, const char *dns_server,
+ const char *name, const char *default_port,
+ grpc_pollset_set *interested_parties,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) {
+ grpc_error *error = GRPC_ERROR_NONE;
/* TODO(zyc): Enable tracing after #9603 is checked in */
/* if (grpc_dns_trace) {
gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
@@ -208,28 +214,23 @@ void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, const char *name,
char *port;
gpr_split_host_port(name, &host, &port);
if (host == NULL) {
- grpc_error *err = grpc_error_set_str(
+ error = grpc_error_set_str(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("unparseable host:port"),
GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
- grpc_closure_sched(exec_ctx, on_done, err);
goto error_cleanup;
} else if (port == NULL) {
if (default_port == NULL) {
- grpc_error *err = grpc_error_set_str(
+ error = grpc_error_set_str(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("no port in name"),
GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
- grpc_closure_sched(exec_ctx, on_done, err);
goto error_cleanup;
}
port = gpr_strdup(default_port);
}
grpc_ares_ev_driver *ev_driver;
- grpc_error *err = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
- if (err != GRPC_ERROR_NONE) {
- GRPC_LOG_IF_ERROR("grpc_ares_ev_driver_create() failed", err);
- goto error_cleanup;
- }
+ error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
+ if (error != GRPC_ERROR_NONE) goto error_cleanup;
grpc_ares_request *r = gpr_malloc(sizeof(grpc_ares_request));
gpr_mu_init(&r->mu);
@@ -242,6 +243,40 @@ void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, const char *name,
r->success = false;
r->error = GRPC_ERROR_NONE;
ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+
+ // If dns_server is specified, use it.
+ if (dns_server != NULL) {
+ gpr_log(GPR_INFO, "Using DNS server %s", dns_server);
+ grpc_resolved_address addr;
+ if (grpc_parse_ipv4_hostport(dns_server, &addr, false /* log_errors */)) {
+ r->dns_server_addr.family = AF_INET;
+ memcpy(&r->dns_server_addr.addr.addr4, addr.addr, addr.len);
+ r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
+ r->dns_server_addr.udp_port = grpc_sockaddr_get_port(&addr);
+ } else if (grpc_parse_ipv6_hostport(dns_server, &addr,
+ false /* log_errors */)) {
+ r->dns_server_addr.family = AF_INET6;
+ memcpy(&r->dns_server_addr.addr.addr6, addr.addr, addr.len);
+ r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
+ r->dns_server_addr.udp_port = grpc_sockaddr_get_port(&addr);
+ } else {
+ error = grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("cannot parse authority"),
+ GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
+ goto error_cleanup;
+ }
+ int status = ares_set_servers_ports(*channel, &r->dns_server_addr);
+ if (status != ARES_SUCCESS) {
+ char *error_msg;
+ gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
+ ares_strerror(status));
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ gpr_free(error_msg);
+ goto error_cleanup;
+ }
+ }
+  // Take an extra reference here so that on_done_cb cannot destroy the
+  // request before grpc_ares_ev_driver_start has been called.
gpr_ref_init(&r->pending_queries, 2);
if (grpc_ipv6_loopback_available()) {
gpr_ref(&r->pending_queries);
@@ -254,10 +289,20 @@ void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, const char *name,
return;
error_cleanup:
+ grpc_closure_sched(exec_ctx, on_done, error);
gpr_free(host);
gpr_free(port);
}
+void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, const char *name,
+ const char *default_port,
+ grpc_pollset_set *interested_parties,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) {
+ grpc_dns_lookup_ares(exec_ctx, NULL /* dns_server */, name, default_port,
+ interested_parties, on_done, addrs);
+}
+
void (*grpc_resolve_address_ares)(
grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
grpc_pollset_set *interested_parties, grpc_closure *on_done,
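
The dns_server path above is, at bottom, a call to c-ares' ares_set_servers_ports() with a one-element server list. A minimal standalone sketch of that call, assuming c-ares >= 1.11.0 (where ares_set_servers_ports() was introduced) and using inet_pton() in place of gRPC's grpc_parse_ipv4_hostport():

#include <ares.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  ares_channel channel;
  if (ares_library_init(ARES_LIB_INIT_ALL) != ARES_SUCCESS) return 1;
  if (ares_init(&channel) != ARES_SUCCESS) return 1;

  // Single-entry server list: 8.8.8.8, port 5353 for both UDP and TCP.
  struct ares_addr_port_node server;
  memset(&server, 0, sizeof(server));  // also NULL-terminates server.next
  server.family = AF_INET;
  inet_pton(AF_INET, "8.8.8.8", &server.addr.addr4);
  server.udp_port = 5353;
  server.tcp_port = 5353;

  int status = ares_set_servers_ports(channel, &server);
  if (status != ARES_SUCCESS) {
    fprintf(stderr, "ares_set_servers_ports: %s\n", ares_strerror(status));
    return 1;
  }
  // ... issue queries with ares_gethostbyname() etc. ...
  ares_destroy(channel);
  ares_library_cleanup();
  return 0;
}

Like the grpc code above, a port of 0 in the node means "use the default (53)"; here a non-default port is set to make both fields visible.
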
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index 3dd40ea268..84fd7fcbd6 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -51,6 +51,12 @@ extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
grpc_closure *on_done,
grpc_resolved_addresses **addresses);
+void grpc_dns_lookup_ares(grpc_exec_ctx *exec_ctx, const char *dns_server,
+ const char *addr, const char *default_port,
+ grpc_pollset_set *interested_parties,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addresses);
+
/* Initialize gRPC ares wrapper. Must be called at least once before
grpc_resolve_address_ares(). */
grpc_error *grpc_ares_init(void);
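
The new grpc_dns_lookup_ares() entry point is grpc_resolve_address_ares() plus a leading dns_server argument; NULL means "use the system resolver configuration", as the wrapper at the end of grpc_ares_wrapper.c shows. A hedged sketch of a caller against this tree's headers — the closure body and the surrounding plumbing are illustrative, not code from this patch:

#include <grpc/support/log.h>

#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/resolve_address.h"

static grpc_resolved_addresses *g_addresses;

static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
                        grpc_error *error) {
  if (error == GRPC_ERROR_NONE) {
    gpr_log(GPR_INFO, "resolved %zu addresses", g_addresses->naddrs);
  }
}

static void lookup(grpc_exec_ctx *exec_ctx, const char *authority,
                   const char *name, grpc_pollset_set *pollset_set) {
  grpc_closure *done =
      grpc_closure_create(on_resolved, NULL, grpc_schedule_on_exec_ctx);
  // authority == NULL falls back to the system DNS configuration.
  grpc_dns_lookup_ares(exec_ctx, authority, name, "443" /* default_port */,
                       pollset_set, done, &g_addresses);
}
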
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c
index 1af3393a62..dd14bf1d02 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.c
@@ -283,6 +283,7 @@ static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
+ // add a weak ref and subtract a strong ref (atomically)
old_refs = ref_mutate(c, (gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS),
1 REF_MUTATE_PURPOSE("STRONG_UNREF"));
if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) {
@@ -656,7 +657,6 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
gpr_free(sw_subchannel);
grpc_channel_stack_destroy(exec_ctx, stk);
gpr_free(con);
- GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
return false;
}
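
The comment added in grpc_subchannel_unref() describes a bit-packing trick: strong refs occupy the bits at and above INTERNAL_REF_BITS, weak refs the bits below, so one atomic add of 1 - (1 << INTERNAL_REF_BITS) trades a strong ref for a weak ref with no lock. A standalone demonstration of the arithmetic (the constants mirror subchannel.c; plain uint64_t stands in for gpr_atm):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define INTERNAL_REF_BITS 16
#define STRONG_REF_MASK (~(uint64_t)((1 << INTERNAL_REF_BITS) - 1))

int main(void) {
  // 2 strong refs, 1 weak ref packed into one word.
  uint64_t refs = (2ull << INTERNAL_REF_BITS) + 1;
  uint64_t old_refs = refs;
  // Strong unref: a single addition drops a strong ref and adds a weak ref.
  refs += (uint64_t)1 - (uint64_t)(1 << INTERNAL_REF_BITS);
  printf("strong=%" PRIu64 " weak=%" PRIu64 "\n",
         refs >> INTERNAL_REF_BITS, refs & ~STRONG_REF_MASK);  // strong=1 weak=2
  // disconnect() runs only when the last strong ref just went away:
  if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) {
    printf("last strong ref released\n");  // not reached: old strong count was 2
  }
  return 0;
}
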
diff --git a/src/core/ext/filters/client_channel/subchannel_index.c b/src/core/ext/filters/client_channel/subchannel_index.c
index f6ef4a845e..b25dbfcf51 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.c
+++ b/src/core/ext/filters/client_channel/subchannel_index.c
@@ -183,8 +183,11 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
enter_ctx(exec_ctx);
grpc_subchannel *c = NULL;
+ bool need_to_unref_constructed;
while (c == NULL) {
+ need_to_unref_constructed = false;
+
// Compare and swap loop:
// - take a reference to the current index
gpr_mu_lock(&g_mu);
@@ -194,8 +197,11 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
// - Check to see if a subchannel already exists
c = gpr_avl_get(index, key);
if (c != NULL) {
+ c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
+ }
+ if (c != NULL) {
// yes -> we're done
- GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, constructed, "index_register");
+ need_to_unref_constructed = true;
} else {
// no -> update the avl and compare/swap
gpr_avl updated =
@@ -219,6 +225,10 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
leave_ctx(exec_ctx);
+ if (need_to_unref_constructed) {
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, constructed, "index_register");
+ }
+
return c;
}
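
The register fix boils down to a publish-or-adopt race: try to install the subchannel we just constructed; if another thread got there first, upgrade its entry to a full strong ref and discard ours, deferring that unref until after the index lock is released (an unref can trigger destruction, which may re-enter the index). The same pattern reduced to a single C11 atomic slot in place of the AVL-backed index; every name below is illustrative, not a gRPC API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct obj { atomic_int refs; int id; } obj;

static obj *obj_new(int id) {
  obj *o = malloc(sizeof(*o));
  atomic_init(&o->refs, 1);
  o->id = id;
  return o;
}
static void obj_unref(obj *o) {
  if (atomic_fetch_sub(&o->refs, 1) == 1) free(o);
}

static _Atomic(obj *) g_slot;  // stands in for the shared index

static obj *register_obj(obj *constructed) {
  obj *expected = NULL;
  if (atomic_compare_exchange_strong(&g_slot, &expected, constructed)) {
    atomic_fetch_add(&constructed->refs, 1);  // one ref kept by the slot,
    return constructed;                       // one returned to the caller
  }
  atomic_fetch_add(&expected->refs, 1);  // adopt the winner
  obj_unref(constructed);                // drop what we built, outside any lock
  return expected;
}

int main(void) {
  obj *a = register_obj(obj_new(1));
  obj *b = register_obj(obj_new(2));  // loses the race; adopts id 1
  printf("a->id=%d b->id=%d\n", a->id, b->id);  // prints: a->id=1 b->id=1
  obj_unref(b);
  obj_unref(a);
  obj_unref(atomic_load(&g_slot));  // demo teardown: release the slot's ref
  return 0;
}
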
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index fb8ceaecb0..f3268bcfca 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -2139,15 +2139,8 @@ static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void update_bdp(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
double bdp_dbl) {
- int32_t bdp;
- const int32_t kMinBDP = 128;
- if (bdp_dbl <= kMinBDP) {
- bdp = kMinBDP;
- } else if (bdp_dbl > INT32_MAX) {
- bdp = INT32_MAX;
- } else {
- bdp = (int32_t)(bdp_dbl);
- }
+  // initial window size is bounded to [1, 2^31-1]; we raise the min to 128.
+  // Clamp in double space before the cast: converting an out-of-range
+  // double to int32_t is undefined behavior.
+  int32_t bdp = (int32_t)GPR_CLAMP(bdp_dbl, 128.0, (double)INT32_MAX);
int64_t delta =
(int64_t)bdp -
(int64_t)t->settings[GRPC_LOCAL_SETTINGS]
@@ -2161,7 +2154,26 @@ static void update_bdp(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
(uint32_t)bdp);
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE, (uint32_t)bdp);
+}
+
+static void update_frame(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ double bw_dbl, double bdp_dbl) {
+  // As in update_bdp, clamp in double space before casting to int32_t.
+  int32_t bdp = (int32_t)GPR_CLAMP(bdp_dbl, 128.0, (double)INT32_MAX);
+  double target = GPR_MAX(bw_dbl / 1000.0, (double)bdp);
+  // frame size is bounded to [2^14, 2^24-1]
+  int32_t frame_size = (int32_t)GPR_CLAMP(target, 16384.0, 16777215.0);
+ int64_t delta = (int64_t)frame_size -
+ (int64_t)t->settings[GRPC_LOCAL_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
+ if (delta == 0 || (delta > -frame_size / 10 && delta < frame_size / 10)) {
+ return;
+ }
+ if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "%s: update max_frame size to %d", t->peer_string,
+ (int)frame_size);
+ }
+ push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
+ (uint32_t)frame_size);
}
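
update_frame()'s sizing rule, restated: target the larger of bw / 1000 (presumably bytes per millisecond, i.e. roughly one max-size frame per bandwidth tick) and the BDP guess, clamp to HTTP/2's legal [2^14, 2^24-1] range, and push a SETTINGS update only when the result leaves a ±10% dead band around the current value, so small oscillations don't churn SETTINGS frames. Just that math, self-contained and without the gRPC types:

#include <stdint.h>
#include <stdio.h>

static int32_t clamp_i32(int64_t v, int32_t lo, int32_t hi) {
  return v < lo ? lo : v > hi ? hi : (int32_t)v;
}

static int32_t next_frame_size(double bw, double bdp, int32_t current,
                               int *changed) {
  int64_t target = (int64_t)(bw / 1000.0);
  if (target < (int64_t)bdp) target = (int64_t)bdp;
  int32_t frame_size = clamp_i32(target, 16384, 16777215);
  int64_t delta = (int64_t)frame_size - (int64_t)current;
  // ±10% hysteresis, mirroring the check in update_frame() above.
  *changed = !(delta == 0 ||
               (delta > -frame_size / 10 && delta < frame_size / 10));
  return *changed ? frame_size : current;
}

int main(void) {
  int changed;
  // 200 MB/s link, 1 MB BDP: bw/1000 = 200000 < bdp, so the BDP wins.
  int32_t fs = next_frame_size(200e6, 1 << 20, 16384, &changed);
  printf("frame=%d changed=%d\n", (int)fs, changed);  // frame=1048576 changed=1
  // A ~5% move from the current size is absorbed by the dead band.
  fs = next_frame_size(200e6, 1.05 * (1 << 20), 1 << 20, &changed);
  printf("frame=%d changed=%d\n", (int)fs, changed);  // frame=1048576 changed=0
  return 0;
}
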
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
@@ -2280,6 +2292,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
}
int64_t estimate = -1;
+ double bdp_guess = -1;
if (grpc_bdp_estimator_get_estimate(&t->bdp_estimator, &estimate)) {
double target = 1 + log2((double)estimate);
double memory_pressure = grpc_resource_quota_get_memory_pressure(
@@ -2297,9 +2310,15 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
}
double log2_bdp_guess =
grpc_pid_controller_update(&t->pid_controller, bdp_error, dt);
- update_bdp(exec_ctx, t, pow(2, log2_bdp_guess));
+ bdp_guess = pow(2, log2_bdp_guess);
+ update_bdp(exec_ctx, t, bdp_guess);
t->last_pid_update = now;
}
+
+ double bw = -1;
+ if (grpc_bdp_estimator_get_bw(&t->bdp_estimator, &bw)) {
+ update_frame(exec_ctx, t, bw, bdp_guess);
+ }
}
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
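
For context on where bdp_guess above comes from: read_action_locked() tracks the window target in log2 space, so multiplicative estimation errors become additive; a PID step nudges the running guess toward 1 + log2(estimate), and pow(2, ...) converts back to bytes before update_bdp()/update_frame(). A toy rendering of that loop — the pid type, gains, and dt below are invented for illustration and do not match grpc_pid_controller's real API:

#include <math.h>
#include <stdio.h>

typedef struct { double kp, ki, error_integral, last_control; } pid;

static double pid_update(pid *p, double error, double dt) {
  p->error_integral += error * dt;
  p->last_control += dt * (p->kp * error + p->ki * p->error_integral);
  return p->last_control;
}

int main(void) {
  pid p = {.kp = 1.0, .ki = 4.0, .last_control = log2(65536.0)};
  double estimate = 1 << 20;                 // bytes observed in flight
  for (int i = 0; i < 5; i++) {
    double target = 1 + log2(estimate);      // desired log2 window
    double error = target - p.last_control;  // log-domain error
    double bdp_guess = pow(2, pid_update(&p, error, 0.1 /* dt, seconds */));
    printf("step %d: bdp_guess=%.0f\n", i, bdp_guess);  // climbs toward 2^21
  }
  return 0;
}
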