From befafe64f9a010920c492956ee8df1257a6d4e77 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 9 Feb 2017 11:30:54 -0800 Subject: Initial pass to remove mu_lock from client_channel: trickier cases remain --- src/core/ext/client_channel/client_channel.c | 291 ++++++++++++++------------- 1 file changed, 150 insertions(+), 141 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 06038bb5ba..2595acd8c4 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -51,6 +51,7 @@ #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/connected_channel.h" #include "src/core/lib/channel/deadline_filter.h" +#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/profiling/timers.h" @@ -160,8 +161,8 @@ typedef struct client_channel_channel_data { /** client channel factory */ grpc_client_channel_factory *client_channel_factory; - /** mutex protecting all variables below in this data structure */ - gpr_mu mu; + /** combiner protecting all variables below in this data structure */ + grpc_combiner *combiner; /** currently active load balancer */ char *lb_policy_name; grpc_lb_policy *lb_policy; @@ -218,8 +219,8 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx, } static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx, - lb_policy_connectivity_watcher *w, - grpc_error *error) { + void *arg, grpc_error *error) { + lb_policy_connectivity_watcher *w = arg; grpc_connectivity_state publish_state = w->state; /* check if the notification is for a stale policy */ if (w->lb_policy != w->chand->lb_policy) return; @@ -235,15 +236,6 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx, if (w->state != GRPC_CHANNEL_SHUTDOWN) { watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state); } -} - -static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - lb_policy_connectivity_watcher *w = arg; - - gpr_mu_lock(&w->chand->mu); - on_lb_policy_state_changed_locked(exec_ctx, w, error); - gpr_mu_unlock(&w->chand->mu); GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy"); gpr_free(w); @@ -256,16 +248,16 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand, GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy"); w->chand = chand; - grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w, - grpc_schedule_on_exec_ctx); + grpc_closure_init(&w->on_changed, on_lb_policy_state_changed_locked, w, + grpc_combiner_scheduler(chand->combiner, false)); w->state = current_state; w->lb_policy = lb_policy; grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state, &w->on_changed); } -static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx, + void *arg, grpc_error *error) { channel_data *chand = arg; char *lb_policy_name = NULL; grpc_lb_policy *lb_policy = NULL; @@ -353,7 +345,6 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg, chand->interested_parties); } - gpr_mu_lock(&chand->mu); if (lb_policy_name != NULL) { gpr_free(chand->lb_policy_name); chand->lb_policy_name = lb_policy_name; @@ -391,7 +382,6 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, 
void *arg, GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result, &chand->on_resolver_result_changed); - gpr_mu_unlock(&chand->mu); } else { if (chand->resolver != NULL) { grpc_resolver_shutdown(exec_ctx, chand->resolver); @@ -404,7 +394,6 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg, GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs, GPR_ARRAY_SIZE(refs)), "resolver_gone"); - gpr_mu_unlock(&chand->mu); } if (exit_idle) { @@ -426,20 +415,12 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg, GRPC_ERROR_UNREF(state_error); } -static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_transport_op *op) { +static void cc_start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error_ignored) { + grpc_transport_op *op = arg; + grpc_channel_element *elem = op->transport_private.args[0]; channel_data *chand = elem->channel_data; - grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); - - GPR_ASSERT(op->set_accept_stream == false); - if (op->bind_pollset != NULL) { - grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, - op->bind_pollset); - } - - gpr_mu_lock(&chand->mu); if (op->on_connectivity_state_change != NULL) { grpc_connectivity_state_notify_on_state_change( exec_ctx, &chand->state_tracker, op->connectivity_state, @@ -482,7 +463,28 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, } GRPC_ERROR_UNREF(op->disconnect_with_error); } - gpr_mu_unlock(&chand->mu); +} + +static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_transport_op *op) { + channel_data *chand = elem->channel_data; + + grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); + + GPR_ASSERT(op->set_accept_stream == false); + if (op->bind_pollset != NULL) { + grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, + op->bind_pollset); + } + + op->transport_private.args[0] = elem; + grpc_closure_sched( + exec_ctx, + grpc_closure_init(&op->transport_private.closure, + cc_start_transport_op_locked, op, + grpc_combiner_scheduler(chand->combiner, false)), + GRPC_ERROR_NONE); } static void cc_get_channel_info(grpc_exec_ctx *exec_ctx, @@ -512,11 +514,11 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx, GPR_ASSERT(args->is_last); GPR_ASSERT(elem->filter == &grpc_client_channel_filter); // Initialize data members. 
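The watcher conversion earlier in this patch (on_lb_policy_state_changed_locked and watch_lb_policy) shows the basic idiom for event sources: the callback is initialized with grpc_combiner_scheduler, so it always runs serialized with the rest of the channel's state changes and can simply re-arm itself. A minimal sketch of that re-arming pattern, using only the grpc_closure and grpc_combiner calls that appear in this patch; the policy_watcher struct and the function names below are illustrative, not the exact ones in client_channel.c:

typedef struct {
  grpc_combiner *combiner;       /* serializes all channel state mutations */
  grpc_lb_policy *lb_policy;     /* the policy being watched */
  grpc_connectivity_state state; /* last state reported to us */
  grpc_closure on_changed;       /* combiner-scheduled callback */
} policy_watcher;

static void start_watch(grpc_exec_ctx *exec_ctx, policy_watcher *w);

/* Runs on the combiner: may touch channel state without any mutex. */
static void on_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_error *error) {
  policy_watcher *w = arg;
  if (w->state != GRPC_CHANNEL_SHUTDOWN) {
    start_watch(exec_ctx, w); /* re-arm for the next transition */
  }
}

static void start_watch(grpc_exec_ctx *exec_ctx, policy_watcher *w) {
  grpc_closure_init(&w->on_changed, on_changed_locked, w,
                    grpc_combiner_scheduler(w->combiner, false));
  grpc_lb_policy_notify_on_state_change(exec_ctx, w->lb_policy, &w->state,
                                        &w->on_changed);
}
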
- gpr_mu_init(&chand->mu); + chand->combiner = grpc_combiner_create(NULL); chand->owning_stack = args->channel_stack; grpc_closure_init(&chand->on_resolver_result_changed, - on_resolver_result_changed, chand, - grpc_schedule_on_exec_ctx); + on_resolver_result_changed_locked, chand, + grpc_combiner_scheduler(chand->combiner, false)); chand->interested_parties = grpc_pollset_set_create(); grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, "client_channel"); @@ -572,7 +574,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx, } grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker); grpc_pollset_set_destroy(chand->interested_parties); - gpr_mu_destroy(&chand->mu); + grpc_combiner_destroy(exec_ctx, chand->combiner); } /************************************************************************* @@ -615,8 +617,6 @@ typedef struct client_channel_call_data { grpc_subchannel_call */ gpr_atm subchannel_call; - gpr_mu mu; - subchannel_creation_phase creation_phase; grpc_connected_subchannel *connected_subchannel; grpc_polling_entity *pollent; @@ -701,12 +701,11 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) { GRPC_ERROR_NONE); } -static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { grpc_call_element *elem = arg; call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; - gpr_mu_lock(&calld->mu); GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL); grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent, @@ -742,7 +741,6 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, (gpr_atm)(uintptr_t)subchannel_call); retry_waiting_locked(exec_ctx, calld); } - gpr_mu_unlock(&calld->mu); GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel"); } @@ -768,37 +766,35 @@ typedef struct { /** Return true if subchannel is available immediately (in which case on_ready should not be called), or false otherwise (in which case on_ready should be called when the subchannel is available). 
*/ -static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_metadata_batch *initial_metadata, - uint32_t initial_metadata_flags, - grpc_connected_subchannel **connected_subchannel, - grpc_closure *on_ready, grpc_error *error); - -static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static bool pick_subchannel_locked( + grpc_exec_ctx *exec_ctx, grpc_call_element *elem, + grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags, + grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready, + grpc_error *error); + +static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { continue_picking_args *cpa = arg; if (cpa->connected_subchannel == NULL) { /* cancelled, do nothing */ } else if (error != GRPC_ERROR_NONE) { grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error)); } else { - call_data *calld = cpa->elem->call_data; - gpr_mu_lock(&calld->mu); - if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata, - cpa->initial_metadata_flags, cpa->connected_subchannel, - cpa->on_ready, GRPC_ERROR_NONE)) { + if (pick_subchannel_locked(exec_ctx, cpa->elem, cpa->initial_metadata, + cpa->initial_metadata_flags, + cpa->connected_subchannel, cpa->on_ready, + GRPC_ERROR_NONE)) { grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE); } - gpr_mu_unlock(&calld->mu); } gpr_free(cpa); } -static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_metadata_batch *initial_metadata, - uint32_t initial_metadata_flags, - grpc_connected_subchannel **connected_subchannel, - grpc_closure *on_ready, grpc_error *error) { +static bool pick_subchannel_locked( + grpc_exec_ctx *exec_ctx, grpc_call_element *elem, + grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags, + grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready, + grpc_error *error) { GPR_TIMER_BEGIN("pick_subchannel", 0); channel_data *chand = elem->channel_data; @@ -808,7 +804,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, GPR_ASSERT(connected_subchannel); - gpr_mu_lock(&chand->mu); if (initial_metadata == NULL) { if (chand->lb_policy != NULL) { grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy, @@ -824,7 +819,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1)); } } - gpr_mu_unlock(&chand->mu); GPR_TIMER_END("pick_subchannel", 0); GRPC_ERROR_UNREF(error); return true; @@ -833,7 +827,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, if (chand->lb_policy != NULL) { grpc_lb_policy *lb_policy = chand->lb_policy; GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel"); - gpr_mu_unlock(&chand->mu); // If the application explicitly set wait_for_ready, use that. // Otherwise, if the service config specified a value for this // method, use that. 
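The same hand-off recurs throughout the series, including in the cc_start_transport_stream_op hunk that follows: instead of taking chand->mu, the entry point stashes its arguments and schedules a *_locked closure on the channel's combiner, which serializes it with every other closure scheduled there. A condensed sketch of that hop, reusing client_channel.c's channel_data type; start_something and do_something_locked are illustrative names:

/* Runs serialized on the channel's combiner: channel_data can be read and
   written here without holding any mutex. */
static void do_something_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                grpc_error *error_ignored) {
  grpc_channel_element *elem = arg;
  channel_data *chand = elem->channel_data;
  GPR_ASSERT(chand != NULL);
  /* ... safely touch chand->lb_policy, chand->resolver, ... */
}

/* Entry point, callable from any thread: hop onto the combiner rather than
   locking. The closure storage must outlive the hop (here the caller owns it). */
static void start_something(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
                            grpc_closure *closure) {
  channel_data *chand = elem->channel_data;
  grpc_closure_sched(
      exec_ctx,
      grpc_closure_init(closure, do_something_locked, elem,
                        grpc_combiner_scheduler(chand->combiner, false)),
      GRPC_ERROR_NONE);
}
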
@@ -872,59 +865,37 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, cpa->connected_subchannel = connected_subchannel; cpa->on_ready = on_ready; cpa->elem = elem; - grpc_closure_init(&cpa->closure, continue_picking, cpa, - grpc_schedule_on_exec_ctx); + grpc_closure_init(&cpa->closure, continue_picking_locked, cpa, + grpc_combiner_scheduler(chand->combiner, true)); grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure, GRPC_ERROR_NONE); } else { grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected")); } - gpr_mu_unlock(&chand->mu); GPR_TIMER_END("pick_subchannel", 0); return false; } -// The logic here is fairly complicated, due to (a) the fact that we -// need to handle the case where we receive the send op before the -// initial metadata op, and (b) the need for efficiency, especially in -// the streaming case. -// TODO(ctiller): Explain this more thoroughly. -static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op *op) { +static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, + void *arg, + grpc_error *error_ignored) { + grpc_transport_stream_op *op = arg; + grpc_call_element *elem = op->transport_private.args[0]; call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; - GRPC_CALL_LOG_OP(GPR_INFO, elem, op); - grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op); - /* try to (atomically) get the call */ - grpc_subchannel_call *call = GET_CALL(calld); - GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0); - if (call == CANCELLED_CALL) { - grpc_transport_stream_op_finish_with_failure( - exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error)); - GPR_TIMER_END("cc_start_transport_stream_op", 0); - return; - } - if (call != NULL) { - grpc_subchannel_call_process_op(exec_ctx, call, op); - GPR_TIMER_END("cc_start_transport_stream_op", 0); - return; - } - /* we failed; lock and figure out what to do */ - gpr_mu_lock(&calld->mu); + grpc_subchannel_call *call; + retry: /* need to recheck that another thread hasn't set the call */ call = GET_CALL(calld); if (call == CANCELLED_CALL) { - gpr_mu_unlock(&calld->mu); grpc_transport_stream_op_finish_with_failure( exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error)); GPR_TIMER_END("cc_start_transport_stream_op", 0); return; } if (call != NULL) { - gpr_mu_unlock(&calld->mu); grpc_subchannel_call_process_op(exec_ctx, call, op); GPR_TIMER_END("cc_start_transport_stream_op", 0); return; @@ -946,11 +917,11 @@ retry: fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error)); break; case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL: - pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel, - NULL, GRPC_ERROR_REF(op->cancel_error)); + pick_subchannel_locked(exec_ctx, elem, NULL, 0, + &calld->connected_subchannel, NULL, + GRPC_ERROR_REF(op->cancel_error)); break; } - gpr_mu_unlock(&calld->mu); grpc_transport_stream_op_finish_with_failure( exec_ctx, op, GRPC_ERROR_REF(op->cancel_error)); GPR_TIMER_END("cc_start_transport_stream_op", 0); @@ -962,16 +933,16 @@ retry: calld->connected_subchannel == NULL && op->send_initial_metadata != NULL) { calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL; - grpc_closure_init(&calld->next_step, subchannel_ready, elem, - grpc_schedule_on_exec_ctx); + grpc_closure_init(&calld->next_step, subchannel_ready_locked, elem, + grpc_combiner_scheduler(chand->combiner, true)); GRPC_CALL_STACK_REF(calld->owning_call, 
"pick_subchannel"); /* If a subchannel is not available immediately, the polling entity from call_data should be provided to channel_data's interested_parties, so that IO of the lb_policy and resolver could be done under it. */ - if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata, - op->send_initial_metadata_flags, - &calld->connected_subchannel, &calld->next_step, - GRPC_ERROR_NONE)) { + if (pick_subchannel_locked(exec_ctx, elem, op->send_initial_metadata, + op->send_initial_metadata_flags, + &calld->connected_subchannel, &calld->next_step, + GRPC_ERROR_NONE)) { calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING; GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel"); } else { @@ -998,27 +969,60 @@ retry: } /* nothing to be done but wait */ add_waiting_locked(calld, op); - gpr_mu_unlock(&calld->mu); GPR_TIMER_END("cc_start_transport_stream_op", 0); } +// The logic here is fairly complicated, due to (a) the fact that we +// need to handle the case where we receive the send op before the +// initial metadata op, and (b) the need for efficiency, especially in +// the streaming case. +// TODO(ctiller): Explain this more thoroughly. +static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + grpc_transport_stream_op *op) { + call_data *calld = elem->call_data; + channel_data *chand = elem->channel_data; + GRPC_CALL_LOG_OP(GPR_INFO, elem, op); + grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op); + /* try to (atomically) get the call */ + grpc_subchannel_call *call = GET_CALL(calld); + GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0); + if (call == CANCELLED_CALL) { + grpc_transport_stream_op_finish_with_failure( + exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error)); + GPR_TIMER_END("cc_start_transport_stream_op", 0); + return; + } + if (call != NULL) { + grpc_subchannel_call_process_op(exec_ctx, call, op); + GPR_TIMER_END("cc_start_transport_stream_op", 0); + return; + } + /* we failed; lock and figure out what to do */ + op->transport_private.args[0] = elem; + grpc_closure_sched( + exec_ctx, + grpc_closure_init(&op->transport_private.closure, + cc_start_transport_stream_op_locked, op, + grpc_combiner_scheduler(chand->combiner, false)), + GRPC_ERROR_NONE); +} + // Gets data from the service config. Invoked when the resolver returns // its initial result. -static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { grpc_call_element *elem = arg; channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; // If this is an error, there's no point in looking at the service config. if (error == GRPC_ERROR_NONE) { // Get the method config table from channel data. - gpr_mu_lock(&chand->mu); grpc_slice_hash_table *method_params_table = NULL; if (chand->method_params_table != NULL) { method_params_table = grpc_slice_hash_table_ref(chand->method_params_table); } - gpr_mu_unlock(&chand->mu); // If the method config table was present, use it. 
if (method_params_table != NULL) { const method_parameters *method_params = grpc_method_config_table_get( @@ -1028,7 +1032,6 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg, gpr_time_cmp(method_params->timeout, gpr_time_0(GPR_TIMESPAN)) != 0; if (have_method_timeout || method_params->wait_for_ready != WAIT_FOR_READY_UNSET) { - gpr_mu_lock(&calld->mu); if (have_method_timeout) { const gpr_timespec per_method_deadline = gpr_time_add(calld->call_start_time, method_params->timeout); @@ -1042,7 +1045,6 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg, calld->wait_for_ready_from_service_config = method_params->wait_for_ready; } - gpr_mu_unlock(&calld->mu); } } grpc_slice_hash_table_unref(exec_ctx, method_params_table); @@ -1051,43 +1053,25 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg, GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config"); } -/* Constructor for call_data */ -static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_call_element_args *args) { +static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx, + void *arg, + grpc_error *error_ignored) { + grpc_call_element *elem = arg; channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; - // Initialize data members. - grpc_deadline_state_init(exec_ctx, elem, args->call_stack); - calld->path = grpc_slice_ref_internal(args->path); - calld->call_start_time = args->start_time; - calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); - calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET; - calld->cancel_error = GRPC_ERROR_NONE; - gpr_atm_rel_store(&calld->subchannel_call, 0); - gpr_mu_init(&calld->mu); - calld->connected_subchannel = NULL; - calld->waiting_ops = NULL; - calld->waiting_ops_count = 0; - calld->waiting_ops_capacity = 0; - calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING; - calld->owning_call = args->call_stack; - calld->pollent = NULL; // If the resolver has already returned results, then we can access // the service config parameters immediately. Otherwise, we need to // defer that work until the resolver returns an initial result. // TODO(roth): This code is almost but not quite identical to the code // in read_service_config() above. It would be nice to find a way to // combine them, to avoid having to maintain it twice. - gpr_mu_lock(&chand->mu); if (chand->lb_policy != NULL) { // We already have a resolver result, so check for service config. if (chand->method_params_table != NULL) { grpc_slice_hash_table *method_params_table = grpc_slice_hash_table_ref(chand->method_params_table); - gpr_mu_unlock(&chand->mu); method_parameters *method_params = grpc_method_config_table_get( - exec_ctx, method_params_table, args->path); + exec_ctx, method_params_table, calld->path); if (method_params != NULL) { if (gpr_time_cmp(method_params->timeout, gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) { @@ -1101,24 +1085,50 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, } } grpc_slice_hash_table_unref(exec_ctx, method_params_table); - } else { - gpr_mu_unlock(&chand->mu); } } else { // We don't yet have a resolver result, so register a callback to // get the service config data once the resolver returns. // Take a reference to the call stack to be owned by the callback. 
GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config"); - grpc_closure_init(&calld->read_service_config, read_service_config, elem, - grpc_schedule_on_exec_ctx); + grpc_closure_init(&calld->read_service_config, read_service_config_locked, + elem, grpc_combiner_scheduler(chand->combiner, false)); grpc_closure_list_append(&chand->waiting_for_config_closures, &calld->read_service_config, GRPC_ERROR_NONE); - gpr_mu_unlock(&chand->mu); } // Start the deadline timer with the current deadline value. If we // do not yet have service config data, then the timer may be reset // later. grpc_deadline_state_start(exec_ctx, elem, calld->deadline); +} + +/* Constructor for call_data */ +static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + grpc_call_element_args *args) { + channel_data *chand = elem->channel_data; + call_data *calld = elem->call_data; + // Initialize data members. + grpc_deadline_state_init(exec_ctx, elem, args->call_stack); + calld->path = grpc_slice_ref_internal(args->path); + calld->call_start_time = args->start_time; + calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); + calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET; + calld->cancel_error = GRPC_ERROR_NONE; + gpr_atm_rel_store(&calld->subchannel_call, 0); + calld->connected_subchannel = NULL; + calld->waiting_ops = NULL; + calld->waiting_ops_count = 0; + calld->waiting_ops_capacity = 0; + calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING; + calld->owning_call = args->call_stack; + calld->pollent = NULL; + grpc_closure_sched( + exec_ctx, + grpc_closure_init(&calld->read_service_config, + initial_read_service_config_locked, elem, + grpc_combiner_scheduler(chand->combiner, false)), + GRPC_ERROR_NONE); return GRPC_ERROR_NONE; } @@ -1136,7 +1146,6 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx, GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call"); } GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING); - gpr_mu_destroy(&calld->mu); GPR_ASSERT(calld->waiting_ops_count == 0); if (calld->connected_subchannel != NULL) { GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel, -- cgit v1.2.3 From 613dafa60ce3a22a5d7f1351b8054a3090b9deb1 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 9 Feb 2017 12:00:43 -0800 Subject: Convert connectivity_state, channel info into a combiner-compatible form --- src/core/ext/client_channel/client_channel.c | 100 ++++++++++++++--------- src/core/ext/client_channel/subchannel.c | 2 +- src/core/ext/lb_policy/grpclb/grpclb.c | 9 +- src/core/ext/lb_policy/pick_first/pick_first.c | 2 +- src/core/ext/lb_policy/round_robin/round_robin.c | 2 +- src/core/lib/transport/connectivity_state.c | 42 +++++++--- src/core/lib/transport/connectivity_state.h | 20 +++-- test/core/transport/connectivity_state_test.c | 3 +- 8 files changed, 117 insertions(+), 63 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 2595acd8c4..58504de87d 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -164,10 +164,7 @@ typedef struct client_channel_channel_data { /** combiner protecting all variables below in this data structure */ grpc_combiner *combiner; /** currently active load balancer */ - char *lb_policy_name; grpc_lb_policy *lb_policy; - /** service config in JSON form */ - char *service_config_json; /** 
maps method names to method_parameters structs */ grpc_slice_hash_table *method_params_table; /** incoming resolver result - set by resolver.next() */ @@ -184,6 +181,13 @@ typedef struct client_channel_channel_data { grpc_channel_stack *owning_stack; /** interested parties (owned) */ grpc_pollset_set *interested_parties; + + /* the following properties are guarded by a mutex since API's require them + to be instantaniously available */ + gpr_mu info_mu; + char *info_lb_policy_name; + /** service config in JSON form */ + char *info_service_config_json; } channel_data; /** We create one watcher for each new lb_policy that is returned from a @@ -345,16 +349,18 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx, chand->interested_parties); } + gpr_mu_lock(&chand->info_mu); if (lb_policy_name != NULL) { - gpr_free(chand->lb_policy_name); - chand->lb_policy_name = lb_policy_name; + gpr_free(chand->info_lb_policy_name); + chand->info_lb_policy_name = lb_policy_name; } old_lb_policy = chand->lb_policy; chand->lb_policy = lb_policy; if (service_config_json != NULL) { - gpr_free(chand->service_config_json); - chand->service_config_json = service_config_json; + gpr_free(chand->info_service_config_json); + chand->info_service_config_json = service_config_json; } + gpr_mu_unlock(&chand->info_mu); if (chand->method_params_table != NULL) { grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table); } @@ -491,18 +497,19 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, const grpc_channel_info *info) { channel_data *chand = elem->channel_data; - gpr_mu_lock(&chand->mu); + gpr_mu_lock(&chand->info_mu); if (info->lb_policy_name != NULL) { - *info->lb_policy_name = chand->lb_policy_name == NULL + *info->lb_policy_name = chand->info_lb_policy_name == NULL ? NULL - : gpr_strdup(chand->lb_policy_name); + : gpr_strdup(chand->info_lb_policy_name); } if (info->service_config_json != NULL) { - *info->service_config_json = chand->service_config_json == NULL - ? NULL - : gpr_strdup(chand->service_config_json); + *info->service_config_json = + chand->info_service_config_json == NULL + ? 
NULL + : gpr_strdup(chand->info_service_config_json); } - gpr_mu_unlock(&chand->mu); + gpr_mu_unlock(&chand->info_mu); } /* Constructor for channel_data */ @@ -567,8 +574,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx, chand->interested_parties); GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel"); } - gpr_free(chand->lb_policy_name); - gpr_free(chand->service_config_json); + gpr_free(chand->info_lb_policy_name); + gpr_free(chand->info_service_config_json); if (chand->method_params_table != NULL) { grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table); } @@ -1181,26 +1188,34 @@ const grpc_channel_filter grpc_client_channel_filter = { "client-channel", }; +static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error_ignored) { + channel_data *chand = arg; + if (chand->lb_policy != NULL) { + grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy); + } else { + chand->exit_idle_when_lb_policy_arrives = true; + if (!chand->started_resolving && chand->resolver != NULL) { + GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); + chand->started_resolving = true; + grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result, + &chand->on_resolver_result_changed); + } + } +} + grpc_connectivity_state grpc_client_channel_check_connectivity_state( grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) { channel_data *chand = elem->channel_data; grpc_connectivity_state out; - gpr_mu_lock(&chand->mu); - out = grpc_connectivity_state_check(&chand->state_tracker, NULL); + out = grpc_connectivity_state_check(&chand->state_tracker); if (out == GRPC_CHANNEL_IDLE && try_to_connect) { - if (chand->lb_policy != NULL) { - grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy); - } else { - chand->exit_idle_when_lb_policy_arrives = true; - if (!chand->started_resolving && chand->resolver != NULL) { - GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); - chand->started_resolving = true; - grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result, - &chand->on_resolver_result_changed); - } - } + grpc_closure_sched( + exec_ctx, + grpc_closure_create(try_to_connect_locked, chand, + grpc_combiner_scheduler(chand->combiner, false)), + GRPC_ERROR_NONE); } - gpr_mu_unlock(&chand->mu); return out; } @@ -1208,6 +1223,7 @@ typedef struct { channel_data *chand; grpc_pollset *pollset; grpc_closure *on_complete; + grpc_connectivity_state *state; grpc_closure my_closure; } external_connectivity_watcher; @@ -1220,7 +1236,17 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg, GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "external_connectivity_watcher"); gpr_free(w); - follow_up->cb(exec_ctx, follow_up->cb_arg, error); + grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error)); +} + +static void cc_watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, + void *arg, + grpc_error *error_ignored) { + external_connectivity_watcher *w = arg; + grpc_closure_init(&w->my_closure, on_external_watch_complete, w, + grpc_schedule_on_exec_ctx); + grpc_connectivity_state_notify_on_state_change( + exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure); } void grpc_client_channel_watch_connectivity_state( @@ -1231,13 +1257,13 @@ void grpc_client_channel_watch_connectivity_state( w->chand = chand; w->pollset = pollset; w->on_complete = on_complete; + w->state = state; grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset); - grpc_closure_init(&w->my_closure, 
on_external_watch_complete, w, - grpc_schedule_on_exec_ctx); GRPC_CHANNEL_STACK_REF(w->chand->owning_stack, "external_connectivity_watcher"); - gpr_mu_lock(&chand->mu); - grpc_connectivity_state_notify_on_state_change( - exec_ctx, &chand->state_tracker, state, &w->my_closure); - gpr_mu_unlock(&chand->mu); + grpc_closure_sched( + exec_ctx, + grpc_closure_init(&w->my_closure, cc_watch_connectivity_state_locked, w, + grpc_combiner_scheduler(chand->combiner, true)), + GRPC_ERROR_NONE); } diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c index aa036e883b..c37134fd5e 100644 --- a/src/core/ext/client_channel/subchannel.c +++ b/src/core/ext/client_channel/subchannel.c @@ -419,7 +419,7 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c, grpc_error **error) { grpc_connectivity_state state; gpr_mu_lock(&c->mu); - state = grpc_connectivity_state_check(&c->state_tracker, error); + state = grpc_connectivity_state_get(&c->state_tracker, error); gpr_mu_unlock(&c->mu); return state; } diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c index ab62e5ed6a..8a2af48328 100644 --- a/src/core/ext/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/lb_policy/grpclb/grpclb.c @@ -492,9 +492,8 @@ static grpc_lb_addresses *process_serverlist_locked( static bool update_lb_connectivity_status_locked( grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, grpc_connectivity_state new_rr_state, grpc_error *new_rr_state_error) { - grpc_error *curr_state_error; - const grpc_connectivity_state curr_glb_state = grpc_connectivity_state_check( - &glb_policy->state_tracker, &curr_state_error); + const grpc_connectivity_state curr_glb_state = + grpc_connectivity_state_check(&glb_policy->state_tracker); /* The new connectivity status is a function of the previous one and the new * input coming from the status of the RR policy. 
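Two things make the connectivity-state conversion above work. First, the tracker's state becomes a gpr_atm, so grpc_connectivity_state_check() is a plain atomic load callable from any thread, while grpc_connectivity_state_get() and the set/watcher paths still require the owner's lock or combiner because they also touch current_error and the watcher list (the later hunks in this commit show that split). Second, any work triggered by the lock-free read is bounced onto the combiner, and a later commit in this series adds a channel-stack ref so the channel cannot be destroyed before that closure runs. A condensed view of the shape grpc_client_channel_check_connectivity_state takes once that refcounting fix is applied; my_check_connectivity is an illustrative stand-in name:

static grpc_connectivity_state my_check_connectivity(grpc_exec_ctx *exec_ctx,
                                                     channel_data *chand,
                                                     int try_to_connect) {
  /* Lock-free read of the last published state. */
  grpc_connectivity_state out =
      grpc_connectivity_state_check(&chand->state_tracker);
  if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
    /* Keep the channel stack alive until the combiner runs the closure;
       the matching UNREF sits at the end of try_to_connect_locked. */
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
    grpc_closure_sched(
        exec_ctx,
        grpc_closure_create(try_to_connect_locked, chand,
                            grpc_combiner_scheduler(chand->combiner, false)),
        GRPC_ERROR_NONE);
  }
  return out;
}
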
@@ -1098,8 +1097,8 @@ static grpc_connectivity_state glb_check_connectivity( glb_lb_policy *glb_policy = (glb_lb_policy *)pol; grpc_connectivity_state st; gpr_mu_lock(&glb_policy->mu); - st = grpc_connectivity_state_check(&glb_policy->state_tracker, - connectivity_error); + st = grpc_connectivity_state_get(&glb_policy->state_tracker, + connectivity_error); gpr_mu_unlock(&glb_policy->mu); return st; } diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c index 9f2aa461be..1b965183f6 100644 --- a/src/core/ext/lb_policy/pick_first/pick_first.c +++ b/src/core/ext/lb_policy/pick_first/pick_first.c @@ -398,7 +398,7 @@ static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p = (pick_first_lb_policy *)pol; grpc_connectivity_state st; gpr_mu_lock(&p->mu); - st = grpc_connectivity_state_check(&p->state_tracker, error); + st = grpc_connectivity_state_get(&p->state_tracker, error); gpr_mu_unlock(&p->mu); return st; } diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c index 3e060d189a..63e3d033ad 100644 --- a/src/core/ext/lb_policy/round_robin/round_robin.c +++ b/src/core/ext/lb_policy/round_robin/round_robin.c @@ -655,7 +655,7 @@ static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p = (round_robin_lb_policy *)pol; grpc_connectivity_state st; gpr_mu_lock(&p->mu); - st = grpc_connectivity_state_check(&p->state_tracker, error); + st = grpc_connectivity_state_get(&p->state_tracker, error); gpr_mu_unlock(&p->mu); return st; } diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c index 8fc5bf3e9a..e884567240 100644 --- a/src/core/lib/transport/connectivity_state.c +++ b/src/core/lib/transport/connectivity_state.c @@ -62,7 +62,7 @@ const char *grpc_connectivity_state_name(grpc_connectivity_state state) { void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker, grpc_connectivity_state init_state, const char *name) { - tracker->current_state = init_state; + gpr_atm_no_barrier_store(&tracker->current_state_atm, init_state); tracker->current_error = GRPC_ERROR_NONE; tracker->watchers = NULL; tracker->name = gpr_strdup(name); @@ -89,15 +89,29 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, } grpc_connectivity_state grpc_connectivity_state_check( + grpc_connectivity_state_tracker *tracker) { + grpc_connectivity_state cur = + (grpc_connectivity_state)gpr_atm_no_barrier_load( + &tracker->current_state_atm); + if (grpc_connectivity_state_trace) { + gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name, + grpc_connectivity_state_name(cur)); + } + return cur; +} + +grpc_connectivity_state grpc_connectivity_state_get( grpc_connectivity_state_tracker *tracker, grpc_error **error) { + grpc_connectivity_state cur =(grpc_connectivity_state) + gpr_atm_no_barrier_load(&tracker->current_state_atm); if (grpc_connectivity_state_trace) { gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name, - grpc_connectivity_state_name(tracker->current_state)); + grpc_connectivity_state_name(cur)); } if (error != NULL) { *error = GRPC_ERROR_REF(tracker->current_error); } - return tracker->current_state; + return cur; } bool grpc_connectivity_state_has_watchers( @@ -108,6 +122,8 @@ bool grpc_connectivity_state_has_watchers( bool grpc_connectivity_state_notify_on_state_change( grpc_exec_ctx *exec_ctx, 
grpc_connectivity_state_tracker *tracker, grpc_connectivity_state *current, grpc_closure *notify) { + grpc_connectivity_state cur =(grpc_connectivity_state) + gpr_atm_no_barrier_load(&tracker->current_state_atm); if (grpc_connectivity_state_trace) { if (current == NULL) { gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker, @@ -115,7 +131,7 @@ bool grpc_connectivity_state_notify_on_state_change( } else { gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker, tracker->name, grpc_connectivity_state_name(*current), - grpc_connectivity_state_name(tracker->current_state), notify); + grpc_connectivity_state_name(cur), notify); } } if (current == NULL) { @@ -138,8 +154,8 @@ bool grpc_connectivity_state_notify_on_state_change( } return false; } else { - if (tracker->current_state != *current) { - *current = tracker->current_state; + if (cur != *current) { + *current = cur; grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_REF(tracker->current_error)); } else { @@ -149,7 +165,7 @@ bool grpc_connectivity_state_notify_on_state_change( w->next = tracker->watchers; tracker->watchers = w; } - return tracker->current_state == GRPC_CHANNEL_IDLE; + return cur == GRPC_CHANNEL_IDLE; } } @@ -157,11 +173,13 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker, grpc_connectivity_state state, grpc_error *error, const char *reason) { + grpc_connectivity_state cur =(grpc_connectivity_state) + gpr_atm_no_barrier_load(&tracker->current_state_atm); grpc_connectivity_state_watcher *w; if (grpc_connectivity_state_trace) { const char *error_string = grpc_error_string(error); gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker, - tracker->name, grpc_connectivity_state_name(tracker->current_state), + tracker->name, grpc_connectivity_state_name(cur), grpc_connectivity_state_name(state), reason, error, error_string); } switch (state) { @@ -178,13 +196,13 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, } GRPC_ERROR_UNREF(tracker->current_error); tracker->current_error = error; - if (tracker->current_state == state) { + if (cur == state) { return; } - GPR_ASSERT(tracker->current_state != GRPC_CHANNEL_SHUTDOWN); - tracker->current_state = state; + GPR_ASSERT(cur != GRPC_CHANNEL_SHUTDOWN); + gpr_atm_no_barrier_store(&tracker->current_state_atm, state); while ((w = tracker->watchers) != NULL) { - *w->current = tracker->current_state; + *w->current = state; tracker->watchers = w->next; if (grpc_connectivity_state_trace) { gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name, diff --git a/src/core/lib/transport/connectivity_state.h b/src/core/lib/transport/connectivity_state.h index 769c675b79..c9604c34dd 100644 --- a/src/core/lib/transport/connectivity_state.h +++ b/src/core/lib/transport/connectivity_state.h @@ -47,8 +47,8 @@ typedef struct grpc_connectivity_state_watcher { } grpc_connectivity_state_watcher; typedef struct { - /** current connectivity state */ - grpc_connectivity_state current_state; + /** current grpc_connectivity_state */ + gpr_atm current_state_atm; /** error associated with state */ grpc_error *current_error; /** all our watchers */ @@ -59,6 +59,7 @@ typedef struct { extern int grpc_connectivity_state_trace; +/** enum --> string conversion */ const char *grpc_connectivity_state_name(grpc_connectivity_state state); void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker, @@ -68,22 +69,31 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, 
grpc_connectivity_state_tracker *tracker); /** Set connectivity state; not thread safe; access must be serialized with an - * external lock */ + * external lock */ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker, grpc_connectivity_state state, grpc_error *associated_error, const char *reason); +/** Return true if this connectivity state has watchers. + Access must be serialized with an external lock. */ bool grpc_connectivity_state_has_watchers( grpc_connectivity_state_tracker *tracker); +/** Return the last seen connectivity state. No need to synchronize access. */ grpc_connectivity_state grpc_connectivity_state_check( - grpc_connectivity_state_tracker *tracker, grpc_error **current_error); + grpc_connectivity_state_tracker *tracker); + +/** Return the last seen connectivity state, and the associated error. + Access must be serialized with an external lock. */ +grpc_connectivity_state grpc_connectivity_state_get( + grpc_connectivity_state_tracker *tracker, grpc_error **error); /** Return 1 if the channel should start connecting, 0 otherwise. If current==NULL cancel notify if it is already queued (success==0 in that - case) */ + case). + Access must be serialized with an external lock. */ bool grpc_connectivity_state_notify_on_state_change( grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker, grpc_connectivity_state *current, grpc_closure *notify); diff --git a/test/core/transport/connectivity_state_test.c b/test/core/transport/connectivity_state_test.c index 3520ef0a80..8314a5f619 100644 --- a/test/core/transport/connectivity_state_test.c +++ b/test/core/transport/connectivity_state_test.c @@ -77,8 +77,9 @@ static void test_check(void) { grpc_error *error; gpr_log(GPR_DEBUG, "test_check"); grpc_connectivity_state_init(&tracker, GRPC_CHANNEL_IDLE, "xxx"); - GPR_ASSERT(grpc_connectivity_state_check(&tracker, &error) == + GPR_ASSERT(grpc_connectivity_state_get(&tracker, &error) == GRPC_CHANNEL_IDLE); + GPR_ASSERT(grpc_connectivity_state_check(&tracker) == GRPC_CHANNEL_IDLE); GPR_ASSERT(error == GRPC_ERROR_NONE); grpc_connectivity_state_destroy(&exec_ctx, &tracker); grpc_exec_ctx_finish(&exec_ctx); -- cgit v1.2.3 From d85477515230c5161659175cbc60b684109aedbf Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 9 Feb 2017 12:02:39 -0800 Subject: Init mutex --- src/core/ext/client_channel/client_channel.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 58504de87d..a7dd967a51 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -522,6 +522,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx, GPR_ASSERT(elem->filter == &grpc_client_channel_filter); // Initialize data members. 
chand->combiner = grpc_combiner_create(NULL); + gpr_mu_init(&chand->info_mu); chand->owning_stack = args->channel_stack; grpc_closure_init(&chand->on_resolver_result_changed, on_resolver_result_changed_locked, chand, @@ -582,6 +583,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker); grpc_pollset_set_destroy(chand->interested_parties); grpc_combiner_destroy(exec_ctx, chand->combiner); + gpr_mu_destroy(&chand->info_mu); } /************************************************************************* -- cgit v1.2.3 From d2e5cfc5bbe844ece808a843314458b798368748 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 9 Feb 2017 13:02:20 -0800 Subject: Start fixing refcounting --- src/core/ext/client_channel/client_channel.c | 48 +++++++++++----------------- 1 file changed, 19 insertions(+), 29 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index a7dd967a51..282913431f 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -469,6 +469,9 @@ static void cc_start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg, } GRPC_ERROR_UNREF(op->disconnect_with_error); } + GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "start_transport_op"); + + grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); } static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, @@ -476,8 +479,6 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport_op *op) { channel_data *chand = elem->channel_data; - grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); - GPR_ASSERT(op->set_accept_stream == false); if (op->bind_pollset != NULL) { grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, @@ -485,6 +486,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, } op->transport_private.args[0] = elem; + GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op"); grpc_closure_sched( exec_ctx, grpc_closure_init(&op->transport_private.closure, @@ -670,44 +672,24 @@ static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld, GRPC_ERROR_UNREF(error); } -typedef struct { - grpc_transport_stream_op **ops; - size_t nops; - grpc_subchannel_call *call; -} retry_ops_args; - -static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) { - retry_ops_args *a = args; - size_t i; - for (i = 0; i < a->nops; i++) { - grpc_subchannel_call_process_op(exec_ctx, a->call, a->ops[i]); - } - GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops"); - gpr_free(a->ops); - gpr_free(a); -} - static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) { if (calld->waiting_ops_count == 0) { return; } - retry_ops_args *a = gpr_malloc(sizeof(*a)); - a->ops = calld->waiting_ops; - a->nops = calld->waiting_ops_count; - a->call = GET_CALL(calld); - if (a->call == CANCELLED_CALL) { - gpr_free(a); + grpc_subchannel_call *call = GET_CALL(calld); + grpc_transport_stream_op **ops = calld->waiting_ops; + size_t nops = calld->waiting_ops_count; + if (call == CANCELLED_CALL) { fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED); return; } calld->waiting_ops = NULL; calld->waiting_ops_count = 0; calld->waiting_ops_capacity = 0; - GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops"); - grpc_closure_sched( - exec_ctx, grpc_closure_create(retry_ops, a, grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); + for (size_t i = 0; i < nops; i++) { + 
grpc_subchannel_call_process_op(exec_ctx, call, ops[i]); + } } static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg, @@ -978,6 +960,8 @@ retry: } /* nothing to be done but wait */ add_waiting_locked(calld, op); + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, + "start_transport_stream_op"); GPR_TIMER_END("cc_start_transport_stream_op", 0); } @@ -1008,6 +992,7 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, return; } /* we failed; lock and figure out what to do */ + GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op"); op->transport_private.args[0] = elem; grpc_closure_sched( exec_ctx, @@ -1109,6 +1094,8 @@ static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx, // do not yet have service config data, then the timer may be reset // later. grpc_deadline_state_start(exec_ctx, elem, calld->deadline); + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, + "initial_read_service_config"); } /* Constructor for call_data */ @@ -1132,6 +1119,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING; calld->owning_call = args->call_stack; calld->pollent = NULL; + GRPC_CALL_STACK_REF(calld->owning_call, "initial_read_service_config"); grpc_closure_sched( exec_ctx, grpc_closure_init(&calld->read_service_config, @@ -1204,6 +1192,7 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg, &chand->on_resolver_result_changed); } } + GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect"); } grpc_connectivity_state grpc_client_channel_check_connectivity_state( @@ -1212,6 +1201,7 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state( grpc_connectivity_state out; out = grpc_connectivity_state_check(&chand->state_tracker); if (out == GRPC_CHANNEL_IDLE && try_to_connect) { + GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect"); grpc_closure_sched( exec_ctx, grpc_closure_create(try_to_connect_locked, chand, -- cgit v1.2.3 From 9efea88d33132964b3eadcbdc43bd2bba38517ad Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 9 Feb 2017 13:06:52 -0800 Subject: Fix waiting_ops leak --- src/core/ext/client_channel/client_channel.c | 1 + 1 file changed, 1 insertion(+) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 282913431f..4bde9ec805 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -690,6 +690,7 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) { for (size_t i = 0; i < nops; i++) { grpc_subchannel_call_process_op(exec_ctx, call, ops[i]); } + gpr_free(ops); } static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg, -- cgit v1.2.3 From 2b56dcc49ee44a9c30cb8397c0d0fdf0d42a5a65 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 9 Feb 2017 13:13:05 -0800 Subject: Fix leak in client_channel --- src/core/ext/client_channel/client_channel.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 4bde9ec805..e62fdf3adb 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -884,13 +884,11 @@ retry: if (call == CANCELLED_CALL) { grpc_transport_stream_op_finish_with_failure( exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error)); - 
GPR_TIMER_END("cc_start_transport_stream_op", 0); - return; + goto done; } if (call != NULL) { grpc_subchannel_call_process_op(exec_ctx, call, op); - GPR_TIMER_END("cc_start_transport_stream_op", 0); - return; + goto done; } /* if this is a cancellation, then we can raise our cancelled flag */ if (op->cancel_error != GRPC_ERROR_NONE) { @@ -916,8 +914,7 @@ retry: } grpc_transport_stream_op_finish_with_failure( exec_ctx, op, GRPC_ERROR_REF(op->cancel_error)); - GPR_TIMER_END("cc_start_transport_stream_op", 0); - return; + goto done; } } /* if we don't have a subchannel, try to get one */ @@ -961,6 +958,7 @@ retry: } /* nothing to be done but wait */ add_waiting_locked(calld, op); +done: GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "start_transport_stream_op"); GPR_TIMER_END("cc_start_transport_stream_op", 0); -- cgit v1.2.3 From c5de83531ee88919da57cc37dbc8a2725c7b94ca Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 9 Feb 2017 14:08:05 -0800 Subject: Fix leaks --- src/core/ext/client_channel/client_channel.c | 26 +++++++++++++------------- src/core/lib/transport/connectivity_state.c | 15 +++++++++------ 2 files changed, 22 insertions(+), 19 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index e62fdf3adb..907d81866d 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -226,19 +226,19 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { lb_policy_connectivity_watcher *w = arg; grpc_connectivity_state publish_state = w->state; - /* check if the notification is for a stale policy */ - if (w->lb_policy != w->chand->lb_policy) return; - - if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) { - publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE; - grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver); - GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel"); - w->chand->lb_policy = NULL; - } - set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state, - GRPC_ERROR_REF(error), "lb_changed"); - if (w->state != GRPC_CHANNEL_SHUTDOWN) { - watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state); + /* check if the notification is for the latest policy */ + if (w->lb_policy == w->chand->lb_policy) { + if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) { + publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE; + grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver); + GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel"); + w->chand->lb_policy = NULL; + } + set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state, + GRPC_ERROR_REF(error), "lb_changed"); + if (w->state != GRPC_CHANNEL_SHUTDOWN) { + watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state); + } } GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy"); diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c index e884567240..afe1f6164d 100644 --- a/src/core/lib/transport/connectivity_state.c +++ b/src/core/lib/transport/connectivity_state.c @@ -102,8 +102,9 @@ grpc_connectivity_state grpc_connectivity_state_check( grpc_connectivity_state grpc_connectivity_state_get( grpc_connectivity_state_tracker *tracker, grpc_error **error) { - grpc_connectivity_state cur =(grpc_connectivity_state) - gpr_atm_no_barrier_load(&tracker->current_state_atm); + 
grpc_connectivity_state cur = + (grpc_connectivity_state)gpr_atm_no_barrier_load( + &tracker->current_state_atm); if (grpc_connectivity_state_trace) { gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name, grpc_connectivity_state_name(cur)); @@ -122,8 +123,9 @@ bool grpc_connectivity_state_has_watchers( bool grpc_connectivity_state_notify_on_state_change( grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker, grpc_connectivity_state *current, grpc_closure *notify) { - grpc_connectivity_state cur =(grpc_connectivity_state) - gpr_atm_no_barrier_load(&tracker->current_state_atm); + grpc_connectivity_state cur = + (grpc_connectivity_state)gpr_atm_no_barrier_load( + &tracker->current_state_atm); if (grpc_connectivity_state_trace) { if (current == NULL) { gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker, @@ -173,8 +175,9 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker, grpc_connectivity_state state, grpc_error *error, const char *reason) { - grpc_connectivity_state cur =(grpc_connectivity_state) - gpr_atm_no_barrier_load(&tracker->current_state_atm); + grpc_connectivity_state cur = + (grpc_connectivity_state)gpr_atm_no_barrier_load( + &tracker->current_state_atm); grpc_connectivity_state_watcher *w; if (grpc_connectivity_state_trace) { const char *error_string = grpc_error_string(error); -- cgit v1.2.3 From f1021678e4de35babec7b09e64a3f1ce2750a841 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 9 Feb 2017 21:29:50 -0800 Subject: Merge fixup --- src/core/ext/client_channel/client_channel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 907d81866d..43f7e619cf 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -584,7 +584,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx, } grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker); grpc_pollset_set_destroy(chand->interested_parties); - grpc_combiner_destroy(exec_ctx, chand->combiner); + GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel"); gpr_mu_destroy(&chand->info_mu); } -- cgit v1.2.3 From 4a84bdda7f42e303c570e2ee73cc14c57df6c81b Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 14 Feb 2017 09:48:41 -0800 Subject: s/transport_private/handler_private -- use is no longer restricted to transports --- src/core/ext/client_channel/client_channel.c | 6 +++--- src/core/ext/transport/chttp2/transport/chttp2_transport.c | 10 +++++----- src/core/lib/transport/transport.h | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 43f7e619cf..49ea7f06be 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -873,7 +873,7 @@ static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error_ignored) { grpc_transport_stream_op *op = arg; - grpc_call_element *elem = op->transport_private.args[0]; + grpc_call_element *elem = op->handler_private.args[0]; call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; grpc_subchannel_call *call; @@ -992,10 +992,10 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, } /* we failed; lock and figure out what to do */ 
GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op"); - op->transport_private.args[0] = elem; + op->handler_private.args[0] = elem; grpc_closure_sched( exec_ctx, - grpc_closure_init(&op->transport_private.closure, + grpc_closure_init(&op->handler_private.closure, cc_start_transport_stream_op_locked, op, grpc_combiner_scheduler(chand->combiner, false)), GRPC_ERROR_NONE); diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 3ee5e976f8..c00e64f033 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -1033,8 +1033,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, GPR_TIMER_BEGIN("perform_stream_op_locked", 0); grpc_transport_stream_op *op = stream_op; - grpc_chttp2_transport *t = op->transport_private.args[0]; - grpc_chttp2_stream *s = op->transport_private.args[1]; + grpc_chttp2_transport *t = op->handler_private.args[0]; + grpc_chttp2_stream *s = op->handler_private.args[1]; if (grpc_http_trace) { char *str = grpc_transport_stream_op_string(op); @@ -1255,13 +1255,13 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, gpr_free(str); } - op->transport_private.args[0] = gt; - op->transport_private.args[1] = gs; + op->handler_private.args[0] = gt; + op->handler_private.args[1] = gs; GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op"); grpc_closure_sched( exec_ctx, grpc_closure_init( - &op->transport_private.closure, perform_stream_op_locked, op, + &op->handler_private.closure, perform_stream_op_locked, op, grpc_combiner_scheduler(t->combiner, op->covered_by_poller)), GRPC_ERROR_NONE); GPR_TIMER_END("perform_stream_op", 0); diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h index 9a0abe1ca4..e56bf2780a 100644 --- a/src/core/lib/transport/transport.h +++ b/src/core/lib/transport/transport.h @@ -167,9 +167,9 @@ typedef struct grpc_transport_stream_op { /*************************************************************************** * remaining fields are initialized and used at the discretion of the - * transport implementation */ + * current handler of the op */ - grpc_transport_private_op_data transport_private; + grpc_transport_private_op_data handler_private; } grpc_transport_stream_op; /** Transport op: a set of operations to perform on a transport as a whole */ -- cgit v1.2.3 From a11bfc85e76dc1371b4c209259eb3449fd4b0dde Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 14 Feb 2017 09:56:33 -0800 Subject: Simplify loop --- src/core/ext/client_channel/client_channel.c | 49 ++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 14 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 49ea7f06be..28ce224e61 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -869,32 +869,34 @@ static bool pick_subchannel_locked( return false; } -static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error_ignored) { - grpc_transport_stream_op *op = arg; - grpc_call_element *elem = op->handler_private.args[0]; - call_data *calld = elem->call_data; +static void cc_start_transport_stream_op_locked_inner( + grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op, + grpc_call_element *elem) { channel_data *chand = elem->channel_data; + 
call_data *calld = elem->call_data; grpc_subchannel_call *call; -retry: /* need to recheck that another thread hasn't set the call */ call = GET_CALL(calld); if (call == CANCELLED_CALL) { grpc_transport_stream_op_finish_with_failure( exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error)); - goto done; + /* early out */ + return; } if (call != NULL) { grpc_subchannel_call_process_op(exec_ctx, call, op); - goto done; + /* early out */ + return; } /* if this is a cancellation, then we can raise our cancelled flag */ if (op->cancel_error != GRPC_ERROR_NONE) { if (!gpr_atm_rel_cas(&calld->subchannel_call, 0, (gpr_atm)(uintptr_t)CANCELLED_CALL)) { - goto retry; + /* recurse to retry */ + cc_start_transport_stream_op_locked_inner(exec_ctx, op, elem); + /* early out */ + return; } else { // Stash a copy of cancel_error in our call data, so that we can use // it for subsequent operations. This ensures that if the call is @@ -914,7 +916,8 @@ retry: } grpc_transport_stream_op_finish_with_failure( exec_ctx, op, GRPC_ERROR_REF(op->cancel_error)); - goto done; + /* early out */ + return; } } /* if we don't have a subchannel, try to get one */ @@ -954,14 +957,29 @@ retry: gpr_atm_rel_store(&calld->subchannel_call, (gpr_atm)(uintptr_t)subchannel_call); retry_waiting_locked(exec_ctx, calld); - goto retry; + /* recurse to retry */ + cc_start_transport_stream_op_locked_inner(exec_ctx, op, elem); + /* early out */ + return; } /* nothing to be done but wait */ add_waiting_locked(calld, op); -done: +} + +static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, + void *arg, + grpc_error *error_ignored) { + GPR_TIMER_BEGIN("cc_start_transport_stream_op_locked", 0); + + grpc_transport_stream_op *op = arg; + grpc_call_element *elem = op->handler_private.args[0]; + call_data *calld = elem->call_data; + + cc_start_transport_stream_op_locked_inner(exec_ctx, op, elem); + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "start_transport_stream_op"); - GPR_TIMER_END("cc_start_transport_stream_op", 0); + GPR_TIMER_END("cc_start_transport_stream_op_locked", 0); } // The logic here is fairly complicated, due to (a) the fact that we @@ -983,11 +1001,13 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_finish_with_failure( exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error)); GPR_TIMER_END("cc_start_transport_stream_op", 0); + /* early out */ return; } if (call != NULL) { grpc_subchannel_call_process_op(exec_ctx, call, op); GPR_TIMER_END("cc_start_transport_stream_op", 0); + /* early out */ return; } /* we failed; lock and figure out what to do */ @@ -999,6 +1019,7 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, cc_start_transport_stream_op_locked, op, grpc_combiner_scheduler(chand->combiner, false)), GRPC_ERROR_NONE); + GPR_TIMER_END("cc_start_transport_stream_op", 0); } // Gets data from the service config. 
Invoked when the resolver returns -- cgit v1.2.3 From be9691af99b438c72b64dd070cf0d45fac29792e Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 14 Feb 2017 10:00:42 -0800 Subject: Add commentary --- src/core/ext/client_channel/client_channel.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 28ce224e61..e500187975 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -898,11 +898,11 @@ static void cc_start_transport_stream_op_locked_inner( /* early out */ return; } else { - // Stash a copy of cancel_error in our call data, so that we can use - // it for subsequent operations. This ensures that if the call is - // cancelled before any ops are passed down (e.g., if the deadline - // is in the past when the call starts), we can return the right - // error to the caller when the first op does get passed down. + /* Stash a copy of cancel_error in our call data, so that we can use + it for subsequent operations. This ensures that if the call is + cancelled before any ops are passed down (e.g., if the deadline + is in the past when the call starts), we can return the right + error to the caller when the first op does get passed down. */ calld->cancel_error = GRPC_ERROR_REF(op->cancel_error); switch (calld->creation_phase) { case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING: @@ -982,11 +982,14 @@ static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, GPR_TIMER_END("cc_start_transport_stream_op_locked", 0); } -// The logic here is fairly complicated, due to (a) the fact that we -// need to handle the case where we receive the send op before the -// initial metadata op, and (b) the need for efficiency, especially in -// the streaming case. -// TODO(ctiller): Explain this more thoroughly. +/* The logic here is fairly complicated, due to (a) the fact that we + need to handle the case where we receive the send op before the + initial metadata op, and (b) the need for efficiency, especially in + the streaming case. + + We use double-checked locking to initially see if initialization has been + performed. If it has not, we acquire the combiner and perform initialization. + If it has, we proceed on the fast path. 
*/ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_transport_stream_op *op) { -- cgit v1.2.3 From a8610c094d8bdf60188b9fd4c9934b2a9d3c0b56 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 14 Feb 2017 10:05:11 -0800 Subject: Review nits --- src/core/ext/client_channel/client_channel.c | 34 +++++++++++++--------------- 1 file changed, 16 insertions(+), 18 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index e500187975..bedb13d974 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -421,8 +421,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx, GRPC_ERROR_UNREF(state_error); } -static void cc_start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error_ignored) { +static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error_ignored) { grpc_transport_op *op = arg; grpc_channel_element *elem = op->transport_private.args[0]; channel_data *chand = elem->channel_data; @@ -488,10 +488,9 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, op->transport_private.args[0] = elem; GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op"); grpc_closure_sched( - exec_ctx, - grpc_closure_init(&op->transport_private.closure, - cc_start_transport_op_locked, op, - grpc_combiner_scheduler(chand->combiner, false)), + exec_ctx, grpc_closure_init( + &op->transport_private.closure, start_transport_op_locked, + op, grpc_combiner_scheduler(chand->combiner, false)), GRPC_ERROR_NONE); } @@ -869,9 +868,9 @@ static bool pick_subchannel_locked( return false; } -static void cc_start_transport_stream_op_locked_inner( - grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op, - grpc_call_element *elem) { +static void start_transport_stream_op_locked_inner(grpc_exec_ctx *exec_ctx, + grpc_transport_stream_op *op, + grpc_call_element *elem) { channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; grpc_subchannel_call *call; @@ -894,7 +893,7 @@ static void cc_start_transport_stream_op_locked_inner( if (!gpr_atm_rel_cas(&calld->subchannel_call, 0, (gpr_atm)(uintptr_t)CANCELLED_CALL)) { /* recurse to retry */ - cc_start_transport_stream_op_locked_inner(exec_ctx, op, elem); + start_transport_stream_op_locked_inner(exec_ctx, op, elem); /* early out */ return; } else { @@ -958,7 +957,7 @@ static void cc_start_transport_stream_op_locked_inner( (gpr_atm)(uintptr_t)subchannel_call); retry_waiting_locked(exec_ctx, calld); /* recurse to retry */ - cc_start_transport_stream_op_locked_inner(exec_ctx, op, elem); + start_transport_stream_op_locked_inner(exec_ctx, op, elem); /* early out */ return; } @@ -975,7 +974,7 @@ static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem = op->handler_private.args[0]; call_data *calld = elem->call_data; - cc_start_transport_stream_op_locked_inner(exec_ctx, op, elem); + start_transport_stream_op_locked_inner(exec_ctx, op, elem); GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "start_transport_stream_op"); @@ -1221,8 +1220,8 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_connectivity_state grpc_client_channel_check_connectivity_state( grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) { channel_data *chand = elem->channel_data; - grpc_connectivity_state out; - out = 
grpc_connectivity_state_check(&chand->state_tracker); + grpc_connectivity_state out = + grpc_connectivity_state_check(&chand->state_tracker); if (out == GRPC_CHANNEL_IDLE && try_to_connect) { GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect"); grpc_closure_sched( @@ -1254,9 +1253,8 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error)); } -static void cc_watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error_ignored) { +static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error_ignored) { external_connectivity_watcher *w = arg; grpc_closure_init(&w->my_closure, on_external_watch_complete, w, grpc_schedule_on_exec_ctx); @@ -1278,7 +1276,7 @@ void grpc_client_channel_watch_connectivity_state( "external_connectivity_watcher"); grpc_closure_sched( exec_ctx, - grpc_closure_init(&w->my_closure, cc_watch_connectivity_state_locked, w, + grpc_closure_init(&w->my_closure, watch_connectivity_state_locked, w, grpc_combiner_scheduler(chand->combiner, true)), GRPC_ERROR_NONE); } -- cgit v1.2.3 From 225435972c10da4dfb561ed7b11743022187708a Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 14 Feb 2017 10:29:36 -0800 Subject: Fix bug in poll polling engine causing pollsets to be deleted before they are removed from pollset_sets --- src/core/lib/iomgr/ev_poll_posix.c | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) (limited to 'src/core') diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c index 21eb62753e..6bae99a344 100644 --- a/src/core/lib/iomgr/ev_poll_posix.c +++ b/src/core/lib/iomgr/ev_poll_posix.c @@ -191,6 +191,7 @@ struct grpc_pollset { int kicked_without_pollers; grpc_closure *shutdown_done; grpc_closure_list idle_jobs; + int pollset_set_count; /* all polled fds */ size_t fd_count; size_t fd_capacity; @@ -228,7 +229,7 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p, /* Return 1 if the pollset has active threads in pollset_work (pollset must * be locked) */ -static int pollset_has_workers(grpc_pollset *pollset); +static bool pollset_has_workers(grpc_pollset *pollset); /******************************************************************************* * pollset_set definitions @@ -658,10 +659,18 @@ static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) { worker->next->prev = worker->prev; } -static int pollset_has_workers(grpc_pollset *p) { +static bool pollset_has_workers(grpc_pollset *p) { return p->root_worker.next != &p->root_worker; } +static bool pollset_in_pollset_sets(grpc_pollset *p) { + return p->pollset_set_count; +} + +static bool pollset_has_observers(grpc_pollset *p) { + return pollset_has_workers(p) || pollset_in_pollset_sets(p); +} + static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) { if (pollset_has_workers(p)) { grpc_pollset_worker *w = p->root_worker.next; @@ -800,6 +809,7 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { pollset->fd_count = 0; pollset->fd_capacity = 0; pollset->fds = NULL; + pollset->pollset_set_count = 0; } static void pollset_destroy(grpc_pollset *pollset) { @@ -1061,7 +1071,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (pollset->shutting_down) { if (pollset_has_workers(pollset)) { pollset_kick(pollset, NULL); - } else if (!pollset->called_shutdown) { + } else if (!pollset->called_shutdown && 
!pollset_has_observers(pollset)) { pollset->called_shutdown = 1; gpr_mu_unlock(&pollset->mu); finish_shutdown(exec_ctx, pollset); @@ -1093,7 +1103,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (!pollset_has_workers(pollset)) { grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs); } - if (!pollset->called_shutdown && !pollset_has_workers(pollset)) { + if (!pollset->called_shutdown && !pollset_has_observers(pollset)) { pollset->called_shutdown = 1; finish_shutdown(exec_ctx, pollset); } @@ -1143,13 +1153,16 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pollset_set, grpc_pollset *pollset) { size_t i, j; + gpr_mu_lock(&pollset->mu); + pollset->pollset_set_count++; + gpr_mu_unlock(&pollset->mu); gpr_mu_lock(&pollset_set->mu); if (pollset_set->pollset_count == pollset_set->pollset_capacity) { pollset_set->pollset_capacity = GPR_MAX(8, 2 * pollset_set->pollset_capacity); - pollset_set->pollsets = - gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity * - sizeof(*pollset_set->pollsets)); + pollset_set->pollsets = gpr_realloc( + pollset_set->pollsets, + pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets)); } pollset_set->pollsets[pollset_set->pollset_count++] = pollset; for (i = 0, j = 0; i < pollset_set->fd_count; i++) { @@ -1178,6 +1191,17 @@ static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, } } gpr_mu_unlock(&pollset_set->mu); + gpr_mu_lock(&pollset->mu); + pollset->pollset_set_count--; + /* check shutdown */ + if (pollset->shutting_down && !pollset->called_shutdown && + !pollset_has_observers(pollset)) { + pollset->called_shutdown = 1; + gpr_mu_unlock(&pollset->mu); + finish_shutdown(exec_ctx, pollset); + } else { + gpr_mu_unlock(&pollset->mu); + } } static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, -- cgit v1.2.3 From 9e5ac1bf115a182c0418f21d60d15245af6307bf Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 14 Feb 2017 22:25:50 -0800 Subject: Fix broken shutdown, and cascading exec_ctx usage bugs --- src/core/ext/client_channel/client_channel.c | 2 +- src/core/ext/client_channel/lb_policy.c | 2 +- src/core/ext/client_channel/subchannel.c | 2 +- src/core/ext/resolver/dns/native/dns_resolver.c | 2 +- src/core/lib/http/httpcli.c | 5 ++-- src/core/lib/http/httpcli.h | 3 +- src/core/lib/iomgr/ev_epoll_linux.c | 7 ++--- src/core/lib/iomgr/ev_poll_posix.c | 33 ++++++++++++++++------ src/core/lib/iomgr/ev_posix.c | 5 ++-- src/core/lib/iomgr/ev_posix.h | 3 +- src/core/lib/iomgr/pollset_set.h | 3 +- src/core/lib/iomgr/pollset_set_uv.c | 3 +- src/core/lib/iomgr/pollset_set_windows.c | 3 +- .../google_default/google_default_credentials.c | 2 +- .../lib/security/credentials/jwt/jwt_verifier.c | 4 +-- .../lib/security/credentials/jwt/jwt_verifier.h | 3 +- .../credentials/oauth2/oauth2_credentials.c | 2 +- test/core/end2end/fixtures/http_proxy.c | 2 +- test/core/http/httpcli_test.c | 2 +- test/core/http/httpscli_test.c | 2 +- test/core/iomgr/pollset_set_test.c | 11 ++++---- test/core/iomgr/resolve_address_posix_test.c | 2 +- test/core/iomgr/resolve_address_test.c | 2 +- test/core/iomgr/tcp_client_posix_test.c | 2 +- test/core/security/jwt_verifier_test.c | 16 +++++------ test/core/security/verify_jwt.c | 5 ++-- test/core/util/port_server_client.c | 4 +-- 27 files changed, 78 insertions(+), 54 deletions(-) (limited to 'src/core') diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index bedb13d974..62fb061bcf 100644 --- 
a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -582,7 +582,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table); } grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker); - grpc_pollset_set_destroy(chand->interested_parties); + grpc_pollset_set_destroy(exec_ctx, chand->interested_parties); GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel"); gpr_mu_destroy(&chand->info_mu); } diff --git a/src/core/ext/client_channel/lb_policy.c b/src/core/ext/client_channel/lb_policy.c index 45ee72e2f0..90401b586f 100644 --- a/src/core/ext/client_channel/lb_policy.c +++ b/src/core/ext/client_channel/lb_policy.c @@ -94,7 +94,7 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, gpr_atm old_val = ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF")); if (old_val == 1) { - grpc_pollset_set_destroy(policy->interested_parties); + grpc_pollset_set_destroy(exec_ctx, policy->interested_parties); policy->vtable->destroy(exec_ctx, policy); } } diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c index c37134fd5e..09c68a91dd 100644 --- a/src/core/ext/client_channel/subchannel.c +++ b/src/core/ext/client_channel/subchannel.c @@ -217,7 +217,7 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_slice_unref_internal(exec_ctx, c->initial_connect_string); grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker); grpc_connector_unref(exec_ctx, c->connector); - grpc_pollset_set_destroy(c->pollset_set); + grpc_pollset_set_destroy(exec_ctx, c->pollset_set); grpc_subchannel_key_destroy(exec_ctx, c->key); gpr_free(c); } diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c index 2c9623211b..c08b53ea04 100644 --- a/src/core/ext/resolver/dns/native/dns_resolver.c +++ b/src/core/ext/resolver/dns/native/dns_resolver.c @@ -244,7 +244,7 @@ static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) { if (r->resolved_result != NULL) { grpc_channel_args_destroy(exec_ctx, r->resolved_result); } - grpc_pollset_set_destroy(r->interested_parties); + grpc_pollset_set_destroy(exec_ctx, r->interested_parties); gpr_free(r->name_to_resolve); gpr_free(r->default_port); grpc_channel_args_destroy(exec_ctx, r->channel_args); diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c index fb2108987b..6d7aa43b81 100644 --- a/src/core/lib/http/httpcli.c +++ b/src/core/lib/http/httpcli.c @@ -93,8 +93,9 @@ void grpc_httpcli_context_init(grpc_httpcli_context *context) { context->pollset_set = grpc_pollset_set_create(); } -void grpc_httpcli_context_destroy(grpc_httpcli_context *context) { - grpc_pollset_set_destroy(context->pollset_set); +void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx, + grpc_httpcli_context *context) { + grpc_pollset_set_destroy(exec_ctx, context->pollset_set); } static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req, diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h index 11e03b44df..8ae03ee78f 100644 --- a/src/core/lib/http/httpcli.h +++ b/src/core/lib/http/httpcli.h @@ -83,7 +83,8 @@ typedef struct grpc_httpcli_request { typedef struct grpc_http_response grpc_httpcli_response; void grpc_httpcli_context_init(grpc_httpcli_context *context); -void grpc_httpcli_context_destroy(grpc_httpcli_context *context); +void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx, + 
grpc_httpcli_context *context); /* Asynchronously perform a HTTP GET. 'context' specifies the http context under which to do the get diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c index fc56843128..fac3705142 100644 --- a/src/core/lib/iomgr/ev_epoll_linux.c +++ b/src/core/lib/iomgr/ev_epoll_linux.c @@ -1842,13 +1842,12 @@ static grpc_pollset_set *pollset_set_create(void) { return pss; } -static void pollset_set_destroy(grpc_pollset_set *pss) { +static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pss) { gpr_mu_destroy(&pss->po.mu); if (pss->po.pi != NULL) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - PI_UNREF(&exec_ctx, pss->po.pi, "pss_destroy"); - grpc_exec_ctx_finish(&exec_ctx); + PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy"); } gpr_free(pss); diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c index 6bae99a344..c03fadaebb 100644 --- a/src/core/lib/iomgr/ev_poll_posix.c +++ b/src/core/lib/iomgr/ev_poll_posix.c @@ -149,7 +149,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec, static bool fd_is_orphaned(grpc_fd *fd); /* Reference counting for fds */ -/*#define GRPC_FD_REF_COUNT_DEBUG*/ +//#define GRPC_FD_REF_COUNT_DEBUG #ifdef GRPC_FD_REF_COUNT_DEBUG static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line); static void fd_unref(grpc_fd *fd, const char *reason, const char *file, @@ -283,8 +283,8 @@ cv_fd_table g_cvfds; static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, int line) { gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n, - gpr_atm_no_barrier_load(&fd->refst), - gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line); + (int)gpr_atm_no_barrier_load(&fd->refst), + (int)gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line); #else #define REF_BY(fd, n, reason) ref_by(fd, n) #define UNREF_BY(fd, n, reason) unref_by(fd, n) @@ -298,8 +298,8 @@ static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, int line) { gpr_atm old; gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n, - gpr_atm_no_barrier_load(&fd->refst), - gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); + (int)gpr_atm_no_barrier_load(&fd->refst), + (int)gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); #else static void unref_by(grpc_fd *fd, int n) { gpr_atm old; @@ -1137,12 +1137,27 @@ static grpc_pollset_set *pollset_set_create(void) { return pollset_set; } -static void pollset_set_destroy(grpc_pollset_set *pollset_set) { +static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set) { size_t i; gpr_mu_destroy(&pollset_set->mu); for (i = 0; i < pollset_set->fd_count; i++) { GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); } + for (i = 0; i < pollset_set->pollset_count; i++) { + grpc_pollset *pollset = pollset_set->pollsets[i]; + gpr_mu_lock(&pollset->mu); + pollset->pollset_set_count--; + /* check shutdown */ + if (pollset->shutting_down && !pollset->called_shutdown && + !pollset_has_observers(pollset)) { + pollset->called_shutdown = 1; + gpr_mu_unlock(&pollset->mu); + finish_shutdown(exec_ctx, pollset); + } else { + gpr_mu_unlock(&pollset->mu); + } + } gpr_free(pollset_set->pollsets); gpr_free(pollset_set->pollset_sets); gpr_free(pollset_set->fds); @@ -1160,9 +1175,9 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, if (pollset_set->pollset_count == pollset_set->pollset_capacity) { 
pollset_set->pollset_capacity = GPR_MAX(8, 2 * pollset_set->pollset_capacity); - pollset_set->pollsets = gpr_realloc( - pollset_set->pollsets, - pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets)); + pollset_set->pollsets = + gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity * + sizeof(*pollset_set->pollsets)); } pollset_set->pollsets[pollset_set->pollset_count++] = pollset; for (i = 0, j = 0; i < pollset_set->fd_count; i++) { diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c index 95b1d99d75..b5be5504b9 100644 --- a/src/core/lib/iomgr/ev_posix.c +++ b/src/core/lib/iomgr/ev_posix.c @@ -215,8 +215,9 @@ grpc_pollset_set *grpc_pollset_set_create(void) { return g_event_engine->pollset_set_create(); } -void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) { - g_event_engine->pollset_set_destroy(pollset_set); +void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set) { + g_event_engine->pollset_set_destroy(exec_ctx, pollset_set); } void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h index 1aea7d61f3..1a9e5c115a 100644 --- a/src/core/lib/iomgr/ev_posix.h +++ b/src/core/lib/iomgr/ev_posix.h @@ -74,7 +74,8 @@ typedef struct grpc_event_engine_vtable { struct grpc_fd *fd); grpc_pollset_set *(*pollset_set_create)(void); - void (*pollset_set_destroy)(grpc_pollset_set *pollset_set); + void (*pollset_set_destroy)(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set); void (*pollset_set_add_pollset)(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pollset_set, grpc_pollset *pollset); diff --git a/src/core/lib/iomgr/pollset_set.h b/src/core/lib/iomgr/pollset_set.h index 34bb728c41..d11801d63b 100644 --- a/src/core/lib/iomgr/pollset_set.h +++ b/src/core/lib/iomgr/pollset_set.h @@ -44,7 +44,8 @@ typedef struct grpc_pollset_set grpc_pollset_set; grpc_pollset_set *grpc_pollset_set_create(void); -void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set); +void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx, + grpc_pollset_set *pollset_set); void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pollset_set, grpc_pollset *pollset); diff --git a/src/core/lib/iomgr/pollset_set_uv.c b/src/core/lib/iomgr/pollset_set_uv.c index e5ef8b29e0..836cfee4ef 100644 --- a/src/core/lib/iomgr/pollset_set_uv.c +++ b/src/core/lib/iomgr/pollset_set_uv.c @@ -41,7 +41,8 @@ grpc_pollset_set* grpc_pollset_set_create(void) { return (grpc_pollset_set*)((intptr_t)0xdeafbeef); } -void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {} +void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx, + grpc_pollset_set* pollset_set) {} void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pollset_set, diff --git a/src/core/lib/iomgr/pollset_set_windows.c b/src/core/lib/iomgr/pollset_set_windows.c index 645650db9b..ae18c8a3ce 100644 --- a/src/core/lib/iomgr/pollset_set_windows.c +++ b/src/core/lib/iomgr/pollset_set_windows.c @@ -42,7 +42,8 @@ grpc_pollset_set* grpc_pollset_set_create(void) { return (grpc_pollset_set*)((intptr_t)0xdeafbeef); } -void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {} +void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx, + grpc_pollset_set* pollset_set) {} void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pollset_set, diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c 
b/src/core/lib/security/credentials/google_default/google_default_credentials.c index a098741b70..ecd26de9fa 100644 --- a/src/core/lib/security/credentials/google_default/google_default_credentials.c +++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c @@ -154,7 +154,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { } gpr_mu_unlock(g_polling_mu); - grpc_httpcli_context_destroy(&context); + grpc_httpcli_context_destroy(exec_ctx, &context); grpc_closure_init(&destroy_closure, destroy_pollset, grpc_polling_entity_pollset(&detector.pollent), grpc_schedule_on_exec_ctx); diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c index 2270be8f44..f128177e8c 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.c +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c @@ -898,10 +898,10 @@ grpc_jwt_verifier *grpc_jwt_verifier_create( return v; } -void grpc_jwt_verifier_destroy(grpc_jwt_verifier *v) { +void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_verifier *v) { size_t i; if (v == NULL) return; - grpc_httpcli_context_destroy(&v->http_ctx); + grpc_httpcli_context_destroy(exec_ctx, &v->http_ctx); if (v->mappings != NULL) { for (i = 0; i < v->num_mappings; i++) { gpr_free(v->mappings[i].email_domain); diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h index 4fa320a415..5c3d2a7788 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.h +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h @@ -109,7 +109,8 @@ grpc_jwt_verifier *grpc_jwt_verifier_create( size_t num_mappings); /*The verifier must not be destroyed if there are still outstanding callbacks.*/ -void grpc_jwt_verifier_destroy(grpc_jwt_verifier *verifier); +void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx, + grpc_jwt_verifier *verifier); /* User provided callback that will be called when the verification of the JWT is done (maybe in another thread). 
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c index 1b0e43a1e4..c0f260f938 100644 --- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c @@ -124,7 +124,7 @@ static void oauth2_token_fetcher_destruct(grpc_exec_ctx *exec_ctx, (grpc_oauth2_token_fetcher_credentials *)creds; grpc_credentials_md_store_unref(exec_ctx, c->access_token_md); gpr_mu_destroy(&c->mu); - grpc_httpcli_context_destroy(&c->httpcli_context); + grpc_httpcli_context_destroy(exec_ctx, &c->httpcli_context); } grpc_credentials_status diff --git a/test/core/end2end/fixtures/http_proxy.c b/test/core/end2end/fixtures/http_proxy.c index 6fdc86fc12..2682ea0e7b 100644 --- a/test/core/end2end/fixtures/http_proxy.c +++ b/test/core/end2end/fixtures/http_proxy.c @@ -110,7 +110,7 @@ static void proxy_connection_unref(grpc_exec_ctx* exec_ctx, grpc_endpoint_destroy(exec_ctx, conn->client_endpoint); if (conn->server_endpoint != NULL) grpc_endpoint_destroy(exec_ctx, conn->server_endpoint); - grpc_pollset_set_destroy(conn->pollset_set); + grpc_pollset_set_destroy(exec_ctx, conn->pollset_set); grpc_slice_buffer_destroy_internal(exec_ctx, &conn->client_read_buffer); grpc_slice_buffer_destroy_internal(exec_ctx, &conn->client_deferred_write_buffer); diff --git a/test/core/http/httpcli_test.c b/test/core/http/httpcli_test.c index 6cc00f871d..be8301c5e3 100644 --- a/test/core/http/httpcli_test.c +++ b/test/core/http/httpcli_test.c @@ -209,7 +209,7 @@ int main(int argc, char **argv) { test_get(port); test_post(port); - grpc_httpcli_context_destroy(&g_context); + grpc_httpcli_context_destroy(&exec_ctx, &g_context); grpc_closure_init(&destroyed, destroy_pops, &g_pops, grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops), diff --git a/test/core/http/httpscli_test.c b/test/core/http/httpscli_test.c index e1a26d91e9..5a6f07bec2 100644 --- a/test/core/http/httpscli_test.c +++ b/test/core/http/httpscli_test.c @@ -212,7 +212,7 @@ int main(int argc, char **argv) { test_get(port); test_post(port); - grpc_httpcli_context_destroy(&g_context); + grpc_httpcli_context_destroy(&exec_ctx, &g_context); grpc_closure_init(&destroyed, destroy_pops, &g_pops, grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops), diff --git a/test/core/iomgr/pollset_set_test.c b/test/core/iomgr/pollset_set_test.c index 40fa858602..e7777acce1 100644 --- a/test/core/iomgr/pollset_set_test.c +++ b/test/core/iomgr/pollset_set_test.c @@ -59,10 +59,11 @@ void init_test_pollset_sets(test_pollset_set *pollset_sets, const int num_pss) { } } -void cleanup_test_pollset_sets(test_pollset_set *pollset_sets, +void cleanup_test_pollset_sets(grpc_exec_ctx *exec_ctx, + test_pollset_set *pollset_sets, const int num_pss) { for (int i = 0; i < num_pss; i++) { - grpc_pollset_set_destroy(pollset_sets[i].pss); + grpc_pollset_set_destroy(exec_ctx, pollset_sets[i].pss); pollset_sets[i].pss = NULL; } } @@ -297,7 +298,7 @@ static void pollset_set_test_basic() { cleanup_test_fds(&exec_ctx, tfds, num_fds); cleanup_test_pollsets(&exec_ctx, pollsets, num_ps); - cleanup_test_pollset_sets(pollset_sets, num_pss); + cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss); grpc_exec_ctx_finish(&exec_ctx); } @@ -372,7 +373,7 @@ void pollset_set_test_dup_fds() { cleanup_test_fds(&exec_ctx, tfds, num_fds); cleanup_test_pollsets(&exec_ctx, &pollset, num_ps); - 
cleanup_test_pollset_sets(pollset_sets, num_pss); + cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss); grpc_exec_ctx_finish(&exec_ctx); } @@ -437,7 +438,7 @@ void pollset_set_test_empty_pollset() { cleanup_test_fds(&exec_ctx, tfds, num_fds); cleanup_test_pollsets(&exec_ctx, pollsets, num_ps); - cleanup_test_pollset_sets(&pollset_set, num_pss); + cleanup_test_pollset_sets(&exec_ctx, &pollset_set, num_pss); grpc_exec_ctx_finish(&exec_ctx); } diff --git a/test/core/iomgr/resolve_address_posix_test.c b/test/core/iomgr/resolve_address_posix_test.c index a4feff8b00..ef4cfdf06f 100644 --- a/test/core/iomgr/resolve_address_posix_test.c +++ b/test/core/iomgr/resolve_address_posix_test.c @@ -74,7 +74,7 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) { GPR_ASSERT(gpr_event_wait(&args->ev, test_deadline())); grpc_resolved_addresses_destroy(args->addrs); grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset); - grpc_pollset_set_destroy(args->pollset_set); + grpc_pollset_set_destroy(exec_ctx, args->pollset_set); grpc_closure do_nothing_cb; grpc_closure_init(&do_nothing_cb, do_nothing, NULL, grpc_schedule_on_exec_ctx); diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c index 54de9a20e1..6a9bb5ae6f 100644 --- a/test/core/iomgr/resolve_address_test.c +++ b/test/core/iomgr/resolve_address_test.c @@ -69,7 +69,7 @@ void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) { GPR_ASSERT(gpr_event_wait(&args->ev, test_deadline())); grpc_resolved_addresses_destroy(args->addrs); grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset); - grpc_pollset_set_destroy(args->pollset_set); + grpc_pollset_set_destroy(exec_ctx, args->pollset_set); grpc_closure do_nothing_cb; grpc_closure_init(&do_nothing_cb, do_nothing, NULL, grpc_schedule_on_exec_ctx); diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c index dcdff8efb1..c9b514a024 100644 --- a/test/core/iomgr/tcp_client_posix_test.c +++ b/test/core/iomgr/tcp_client_posix_test.c @@ -207,7 +207,7 @@ int main(int argc, char **argv) { test_succeeds(); gpr_log(GPR_ERROR, "End of first test"); test_fails(); - grpc_pollset_set_destroy(g_pollset_set); + grpc_pollset_set_destroy(&exec_ctx, g_pollset_set); grpc_closure_init(&destroyed, destroy_pollset, g_pollset, grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); diff --git a/test/core/security/jwt_verifier_test.c b/test/core/security/jwt_verifier_test.c index a9bd976a39..0a73f67528 100644 --- a/test/core/security/jwt_verifier_test.c +++ b/test/core/security/jwt_verifier_test.c @@ -386,9 +386,9 @@ static void test_jwt_verifier_google_email_issuer_success(void) { GPR_ASSERT(jwt != NULL); grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_success, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -420,9 +420,9 @@ static void test_jwt_verifier_custom_email_issuer_success(void) { GPR_ASSERT(jwt != NULL); grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_success, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -469,9 +469,9 @@ static void 
test_jwt_verifier_url_issuer_success(void) { GPR_ASSERT(jwt != NULL); grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_success, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -511,9 +511,9 @@ static void test_jwt_verifier_url_issuer_bad_config(void) { grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_key_retrieval_error, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -534,9 +534,9 @@ static void test_jwt_verifier_bad_json_key(void) { grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_key_retrieval_error, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } @@ -588,9 +588,9 @@ static void test_jwt_verifier_bad_signature(void) { grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, jwt, expected_audience, on_verification_bad_signature, (void *)expected_user_data); - grpc_exec_ctx_finish(&exec_ctx); gpr_free(jwt); - grpc_jwt_verifier_destroy(verifier); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); + grpc_exec_ctx_finish(&exec_ctx); grpc_httpcli_set_override(NULL, NULL); } @@ -619,8 +619,8 @@ static void test_jwt_verifier_bad_format(void) { grpc_jwt_verifier_verify(&exec_ctx, verifier, NULL, "bad jwt", expected_audience, on_verification_bad_format, (void *)expected_user_data); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); - grpc_jwt_verifier_destroy(verifier); grpc_httpcli_set_override(NULL, NULL); } diff --git a/test/core/security/verify_jwt.c b/test/core/security/verify_jwt.c index bbd4a67ac1..aaf0e7f6b1 100644 --- a/test/core/security/verify_jwt.c +++ b/test/core/security/verify_jwt.c @@ -123,14 +123,15 @@ int main(int argc, char **argv) { gpr_inf_future(GPR_CLOCK_MONOTONIC)))) sync.is_done = true; gpr_mu_unlock(sync.mu); - grpc_exec_ctx_finish(&exec_ctx); + grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(sync.mu); } gpr_mu_unlock(sync.mu); gpr_free(sync.pollset); - grpc_jwt_verifier_destroy(verifier); + grpc_jwt_verifier_destroy(&exec_ctx, verifier); + grpc_exec_ctx_finish(&exec_ctx); gpr_cmdline_destroy(cl); grpc_shutdown(); return !sync.success; diff --git a/test/core/util/port_server_client.c b/test/core/util/port_server_client.c index 6d722ffc88..7b733ab9c7 100644 --- a/test/core/util/port_server_client.c +++ b/test/core/util/port_server_client.c @@ -121,7 +121,7 @@ void grpc_free_port_using_server(char *server, int port) { } gpr_mu_unlock(pr.mu); - grpc_httpcli_context_destroy(&context); + grpc_httpcli_context_destroy(&exec_ctx, &context); grpc_exec_ctx_finish(&exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), shutdown_closure); @@ -245,7 +245,7 @@ int grpc_pick_port_using_server(char *server) { gpr_mu_unlock(pr.mu); grpc_http_response_destroy(&pr.response); - grpc_httpcli_context_destroy(&context); + grpc_httpcli_context_destroy(&exec_ctx, &context); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), shutdown_closure); grpc_exec_ctx_finish(&exec_ctx); -- cgit v1.2.3 From 
6955c5e8d2c881169dfa4ffd80a38182d6e2e14e Mon Sep 17 00:00:00 2001 From: yang-g Date: Mon, 13 Feb 2017 15:49:27 -0800 Subject: Remove code from network_status_tracker.c Pull the hash function to useful.h --- include/grpc/support/useful.h | 3 + src/core/lib/iomgr/network_status_tracker.c | 87 ++----------------------- src/core/lib/iomgr/timer_generic.c | 10 +-- src/core/lib/support/cpu_posix.c | 9 +-- test/core/end2end/tests/network_status_change.c | 5 +- 5 files changed, 16 insertions(+), 98 deletions(-) (limited to 'src/core') diff --git a/include/grpc/support/useful.h b/include/grpc/support/useful.h index 003e096cf9..9d8314e4be 100644 --- a/include/grpc/support/useful.h +++ b/include/grpc/support/useful.h @@ -74,4 +74,7 @@ #define GPR_ICMP(a, b) ((a) < (b) ? -1 : ((a) > (b) ? 1 : 0)) +#define GPR_HASH_POINTER(x, range) \ + ((((size_t)x) >> 4) ^ (((size_t)x) >> 9) ^ (((size_t)x) >> 14)) % (range) + #endif /* GRPC_SUPPORT_USEFUL_H */ diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.c index 1601a39002..4104bf927a 100644 --- a/src/core/lib/iomgr/network_status_tracker.c +++ b/src/core/lib/iomgr/network_status_tracker.c @@ -31,95 +31,18 @@ * */ -#include -#include #include "src/core/lib/iomgr/endpoint.h" -typedef struct endpoint_ll_node { - grpc_endpoint *ep; - struct endpoint_ll_node *next; -} endpoint_ll_node; - -static endpoint_ll_node *head = NULL; -static gpr_mu g_endpoint_mutex; - -void grpc_network_status_shutdown(void) { - if (head != NULL) { - gpr_log(GPR_ERROR, - "Memory leaked as not all network endpoints were shut down"); - } - gpr_mu_destroy(&g_endpoint_mutex); -} +void grpc_network_status_shutdown(void) {} void grpc_network_status_init(void) { - gpr_mu_init(&g_endpoint_mutex); // TODO(makarandd): Install callback with OS to monitor network status. } -void grpc_destroy_network_status_monitor() { - for (endpoint_ll_node *curr = head; curr != NULL;) { - endpoint_ll_node *next = curr->next; - gpr_free(curr); - curr = next; - } - gpr_mu_destroy(&g_endpoint_mutex); -} - -void grpc_network_status_register_endpoint(grpc_endpoint *ep) { - gpr_mu_lock(&g_endpoint_mutex); - if (head == NULL) { - head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node)); - head->ep = ep; - head->next = NULL; - } else { - endpoint_ll_node *prev_head = head; - head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node)); - head->ep = ep; - head->next = prev_head; - } - gpr_mu_unlock(&g_endpoint_mutex); -} +void grpc_destroy_network_status_monitor() {} -void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) { - gpr_mu_lock(&g_endpoint_mutex); - GPR_ASSERT(head); - bool found = false; - endpoint_ll_node *prev = head; - // if we're unregistering the head, just move head to the next - if (ep == head->ep) { - head = head->next; - gpr_free(prev); - found = true; - } else { - for (endpoint_ll_node *curr = head->next; curr != NULL; curr = curr->next) { - if (ep == curr->ep) { - prev->next = curr->next; - gpr_free(curr); - found = true; - break; - } - prev = curr; - } - } - gpr_mu_unlock(&g_endpoint_mutex); - GPR_ASSERT(found); -} +void grpc_network_status_register_endpoint(grpc_endpoint *ep) { (void)ep; } -// Walk the linked-list from head and execute shutdown. It is possible that -// other threads might be in the process of shutdown as well, but that has -// no side effect since endpoint shutdown is idempotent. 
-void grpc_network_status_shutdown_all_endpoints() { - gpr_mu_lock(&g_endpoint_mutex); - if (head == NULL) { - gpr_mu_unlock(&g_endpoint_mutex); - return; - } - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) { (void)ep; } - for (endpoint_ll_node *curr = head; curr != NULL; curr = curr->next) { - curr->ep->vtable->shutdown(&exec_ctx, curr->ep, - GRPC_ERROR_CREATE("Network unavailable")); - } - gpr_mu_unlock(&g_endpoint_mutex); - grpc_exec_ctx_finish(&exec_ctx); -} +void grpc_network_status_shutdown_all_endpoints() {} diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index 40c8351472..8a5617e7c1 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -121,12 +121,6 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { g_initialized = false; } -/* This is a cheap, but good enough, pointer hash for sharding the tasks: */ -static size_t shard_idx(const grpc_timer *info) { - size_t x = (size_t)info; - return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) & (NUM_SHARDS - 1); -} - static double ts_to_dbl(gpr_timespec ts) { return (double)ts.tv_sec + 1e-9 * ts.tv_nsec; } @@ -181,7 +175,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_timespec deadline, grpc_closure *closure, gpr_timespec now) { int is_first_timer = 0; - shard_type *shard = &g_shards[shard_idx(timer)]; + shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]; GPR_ASSERT(deadline.clock_type == g_clock_type); GPR_ASSERT(now.clock_type == g_clock_type); timer->closure = closure; @@ -247,7 +241,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { return; } - shard_type *shard = &g_shards[shard_idx(timer)]; + shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]; gpr_mu_lock(&shard->mu); if (!timer->triggered) { grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); diff --git a/src/core/lib/support/cpu_posix.c b/src/core/lib/support/cpu_posix.c index 667bde7cad..245f12f06d 100644 --- a/src/core/lib/support/cpu_posix.c +++ b/src/core/lib/support/cpu_posix.c @@ -41,6 +41,7 @@ #include #include +#include static __thread char magic_thread_local; @@ -60,18 +61,12 @@ unsigned gpr_cpu_num_cores(void) { return (unsigned)ncpus; } -/* This is a cheap, but good enough, pointer hash for sharding things: */ -static size_t shard_ptr(const void *info) { - size_t x = (size_t)info; - return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) % gpr_cpu_num_cores(); -} - unsigned gpr_cpu_current_cpu(void) { /* NOTE: there's no way I know to return the actual cpu index portably... most code that's using this is using it to shard across work queues though, so here we use thread identity instead to achieve a similar though not identical effect */ - return (unsigned)shard_ptr(&magic_thread_local); + return (unsigned)GPR_HASH_POINTER(&magic_thread_local, gpr_cpu_num_cores()); } #endif /* GPR_CPU_POSIX */ diff --git a/test/core/end2end/tests/network_status_change.c b/test/core/end2end/tests/network_status_change.c index 9cef02b2b3..7540ce93a1 100644 --- a/test/core/end2end/tests/network_status_change.c +++ b/test/core/end2end/tests/network_status_change.c @@ -212,8 +212,11 @@ static void test_invoke_network_status_change(grpc_end2end_test_config config) { CQ_EXPECT_COMPLETION(cqv, tag(1), 1); cq_verify(cqv); + // TODO(makdharma) Update this when the shutdown_all_endpoints is implemented. // Expected behavior of a RPC when network is lost. 
- GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE); + // GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE); + GPR_ASSERT(status == GRPC_STATUS_OK); + GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/foo")); validate_host_override_string("foo.test.google.fr:1234", call_details.host, config); -- cgit v1.2.3 From 37cbc3f5b30451254c99a85be36364bae74f2c70 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 16 Feb 2017 14:54:55 -0800 Subject: Use special errors where appropriate in call cancellation: avoids many allocations in a semi-common case --- src/core/lib/surface/call.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'src/core') diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index 3352e427cd..48a1e586e1 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -481,7 +481,10 @@ void grpc_call_destroy(grpc_call *c) { c->destroy_called = 1; cancel = !c->received_final_op; gpr_mu_unlock(&c->mu); - if (cancel) grpc_call_cancel(c, NULL); + if (cancel) { + cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, + GRPC_ERROR_CANCELLED); + } GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy"); grpc_exec_ctx_finish(&exec_ctx); GPR_TIMER_END("grpc_call_destroy", 0); @@ -490,8 +493,11 @@ void grpc_call_destroy(grpc_call *c) { grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) { GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved)); GPR_ASSERT(!reserved); - return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled", - NULL); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + cancel_with_error(&exec_ctx, call, STATUS_FROM_API_OVERRIDE, + GRPC_ERROR_CANCELLED); + grpc_exec_ctx_finish(&exec_ctx); + return GRPC_CALL_OK; } static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call, -- cgit v1.2.3
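
The commentary added in the "Add commentary" patch above describes the double-checked locking scheme in cc_start_transport_stream_op: try to observe an already-initialized subchannel call without taking the combiner, and only fall back to the combiner-scheduled slow path when initialization (or cancellation) still has to happen. The following standalone sketch illustrates that general pattern, with C11 atomics and a plain mutex standing in for the combiner; the names (call_state, fake_call, get_or_create_call) are invented for the example and are not gRPC APIs.

    /* Minimal double-checked locking sketch: the fast path is a single acquire
       load; the slow path takes a lock, re-checks, and publishes the newly
       created object with a release store. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      int id; /* placeholder for a real subchannel call */
    } fake_call;

    typedef struct {
      _Atomic(fake_call *) call; /* NULL until the call is created */
      pthread_mutex_t mu;        /* stands in for the combiner */
    } call_state;

    static fake_call *make_call(void) {
      fake_call *c = malloc(sizeof(*c));
      c->id = 42;
      return c;
    }

    static fake_call *get_or_create_call(call_state *cs) {
      /* Fast path: no lock once the call exists. */
      fake_call *c = atomic_load_explicit(&cs->call, memory_order_acquire);
      if (c != NULL) return c;

      /* Slow path: lock, re-check (another thread may have won the race),
         then publish so later acquire loads see a fully constructed object. */
      pthread_mutex_lock(&cs->mu);
      c = atomic_load_explicit(&cs->call, memory_order_relaxed);
      if (c == NULL) {
        c = make_call();
        atomic_store_explicit(&cs->call, c, memory_order_release);
      }
      pthread_mutex_unlock(&cs->mu);
      return c;
    }

    int main(void) {
      call_state cs;
      atomic_init(&cs.call, NULL);
      pthread_mutex_init(&cs.mu, NULL);
      fake_call *a = get_or_create_call(&cs); /* slow path */
      fake_call *b = get_or_create_call(&cs); /* fast path, same object */
      printf("same object: %d (id=%d)\n", a == b, a->id);
      free(a);
      pthread_mutex_destroy(&cs.mu);
      return 0;
    }

The release/acquire pairing is the part that matters: it plays the same role as the gpr_atm_rel_cas / gpr_atm_rel_store writes and the GET_CALL read in client_channel.c, ensuring the fast path never observes a half-initialized call.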
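
The network_status_tracker patch above also hoists gRPC's cheap pointer hash into include/grpc/support/useful.h as GPR_HASH_POINTER, and timer_generic.c and cpu_posix.c now shard by it. Below is a small usage sketch with the macro copied verbatim from the diff; the shard table, the NUM_SHARDS value of 32, and the shard_for helper are invented for the example and do not correspond to actual gRPC code.

    #include <stddef.h>
    #include <stdio.h>

    /* Copied from the useful.h hunk above. */
    #define GPR_HASH_POINTER(x, range) \
      ((((size_t)x) >> 4) ^ (((size_t)x) >> 9) ^ (((size_t)x) >> 14)) % (range)

    #define NUM_SHARDS 32 /* demo value; the real shard counts live elsewhere */

    typedef struct {
      int pending; /* placeholder for per-shard state (e.g. a timer heap) */
    } shard;

    static shard g_shards[NUM_SHARDS];

    /* Objects are routed to a shard purely by their address, so no extra
       field is needed on the object and the mapping is stable for the
       object's lifetime, which is the property timer_init and timer_cancel
       rely on. */
    static shard *shard_for(const void *obj) {
      return &g_shards[GPR_HASH_POINTER(obj, NUM_SHARDS)];
    }

    int main(void) {
      int a, b;
      printf("a -> shard %td\n", shard_for(&a) - g_shards);
      printf("b -> shard %td\n", shard_for(&b) - g_shards);
      return 0;
    }

The low bits are dropped (>> 4) because heap and stack addresses are typically aligned and so carry little entropy; xoring in higher shifts spreads addresses across shards well enough for this purpose without a full hash function.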