diff options
author | Mark D. Roth <roth@google.com> | 2017-08-29 16:59:07 -0700 |
---|---|---|
committer | GitHub <noreply@github.com> | 2017-08-29 16:59:07 -0700 |
commit | bf19961d0a49b43cb528392efeb4880eeebb9b5e (patch) | |
tree | 1d4c96db4d3bdc05c634e5d386c14a77845a1758 /src/core/ext | |
parent | 9811915ba3fa1ccdf44b6a70fe1b1dd4782cd508 (diff) |
Revert "Implement call combiner"
Diffstat (limited to 'src/core/ext')
19 files changed, 553 insertions, 560 deletions
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c index 3e10f61154..13fe2e6b1c 100644 --- a/src/core/ext/census/grpc_filter.c +++ b/src/core/ext/census/grpc_filter.c @@ -179,6 +179,7 @@ const grpc_channel_filter grpc_client_census_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, + grpc_call_next_get_peer, grpc_channel_next_get_info, "census-client"}; @@ -192,5 +193,6 @@ const grpc_channel_filter grpc_server_census_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, + grpc_call_next_get_peer, grpc_channel_next_get_info, "census-server"}; diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c index e6822ce801..58e31d7b45 100644 --- a/src/core/ext/filters/client_channel/client_channel.c +++ b/src/core/ext/filters/client_channel/client_channel.c @@ -796,8 +796,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx, // send_message // recv_trailing_metadata // send_trailing_metadata -// We also add room for a single cancel_stream batch. -#define MAX_WAITING_BATCHES 7 +#define MAX_WAITING_BATCHES 6 /** Call data. Holds a pointer to grpc_subchannel_call and the associated machinery to create such a pointer. @@ -809,25 +808,23 @@ typedef struct client_channel_call_data { // The code in deadline_filter.c requires this to be the first field. // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state // and this struct both independently store a pointer to the call - // combiner. If/when we have time, find a way to avoid this without - // breaking the grpc_deadline_state abstraction. + // stack and each has its own mutex. If/when we have time, find a way + // to avoid this without breaking the grpc_deadline_state abstraction. grpc_deadline_state deadline_state; grpc_slice path; // Request path. gpr_timespec call_start_time; gpr_timespec deadline; - gpr_arena *arena; - grpc_call_combiner *call_combiner; - grpc_server_retry_throttle_data *retry_throttle_data; method_parameters *method_params; - grpc_subchannel_call *subchannel_call; - grpc_error *error; + /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest + bit is 0), or a pointer to an error (if the lowest bit is 1) */ + gpr_atm subchannel_call_or_error; + gpr_arena *arena; grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending. 
grpc_closure lb_pick_closure; - grpc_closure cancel_closure; grpc_connected_subchannel *connected_subchannel; grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT]; @@ -835,9 +832,10 @@ typedef struct client_channel_call_data { grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES]; size_t waiting_for_pick_batches_count; - grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES]; - grpc_transport_stream_op_batch *initial_metadata_batch; + grpc_transport_stream_op_batch_payload *initial_metadata_payload; + + grpc_call_stack *owning_call; grpc_linked_mdelem lb_token_mdelem; @@ -845,42 +843,55 @@ typedef struct client_channel_call_data { grpc_closure *original_on_complete; } call_data; -grpc_subchannel_call *grpc_client_channel_get_subchannel_call( - grpc_call_element *elem) { - call_data *calld = elem->call_data; - return calld->subchannel_call; +typedef struct { + grpc_subchannel_call *subchannel_call; + grpc_error *error; +} call_or_error; + +static call_or_error get_call_or_error(call_data *p) { + gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error); + if (c == 0) + return (call_or_error){NULL, NULL}; + else if (c & 1) + return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)}; + else + return (call_or_error){(grpc_subchannel_call *)c, NULL}; } -// This is called via the call combiner, so access to calld is synchronized. -static void waiting_for_pick_batches_add( - call_data *calld, grpc_transport_stream_op_batch *batch) { - if (batch->send_initial_metadata) { - GPR_ASSERT(calld->initial_metadata_batch == NULL); - calld->initial_metadata_batch = batch; +static bool set_call_or_error(call_data *p, call_or_error coe) { + // this should always be under a lock + call_or_error existing = get_call_or_error(p); + if (existing.error != GRPC_ERROR_NONE) { + GRPC_ERROR_UNREF(coe.error); + return false; + } + GPR_ASSERT(existing.subchannel_call == NULL); + if (coe.error != GRPC_ERROR_NONE) { + GPR_ASSERT(coe.subchannel_call == NULL); + gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error); } else { - GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES); - calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] = - batch; + GPR_ASSERT(coe.subchannel_call != NULL); + gpr_atm_rel_store(&p->subchannel_call_or_error, + (gpr_atm)coe.subchannel_call); } + return true; } -// This is called via the call combiner, so access to calld is synchronized. -static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - call_data *calld = arg; - if (calld->waiting_for_pick_batches_count > 0) { - --calld->waiting_for_pick_batches_count; - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, - calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count], - GRPC_ERROR_REF(error), calld->call_combiner); - } +grpc_subchannel_call *grpc_client_channel_get_subchannel_call( + grpc_call_element *call_elem) { + return get_call_or_error(call_elem->call_data).subchannel_call; } -// This is called via the call combiner, so access to calld is synchronized. 
-static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_error *error) { +static void waiting_for_pick_batches_add_locked( + call_data *calld, grpc_transport_stream_op_batch *batch) { + GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES); + calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] = + batch; +} + +static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + grpc_error *error) { call_data *calld = elem->call_data; if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, @@ -889,60 +900,34 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx, grpc_error_string(error)); } for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) { - GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i], - fail_pending_batch_in_call_combiner, calld, - grpc_schedule_on_exec_ctx); - GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, - &calld->handle_pending_batch_in_call_combiner[i], - GRPC_ERROR_REF(error), - "waiting_for_pick_batches_fail"); - } - if (calld->initial_metadata_batch != NULL) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error), - calld->call_combiner); - } else { - GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, - "waiting_for_pick_batches_fail"); + exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error)); } + calld->waiting_for_pick_batches_count = 0; GRPC_ERROR_UNREF(error); } -// This is called via the call combiner, so access to calld is synchronized. -static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *ignored) { - call_data *calld = arg; - if (calld->waiting_for_pick_batches_count > 0) { - --calld->waiting_for_pick_batches_count; - grpc_subchannel_call_process_op( - exec_ctx, calld->subchannel_call, - calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]); - } -} - -// This is called via the call combiner, so access to calld is synchronized. 
-static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - channel_data *chand = elem->channel_data; +static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem) { call_data *calld = elem->call_data; + if (calld->waiting_for_pick_batches_count == 0) return; + call_or_error coe = get_call_or_error(calld); + if (coe.error != GRPC_ERROR_NONE) { + waiting_for_pick_batches_fail_locked(exec_ctx, elem, + GRPC_ERROR_REF(coe.error)); + return; + } if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR " pending batches to subchannel_call=%p", - chand, calld, calld->waiting_for_pick_batches_count, - calld->subchannel_call); + elem->channel_data, calld, calld->waiting_for_pick_batches_count, + coe.subchannel_call); } for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) { - GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i], - run_pending_batch_in_call_combiner, calld, - grpc_schedule_on_exec_ctx); - GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, - &calld->handle_pending_batch_in_call_combiner[i], - GRPC_ERROR_NONE, - "waiting_for_pick_batches_resume"); - } - GPR_ASSERT(calld->initial_metadata_batch != NULL); - grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, - calld->initial_metadata_batch); + grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, + calld->waiting_for_pick_batches[i]); + } + calld->waiting_for_pick_batches_count = 0; } // Applies service config to the call. Must be invoked once we know @@ -983,28 +968,29 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx, static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_error *error) { - channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; + grpc_subchannel_call *subchannel_call = NULL; const grpc_connected_subchannel_call_args call_args = { .pollent = calld->pollent, .path = calld->path, .start_time = calld->call_start_time, .deadline = calld->deadline, .arena = calld->arena, - .context = calld->subchannel_call_context, - .call_combiner = calld->call_combiner}; + .context = calld->subchannel_call_context}; grpc_error *new_error = grpc_connected_subchannel_create_call( - exec_ctx, calld->connected_subchannel, &call_args, - &calld->subchannel_call); + exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call); if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s", - chand, calld, calld->subchannel_call, grpc_error_string(new_error)); + elem->channel_data, calld, subchannel_call, + grpc_error_string(new_error)); } + GPR_ASSERT(set_call_or_error( + calld, (call_or_error){.subchannel_call = subchannel_call})); if (new_error != GRPC_ERROR_NONE) { new_error = grpc_error_add_child(new_error, error); - waiting_for_pick_batches_fail(exec_ctx, elem, new_error); + waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error); } else { - waiting_for_pick_batches_resume(exec_ctx, elem); + waiting_for_pick_batches_resume_locked(exec_ctx, elem); } GRPC_ERROR_UNREF(error); } @@ -1016,27 +1002,60 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, channel_data *chand = elem->channel_data; grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent, chand->interested_parties); + call_or_error coe = get_call_or_error(calld); if (calld->connected_subchannel == NULL) { // Failed 
to create subchannel. - GRPC_ERROR_UNREF(calld->error); - calld->error = error == GRPC_ERROR_NONE - ? GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Call dropped by load balancing policy") - : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Failed to create subchannel", &error, 1); + grpc_error *failure = + error == GRPC_ERROR_NONE + ? GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Call dropped by load balancing policy") + : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Failed to create subchannel", &error, 1); if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: failed to create subchannel: error=%s", chand, - calld, grpc_error_string(calld->error)); + calld, grpc_error_string(failure)); + } + set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)}); + waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure); + } else if (coe.error != GRPC_ERROR_NONE) { + /* already cancelled before subchannel became ready */ + grpc_error *child_errors[] = {error, coe.error}; + grpc_error *cancellation_error = + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Cancelled before creating subchannel", child_errors, + GPR_ARRAY_SIZE(child_errors)); + /* if due to deadline, attach the deadline exceeded status to the error */ + if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) { + cancellation_error = + grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_DEADLINE_EXCEEDED); } - waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error)); + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, + "chand=%p calld=%p: cancelled before subchannel became ready: %s", + chand, calld, grpc_error_string(cancellation_error)); + } + waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error); } else { /* Create call on subchannel. */ create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); } + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel"); GRPC_ERROR_UNREF(error); } +static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { + call_data *calld = elem->call_data; + grpc_subchannel_call *subchannel_call = + get_call_or_error(calld).subchannel_call; + if (subchannel_call == NULL) { + return NULL; + } else { + return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call); + } +} + /** Return true if subchannel is available immediately (in which case subchannel_ready_locked() should not be called), or false otherwise (in which case subchannel_ready_locked() should be called when the subchannel @@ -1050,44 +1069,6 @@ typedef struct { grpc_closure closure; } pick_after_resolver_result_args; -// Note: This runs under the client_channel combiner, but will NOT be -// holding the call combiner. -static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error) { - grpc_call_element *elem = arg; - channel_data *chand = elem->channel_data; - call_data *calld = elem->call_data; - // If we don't yet have a resolver result, then a closure for - // pick_after_resolver_result_done_locked() will have been added to - // chand->waiting_for_resolver_result_closures, and it may not be invoked - // until after this call has been destroyed. We mark the operation as - // cancelled, so that when pick_after_resolver_result_done_locked() - // is called, it will be a no-op. We also immediately invoke - // subchannel_ready_locked() to propagate the error back to the caller. 
- for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head; - closure != NULL; closure = closure->next_data.next) { - pick_after_resolver_result_args *args = closure->cb_arg; - if (!args->cancelled && args->elem == elem) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: " - "cancelling pick waiting for resolver result", - chand, calld); - } - args->cancelled = true; - // Note: Although we are not in the call combiner here, we are - // basically stealing the call combiner from the pending pick, so - // it's safe to call subchannel_ready_locked() here -- we are - // essentially calling it here instead of calling it in - // pick_after_resolver_result_done_locked(). - subchannel_ready_locked(exec_ctx, elem, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick cancelled", &error, 1)); - } - } -} - static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { @@ -1098,24 +1079,21 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, gpr_log(GPR_DEBUG, "call cancelled before resolver result"); } } else { - grpc_call_element *elem = args->elem; - channel_data *chand = elem->channel_data; - call_data *calld = elem->call_data; - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, - NULL); + channel_data *chand = args->elem->channel_data; + call_data *calld = args->elem->call_data; if (error != GRPC_ERROR_NONE) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data", chand, calld); } - subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); + subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error)); } else { if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick", chand, calld); } - if (pick_subchannel_locked(exec_ctx, elem)) { - subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_NONE); + if (pick_subchannel_locked(exec_ctx, args->elem)) { + subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE); } } } @@ -1138,33 +1116,41 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx, args, grpc_combiner_scheduler(chand->combiner)); grpc_closure_list_append(&chand->waiting_for_resolver_result_closures, &args->closure, GRPC_ERROR_NONE); - grpc_call_combiner_set_notify_on_cancel( - exec_ctx, calld->call_combiner, - GRPC_CLOSURE_INIT(&calld->cancel_closure, - pick_after_resolver_result_cancel_locked, elem, - grpc_combiner_scheduler(chand->combiner))); } -// Note: This runs under the client_channel combiner, but will NOT be -// holding the call combiner. -static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = arg; +static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + grpc_error *error) { channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; - if (calld->lb_policy != NULL) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p", - chand, calld, calld->lb_policy); + // If we don't yet have a resolver result, then a closure for + // pick_after_resolver_result_done_locked() will have been added to + // chand->waiting_for_resolver_result_closures, and it may not be invoked + // until after this call has been destroyed. 
We mark the operation as + // cancelled, so that when pick_after_resolver_result_done_locked() + // is called, it will be a no-op. We also immediately invoke + // subchannel_ready_locked() to propagate the error back to the caller. + for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head; + closure != NULL; closure = closure->next_data.next) { + pick_after_resolver_result_args *args = closure->cb_arg; + if (!args->cancelled && args->elem == elem) { + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, + "chand=%p calld=%p: " + "cancelling pick waiting for resolver result", + chand, calld); + } + args->cancelled = true; + subchannel_ready_locked(exec_ctx, elem, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick cancelled", &error, 1)); } - grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy, - &calld->connected_subchannel, - GRPC_ERROR_REF(error)); } + GRPC_ERROR_UNREF(error); } // Callback invoked by grpc_lb_policy_pick_locked() for async picks. -// Unrefs the LB policy and invokes subchannel_ready_locked(). +// Unrefs the LB policy after invoking subchannel_ready_locked(). static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_call_element *elem = arg; @@ -1174,7 +1160,6 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg, gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously", chand, calld); } - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); GPR_ASSERT(calld->lb_policy != NULL); GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); calld->lb_policy = NULL; @@ -1209,15 +1194,24 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx, } GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); calld->lb_policy = NULL; - } else { - grpc_call_combiner_set_notify_on_cancel( - exec_ctx, calld->call_combiner, - GRPC_CLOSURE_INIT(&calld->cancel_closure, pick_callback_cancel_locked, - elem, grpc_combiner_scheduler(chand->combiner))); } return pick_done; } +static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + grpc_error *error) { + channel_data *chand = elem->channel_data; + call_data *calld = elem->call_data; + GPR_ASSERT(calld->lb_policy != NULL); + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p", + chand, calld, calld->lb_policy); + } + grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy, + &calld->connected_subchannel, error); +} + static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { GPR_TIMER_BEGIN("pick_subchannel", 0); @@ -1230,7 +1224,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, // Otherwise, if the service config specified a value for this // method, use that. 
uint32_t initial_metadata_flags = - calld->initial_metadata_batch->payload->send_initial_metadata + calld->initial_metadata_payload->send_initial_metadata .send_initial_metadata_flags; const bool wait_for_ready_set_from_api = initial_metadata_flags & @@ -1247,7 +1241,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, } } const grpc_lb_policy_pick_args inputs = { - calld->initial_metadata_batch->payload->send_initial_metadata + calld->initial_metadata_payload->send_initial_metadata .send_initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem}; pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs); @@ -1264,33 +1258,91 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, return pick_done; } -static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error_ignored) { - GPR_TIMER_BEGIN("start_pick_locked", 0); - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - GPR_ASSERT(calld->connected_subchannel == NULL); - if (pick_subchannel_locked(exec_ctx, elem)) { - // Pick was returned synchronously. - if (calld->connected_subchannel == NULL) { - GRPC_ERROR_UNREF(calld->error); - calld->error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Call dropped by load balancing policy"); - waiting_for_pick_batches_fail(exec_ctx, elem, - GRPC_ERROR_REF(calld->error)); +static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx, + void *arg, + grpc_error *error_ignored) { + GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0); + grpc_transport_stream_op_batch *batch = arg; + grpc_call_element *elem = batch->handler_private.extra_arg; + call_data *calld = elem->call_data; + channel_data *chand = elem->channel_data; + /* need to recheck that another thread hasn't set the call */ + call_or_error coe = get_call_or_error(calld); + if (coe.error != GRPC_ERROR_NONE) { + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s", + chand, calld, grpc_error_string(coe.error)); + } + grpc_transport_stream_op_batch_finish_with_failure( + exec_ctx, batch, GRPC_ERROR_REF(coe.error)); + goto done; + } + if (coe.subchannel_call != NULL) { + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, + "chand=%p calld=%p: sending batch to subchannel_call=%p", chand, + calld, coe.subchannel_call); + } + grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch); + goto done; + } + // Add to waiting-for-pick list. If we succeed in getting a + // subchannel call below, we'll handle this batch (along with any + // other waiting batches) in waiting_for_pick_batches_resume_locked(). + waiting_for_pick_batches_add_locked(calld, batch); + // If this is a cancellation, cancel the pending pick (if any) and + // fail any pending batches. + if (batch->cancel_stream) { + grpc_error *error = batch->payload->cancel_stream.cancel_error; + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand, + calld, grpc_error_string(error)); + } + /* Stash a copy of cancel_error in our call data, so that we can use + it for subsequent operations. This ensures that if the call is + cancelled before any batches are passed down (e.g., if the deadline + is in the past when the call starts), we can return the right + error to the caller when the first batch does get passed down. 
*/ + set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)}); + if (calld->lb_policy != NULL) { + pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); } else { - // Create subchannel call. - create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE); + pick_after_resolver_result_cancel_locked(exec_ctx, elem, + GRPC_ERROR_REF(error)); } - } else { - // Pick will be done asynchronously. Add the call's polling entity to - // the channel's interested_parties, so that I/O for the resolver - // and LB policy can be done under it. - grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent, - chand->interested_parties); + waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); + goto done; } - GPR_TIMER_END("start_pick_locked", 0); + /* if we don't have a subchannel, try to get one */ + if (batch->send_initial_metadata) { + GPR_ASSERT(calld->connected_subchannel == NULL); + calld->initial_metadata_payload = batch->payload; + GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel"); + /* If a subchannel is not available immediately, the polling entity from + call_data should be provided to channel_data's interested_parties, so + that IO of the lb_policy and resolver could be done under it. */ + if (pick_subchannel_locked(exec_ctx, elem)) { + // Pick was returned synchronously. + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel"); + if (calld->connected_subchannel == NULL) { + grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Call dropped by load balancing policy"); + set_call_or_error(calld, + (call_or_error){.error = GRPC_ERROR_REF(error)}); + waiting_for_pick_batches_fail_locked(exec_ctx, elem, error); + } else { + // Create subchannel call. + create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE); + } + } else { + grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent, + chand->interested_parties); + } + } +done: + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, + "start_transport_stream_op_batch"); + GPR_TIMER_END("start_transport_stream_op_batch_locked", 0); } static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { @@ -1313,49 +1365,27 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { GRPC_ERROR_REF(error)); } +/* The logic here is fairly complicated, due to (a) the fact that we + need to handle the case where we receive the send op before the + initial metadata op, and (b) the need for efficiency, especially in + the streaming case. + + We use double-checked locking to initially see if initialization has been + performed. If it has not, we acquire the combiner and perform initialization. + If it has, we proceed on the fast path. */ static void cc_start_transport_stream_op_batch( grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_transport_stream_op_batch *batch) { call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; + if (GRPC_TRACER_ON(grpc_client_channel_trace) || + GRPC_TRACER_ON(grpc_trace_channel)) { + grpc_call_log_op(GPR_INFO, elem, batch); + } if (chand->deadline_checking_enabled) { grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem, batch); } - GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0); - // If we've previously been cancelled, immediately fail any new batches. 
- if (calld->error != GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s", - chand, calld, grpc_error_string(calld->error)); - } - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner); - goto done; - } - if (batch->cancel_stream) { - // Stash a copy of cancel_error in our call data, so that we can use - // it for subsequent operations. This ensures that if the call is - // cancelled before any batches are passed down (e.g., if the deadline - // is in the past when the call starts), we can return the right - // error to the caller when the first batch does get passed down. - GRPC_ERROR_UNREF(calld->error); - calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand, - calld, grpc_error_string(calld->error)); - } - // If we have a subchannel call, send the cancellation batch down. - // Otherwise, fail all pending batches. - if (calld->subchannel_call != NULL) { - grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch); - } else { - waiting_for_pick_batches_add(calld, batch); - waiting_for_pick_batches_fail(exec_ctx, elem, - GRPC_ERROR_REF(calld->error)); - } - goto done; - } // Intercept on_complete for recv_trailing_metadata so that we can // check retry throttle status. if (batch->recv_trailing_metadata) { @@ -1365,43 +1395,38 @@ static void cc_start_transport_stream_op_batch( grpc_schedule_on_exec_ctx); batch->on_complete = &calld->on_complete; } - // Check if we've already gotten a subchannel call. - // Note that once we have completed the pick, we do not need to enter - // the channel combiner, which is more efficient (especially for - // streaming calls). - if (calld->subchannel_call != NULL) { + /* try to (atomically) get the call */ + call_or_error coe = get_call_or_error(calld); + GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0); + if (coe.error != GRPC_ERROR_NONE) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: sending batch to subchannel_call=%p", chand, - calld, calld->subchannel_call); + gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s", + chand, calld, grpc_error_string(coe.error)); } - grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch); + grpc_transport_stream_op_batch_finish_with_failure( + exec_ctx, batch, GRPC_ERROR_REF(coe.error)); goto done; } - // We do not yet have a subchannel call. - // Add the batch to the waiting-for-pick list. - waiting_for_pick_batches_add(calld, batch); - // For batches containing a send_initial_metadata op, enter the channel - // combiner to start a pick. - if (batch->send_initial_metadata) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld); - } - GRPC_CLOSURE_SCHED( - exec_ctx, - GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked, - elem, grpc_combiner_scheduler(chand->combiner)), - GRPC_ERROR_NONE); - } else { - // For all other batches, release the call combiner. 
+ if (coe.subchannel_call != NULL) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, - "chand=%p calld=%p: saved batch, yeilding call combiner", chand, - calld); + "chand=%p calld=%p: sending batch to subchannel_call=%p", chand, + calld, coe.subchannel_call); } - GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, - "batch does not include send_initial_metadata"); + grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch); + goto done; + } + /* we failed; lock and figure out what to do */ + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld); } + GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch"); + batch->handler_private.extra_arg = elem; + GRPC_CLOSURE_SCHED( + exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure, + start_transport_stream_op_batch_locked, batch, + grpc_combiner_scheduler(chand->combiner)), + GRPC_ERROR_NONE); done: GPR_TIMER_END("cc_start_transport_stream_op_batch", 0); } @@ -1416,11 +1441,10 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, calld->path = grpc_slice_ref_internal(args->path); calld->call_start_time = args->start_time; calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); + calld->owning_call = args->call_stack; calld->arena = args->arena; - calld->call_combiner = args->call_combiner; if (chand->deadline_checking_enabled) { - grpc_deadline_state_init(exec_ctx, elem, args->call_stack, - args->call_combiner, calld->deadline); + grpc_deadline_state_init(exec_ctx, elem, args->call_stack, calld->deadline); } return GRPC_ERROR_NONE; } @@ -1439,12 +1463,13 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx, if (calld->method_params != NULL) { method_parameters_unref(calld->method_params); } - GRPC_ERROR_UNREF(calld->error); - if (calld->subchannel_call != NULL) { - grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call, + call_or_error coe = get_call_or_error(calld); + GRPC_ERROR_UNREF(coe.error); + if (coe.subchannel_call != NULL) { + grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call, then_schedule_closure); then_schedule_closure = NULL; - GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call, + GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call, "client_channel_destroy_call"); } GPR_ASSERT(calld->lb_policy == NULL); @@ -1483,6 +1508,7 @@ const grpc_channel_filter grpc_client_channel_filter = { sizeof(channel_data), cc_init_channel_elem, cc_destroy_channel_elem, + cc_get_peer, cc_get_channel_info, "client-channel", }; diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c index 299f26b4de..568bb2ba8d 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c @@ -132,5 +132,6 @@ const grpc_channel_filter grpc_client_load_reporting_filter = { 0, // sizeof(channel_data) init_channel_elem, destroy_channel_elem, + grpc_call_next_get_peer, grpc_channel_next_get_info, "client_load_reporting"}; diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c index 5cc8be7628..5788819331 100644 --- a/src/core/ext/filters/client_channel/subchannel.c +++ b/src/core/ext/filters/client_channel/subchannel.c @@ -724,6 +724,13 @@ void 
grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx, GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON); } +char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx, + grpc_subchannel_call *call) { + grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call); + grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0); + return top_elem->filter->get_peer(exec_ctx, top_elem); +} + void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx, grpc_subchannel_call *call, grpc_transport_stream_op_batch *op) { @@ -753,15 +760,13 @@ grpc_error *grpc_connected_subchannel_create_call( args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size); grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call); (*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call"); - const grpc_call_element_args call_args = { - .call_stack = callstk, - .server_transport_data = NULL, - .context = args->context, - .path = args->path, - .start_time = args->start_time, - .deadline = args->deadline, - .arena = args->arena, - .call_combiner = args->call_combiner}; + const grpc_call_element_args call_args = {.call_stack = callstk, + .server_transport_data = NULL, + .context = args->context, + .path = args->path, + .start_time = args->start_time, + .deadline = args->deadline, + .arena = args->arena}; grpc_error *error = grpc_call_stack_init( exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args); if (error != GRPC_ERROR_NONE) { diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h index 51d712f6a7..6d2abb04df 100644 --- a/src/core/ext/filters/client_channel/subchannel.h +++ b/src/core/ext/filters/client_channel/subchannel.h @@ -106,7 +106,6 @@ typedef struct { gpr_timespec deadline; gpr_arena *arena; grpc_call_context_element *context; - grpc_call_combiner *call_combiner; } grpc_connected_subchannel_call_args; grpc_error *grpc_connected_subchannel_create_call( @@ -151,6 +150,10 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx, grpc_subchannel_call *subchannel_call, grpc_transport_stream_op_batch *op); +/** continue querying for peer */ +char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx, + grpc_subchannel_call *subchannel_call); + /** Must be called once per call. Sets the 'then_schedule_closure' argument for call stack destruction. */ void grpc_subchannel_call_set_cleanup_closure( diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.c index 565b0679dc..6789903c95 100644 --- a/src/core/ext/filters/deadline/deadline_filter.c +++ b/src/core/ext/filters/deadline/deadline_filter.c @@ -34,56 +34,22 @@ // grpc_deadline_state // -// The on_complete callback used when sending a cancel_error batch down the -// filter stack. Yields the call combiner when the batch returns. -static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* ignored) { - grpc_deadline_state* deadline_state = arg; - GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner, - "got on_complete from cancel_stream batch"); - GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer"); -} - -// This is called via the call combiner, so access to deadline_state is -// synchronized. 
-static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - grpc_call_element* elem = arg; - grpc_deadline_state* deadline_state = elem->call_data; - grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op( - GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner, - deadline_state, grpc_schedule_on_exec_ctx)); - batch->cancel_stream = true; - batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error); - elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch); -} - // Timer callback. static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { grpc_call_element* elem = (grpc_call_element*)arg; grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; if (error != GRPC_ERROR_CANCELLED) { - error = grpc_error_set_int( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED); - grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner, - GRPC_ERROR_REF(error)); - GRPC_CLOSURE_INIT(&deadline_state->timer_callback, - send_cancel_op_in_call_combiner, elem, - grpc_schedule_on_exec_ctx); - GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner, - &deadline_state->timer_callback, error, - "deadline exceeded -- sending cancel_stream op"); - } else { - GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, - "deadline_timer"); + grpc_call_element_signal_error( + exec_ctx, elem, + grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED)); } + GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer"); } // Starts the deadline timer. -// This is called via the call combiner, so access to deadline_state is -// synchronized. 
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, gpr_timespec deadline) { @@ -92,39 +58,51 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, return; } grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; + grpc_deadline_timer_state cur_state; grpc_closure* closure = NULL; - switch (deadline_state->timer_state) { +retry: + cur_state = + (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state); + switch (cur_state) { case GRPC_DEADLINE_STATE_PENDING: // Note: We do not start the timer if there is already a timer return; case GRPC_DEADLINE_STATE_FINISHED: - deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING; - // If we've already created and destroyed a timer, we always create a - // new closure: we have no other guarantee that the inlined closure is - // not in use (it may hold a pending call to timer_callback) - closure = - GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx); + if (gpr_atm_rel_cas(&deadline_state->timer_state, + GRPC_DEADLINE_STATE_FINISHED, + GRPC_DEADLINE_STATE_PENDING)) { + // If we've already created and destroyed a timer, we always create a + // new closure: we have no other guarantee that the inlined closure is + // not in use (it may hold a pending call to timer_callback) + closure = GRPC_CLOSURE_CREATE(timer_callback, elem, + grpc_schedule_on_exec_ctx); + } else { + goto retry; + } break; case GRPC_DEADLINE_STATE_INITIAL: - deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING; - closure = - GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback, - elem, grpc_schedule_on_exec_ctx); + if (gpr_atm_rel_cas(&deadline_state->timer_state, + GRPC_DEADLINE_STATE_INITIAL, + GRPC_DEADLINE_STATE_PENDING)) { + closure = + GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback, + elem, grpc_schedule_on_exec_ctx); + } else { + goto retry; + } break; } - GPR_ASSERT(closure != NULL); + GPR_ASSERT(closure); GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer"); grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure, gpr_now(GPR_CLOCK_MONOTONIC)); } // Cancels the deadline timer. -// This is called via the call combiner, so access to deadline_state is -// synchronized. static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx, grpc_deadline_state* deadline_state) { - if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) { - deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED; + if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING, + GRPC_DEADLINE_STATE_FINISHED)) { grpc_timer_cancel(exec_ctx, &deadline_state->timer); } else { // timer was either in STATE_INITAL (nothing to cancel) @@ -153,7 +131,6 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state, // Callback and associated state for starting the timer after call stack // initialization has been completed. struct start_timer_after_init_state { - bool in_call_combiner; grpc_call_element* elem; gpr_timespec deadline; grpc_closure closure; @@ -161,29 +138,15 @@ struct start_timer_after_init_state { static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { struct start_timer_after_init_state* state = arg; - grpc_deadline_state* deadline_state = state->elem->call_data; - if (!state->in_call_combiner) { - // We are initially called without holding the call combiner, so we - // need to bounce ourselves into it. 
- state->in_call_combiner = true; - GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner, - &state->closure, GRPC_ERROR_REF(error), - "scheduling deadline timer"); - return; - } start_timer_if_needed(exec_ctx, state->elem, state->deadline); gpr_free(state); - GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner, - "done scheduling deadline timer"); } void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_call_stack* call_stack, - grpc_call_combiner* call_combiner, gpr_timespec deadline) { grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; deadline_state->call_stack = call_stack; - deadline_state->call_combiner = call_combiner; // Deadline will always be infinite on servers, so the timer will only be // set on clients with a finite deadline. deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); @@ -195,7 +158,7 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, // call stack initialization is finished. To avoid that problem, we // create a closure to start the timer, and we schedule that closure // to be run after call stack initialization is done. - struct start_timer_after_init_state* state = gpr_zalloc(sizeof(*state)); + struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state)); state->elem = elem; state->deadline = deadline; GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state, @@ -269,8 +232,7 @@ typedef struct server_call_data { static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, const grpc_call_element_args* args) { - grpc_deadline_state_init(exec_ctx, elem, args->call_stack, - args->call_combiner, args->deadline); + grpc_deadline_state_init(exec_ctx, elem, args->call_stack, args->deadline); return GRPC_ERROR_NONE; } @@ -348,6 +310,7 @@ const grpc_channel_filter grpc_client_deadline_filter = { 0, // sizeof(channel_data) init_channel_elem, destroy_channel_elem, + grpc_call_next_get_peer, grpc_channel_next_get_info, "deadline", }; @@ -362,6 +325,7 @@ const grpc_channel_filter grpc_server_deadline_filter = { 0, // sizeof(channel_data) init_channel_elem, destroy_channel_elem, + grpc_call_next_get_peer, grpc_channel_next_get_info, "deadline", }; diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h index 3eb102ad28..420bf7065a 100644 --- a/src/core/ext/filters/deadline/deadline_filter.h +++ b/src/core/ext/filters/deadline/deadline_filter.h @@ -31,8 +31,7 @@ typedef enum grpc_deadline_timer_state { typedef struct grpc_deadline_state { // We take a reference to the call stack for the timer callback. grpc_call_stack* call_stack; - grpc_call_combiner* call_combiner; - grpc_deadline_timer_state timer_state; + gpr_atm timer_state; grpc_timer timer; grpc_closure timer_callback; // Closure to invoke when the call is complete. @@ -51,7 +50,6 @@ typedef struct grpc_deadline_state { // assumes elem->call_data is zero'd void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_call_stack* call_stack, - grpc_call_combiner* call_combiner, gpr_timespec deadline); void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, grpc_call_element* elem); @@ -63,8 +61,6 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, // to ensure that the timer callback is not invoked while it is in the // process of being reset, which means that attempting to increase the // deadline may result in the timer being called twice. 
-// -// Note: Must be called while holding the call combiner. void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, gpr_timespec new_deadline); @@ -74,8 +70,6 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, // // Note: It is the caller's responsibility to chain to the next filter if // necessary after this function returns. -// -// Note: Must be called while holding the call combiner. void grpc_deadline_state_client_start_transport_stream_op_batch( grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_transport_stream_op_batch* op); diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.c index 99ddd08e6a..3ca01a41b5 100644 --- a/src/core/ext/filters/http/client/http_client_filter.c +++ b/src/core/ext/filters/http/client/http_client_filter.c @@ -36,7 +36,6 @@ static const size_t kMaxPayloadSizeForGet = 2048; typedef struct call_data { - grpc_call_combiner *call_combiner; // State for handling send_initial_metadata ops. grpc_linked_mdelem method; grpc_linked_mdelem scheme; @@ -216,13 +215,13 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg, call_data *calld = (call_data *)elem->call_data; if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error, calld->call_combiner); + exec_ctx, calld->send_message_batch, error); return; } error = pull_slice_from_send_message(exec_ctx, calld); if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error, calld->call_combiner); + exec_ctx, calld->send_message_batch, error); return; } // There may or may not be more to read, but we don't care. If we got @@ -415,7 +414,7 @@ static void hc_start_transport_stream_op_batch( done: if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error, calld->call_combiner); + exec_ctx, calld->send_message_batch, error); } else if (!batch_will_be_handled_asynchronously) { grpc_call_next_op(exec_ctx, elem, batch); } @@ -427,7 +426,6 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_element_args *args) { call_data *calld = (call_data *)elem->call_data; - calld->call_combiner = args->call_combiner; GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, recv_initial_metadata_ready, elem, grpc_schedule_on_exec_ctx); @@ -567,5 +565,6 @@ const grpc_channel_filter grpc_http_client_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, + grpc_call_next_get_peer, grpc_channel_next_get_info, "http-client"}; diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.c index 98a503cafc..a32819bfe4 100644 --- a/src/core/ext/filters/http/message_compress/message_compress_filter.c +++ b/src/core/ext/filters/http/message_compress/message_compress_filter.c @@ -35,29 +35,35 @@ #include "src/core/lib/surface/call.h" #include "src/core/lib/transport/static_metadata.h" -typedef enum { - // Initial metadata not yet seen. - INITIAL_METADATA_UNSEEN = 0, - // Initial metadata seen; compression algorithm set. - HAS_COMPRESSION_ALGORITHM, - // Initial metadata seen; no compression algorithm set. 
- NO_COMPRESSION_ALGORITHM, -} initial_metadata_state; +#define INITIAL_METADATA_UNSEEN 0 +#define HAS_COMPRESSION_ALGORITHM 2 +#define NO_COMPRESSION_ALGORITHM 4 + +#define CANCELLED_BIT ((gpr_atm)1) typedef struct call_data { - grpc_call_combiner *call_combiner; + grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */ grpc_linked_mdelem compression_algorithm_storage; grpc_linked_mdelem stream_compression_algorithm_storage; grpc_linked_mdelem accept_encoding_storage; grpc_linked_mdelem accept_stream_encoding_storage; + uint32_t remaining_slice_bytes; /** Compression algorithm we'll try to use. It may be given by incoming * metadata, or by the channel's default compression settings. */ grpc_compression_algorithm compression_algorithm; - initial_metadata_state send_initial_metadata_state; - grpc_error *cancel_error; - grpc_closure start_send_message_batch_in_call_combiner; + + /* Atomic recording the state of initial metadata; allowed values: + INITIAL_METADATA_UNSEEN - initial metadata op not seen + HAS_COMPRESSION_ALGORITHM - initial metadata seen; compression algorithm + set + NO_COMPRESSION_ALGORITHM - initial metadata seen; no compression algorithm + set + pointer - a stalled op containing a send_message that's waiting on initial + metadata + pointer | CANCELLED_BIT - request was cancelled with error pointed to */ + gpr_atm send_initial_metadata_state; + grpc_transport_stream_op_batch *send_message_batch; - grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */ grpc_slice_buffer_stream replacement_stream; grpc_closure *original_send_message_on_complete; grpc_closure send_message_on_complete; @@ -86,13 +92,13 @@ static bool skip_compression(grpc_call_element *elem, uint32_t flags, channel_data *channeld = elem->channel_data; if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) { - return true; + return 1; } if (has_compression_algorithm) { if (calld->compression_algorithm == GRPC_COMPRESS_NONE) { - return true; + return 1; } - return false; /* we have an actual call-specific algorithm */ + return 0; /* we have an actual call-specific algorithm */ } /* no per-call compression override */ return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE; @@ -220,18 +226,6 @@ static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg, GRPC_ERROR_REF(error)); } -static void send_message_batch_continue(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - call_data *calld = (call_data *)elem->call_data; - // Note: The call to grpc_call_next_op() results in yielding the - // call combiner, so we need to clear calld->send_message_batch - // before we do that. 
- grpc_transport_stream_op_batch *send_message_batch = - calld->send_message_batch; - calld->send_message_batch = NULL; - grpc_call_next_op(exec_ctx, elem, send_message_batch); -} - static void finish_send_message(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { call_data *calld = (call_data *)elem->call_data; @@ -240,8 +234,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx, grpc_slice_buffer_init(&tmp); uint32_t send_flags = calld->send_message_batch->payload->send_message.send_message->flags; - bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm, - &calld->slices, &tmp); + const bool did_compress = grpc_msg_compress( + exec_ctx, calld->compression_algorithm, &calld->slices, &tmp); if (did_compress) { if (GRPC_TRACER_ON(grpc_compression_trace)) { char *algo_name; @@ -279,19 +273,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx, calld->original_send_message_on_complete = calld->send_message_batch->on_complete; calld->send_message_batch->on_complete = &calld->send_message_on_complete; - send_message_batch_continue(exec_ctx, elem); -} - -static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error) { - call_data *calld = arg; - if (calld->send_message_batch != NULL) { - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error), - calld->call_combiner); - calld->send_message_batch = NULL; - } + grpc_call_next_op(exec_ctx, elem, calld->send_message_batch); } // Pulls a slice from the send_message byte stream and adds it to calld->slices. @@ -311,25 +293,21 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx, // If all data has been read, invokes finish_send_message(). Otherwise, // an async call to grpc_byte_stream_next() has been started, which will // eventually result in calling on_send_message_next_done(). -static void continue_reading_send_message(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { +static grpc_error *continue_reading_send_message(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem) { call_data *calld = (call_data *)elem->call_data; while (grpc_byte_stream_next( exec_ctx, calld->send_message_batch->payload->send_message.send_message, ~(size_t)0, &calld->on_send_message_next_done)) { grpc_error *error = pull_slice_from_send_message(exec_ctx, calld); - if (error != GRPC_ERROR_NONE) { - // Closure callback; does not take ownership of error. - fail_send_message_batch_in_call_combiner(exec_ctx, calld, error); - GRPC_ERROR_UNREF(error); - return; - } + if (error != GRPC_ERROR_NONE) return error; if (calld->slices.length == calld->send_message_batch->payload->send_message.send_message->length) { finish_send_message(exec_ctx, elem); break; } } + return GRPC_ERROR_NONE; } // Async callback for grpc_byte_stream_next(). @@ -337,37 +315,46 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)arg; call_data *calld = (call_data *)elem->call_data; - if (error != GRPC_ERROR_NONE) { - // Closure callback; does not take ownership of error. - fail_send_message_batch_in_call_combiner(exec_ctx, calld, error); - return; - } + if (error != GRPC_ERROR_NONE) goto fail; error = pull_slice_from_send_message(exec_ctx, calld); - if (error != GRPC_ERROR_NONE) { - // Closure callback; does not take ownership of error. 
@@ -337,37 +315,46 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
   grpc_call_element *elem = (grpc_call_element *)arg;
   call_data *calld = (call_data *)elem->call_data;
-  if (error != GRPC_ERROR_NONE) {
-    // Closure callback; does not take ownership of error.
-    fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
-    return;
-  }
+  if (error != GRPC_ERROR_NONE) goto fail;
   error = pull_slice_from_send_message(exec_ctx, calld);
-  if (error != GRPC_ERROR_NONE) {
-    // Closure callback; does not take ownership of error.
-    fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
-    GRPC_ERROR_UNREF(error);
-    return;
-  }
+  if (error != GRPC_ERROR_NONE) goto fail;
   if (calld->slices.length ==
       calld->send_message_batch->payload->send_message.send_message->length) {
     finish_send_message(exec_ctx, elem);
   } else {
-    continue_reading_send_message(exec_ctx, elem);
+    // This will either finish reading all of the data and invoke
+    // finish_send_message(), or else it will make an async call to
+    // grpc_byte_stream_next(), which will eventually result in calling
+    // this function again.
+    error = continue_reading_send_message(exec_ctx, elem);
+    if (error != GRPC_ERROR_NONE) goto fail;
   }
+  return;
+fail:
+  grpc_transport_stream_op_batch_finish_with_failure(
+      exec_ctx, calld->send_message_batch, error);
 }
 
-static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
-                                     grpc_error *unused) {
-  grpc_call_element *elem = (grpc_call_element *)arg;
+static void start_send_message_batch(grpc_exec_ctx *exec_ctx,
+                                     grpc_call_element *elem,
+                                     grpc_transport_stream_op_batch *batch,
+                                     bool has_compression_algorithm) {
   call_data *calld = (call_data *)elem->call_data;
-  if (skip_compression(
-          elem,
-          calld->send_message_batch->payload->send_message.send_message->flags,
-          calld->send_initial_metadata_state == HAS_COMPRESSION_ALGORITHM)) {
-    send_message_batch_continue(exec_ctx, elem);
+  if (!skip_compression(elem, batch->payload->send_message.send_message->flags,
+                        has_compression_algorithm)) {
+    calld->send_message_batch = batch;
+    // This will either finish reading all of the data and invoke
+    // finish_send_message(), or else it will make an async call to
+    // grpc_byte_stream_next(), which will eventually result in calling
+    // on_send_message_next_done().
+    grpc_error *error = continue_reading_send_message(exec_ctx, elem);
+    if (error != GRPC_ERROR_NONE) {
+      grpc_transport_stream_op_batch_finish_with_failure(
+          exec_ctx, calld->send_message_batch, error);
+    }
   } else {
-    continue_reading_send_message(exec_ctx, elem);
+    /* pass control down the stack */
+    grpc_call_next_op(exec_ctx, elem, batch);
   }
 }
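Both failure paths in on_send_message_next_done() now converge on a single fail: label that fails the pending batch exactly once and hands off error ownership in one place. The shape of that idiom, reduced to a stand-alone sketch with toy stand-ins for grpc_error:

    #include <stdio.h>

    typedef int err_t; /* toy stand-in for grpc_error* */
    #define ERR_NONE 0

    static err_t pull_next_slice(void) { return ERR_NONE; }
    static err_t keep_reading(void) { return 42; /* simulated failure */ }
    static void fail_batch(err_t e) { printf("batch failed with %d\n", e); }

    /* Every failure site jumps to one shared label, so the cleanup call
       happens exactly once no matter which step failed. */
    static void on_next_done(err_t error) {
      if (error != ERR_NONE) goto fail;
      error = pull_next_slice();
      if (error != ERR_NONE) goto fail;
      error = keep_reading();
      if (error != ERR_NONE) goto fail;
      return;
    fail:
      fail_batch(error);
    }
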
@@ -375,80 +362,95 @@ static void compress_start_transport_stream_op_batch(
     grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     grpc_transport_stream_op_batch *batch) {
   call_data *calld = elem->call_data;
+
   GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
-  // Handle cancel_stream.
+
   if (batch->cancel_stream) {
-    GRPC_ERROR_UNREF(calld->cancel_error);
-    calld->cancel_error =
-        GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
-    if (calld->send_message_batch != NULL) {
-      if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
-        GRPC_CALL_COMBINER_START(
-            exec_ctx, calld->call_combiner,
-            GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld,
-                                grpc_schedule_on_exec_ctx),
-            GRPC_ERROR_REF(calld->cancel_error), "failing send_message op");
-      } else {
-        grpc_byte_stream_shutdown(
-            exec_ctx,
-            calld->send_message_batch->payload->send_message.send_message,
-            GRPC_ERROR_REF(calld->cancel_error));
-      }
+    // TODO(roth): As part of the upcoming call combiner work, change
+    // this to call grpc_byte_stream_shutdown() on the incoming byte
+    // stream, to cancel any in-flight calls to grpc_byte_stream_next().
+    GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+    gpr_atm cur = gpr_atm_full_xchg(
+        &calld->send_initial_metadata_state,
+        CANCELLED_BIT | (gpr_atm)batch->payload->cancel_stream.cancel_error);
+    switch (cur) {
+      case HAS_COMPRESSION_ALGORITHM:
+      case NO_COMPRESSION_ALGORITHM:
+      case INITIAL_METADATA_UNSEEN:
+        break;
+      default:
+        if ((cur & CANCELLED_BIT) == 0) {
+          grpc_transport_stream_op_batch_finish_with_failure(
+              exec_ctx, (grpc_transport_stream_op_batch *)cur,
+              GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error));
+        } else {
+          GRPC_ERROR_UNREF((grpc_error *)(cur & ~CANCELLED_BIT));
+        }
+        break;
     }
-  } else if (calld->cancel_error != GRPC_ERROR_NONE) {
-    grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, batch, GRPC_ERROR_REF(calld->cancel_error),
-        calld->call_combiner);
-    goto done;
   }
-  // Handle send_initial_metadata.
+
   if (batch->send_initial_metadata) {
-    GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN);
     bool has_compression_algorithm;
     grpc_error *error = process_send_initial_metadata(
         exec_ctx, elem,
         batch->payload->send_initial_metadata.send_initial_metadata,
         &has_compression_algorithm);
     if (error != GRPC_ERROR_NONE) {
-      grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
-                                                         calld->call_combiner);
-      goto done;
+      grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
+                                                         error);
+      return;
    }
-    calld->send_initial_metadata_state = has_compression_algorithm
-                                             ? HAS_COMPRESSION_ALGORITHM
-                                             : NO_COMPRESSION_ALGORITHM;
-    // If we had previously received a batch containing a send_message op,
-    // handle it now.  Note that we need to re-enter the call combiner
-    // for this, since we can't send two batches down while holding the
-    // call combiner, since the connected_channel filter (at the bottom of
-    // the call stack) will release the call combiner for each batch it sees.
-    if (calld->send_message_batch != NULL) {
-      GRPC_CALL_COMBINER_START(
-          exec_ctx, calld->call_combiner,
-          &calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE,
-          "starting send_message after send_initial_metadata");
+    gpr_atm cur;
+  retry_send_im:
+    cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
+    GPR_ASSERT(cur != HAS_COMPRESSION_ALGORITHM &&
+               cur != NO_COMPRESSION_ALGORITHM);
+    if ((cur & CANCELLED_BIT) == 0) {
+      if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
+                           has_compression_algorithm
+                               ? HAS_COMPRESSION_ALGORITHM
+                               : NO_COMPRESSION_ALGORITHM)) {
+        goto retry_send_im;
+      }
+      if (cur != INITIAL_METADATA_UNSEEN) {
+        start_send_message_batch(exec_ctx, elem,
                                 (grpc_transport_stream_op_batch *)cur,
+                                 has_compression_algorithm);
+      }
     }
   }
-  // Handle send_message.
   if (batch->send_message) {
-    GPR_ASSERT(calld->send_message_batch == NULL);
-    calld->send_message_batch = batch;
-    // If we have not yet seen send_initial_metadata, then we have to
-    // wait.  We save the batch in calld and then drop the call
-    // combiner, which we'll have to pick up again later when we get
-    // send_initial_metadata.
-    if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
-      GRPC_CALL_COMBINER_STOP(
-          exec_ctx, calld->call_combiner,
-          "send_message batch pending send_initial_metadata");
-      goto done;
+    gpr_atm cur;
+  retry_send:
+    cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
+    switch (cur) {
+      case INITIAL_METADATA_UNSEEN:
+        if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
+                             (gpr_atm)batch)) {
+          goto retry_send;
+        }
+        break;
+      case HAS_COMPRESSION_ALGORITHM:
+      case NO_COMPRESSION_ALGORITHM:
+        start_send_message_batch(exec_ctx, elem, batch,
+                                 cur == HAS_COMPRESSION_ALGORITHM);
+        break;
+      default:
+        if (cur & CANCELLED_BIT) {
+          grpc_transport_stream_op_batch_finish_with_failure(
+              exec_ctx, batch,
+              GRPC_ERROR_REF((grpc_error *)(cur & ~CANCELLED_BIT)));
+        } else {
+          /* >1 send_message concurrently */
+          GPR_UNREACHABLE_CODE(break);
+        }
+    }
-    start_send_message_batch(exec_ctx, elem, GRPC_ERROR_NONE);
   } else {
-    // Pass control down the stack.
+    /* pass control down the stack */
     grpc_call_next_op(exec_ctx, elem, batch);
   }
-done:
+
   GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
 }
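Cancellation swaps the state word unconditionally with gpr_atm_full_xchg and then disposes of whatever was there before, while send_initial_metadata publishes its result through an acquire-load/release-CAS retry loop (retry_send_im above). A stand-alone reproduction of that protocol using C11 atomics in place of the gpr_atm primitives; all names here are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define UNSEEN ((uintptr_t)0)
    #define HAS_ALGO ((uintptr_t)2)
    #define NO_ALGO ((uintptr_t)4)
    #define CANCELLED ((uintptr_t)1)

    /* Publish "initial metadata seen": load the word, then install the
       small-integer state with a CAS, retrying until we either succeed or
       observe a concurrent cancellation. */
    static bool publish_metadata_seen(atomic_uintptr_t *st, bool has_algo,
                                      uintptr_t *prev) {
      uintptr_t cur = atomic_load_explicit(st, memory_order_acquire);
      for (;;) {
        if (cur & CANCELLED) return false; /* cancel won the race */
        if (atomic_compare_exchange_weak_explicit(
                st, &cur, has_algo ? HAS_ALGO : NO_ALGO,
                memory_order_acq_rel, memory_order_acquire)) {
          *prev = cur; /* may be a stalled batch pointer to resume */
          return true;
        }
        /* on failure, cur holds the freshly observed value; loop again */
      }
    }

    /* Cancel: unconditionally swap in (error | CANCELLED); the previous
       value tells the caller what cleanup is owed (fail a stalled batch,
       or release a previously stored error). */
    static uintptr_t cancel_state(atomic_uintptr_t *st, void *error) {
      return atomic_exchange_explicit(st, (uintptr_t)error | CANCELLED,
                                      memory_order_acq_rel);
    }
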
 
@@ -456,16 +458,16 @@ done:
 static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
                                   grpc_call_element *elem,
                                   const grpc_call_element_args *args) {
-  call_data *calld = (call_data *)elem->call_data;
-  calld->call_combiner = args->call_combiner;
-  calld->cancel_error = GRPC_ERROR_NONE;
+  /* grab pointers to our data from the call element */
+  call_data *calld = elem->call_data;
+
+  /* initialize members */
   grpc_slice_buffer_init(&calld->slices);
-  GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner,
-                    start_send_message_batch, elem, grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
                     on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
                     elem, grpc_schedule_on_exec_ctx);
+
   return GRPC_ERROR_NONE;
 }
 
@@ -473,9 +475,14 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
 static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                               const grpc_call_final_info *final_info,
                               grpc_closure *ignored) {
+  /* grab pointers to our data from the call element */
   call_data *calld = elem->call_data;
   grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
-  GRPC_ERROR_UNREF(calld->cancel_error);
+  gpr_atm imstate =
+      gpr_atm_no_barrier_load(&calld->send_initial_metadata_state);
+  if (imstate & CANCELLED_BIT) {
+    GRPC_ERROR_UNREF((grpc_error *)(imstate & ~CANCELLED_BIT));
+  }
 }
 
 /* Constructor for channel_data */
@@ -543,5 +550,6 @@ const grpc_channel_filter grpc_message_compress_filter = {
     sizeof(channel_data),
     init_channel_elem,
     destroy_channel_elem,
+    grpc_call_next_get_peer,
     grpc_channel_next_get_info,
-    "message_compress"};
+    "compress"};
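Note the ownership rule the destructor above relies on: the ref taken on the cancel error before the xchg is owned by the state word, and it is released either by a later canceller that finds CANCELLED_BIT already set, or, as here, by destroy_call_elem(). A toy refcount model of that hand-off (illustrative; grpc_error's real API differs):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { int refs; } toy_error; /* stand-in for grpc_error */

    static void toy_error_unref(toy_error *e) {
      assert(e->refs > 0);
      if (--e->refs == 0) free(e);
    }

    #define CANCELLED_BIT ((uintptr_t)1)

    /* Mirrors destroy_call_elem: if the call ends while cancelled, the
       state word still owns one ref on the parked error, and this is the
       last chance to drop it. */
    static void destroy_state(uintptr_t imstate) {
      if (imstate & CANCELLED_BIT) {
        toy_error_unref((toy_error *)(imstate & ~CANCELLED_BIT));
      }
    }
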
diff --git a/src/core/ext/filters/http/server/http_server_filter.c b/src/core/ext/filters/http/server/http_server_filter.c
index a10e69ba59..b145f12aff 100644
--- a/src/core/ext/filters/http/server/http_server_filter.c
+++ b/src/core/ext/filters/http/server/http_server_filter.c
@@ -32,8 +32,6 @@
 #define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
 
 typedef struct call_data {
-  grpc_call_combiner *call_combiner;
-
   grpc_linked_mdelem status;
   grpc_linked_mdelem content_type;
 
@@ -283,11 +281,7 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
     *calld->pp_recv_message = calld->payload_bin_delivered
                                   ? NULL
                                   : (grpc_byte_stream *)&calld->read_stream;
-    // Re-enter call combiner for recv_message_ready, since the surface
-    // code will release the call combiner for each callback it receives.
-    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
-                             calld->recv_message_ready, GRPC_ERROR_REF(err),
-                             "resuming recv_message_ready from on_complete");
+    GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
     calld->recv_message_ready = NULL;
     calld->payload_bin_delivered = true;
   }
@@ -299,20 +293,15 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
   grpc_call_element *elem = user_data;
   call_data *calld = elem->call_data;
   if (calld->seen_path_with_query) {
-    // Do nothing.  This is probably a GET request, and payload will be
-    // returned in hs_on_complete callback.
-    // Note that we release the call combiner here, so that other
-    // callbacks can run.
-    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
-                            "pausing recv_message_ready until on_complete");
+    /* do nothing. This is probably a GET request, and payload will be returned
+       in hs_on_complete callback. */
   } else {
     GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
   }
 }
 
-static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
-                                grpc_call_element *elem,
-                                grpc_transport_stream_op_batch *op) {
+static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+                         grpc_transport_stream_op_batch *op) {
   /* grab pointers to our data from the call element */
   call_data *calld = elem->call_data;
 
@@ -334,7 +323,10 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
         server_filter_outgoing_metadata(
             exec_ctx, elem,
             op->payload->send_initial_metadata.send_initial_metadata));
-    if (error != GRPC_ERROR_NONE) return error;
+    if (error != GRPC_ERROR_NONE) {
+      grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
+      return;
+    }
   }
 
   if (op->recv_initial_metadata) {
@@ -367,25 +359,21 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
     grpc_error *error = server_filter_outgoing_metadata(
         exec_ctx, elem,
         op->payload->send_trailing_metadata.send_trailing_metadata);
-    if (error != GRPC_ERROR_NONE) return error;
+    if (error != GRPC_ERROR_NONE) {
+      grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
+      return;
+    }
   }
-
-  return GRPC_ERROR_NONE;
 }
 
-static void hs_start_transport_stream_op_batch(
-    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
-    grpc_transport_stream_op_batch *op) {
-  call_data *calld = elem->call_data;
-  GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
-  grpc_error *error = hs_mutate_op(exec_ctx, elem, op);
-  if (error != GRPC_ERROR_NONE) {
-    grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error,
-                                                       calld->call_combiner);
-  } else {
-    grpc_call_next_op(exec_ctx, elem, op);
-  }
-  GPR_TIMER_END("hs_start_transport_stream_op_batch", 0);
+static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
+                                  grpc_call_element *elem,
+                                  grpc_transport_stream_op_batch *op) {
+  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+  GPR_TIMER_BEGIN("hs_start_transport_op", 0);
+  hs_mutate_op(exec_ctx, elem, op);
+  grpc_call_next_op(exec_ctx, elem, op);
+  GPR_TIMER_END("hs_start_transport_op", 0);
 }
 
 /* Constructor for call_data */
@@ -395,7 +383,6 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
   /* grab pointers to our data from the call element */
   call_data *calld = elem->call_data;
   /* initialize members */
-  calld->call_combiner = args->call_combiner;
   GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
                     grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem,
@@ -427,7 +414,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                  grpc_channel_element *elem) {}
 
 const grpc_channel_filter grpc_http_server_filter = {
-    hs_start_transport_stream_op_batch,
+    hs_start_transport_op,
     grpc_channel_next_op,
     sizeof(call_data),
     init_call_elem,
@@ -436,5 +423,6 @@ const grpc_channel_filter grpc_http_server_filter = {
     sizeof(channel_data),
     init_channel_elem,
     destroy_channel_elem,
+    grpc_call_next_get_peer,
     grpc_channel_next_get_info,
     "http-server"};
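For the GET path above, the filter parks the surface's recv_message_ready callback and runs it itself from hs_on_complete once the payload-bin data is available (GRPC_CLOSURE_RUN in the real code). A reduced model of that deferred hand-off, with hypothetical names throughout:

    #include <stddef.h>

    /* Stand-in for a parked grpc_closure: the filter stashes the caller's
       callback and runs it exactly once when the payload is ready. */
    typedef void (*cb_fn)(void *arg, int error);

    typedef struct {
      cb_fn recv_message_ready;
      void *recv_message_ready_arg;
      int payload_bin_delivered;
    } hs_call;

    static void on_complete(hs_call *c, int error) {
      if (c->recv_message_ready != NULL && !c->payload_bin_delivered) {
        cb_fn cb = c->recv_message_ready;
        c->recv_message_ready = NULL; /* deliver at most once */
        c->payload_bin_delivered = 1;
        cb(c->recv_message_ready_arg, error); /* run the parked callback */
      }
    }
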
diff --git a/src/core/ext/filters/load_reporting/load_reporting_filter.c b/src/core/ext/filters/load_reporting/load_reporting_filter.c
index 17e946937f..08474efb2e 100644
--- a/src/core/ext/filters/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/filters/load_reporting/load_reporting_filter.c
@@ -223,5 +223,6 @@ const grpc_channel_filter grpc_load_reporting_filter = {
     sizeof(channel_data),
     init_channel_elem,
     destroy_channel_elem,
+    grpc_call_next_get_peer,
     grpc_channel_next_get_info,
     "load_reporting"};
diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c
index 16c85a70d0..7d748b9c32 100644
--- a/src/core/ext/filters/max_age/max_age_filter.c
+++ b/src/core/ext/filters/max_age/max_age_filter.c
@@ -391,6 +391,7 @@ const grpc_channel_filter grpc_max_age_filter = {
     sizeof(channel_data),
     init_channel_elem,
     destroy_channel_elem,
+    grpc_call_next_get_peer,
     grpc_channel_next_get_info,
     "max_age"};
 
diff --git a/src/core/ext/filters/message_size/message_size_filter.c b/src/core/ext/filters/message_size/message_size_filter.c
index 47763b1deb..846c7df69a 100644
--- a/src/core/ext/filters/message_size/message_size_filter.c
+++ b/src/core/ext/filters/message_size/message_size_filter.c
@@ -68,7 +68,6 @@ static void* message_size_limits_create_from_json(const grpc_json* json) {
 }
 
 typedef struct call_data {
-  grpc_call_combiner* call_combiner;
   message_size_limits limits;
   // Receive closures are chained: we inject this closure as the
   // recv_message_ready up-call on transport_stream_op, and remember to
@@ -132,8 +131,7 @@ static void start_transport_stream_op_batch(
         exec_ctx, op,
         grpc_error_set_int(
             GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
             GRPC_ERROR_INT_GRPC_STATUS,
-            GRPC_STATUS_RESOURCE_EXHAUSTED),
-        calld->call_combiner);
+            GRPC_STATUS_RESOURCE_EXHAUSTED));
     gpr_free(message_string);
     return;
   }
@@ -154,7 +152,6 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                   const grpc_call_element_args* args) {
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
-  calld->call_combiner = args->call_combiner;
   calld->next_recv_message_ready = NULL;
   GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
                     grpc_schedule_on_exec_ctx);
@@ -262,6 +259,7 @@ const grpc_channel_filter grpc_message_size_filter = {
     sizeof(channel_data),
     init_channel_elem,
     destroy_channel_elem,
+    grpc_call_next_get_peer,
     grpc_channel_next_get_info,
     "message_size"};
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
index c8b2fe5f99..b4d2cb4b8c 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
@@ -177,6 +177,7 @@ const grpc_channel_filter grpc_workaround_cronet_compression_filter = {
     0,
     init_channel_elem,
     destroy_channel_elem,
+    grpc_call_next_get_peer,
     grpc_channel_next_get_info,
     "workaround_cronet_compression"};
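Each filter vtable in this commit regains a get_peer slot, and pass-through filters simply delegate to the element below them via grpc_call_next_get_peer. A toy model of that delegation; the structs here are illustrative, not gRPC's real grpc_channel_filter:

    #include <stddef.h>

    typedef struct call_elem call_elem;

    typedef struct {
      void (*start_transport_stream_op_batch)(call_elem *elem, void *batch);
      char *(*get_peer)(call_elem *elem); /* the slot restored here */
      const char *name;
    } filter_vtable;

    struct call_elem {
      const filter_vtable *vt;
      call_elem *next; /* element below us in the call stack */
    };

    /* Equivalent of grpc_call_next_get_peer: ask the next element down. */
    static char *call_next_get_peer(call_elem *elem) {
      return elem->next->vt->get_peer(elem->next);
    }
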
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 2f0ac85152..7541bd5c92 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -1370,28 +1370,17 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
                                "send_initial_metadata_finished");
       }
     }
-    if (op_payload->send_initial_metadata.peer_string != NULL) {
-      gpr_atm_rel_store(op_payload->send_initial_metadata.peer_string,
-                        (gpr_atm)gpr_strdup(t->peer_string));
-    }
   }
 
   if (op->send_message) {
     on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
     s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
     if (s->write_closed) {
-      // Return an error unless the client has already received trailing
-      // metadata from the server, since an application using a
-      // streaming call might send another message before getting a
-      // recv_message failure, breaking out of its loop, and then
-      // starting recv_trailing_metadata.
       grpc_chttp2_complete_closure_step(
           exec_ctx, t, s, &s->fetching_send_message_finished,
-          t->is_client && s->received_trailing_metadata
-              ? GRPC_ERROR_NONE
-              : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                    "Attempt to send message after stream was closed",
-                    &s->write_closed_error, 1),
+          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+              "Attempt to send message after stream was closed",
+              &s->write_closed_error, 1),
           "fetching_send_message_finished");
     } else {
       GPR_ASSERT(s->fetching_send_message == NULL);
@@ -1477,10 +1466,6 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
           op_payload->recv_initial_metadata.recv_initial_metadata;
       s->trailing_metadata_available =
          op_payload->recv_initial_metadata.trailing_metadata_available;
-      if (op_payload->recv_initial_metadata.peer_string != NULL) {
-        gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string,
-                          (gpr_atm)gpr_strdup(t->peer_string));
-      }
       grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
     }
 
@@ -1839,7 +1824,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
         }
       }
     }
-    if (s->read_closed && s->frame_storage.length == 0 && !pending_data &&
+    if (s->read_closed && s->frame_storage.length == 0 &&
+        (!pending_data || s->seen_error) &&
         s->recv_trailing_metadata_finished != NULL) {
       grpc_chttp2_incoming_metadata_buffer_publish(
           exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata);
@@ -2946,6 +2932,14 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
 }
 
 /*******************************************************************************
+ * INTEGRATION GLUE
+ */
+
+static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
+  return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
+}
+
+/*******************************************************************************
  * MONITORING
  */
 
 static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx,
@@ -2962,6 +2956,7 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
                                              perform_transport_op,
                                              destroy_stream,
                                              destroy_transport,
+                                             chttp2_get_peer,
                                              chttp2_get_endpoint};
 
 grpc_transport *grpc_create_chttp2_transport(
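The restored transport-level get_peer contract: the transport hands back a freshly allocated copy of its peer address (gpr_strdup of t->peer_string in chttp2; NULL in cronet, which has no meaningful peer), and the caller owns and frees the result. A sketch of that convention with a hypothetical transport type:

    #include <stdlib.h>
    #include <string.h>

    /* my_transport is illustrative, not gRPC's grpc_transport. */
    typedef struct {
      const char *peer_string; /* e.g. "ipv4:127.0.0.1:443" */
    } my_transport;

    static char *my_get_peer(my_transport *t) {
      /* strdup-style copy: the caller must free() it; returning the
         internal pointer would race with transport destruction. */
      size_t n = strlen(t->peer_string) + 1;
      char *copy = malloc(n);
      if (copy != NULL) memcpy(copy, t->peer_string, n);
      return copy;
    }
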
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 9fff30d54f..3c41a8958f 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -509,8 +509,6 @@ struct grpc_chttp2_stream {
   /** Are we buffering writes on this stream? If yes, we won't become writable
       until there's enough queued up in the flow_controlled_buffer */
   bool write_buffering;
-  /** Has trailing metadata been received. */
-  bool received_trailing_metadata;
 
   /** the error that resulted in this stream being read-closed */
   grpc_error *read_closed_error;
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 19bd86fd0c..18d163ee98 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -623,7 +623,6 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
           *s->trailing_metadata_available = true;
         }
         t->hpack_parser.on_header = on_trailing_header;
-        s->received_trailing_metadata = true;
       } else {
         GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
         t->hpack_parser.on_header = on_initial_header;
@@ -632,7 +631,6 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
     case 1:
       GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
       t->hpack_parser.on_header = on_trailing_header;
-      s->received_trailing_metadata = true;
       break;
     case 2:
       gpr_log(GPR_ERROR, "too many header frames received");
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index 09420d92e7..abb558982b 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -1386,6 +1386,10 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
 
 static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
 
+static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
+  return NULL;
+}
+
 static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
                                    grpc_transport *gt) {
   return NULL;
@@ -1404,6 +1408,7 @@ static const grpc_transport_vtable grpc_cronet_vtable = {
     perform_op,
     destroy_stream,
     destroy_transport,
+    get_peer,
     get_endpoint};
 
 grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.c
index b2d6f2d0c9..6f4b429ee2 100644
--- a/src/core/ext/transport/inproc/inproc_transport.c
+++ b/src/core/ext/transport/inproc/inproc_transport.c
@@ -1251,14 +1251,20 @@ static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
   // Nothing to do here
 }
 
+static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
+  return gpr_strdup("inproc");
+}
+
 static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
   return NULL;
 }
 
 static const grpc_transport_vtable inproc_vtable = {
-    sizeof(inproc_stream), "inproc",        init_stream,
-    set_pollset,           set_pollset_set, perform_stream_op,
-    perform_transport_op,  destroy_stream,  destroy_transport,
+    sizeof(inproc_stream), "inproc",
+    init_stream,           set_pollset,
+    set_pollset_set,       perform_stream_op,
+    perform_transport_op,  destroy_stream,
+    destroy_transport,     get_peer,
     get_endpoint};
 
 /*******************************************************************************