Diffstat (limited to 'src/core')
-rw-r--r-- | src/core/ext/client_config/client_channel.c | 41
-rw-r--r-- | src/core/ext/client_config/lb_policy.c | 10
-rw-r--r-- | src/core/ext/client_config/lb_policy.h | 10
-rw-r--r-- | src/core/ext/client_config/subchannel.c | 8
-rw-r--r-- | src/core/ext/lb_policy/grpclb/grpclb.c | 18
-rw-r--r-- | src/core/ext/lb_policy/pick_first/pick_first.c | 18
-rw-r--r-- | src/core/ext/lb_policy/round_robin/round_robin.c | 18
-rw-r--r-- | src/core/lib/channel/channel_stack.c | 19
-rw-r--r-- | src/core/lib/channel/channel_stack.h | 5
-rw-r--r-- | src/core/lib/channel/deadline_filter.c | 274
-rw-r--r-- | src/core/lib/channel/deadline_filter.h | 77
-rw-r--r-- | src/core/lib/channel/http_client_filter.c | 4
-rw-r--r-- | src/core/lib/iomgr/error.c | 59
-rw-r--r-- | src/core/lib/iomgr/error.h | 8
-rw-r--r-- | src/core/lib/iomgr/tcp_client_posix.c | 96
-rw-r--r-- | src/core/lib/surface/call.c | 88
-rw-r--r-- | src/core/lib/surface/completion_queue.h | 2
-rw-r--r-- | src/core/lib/surface/init.c | 7
-rw-r--r-- | src/core/lib/transport/transport.c | 6
19 files changed, 591 insertions, 177 deletions
diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_config/client_channel.c index be333f4e0d..2f5b94c4a2 100644 --- a/src/core/ext/client_config/client_channel.c +++ b/src/core/ext/client_config/client_channel.c @@ -45,6 +45,7 @@ #include "src/core/ext/client_config/subchannel.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/connected_channel.h" +#include "src/core/lib/channel/deadline_filter.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/profiling/timers.h" @@ -111,7 +112,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy_cancel_picks( exec_ctx, chand->lb_policy, /* mask= */ GRPC_INITIAL_METADATA_IGNORE_CONNECTIVITY, - /* check= */ 0); + /* check= */ 0, GRPC_ERROR_REF(error)); } grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error, reason); @@ -377,6 +378,14 @@ typedef enum { for initial metadata before trying to create a call object, and handling cancellation gracefully. */ typedef struct client_channel_call_data { + // State for handling deadlines. + // The code in deadline_filter.c requires this to be the first field. + // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state + // and this struct both independently store a pointer to the call + // stack and each has its own mutex. If/when we have time, find a way + // to avoid this without breaking the grpc_deadline_state abstraction. + grpc_deadline_state deadline_state; + /** either 0 for no call, 1 for cancelled, or a pointer to a grpc_subchannel_call */ gpr_atm subchannel_call; @@ -469,7 +478,7 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, gpr_atm_no_barrier_store(&calld->subchannel_call, 1); fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING( "Failed to create subchannel", &error, 1)); - } else if (1 == gpr_atm_acq_load(&calld->subchannel_call)) { + } else if (GET_CALL(calld) == CANCELLED_CALL) { /* already cancelled before subchannel became ready */ fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING( @@ -515,7 +524,7 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags, grpc_connected_subchannel **connected_subchannel, - grpc_closure *on_ready); + grpc_closure *on_ready, grpc_error *error); static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { @@ -526,7 +535,8 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL); } else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata, cpa->initial_metadata_flags, - cpa->connected_subchannel, cpa->on_ready)) { + cpa->connected_subchannel, cpa->on_ready, + GRPC_ERROR_NONE)) { grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL); } gpr_free(cpa); @@ -536,7 +546,7 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags, grpc_connected_subchannel **connected_subchannel, - grpc_closure *on_ready) { + grpc_closure *on_ready, grpc_error *error) { GPR_TIMER_BEGIN("pick_subchannel", 0); channel_data *chand = elem->channel_data; @@ -550,21 +560,24 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, if (initial_metadata == NULL) { if (chand->lb_policy != NULL) { grpc_lb_policy_cancel_pick(exec_ctx, 
chand->lb_policy, - connected_subchannel); + connected_subchannel, GRPC_ERROR_REF(error)); } for (closure = chand->waiting_for_config_closures.head; closure != NULL; closure = closure->next_data.next) { cpa = closure->cb_arg; if (cpa->connected_subchannel == connected_subchannel) { cpa->connected_subchannel = NULL; - grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, - GRPC_ERROR_CREATE("Pick cancelled"), NULL); + grpc_exec_ctx_sched( + exec_ctx, cpa->on_ready, + GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL); } } gpr_mu_unlock(&chand->mu); GPR_TIMER_END("pick_subchannel", 0); + GRPC_ERROR_UNREF(error); return true; } + GPR_ASSERT(error == GRPC_ERROR_NONE); if (chand->lb_policy != NULL) { grpc_lb_policy *lb_policy = chand->lb_policy; int r; @@ -613,6 +626,7 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op) { call_data *calld = elem->call_data; GRPC_CALL_LOG_OP(GPR_INFO, elem, op); + grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op); /* try to (atomically) get the call */ grpc_subchannel_call *call = GET_CALL(calld); GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0); @@ -657,12 +671,12 @@ retry: break; case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL: pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel, - NULL); + NULL, GRPC_ERROR_REF(op->cancel_error)); break; } gpr_mu_unlock(&calld->mu); - grpc_transport_stream_op_finish_with_failure(exec_ctx, op, - GRPC_ERROR_CANCELLED); + grpc_transport_stream_op_finish_with_failure( + exec_ctx, op, GRPC_ERROR_REF(op->cancel_error)); GPR_TIMER_END("cc_start_transport_stream_op", 0); return; } @@ -676,7 +690,8 @@ retry: GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel"); if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata, op->send_initial_metadata_flags, - &calld->connected_subchannel, &calld->next_step)) { + &calld->connected_subchannel, &calld->next_step, + GRPC_ERROR_NONE)) { calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING; GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel"); } @@ -709,6 +724,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_call_element_args *args) { call_data *calld = elem->call_data; + grpc_deadline_state_init(&calld->deadline_state, args->call_stack); gpr_atm_rel_store(&calld->subchannel_call, 0); gpr_mu_init(&calld->mu); calld->connected_subchannel = NULL; @@ -727,6 +743,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx, const grpc_call_final_info *final_info, void *and_free_memory) { call_data *calld = elem->call_data; + grpc_deadline_state_destroy(exec_ctx, &calld->deadline_state); grpc_subchannel_call *call = GET_CALL(calld); if (call != NULL && call != CANCELLED_CALL) { GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call"); diff --git a/src/core/ext/client_config/lb_policy.c b/src/core/ext/client_config/lb_policy.c index 8b980b2cca..3e498f5f56 100644 --- a/src/core/ext/client_config/lb_policy.c +++ b/src/core/ext/client_config/lb_policy.c @@ -110,16 +110,18 @@ int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, } void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - grpc_connected_subchannel **target) { - policy->vtable->cancel_pick(exec_ctx, policy, target); + grpc_connected_subchannel **target, + grpc_error *error) { + policy->vtable->cancel_pick(exec_ctx, policy, target, error); } void grpc_lb_policy_cancel_picks(grpc_exec_ctx *exec_ctx, 
grpc_lb_policy *policy, uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq) { + uint32_t initial_metadata_flags_eq, + grpc_error *error) { policy->vtable->cancel_picks(exec_ctx, policy, initial_metadata_flags_mask, - initial_metadata_flags_eq); + initial_metadata_flags_eq, error); } void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) { diff --git a/src/core/ext/client_config/lb_policy.h b/src/core/ext/client_config/lb_policy.h index a2f5446fc6..eb07ad3360 100644 --- a/src/core/ext/client_config/lb_policy.h +++ b/src/core/ext/client_config/lb_policy.h @@ -65,10 +65,10 @@ struct grpc_lb_policy_vtable { uint32_t initial_metadata_flags, grpc_connected_subchannel **target, grpc_closure *on_complete); void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - grpc_connected_subchannel **target); + grpc_connected_subchannel **target, grpc_error *error); void (*cancel_picks)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq); + uint32_t initial_metadata_flags_eq, grpc_error *error); void (*ping_one)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, grpc_closure *closure); @@ -139,7 +139,8 @@ void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, grpc_closure *closure); void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - grpc_connected_subchannel **target); + grpc_connected_subchannel **target, + grpc_error *error); /** Cancel all pending picks which have: (initial_metadata_flags & initial_metadata_flags_mask) == @@ -147,7 +148,8 @@ void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, void grpc_lb_policy_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq); + uint32_t initial_metadata_flags_eq, + grpc_error *error); /** Try to enter a READY connectivity state */ void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy); diff --git a/src/core/ext/client_config/subchannel.c b/src/core/ext/client_config/subchannel.c index 2c4364b259..d5894357d6 100644 --- a/src/core/ext/client_config/subchannel.c +++ b/src/core/ext/client_config/subchannel.c @@ -219,8 +219,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta, : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta); #ifdef GRPC_STREAM_REFCOUNT_DEBUG gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "SUBCHANNEL: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val, - old_val + delta, reason); + "SUBCHANNEL: %p %12s 0x%08d -> 0x%08d [%s]", c, purpose, (int)old_val, + (int)(old_val + delta), reason); #endif return old_val; } @@ -633,7 +633,9 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg, c->have_alarm = 1; grpc_connectivity_state_set( exec_ctx, &c->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_CREATE_REFERENCING("Connect Failed", &error, 1), + grpc_error_set_int( + GRPC_ERROR_CREATE_REFERENCING("Connect Failed", &error, 1), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE), "connect_failed"); gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now); const char *errmsg = grpc_error_string(error); diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c index af913d8a9d..550f5c5972 100644 --- a/src/core/ext/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/lb_policy/grpclb/grpclb.c @@ -533,7 +533,8 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, 
grpc_lb_policy *pol) { } static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_connected_subchannel **target) { + grpc_connected_subchannel **target, + grpc_error *error) { glb_lb_policy *glb_policy = (glb_lb_policy *)pol; gpr_mu_lock(&glb_policy->mu); pending_pick *pp = glb_policy->pending_picks; @@ -544,8 +545,9 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_polling_entity_del_from_pollset_set( exec_ctx, pp->pollent, glb_policy->base.interested_parties); *target = NULL; - grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete, - GRPC_ERROR_CANCELLED, NULL); + grpc_exec_ctx_sched( + exec_ctx, &pp->wrapped_on_complete, + GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL); gpr_free(pp); } else { pp->next = glb_policy->pending_picks; @@ -554,12 +556,14 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pp = next; } gpr_mu_unlock(&glb_policy->mu); + GRPC_ERROR_UNREF(error); } static grpc_call *lb_client_data_get_call(struct lb_client_data *lb_client); static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq) { + uint32_t initial_metadata_flags_eq, + grpc_error *error) { glb_lb_policy *glb_policy = (glb_lb_policy *)pol; gpr_mu_lock(&glb_policy->mu); if (glb_policy->lb_client != NULL) { @@ -574,8 +578,9 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, initial_metadata_flags_eq) { grpc_polling_entity_del_from_pollset_set( exec_ctx, pp->pollent, glb_policy->base.interested_parties); - grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete, - GRPC_ERROR_CANCELLED, NULL); + grpc_exec_ctx_sched( + exec_ctx, &pp->wrapped_on_complete, + GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL); gpr_free(pp); } else { pp->next = glb_policy->pending_picks; @@ -584,6 +589,7 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pp = next; } gpr_mu_unlock(&glb_policy->mu); + GRPC_ERROR_UNREF(error); } static void query_for_backends(grpc_exec_ctx *exec_ctx, diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c index 9decf70692..5962ec3327 100644 --- a/src/core/ext/lb_policy/pick_first/pick_first.c +++ b/src/core/ext/lb_policy/pick_first/pick_first.c @@ -128,7 +128,8 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { } static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_connected_subchannel **target) { + grpc_connected_subchannel **target, + grpc_error *error) { pick_first_lb_policy *p = (pick_first_lb_policy *)pol; pending_pick *pp; gpr_mu_lock(&p->mu); @@ -140,8 +141,9 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent, p->base.interested_parties); *target = NULL; - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE("Pick Cancelled"), NULL); + grpc_exec_ctx_sched( + exec_ctx, pp->on_complete, + GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL); gpr_free(pp); } else { pp->next = p->pending_picks; @@ -150,11 +152,13 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pp = next; } gpr_mu_unlock(&p->mu); + GRPC_ERROR_UNREF(error); } static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq) { + uint32_t 
initial_metadata_flags_eq, + grpc_error *error) { pick_first_lb_policy *p = (pick_first_lb_policy *)pol; pending_pick *pp; gpr_mu_lock(&p->mu); @@ -166,8 +170,9 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, initial_metadata_flags_eq) { grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent, p->base.interested_parties); - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE("Pick Cancelled"), NULL); + grpc_exec_ctx_sched( + exec_ctx, pp->on_complete, + GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL); gpr_free(pp); } else { pp->next = p->pending_picks; @@ -176,6 +181,7 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pp = next; } gpr_mu_unlock(&p->mu); + GRPC_ERROR_UNREF(error); } static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) { diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c index 7bcf608ab9..654fcf2d2a 100644 --- a/src/core/ext/lb_policy/round_robin/round_robin.c +++ b/src/core/ext/lb_policy/round_robin/round_robin.c @@ -281,7 +281,8 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { } static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_connected_subchannel **target) { + grpc_connected_subchannel **target, + grpc_error *error) { round_robin_lb_policy *p = (round_robin_lb_policy *)pol; pending_pick *pp; gpr_mu_lock(&p->mu); @@ -293,8 +294,9 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent, p->base.interested_parties); *target = NULL; - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_CANCELLED, - NULL); + grpc_exec_ctx_sched( + exec_ctx, pp->on_complete, + GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL); gpr_free(pp); } else { pp->next = p->pending_picks; @@ -303,11 +305,13 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pp = next; } gpr_mu_unlock(&p->mu); + GRPC_ERROR_UNREF(error); } static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq) { + uint32_t initial_metadata_flags_eq, + grpc_error *error) { round_robin_lb_policy *p = (round_robin_lb_policy *)pol; pending_pick *pp; gpr_mu_lock(&p->mu); @@ -320,8 +324,9 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent, p->base.interested_parties); *pp->target = NULL; - grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_CANCELLED, - NULL); + grpc_exec_ctx_sched( + exec_ctx, pp->on_complete, + GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL); gpr_free(pp); } else { pp->next = p->pending_picks; @@ -330,6 +335,7 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, pp = next; } gpr_mu_unlock(&p->mu); + GRPC_ERROR_UNREF(error); } static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) { diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c index 0655b9353f..4e734ad113 100644 --- a/src/core/lib/channel/channel_stack.c +++ b/src/core/lib/channel/channel_stack.c @@ -276,16 +276,16 @@ static void destroy_op(grpc_exec_ctx *exec_ctx, void *op, grpc_error *error) { } void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx, - grpc_call_element *cur_elem) { + grpc_call_element *elem) { 
grpc_transport_stream_op *op = gpr_malloc(sizeof(*op)); memset(op, 0, sizeof(*op)); op->cancel_error = GRPC_ERROR_CANCELLED; op->on_complete = grpc_closure_create(destroy_op, op); - grpc_call_next_op(exec_ctx, cur_elem, op); + elem->filter->start_transport_stream_op(exec_ctx, elem, op); } void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx, - grpc_call_element *cur_elem, + grpc_call_element *elem, grpc_status_code status, gpr_slice *optional_message) { grpc_transport_stream_op *op = gpr_malloc(sizeof(*op)); @@ -293,5 +293,16 @@ void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx, op->on_complete = grpc_closure_create(destroy_op, op); grpc_transport_stream_op_add_cancellation_with_message(op, status, optional_message); - grpc_call_next_op(exec_ctx, cur_elem, op); + elem->filter->start_transport_stream_op(exec_ctx, elem, op); +} + +void grpc_call_element_send_close_with_message(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + grpc_status_code status, + gpr_slice *optional_message) { + grpc_transport_stream_op *op = gpr_malloc(sizeof(*op)); + memset(op, 0, sizeof(*op)); + op->on_complete = grpc_closure_create(destroy_op, op); + grpc_transport_stream_op_add_close(op, status, optional_message); + elem->filter->start_transport_stream_op(exec_ctx, elem, op); } diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index 6b73cce380..eeaab17d39 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -290,6 +290,11 @@ void grpc_call_element_send_cancel_with_message(grpc_exec_ctx *exec_ctx, grpc_status_code status, gpr_slice *optional_message); +void grpc_call_element_send_close_with_message(grpc_exec_ctx *exec_ctx, + grpc_call_element *cur_elem, + grpc_status_code status, + gpr_slice *optional_message); + extern int grpc_trace_channel; #define GRPC_CALL_LOG_OP(sev, elem, op) \ diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c new file mode 100644 index 0000000000..010fedd7b7 --- /dev/null +++ b/src/core/lib/channel/deadline_filter.c @@ -0,0 +1,274 @@ +// +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// + +#include "src/core/lib/channel/deadline_filter.h" + +#include <stdbool.h> +#include <string.h> + +#include <grpc/support/log.h> +#include <grpc/support/sync.h> +#include <grpc/support/time.h> + +#include "src/core/lib/iomgr/timer.h" + +// +// grpc_deadline_state +// + +// Timer callback. +static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg, + grpc_error* error) { + grpc_call_element* elem = arg; + grpc_deadline_state* deadline_state = elem->call_data; + gpr_mu_lock(&deadline_state->timer_mu); + deadline_state->timer_pending = false; + gpr_mu_unlock(&deadline_state->timer_mu); + if (error != GRPC_ERROR_CANCELLED) { + gpr_slice msg = gpr_slice_from_static_string("Deadline Exceeded"); + grpc_call_element_send_cancel_with_message( + exec_ctx, elem, GRPC_STATUS_DEADLINE_EXCEEDED, &msg); + gpr_slice_unref(msg); + } + GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer"); +} + +// Starts the deadline timer. +static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + gpr_timespec deadline) { + grpc_deadline_state* deadline_state = elem->call_data; + deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); + if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) { + // Take a reference to the call stack, to be owned by the timer. + GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer"); + gpr_mu_lock(&deadline_state->timer_mu); + deadline_state->timer_pending = true; + grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback, + elem, gpr_now(GPR_CLOCK_MONOTONIC)); + gpr_mu_unlock(&deadline_state->timer_mu); + } +} + +// Cancels the deadline timer. +static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx, + grpc_deadline_state* deadline_state) { + gpr_mu_lock(&deadline_state->timer_mu); + if (deadline_state->timer_pending) { + grpc_timer_cancel(exec_ctx, &deadline_state->timer); + deadline_state->timer_pending = false; + } + gpr_mu_unlock(&deadline_state->timer_mu); +} + +// Callback run when the call is complete. +static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { + grpc_deadline_state* deadline_state = arg; + cancel_timer_if_needed(exec_ctx, deadline_state); + // Invoke the next callback. + deadline_state->next_on_complete->cb( + exec_ctx, deadline_state->next_on_complete->cb_arg, error); +} + +// Inject our own on_complete callback into op. 
+static void inject_on_complete_cb(grpc_deadline_state* deadline_state, + grpc_transport_stream_op* op) { + deadline_state->next_on_complete = op->on_complete; + grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state); + op->on_complete = &deadline_state->on_complete; +} + +void grpc_deadline_state_init(grpc_deadline_state* deadline_state, + grpc_call_stack* call_stack) { + memset(deadline_state, 0, sizeof(*deadline_state)); + deadline_state->call_stack = call_stack; + gpr_mu_init(&deadline_state->timer_mu); +} + +void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, + grpc_deadline_state* deadline_state) { + cancel_timer_if_needed(exec_ctx, deadline_state); + gpr_mu_destroy(&deadline_state->timer_mu); +} + +void grpc_deadline_state_client_start_transport_stream_op( + grpc_exec_ctx* exec_ctx, grpc_call_element* elem, + grpc_transport_stream_op* op) { + grpc_deadline_state* deadline_state = elem->call_data; + if (op->cancel_error != GRPC_ERROR_NONE || + op->close_error != GRPC_ERROR_NONE) { + cancel_timer_if_needed(exec_ctx, deadline_state); + } else { + // If we're sending initial metadata, get the deadline from the metadata + // and start the timer if needed. + if (op->send_initial_metadata != NULL) { + start_timer_if_needed(exec_ctx, elem, + op->send_initial_metadata->deadline); + } + // Make sure we know when the call is complete, so that we can cancel + // the timer. + if (op->recv_trailing_metadata != NULL) { + inject_on_complete_cb(deadline_state, op); + } + } +} + +// +// filter code +// + +// Constructor for channel_data. Used for both client and server filters. +static void init_channel_elem(grpc_exec_ctx* exec_ctx, + grpc_channel_element* elem, + grpc_channel_element_args* args) { + GPR_ASSERT(!args->is_last); +} + +// Destructor for channel_data. Used for both client and server filters. +static void destroy_channel_elem(grpc_exec_ctx* exec_ctx, + grpc_channel_element* elem) {} + +// Call data used for both client and server filter. +typedef struct base_call_data { + grpc_deadline_state deadline_state; +} base_call_data; + +// Additional call data used only for the server filter. +typedef struct server_call_data { + base_call_data base; // Must be first. + // The closure for receiving initial metadata. + grpc_closure recv_initial_metadata_ready; + // Received initial metadata batch. + grpc_metadata_batch* recv_initial_metadata; + // The original recv_initial_metadata_ready closure, which we chain to + // after our own closure is invoked. + grpc_closure* next_recv_initial_metadata_ready; +} server_call_data; + +// Constructor for call_data. Used for both client and server filters. +static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + grpc_call_element_args* args) { + base_call_data* calld = elem->call_data; + // Note: size of call data is different between client and server. + memset(calld, 0, elem->filter->sizeof_call_data); + grpc_deadline_state_init(&calld->deadline_state, args->call_stack); + return GRPC_ERROR_NONE; +} + +// Destructor for call_data. Used for both client and server filters. +static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, + const grpc_call_final_info* final_info, + void* and_free_memory) { + base_call_data* calld = elem->call_data; + grpc_deadline_state_destroy(exec_ctx, &calld->deadline_state); +} + +// Method for starting a call op for client filter. 
+static void client_start_transport_stream_op(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + grpc_transport_stream_op* op) { + grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op); + // Chain to next filter. + grpc_call_next_op(exec_ctx, elem, op); +} + +// Callback for receiving initial metadata on the server. +static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg, + grpc_error* error) { + grpc_call_element* elem = arg; + server_call_data* calld = elem->call_data; + // Get deadline from metadata and start the timer if needed. + start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline); + // Invoke the next callback. + calld->next_recv_initial_metadata_ready->cb( + exec_ctx, calld->next_recv_initial_metadata_ready->cb_arg, error); +} + +// Method for starting a call op for server filter. +static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx, + grpc_call_element* elem, + grpc_transport_stream_op* op) { + server_call_data* calld = elem->call_data; + if (op->cancel_error != GRPC_ERROR_NONE || + op->close_error != GRPC_ERROR_NONE) { + cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state); + } else { + // If we're receiving initial metadata, we need to get the deadline + // from the recv_initial_metadata_ready callback. So we inject our + // own callback into that hook. + if (op->recv_initial_metadata_ready != NULL) { + calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready; + calld->recv_initial_metadata = op->recv_initial_metadata; + grpc_closure_init(&calld->recv_initial_metadata_ready, + recv_initial_metadata_ready, elem); + op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready; + } + // Make sure we know when the call is complete, so that we can cancel + // the timer. + // Note that we trigger this on recv_trailing_metadata, even though + // the client never sends trailing metadata, because this is the + // hook that tells us when the call is complete on the server side. + if (op->recv_trailing_metadata != NULL) { + inject_on_complete_cb(&calld->base.deadline_state, op); + } + } + // Chain to next filter. + grpc_call_next_op(exec_ctx, elem, op); +} + +const grpc_channel_filter grpc_client_deadline_filter = { + client_start_transport_stream_op, + grpc_channel_next_op, + sizeof(base_call_data), + init_call_elem, + grpc_call_stack_ignore_set_pollset_or_pollset_set, + destroy_call_elem, + 0, // sizeof(channel_data) + init_channel_elem, + destroy_channel_elem, + grpc_call_next_get_peer, + "deadline", +}; + +const grpc_channel_filter grpc_server_deadline_filter = { + server_start_transport_stream_op, + grpc_channel_next_op, + sizeof(server_call_data), + init_call_elem, + grpc_call_stack_ignore_set_pollset_or_pollset_set, + destroy_call_elem, + 0, // sizeof(channel_data) + init_channel_elem, + destroy_channel_elem, + grpc_call_next_get_peer, + "deadline", +}; diff --git a/src/core/lib/channel/deadline_filter.h b/src/core/lib/channel/deadline_filter.h new file mode 100644 index 0000000000..a09f85afe6 --- /dev/null +++ b/src/core/lib/channel/deadline_filter.h @@ -0,0 +1,77 @@ +// +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef GRPC_CORE_LIB_CHANNEL_DEADLINE_FILTER_H +#define GRPC_CORE_LIB_CHANNEL_DEADLINE_FILTER_H + +#include "src/core/lib/channel/channel_stack.h" +#include "src/core/lib/iomgr/timer.h" + +// State used for filters that enforce call deadlines. +// Should be the first field in the filter's call_data. +typedef struct grpc_deadline_state { + // We take a reference to the call stack for the timer callback. + grpc_call_stack* call_stack; + // Guards access to timer_pending and timer. + gpr_mu timer_mu; + // True if the timer callback is currently pending. + bool timer_pending; + // The deadline timer. + grpc_timer timer; + // Closure to invoke when the call is complete. + // We use this to cancel the timer. + grpc_closure on_complete; + // The original on_complete closure, which we chain to after our own + // closure is invoked. + grpc_closure* next_on_complete; +} grpc_deadline_state; + +void grpc_deadline_state_init(grpc_deadline_state* call_data, + grpc_call_stack* call_stack); +void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, + grpc_deadline_state* call_data); + +// To be used in a filter's start_transport_stream_op() method to +// enforce call deadlines. +// It is the caller's responsibility to chain to the next filter if +// necessary after this function returns. +// REQUIRES: The first field in elem->call_data is a grpc_deadline_state. +void grpc_deadline_state_client_start_transport_stream_op( + grpc_exec_ctx* exec_ctx, grpc_call_element* elem, + grpc_transport_stream_op* op); + +// Deadline filters for direct client channels and server channels. +// Note: Deadlines for non-direct client channels are handled by the +// client_channel filter. 
+extern const grpc_channel_filter grpc_client_deadline_filter; +extern const grpc_channel_filter grpc_server_deadline_filter; + +#endif /* GRPC_CORE_LIB_CHANNEL_DEADLINE_FILTER_H */ diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c index edcc741ff6..1dc05fb20d 100644 --- a/src/core/lib/channel/http_client_filter.c +++ b/src/core/lib/channel/http_client_filter.c @@ -103,8 +103,8 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) { grpc_mdstr_as_c_string(md->value)); gpr_slice message = gpr_slice_from_copied_string(message_string); gpr_free(message_string); - grpc_call_element_send_cancel_with_message(a->exec_ctx, a->elem, - GRPC_STATUS_CANCELLED, &message); + grpc_call_element_send_close_with_message(a->exec_ctx, a->elem, + GRPC_STATUS_CANCELLED, &message); return NULL; } else if (md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) { return NULL; diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c index e366961936..29b322dced 100644 --- a/src/core/lib/iomgr/error.c +++ b/src/core/lib/iomgr/error.c @@ -324,6 +324,65 @@ const char *grpc_error_get_str(grpc_error *err, grpc_error_strs which) { return gpr_avl_get(err->strs, (void *)(uintptr_t)which); } +typedef struct { + grpc_error *error; + grpc_status_code code; + const char *msg; +} special_error_status_map; +static special_error_status_map error_status_map[] = { + {GRPC_ERROR_NONE, GRPC_STATUS_OK, ""}, + {GRPC_ERROR_CANCELLED, GRPC_STATUS_CANCELLED, "RPC cancelled"}, + {GRPC_ERROR_OOM, GRPC_STATUS_RESOURCE_EXHAUSTED, "Out of memory"}, +}; + +static grpc_error *recursively_find_error_with_status(grpc_error *error, + intptr_t *status) { + // If the error itself has a status code, return it. + if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, status)) { + return error; + } + // Otherwise, search through its children. + intptr_t key = 0; + while (true) { + grpc_error *child_error = gpr_avl_get(error->errs, (void *)key++); + if (child_error == NULL) break; + grpc_error *result = + recursively_find_error_with_status(child_error, status); + if (result != NULL) return result; + } + return NULL; +} + +void grpc_error_get_status(grpc_error *error, grpc_status_code *code, + const char **msg) { + // Handle special errors via the static map. + for (size_t i = 0; + i < sizeof(error_status_map) / sizeof(special_error_status_map); ++i) { + if (error == error_status_map[i].error) { + *code = error_status_map[i].code; + *msg = error_status_map[i].msg; + return; + } + } + // Populate code. + // Start with the parent error and recurse through the tree of children + // until we find the first one that has a status code. + intptr_t status = GRPC_STATUS_UNKNOWN; // Default in case we don't find one. + grpc_error *found_error = recursively_find_error_with_status(error, &status); + *code = (grpc_status_code)status; + // Now populate msg. + // If we found an error with a status code above, use that; otherwise, + // fall back to using the parent error. + if (found_error == NULL) found_error = error; + // If the error has a status message, use it. Otherwise, fall back to + // the error description. + *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_GRPC_MESSAGE); + if (*msg == NULL) { + *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_DESCRIPTION); + if (*msg == NULL) *msg = "uknown error"; // Just in case. 
+ } +} + grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) { GPR_TIMER_BEGIN("grpc_error_add_child", 0); grpc_error *new = copy_error_and_unref(src); diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h index 6c769accdb..7e2fd7a3bd 100644 --- a/src/core/lib/iomgr/error.h +++ b/src/core/lib/iomgr/error.h @@ -37,6 +37,7 @@ #include <stdbool.h> #include <stdint.h> +#include <grpc/status.h> #include <grpc/support/time.h> /// Opaque representation of an error. @@ -175,6 +176,13 @@ grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which, /// Returns NULL if the specified string is not set. /// Caller does NOT own return value. const char *grpc_error_get_str(grpc_error *error, grpc_error_strs which); + +/// A utility function to get the status code and message to be returned +/// to the application. If not set in the top-level message, looks +/// through child errors until it finds the first one with these attributes. +void grpc_error_get_status(grpc_error *error, grpc_status_code *code, + const char **msg); + /// Add a child error: an error that is believed to have contributed to this /// error occurring. Allows root causing high level errors from lower level /// errors that contributed to them. diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c index 80c7a3f128..3496b6094f 100644 --- a/src/core/lib/iomgr/tcp_client_posix.c +++ b/src/core/lib/iomgr/tcp_client_posix.c @@ -146,61 +146,57 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { grpc_timer_cancel(exec_ctx, &ac->alarm); gpr_mu_lock(&ac->mu); - if (error == GRPC_ERROR_NONE) { - do { - so_error_size = sizeof(so_error); - err = getsockopt(grpc_fd_wrapped_fd(fd), SOL_SOCKET, SO_ERROR, &so_error, - &so_error_size); - } while (err < 0 && errno == EINTR); - if (err < 0) { - error = GRPC_OS_ERROR(errno, "getsockopt"); - goto finish; - } else if (so_error != 0) { - if (so_error == ENOBUFS) { - /* We will get one of these errors if we have run out of - memory in the kernel for the data structures allocated - when you connect a socket. If this happens it is very - likely that if we wait a little bit then try again the - connection will work (since other programs or this - program will close their network connections and free up - memory). This does _not_ indicate that there is anything - wrong with the server we are connecting to, this is a - local problem. - - If you are looking at this code, then chances are that - your program or another program on the same computer - opened too many network connections. The "easy" fix: - don't do that! 
*/ - gpr_log(GPR_ERROR, "kernel out of buffers"); - gpr_mu_unlock(&ac->mu); - grpc_fd_notify_on_write(exec_ctx, fd, &ac->write_closure); - return; - } else { - switch (so_error) { - case ECONNREFUSED: - error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, errno); - error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, - "Connection refused"); - break; - default: - error = GRPC_OS_ERROR(errno, "getsockopt(SO_ERROR)"); - break; - } - goto finish; - } - } else { - grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd); - *ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str); - fd = NULL; - goto finish; - } - } else { + if (error != GRPC_ERROR_NONE) { error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, "Timeout occurred"); goto finish; } - GPR_UNREACHABLE_CODE(return ); + do { + so_error_size = sizeof(so_error); + err = getsockopt(grpc_fd_wrapped_fd(fd), SOL_SOCKET, SO_ERROR, &so_error, + &so_error_size); + } while (err < 0 && errno == EINTR); + if (err < 0) { + error = GRPC_OS_ERROR(errno, "getsockopt"); + goto finish; + } + + switch (so_error) { + case 0: + grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd); + *ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str); + fd = NULL; + break; + case ENOBUFS: + /* We will get one of these errors if we have run out of + memory in the kernel for the data structures allocated + when you connect a socket. If this happens it is very + likely that if we wait a little bit then try again the + connection will work (since other programs or this + program will close their network connections and free up + memory). This does _not_ indicate that there is anything + wrong with the server we are connecting to, this is a + local problem. + + If you are looking at this code, then chances are that + your program or another program on the same computer + opened too many network connections. The "easy" fix: + don't do that! */ + gpr_log(GPR_ERROR, "kernel out of buffers"); + gpr_mu_unlock(&ac->mu); + grpc_fd_notify_on_write(exec_ctx, fd, &ac->write_closure); + return; + case ECONNREFUSED: + /* This error shouldn't happen for anything other than connect(). */ + error = GRPC_OS_ERROR(so_error, "connect"); + break; + default: + /* We don't really know which syscall triggered the problem here, + so punt by reporting getsockopt(). */ + error = GRPC_OS_ERROR(so_error, "getsockopt(SO_ERROR)"); + break; + } finish: if (fd != NULL) { diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index 119f5e82ab..35ca060bec 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -126,8 +126,6 @@ struct grpc_call { /* client or server call */ bool is_client; - /* is the alarm set */ - bool have_alarm; /** has grpc_call_destroy been called */ bool destroy_called; /** flag indicating that cancellation is inherited */ @@ -170,9 +168,6 @@ struct grpc_call { /* Contexts for various subsystems (security, tracing, ...). 
*/ grpc_call_context_element context[GRPC_CONTEXT_COUNT]; - /* Deadline alarm - if have_alarm is non-zero */ - grpc_timer alarm; - /* for the client, extra metadata is initial metadata; for the server, it's trailing metadata */ grpc_linked_mdelem send_extra_metadata[MAX_SEND_EXTRA_METADATA_COUNT]; @@ -215,8 +210,6 @@ struct grpc_call { #define CALL_FROM_TOP_ELEM(top_elem) \ CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem)) -static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call, - gpr_timespec deadline); static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call, grpc_transport_stream_op *op); static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, @@ -264,21 +257,17 @@ grpc_call *grpc_call_create( call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); } } - call->send_deadline = - gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC); + send_deadline = gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC); GRPC_CHANNEL_INTERNAL_REF(channel, "call"); /* initial refcount dropped by grpc_call_destroy */ grpc_error *error = grpc_call_stack_init( &exec_ctx, channel_stack, 1, destroy_call, call, call->context, server_transport_data, CALL_STACK_FROM_CALL(call)); if (error != GRPC_ERROR_NONE) { - intptr_t status; - if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status)) - status = GRPC_STATUS_UNKNOWN; - const char *error_str = - grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION); - close_with_status(&exec_ctx, call, (grpc_status_code)status, - error_str == NULL ? "unknown error" : error_str); + grpc_status_code status; + const char *error_str; + grpc_error_get_status(error, &status, &error_str); + close_with_status(&exec_ctx, call, status, error_str); GRPC_ERROR_UNREF(error); } if (cq != NULL) { @@ -338,10 +327,7 @@ grpc_call *grpc_call_create( gpr_mu_unlock(&parent_call->mu); } - if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) != - 0) { - set_deadline_alarm(&exec_ctx, call, send_deadline); - } + call->send_deadline = send_deadline; grpc_exec_ctx_finish(&exec_ctx); GPR_TIMER_END("grpc_call_create", 0); return call; @@ -454,20 +440,11 @@ static void set_status_details(grpc_call *call, status_source source, static void set_status_from_error(grpc_call *call, status_source source, grpc_error *error) { - intptr_t status; - if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status)) { - set_status_code(call, source, (uint32_t)status); - } else { - set_status_code(call, source, GRPC_STATUS_INTERNAL); - } - const char *msg = grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE); - bool free_msg = false; - if (msg == NULL) { - free_msg = true; - msg = grpc_error_string(error); - } + grpc_status_code status; + const char *msg; + grpc_error_get_status(error, &status, &msg); + set_status_code(call, source, (uint32_t)status); set_status_details(call, source, grpc_mdstr_from_string(msg)); - if (free_msg) grpc_error_free_string(msg); } static void set_incoming_compression_algorithm( @@ -740,9 +717,6 @@ void grpc_call_destroy(grpc_call *c) { gpr_mu_lock(&c->mu); GPR_ASSERT(!c->destroy_called); c->destroy_called = 1; - if (c->have_alarm) { - grpc_timer_cancel(&exec_ctx, &c->alarm); - } cancel = !c->received_final_op; gpr_mu_unlock(&c->mu); if (cancel) grpc_call_cancel(c, NULL); @@ -780,7 +754,6 @@ typedef struct termination_closure { grpc_closure closure; grpc_call *call; grpc_error *error; - grpc_closure *op_closure; enum { TC_CANCEL, TC_CLOSE } type; 
grpc_transport_stream_op op; } termination_closure; @@ -797,7 +770,6 @@ static void done_termination(grpc_exec_ctx *exec_ctx, void *tcp, break; } GRPC_ERROR_UNREF(tc->error); - grpc_exec_ctx_sched(exec_ctx, tc->op_closure, GRPC_ERROR_NONE, NULL); gpr_free(tc); } @@ -817,7 +789,6 @@ static void send_close(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) { tc->op.close_error = tc->error; /* reuse closure to catch completion */ grpc_closure_init(&tc->closure, done_termination, tc); - tc->op_closure = tc->op.on_complete; tc->op.on_complete = &tc->closure; execute_op(exec_ctx, tc->call, &tc->op); } @@ -900,32 +871,6 @@ grpc_call *grpc_call_from_top_element(grpc_call_element *elem) { return CALL_FROM_TOP_ELEM(elem); } -static void call_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_call *call = arg; - gpr_mu_lock(&call->mu); - call->have_alarm = 0; - if (error != GRPC_ERROR_CANCELLED) { - cancel_with_status(exec_ctx, call, GRPC_STATUS_DEADLINE_EXCEEDED, - "Deadline Exceeded"); - } - gpr_mu_unlock(&call->mu); - GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "alarm"); -} - -static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call, - gpr_timespec deadline) { - if (call->have_alarm) { - gpr_log(GPR_ERROR, "Attempt to set deadline alarm twice"); - assert(0); - return; - } - GRPC_CALL_INTERNAL_REF(call, "alarm"); - call->have_alarm = 1; - call->send_deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - grpc_timer_init(exec_ctx, &call->alarm, call->send_deadline, call_alarm, call, - gpr_now(GPR_CLOCK_MONOTONIC)); -} - /* we offset status by a small amount when storing it into transport metadata as metadata cannot store a 0 value (which is used as OK for grpc_status_codes */ @@ -1274,14 +1219,6 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, GPR_TIMER_BEGIN("validate_filtered_metadata", 0); validate_filtered_metadata(exec_ctx, bctl); GPR_TIMER_END("validate_filtered_metadata", 0); - - if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) != - 0 && - !call->is_client) { - GPR_TIMER_BEGIN("set_deadline_alarm", 0); - set_deadline_alarm(exec_ctx, call, md->deadline); - GPR_TIMER_END("set_deadline_alarm", 0); - } } call->has_initial_md_been_received = true; @@ -1311,7 +1248,7 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, gpr_mu_lock(&call->mu); if (bctl->send_initial_metadata) { if (error != GRPC_ERROR_NONE) { - set_status_code(call, STATUS_FROM_CORE, GRPC_STATUS_UNAVAILABLE); + set_status_from_error(call, STATUS_FROM_CORE, error); } grpc_metadata_batch_destroy( &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]); @@ -1329,9 +1266,6 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, grpc_metadata_batch_filter(md, recv_trailing_filter, call); call->received_final_op = true; - if (call->have_alarm) { - grpc_timer_cancel(exec_ctx, &call->alarm); - } /* propagate cancellation to any interested children */ child_call = call->first_child; if (child_call != NULL) { diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h index 3049284f68..4dbf3aae63 100644 --- a/src/core/lib/surface/completion_queue.h +++ b/src/core/lib/surface/completion_queue.h @@ -57,6 +57,8 @@ typedef struct grpc_cq_completion { uintptr_t next; } grpc_cq_completion; +//#define GRPC_CQ_REF_COUNT_DEBUG + #ifdef GRPC_CQ_REF_COUNT_DEBUG void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason, const char *file, int line); diff --git 
a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c index edda0c85fa..9520006d86 100644 --- a/src/core/lib/surface/init.c +++ b/src/core/lib/surface/init.c @@ -43,6 +43,7 @@ #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/channel/compress_filter.h" #include "src/core/lib/channel/connected_channel.h" +#include "src/core/lib/channel/deadline_filter.h" #include "src/core/lib/channel/http_client_filter.h" #include "src/core/lib/channel/http_server_filter.h" #include "src/core/lib/debug/trace.h" @@ -99,6 +100,12 @@ static bool maybe_add_http_filter(grpc_channel_stack_builder *builder, static void register_builtin_channel_init() { grpc_channel_init_register_stage( + GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, + prepend_filter, (void *)&grpc_client_deadline_filter); + grpc_channel_init_register_stage( + GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, prepend_filter, + (void *)&grpc_server_deadline_filter); + grpc_channel_init_register_stage( GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, prepend_filter, (void *)&grpc_compress_filter); grpc_channel_init_register_stage( diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c index 08f9d7e8d9..82fc605218 100644 --- a/src/core/lib/transport/transport.c +++ b/src/core/lib/transport/transport.c @@ -47,7 +47,7 @@ void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) { gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count); gpr_log(GPR_DEBUG, "%s %p:%p REF %d->%d %s", refcount->object_type, - refcount, refcount->destroy.cb_arg, val, val + 1, reason); + refcount, refcount->destroy.cb_arg, (int)val, (int)val + 1, reason); #else void grpc_stream_ref(grpc_stream_refcount *refcount) { #endif @@ -59,7 +59,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount, const char *reason) { gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count); gpr_log(GPR_DEBUG, "%s %p:%p UNREF %d->%d %s", refcount->object_type, - refcount, refcount->destroy.cb_arg, val, val - 1, reason); + refcount, refcount->destroy.cb_arg, (int)val, (int)val - 1, reason); #else void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount) { @@ -224,7 +224,7 @@ void grpc_transport_stream_op_add_cancellation_with_message( error = GRPC_ERROR_CREATE("Call cancelled"); } error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, status); - add_error(op, &op->close_error, error); + add_error(op, &op->cancel_error, error); } void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op, |
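
The bulk of the new code is deadline_filter.{c,h}, which factors deadline handling out of call.c into a reusable grpc_deadline_state that a filter embeds as the first field of its call_data (client_channel.c now does exactly this). The following is a minimal sketch, not part of the commit, of a hypothetical client-side filter built on that helper; the "example_" names are placeholders, and only the grpc_deadline_state_* and channel-stack calls that appear in the diff are assumed to exist.

// Hypothetical filter built on the grpc_deadline_state helper added in this
// commit. Field order matters: deadline_filter.c treats elem->call_data as a
// grpc_deadline_state*, so it must be the first field.
#include <string.h>

#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/deadline_filter.h"

typedef struct example_call_data {
  grpc_deadline_state deadline_state;  // Must be the first field.
  // ... filter-specific per-call state would follow here ...
} example_call_data;

static grpc_error *example_init_call_elem(grpc_exec_ctx *exec_ctx,
                                          grpc_call_element *elem,
                                          grpc_call_element_args *args) {
  example_call_data *calld = elem->call_data;
  memset(calld, 0, sizeof(*calld));
  // Records the call stack so the deadline timer can take a ref on it.
  grpc_deadline_state_init(&calld->deadline_state, args->call_stack);
  return GRPC_ERROR_NONE;
}

static void example_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem,
                                      const grpc_call_final_info *final_info,
                                      void *and_free_memory) {
  example_call_data *calld = elem->call_data;
  // Cancels any pending deadline timer and destroys the timer mutex.
  grpc_deadline_state_destroy(exec_ctx, &calld->deadline_state);
}

static void example_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
                                              grpc_call_element *elem,
                                              grpc_transport_stream_op *op) {
  // Starts the timer when send_initial_metadata carries a finite deadline,
  // cancels it on cancel/close ops, and hooks on_complete via
  // recv_trailing_metadata so the timer is cancelled at call completion.
  grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
  grpc_call_next_op(exec_ctx, elem, op);
}

Used this way, the helper's timer callback sends a GRPC_STATUS_DEADLINE_EXCEEDED cancellation down the stack, which is exactly the behavior call.c previously implemented with its own alarm.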
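
The diff also adds grpc_error_get_status() to iomgr/error.{c,h}, and call.c switches its status-reporting paths over to it. A rough usage sketch under the same caveat (report_call_status below is illustrative, not from the commit):

#include <grpc/status.h>
#include <grpc/support/log.h>

#include "src/core/lib/iomgr/error.h"

// Illustrative only: map a (possibly nested) grpc_error to the status code
// and message that would be surfaced to the application, following the
// lookup order implemented by grpc_error_get_status() in this commit.
static void report_call_status(grpc_error *error) {
  grpc_status_code status;
  const char *msg;
  // Checks the special errors (NONE/CANCELLED/OOM) first, then walks the
  // error and its children for GRPC_ERROR_INT_GRPC_STATUS, falling back to
  // GRPC_STATUS_UNKNOWN and the error description when nothing is set.
  grpc_error_get_status(error, &status, &msg);
  gpr_log(GPR_INFO, "call finished with status %d: %s", (int)status, msg);
}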