Diffstat (limited to 'src/core/lib')
23 files changed, 461 insertions, 246 deletions
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c index ec973d4e7f..3fb2a60ac7 100644 --- a/src/core/lib/channel/channel_stack.c +++ b/src/core/lib/channel/channel_stack.c @@ -173,7 +173,6 @@ grpc_error *grpc_call_stack_init( grpc_slice path, gpr_timespec start_time, gpr_timespec deadline, grpc_call_stack *call_stack) { grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack); - grpc_call_element_args args; size_t count = channel_stack->count; grpc_call_element *call_elems; char *user_data; @@ -188,13 +187,15 @@ grpc_error *grpc_call_stack_init( /* init per-filter data */ grpc_error *first_error = GRPC_ERROR_NONE; - args.start_time = start_time; + const grpc_call_element_args args = { + .start_time = start_time, + .call_stack = call_stack, + .server_transport_data = transport_server_data, + .context = context, + .path = path, + .deadline = deadline, + }; for (i = 0; i < count; i++) { - args.call_stack = call_stack; - args.server_transport_data = transport_server_data; - args.context = context; - args.path = path; - args.deadline = deadline; call_elems[i].filter = channel_elems[i].filter; call_elems[i].channel_data = channel_elems[i].channel_data; call_elems[i].call_data = user_data; diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index 1cf07d43c2..1e943dc2e5 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -131,7 +131,7 @@ typedef struct { argument. */ grpc_error *(*init_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_call_element_args *args); + const grpc_call_element_args *args); void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_polling_entity *pollent); diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c index 22781c7839..aa41014a21 100644 --- a/src/core/lib/channel/compress_filter.c +++ b/src/core/lib/channel/compress_filter.c @@ -274,7 +274,7 @@ static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx, /* Constructor for call_data */ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_call_element_args *args) { + const grpc_call_element_args *args) { /* grab pointers to our data from the call element */ call_data *calld = elem->call_data; diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c index 068c61c92a..29796f7ca7 100644 --- a/src/core/lib/channel/connected_channel.c +++ b/src/core/lib/channel/connected_channel.c @@ -83,7 +83,7 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx, /* Constructor for call_data */ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_call_element_args *args) { + const grpc_call_element_args *args) { call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; int r = grpc_transport_init_stream( diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c index bc9a2effc2..f9668be0fa 100644 --- a/src/core/lib/channel/deadline_filter.c +++ b/src/core/lib/channel/deadline_filter.c @@ -52,9 +52,6 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { grpc_call_element* elem = arg; grpc_deadline_state* deadline_state = elem->call_data; - gpr_mu_lock(&deadline_state->timer_mu); - deadline_state->timer_pending = false; - gpr_mu_unlock(&deadline_state->timer_mu); if (error != 
GRPC_ERROR_CANCELLED) { grpc_call_element_signal_error( exec_ctx, elem, @@ -66,53 +63,64 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg, } // Starts the deadline timer. -static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx, - grpc_call_element* elem, - gpr_timespec deadline) { - grpc_deadline_state* deadline_state = elem->call_data; - deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - // Note: We do not start the timer if there is already a timer - // pending. This should be okay, because this is only called from two - // functions exported by this module: grpc_deadline_state_start(), which - // starts the initial timer, and grpc_deadline_state_reset(), which - // cancels any pre-existing timer before starting a new one. In - // particular, we want to ensure that if grpc_deadline_state_start() - // winds up trying to start the timer after grpc_deadline_state_reset() - // has already done so, we ignore the value from the former. - if (!deadline_state->timer_pending && - gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) { - // Take a reference to the call stack, to be owned by the timer. - GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer"); - deadline_state->timer_pending = true; - grpc_closure_init(&deadline_state->timer_callback, timer_callback, elem, - grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, - &deadline_state->timer_callback, - gpr_now(GPR_CLOCK_MONOTONIC)); - } -} static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, gpr_timespec deadline) { + deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); + if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) { + return; + } grpc_deadline_state* deadline_state = elem->call_data; - gpr_mu_lock(&deadline_state->timer_mu); - start_timer_if_needed_locked(exec_ctx, elem, deadline); - gpr_mu_unlock(&deadline_state->timer_mu); + grpc_deadline_timer_state cur_state; + grpc_closure* closure = NULL; +retry: + cur_state = + (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state); + switch (cur_state) { + case GRPC_DEADLINE_STATE_PENDING: + // Note: We do not start the timer if there is already a timer + return; + case GRPC_DEADLINE_STATE_FINISHED: + if (gpr_atm_rel_cas(&deadline_state->timer_state, + GRPC_DEADLINE_STATE_FINISHED, + GRPC_DEADLINE_STATE_PENDING)) { + // If we've already created and destroyed a timer, we always create a + // new closure: we have no other guarantee that the inlined closure is + // not in use (it may hold a pending call to timer_callback) + closure = grpc_closure_create(timer_callback, elem, + grpc_schedule_on_exec_ctx); + } else { + goto retry; + } + break; + case GRPC_DEADLINE_STATE_INITIAL: + if (gpr_atm_rel_cas(&deadline_state->timer_state, + GRPC_DEADLINE_STATE_INITIAL, + GRPC_DEADLINE_STATE_PENDING)) { + closure = + grpc_closure_init(&deadline_state->timer_callback, timer_callback, + elem, grpc_schedule_on_exec_ctx); + } else { + goto retry; + } + break; + } + GPR_ASSERT(closure); + GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer"); + grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure, + gpr_now(GPR_CLOCK_MONOTONIC)); } // Cancels the deadline timer. 
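Aside: the three-state timer lifecycle introduced above (INITIAL -> PENDING -> FINISHED, driven by compare-and-swap) can be modeled in isolation. A minimal sketch, using C11 <stdatomic.h> as a stand-in for the gpr_atm acquire-load and release-cas primitives in the diff; the function names and the main() harness are illustrative only, not grpc API:

/* Standalone model of the deadline-timer state machine; the timer itself
   is elided, only the state transitions are exercised. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum { STATE_INITIAL, STATE_PENDING, STATE_FINISHED } timer_state;

static atomic_int g_state = STATE_INITIAL;

/* INITIAL -> PENDING or FINISHED -> PENDING. Returns false if a timer is
   already pending. (The real filter additionally allocates a fresh closure
   on the FINISHED -> PENDING path, since the inlined one may still be in
   use by a pending timer_callback.) */
static bool try_start(void) {
  for (;;) {
    int cur = atomic_load_explicit(&g_state, memory_order_acquire);
    if (cur == STATE_PENDING) return false; /* timer already pending */
    /* On cas failure, loop again: this mirrors the 'goto retry' above */
    if (atomic_compare_exchange_weak_explicit(&g_state, &cur, STATE_PENDING,
                                              memory_order_acq_rel,
                                              memory_order_acquire)) {
      return true; /* winner: this is where grpc_timer_init() would run */
    }
  }
}

/* PENDING -> FINISHED. Only the winner of this transition cancels the
   timer; INITIAL and FINISHED mean there is nothing to cancel. */
static bool try_cancel(void) {
  int expected = STATE_PENDING;
  return atomic_compare_exchange_strong_explicit(&g_state, &expected,
                                                 STATE_FINISHED,
                                                 memory_order_acq_rel,
                                                 memory_order_acquire);
}

int main(void) {
  printf("start:  %d\n", try_start());  /* 1: INITIAL -> PENDING */
  printf("start:  %d\n", try_start());  /* 0: already pending */
  printf("cancel: %d\n", try_cancel()); /* 1: PENDING -> FINISHED */
  printf("cancel: %d\n", try_cancel()); /* 0: nothing to cancel */
  printf("start:  %d\n", try_start());  /* 1: FINISHED -> PENDING */
  return 0;
}

As in the filter, a timer is started only by winning the INITIAL -> PENDING or FINISHED -> PENDING transition, and only the winner of PENDING -> FINISHED cancels it; every cas loser either retries or returns.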
-static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx, - grpc_deadline_state* deadline_state) { - if (deadline_state->timer_pending) { - grpc_timer_cancel(exec_ctx, &deadline_state->timer); - deadline_state->timer_pending = false; - } -} static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx, grpc_deadline_state* deadline_state) { - gpr_mu_lock(&deadline_state->timer_mu); - cancel_timer_if_needed_locked(exec_ctx, deadline_state); - gpr_mu_unlock(&deadline_state->timer_mu); + if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING, + GRPC_DEADLINE_STATE_FINISHED)) { + grpc_timer_cancel(exec_ctx, &deadline_state->timer); + } else { + // timer was either in STATE_INITIAL (nothing to cancel) + // OR in STATE_FINISHED (again nothing to cancel) + } } // Callback run when the call is complete. @@ -120,8 +128,8 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { grpc_deadline_state* deadline_state = arg; cancel_timer_if_needed(exec_ctx, deadline_state); // Invoke the next callback. - deadline_state->next_on_complete->cb( - exec_ctx, deadline_state->next_on_complete->cb_arg, error); + grpc_closure_run(exec_ctx, deadline_state->next_on_complete, + GRPC_ERROR_REF(error)); } // Inject our own on_complete callback into op. @@ -138,14 +146,12 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_deadline_state* deadline_state = elem->call_data; memset(deadline_state, 0, sizeof(*deadline_state)); deadline_state->call_stack = call_stack; - gpr_mu_init(&deadline_state->timer_mu); } void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, grpc_call_element* elem) { grpc_deadline_state* deadline_state = elem->call_data; cancel_timer_if_needed(exec_ctx, deadline_state); - gpr_mu_destroy(&deadline_state->timer_mu); } // Callback and associated state for starting the timer after call stack @@ -187,10 +193,8 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, gpr_timespec new_deadline) { grpc_deadline_state* deadline_state = elem->call_data; - gpr_mu_lock(&deadline_state->timer_mu); - cancel_timer_if_needed_locked(exec_ctx, deadline_state); - start_timer_if_needed_locked(exec_ctx, elem, new_deadline); - gpr_mu_unlock(&deadline_state->timer_mu); + cancel_timer_if_needed(exec_ctx, deadline_state); + start_timer_if_needed(exec_ctx, elem, new_deadline); } void grpc_deadline_state_client_start_transport_stream_op( @@ -244,7 +248,7 @@ typedef struct server_call_data { // Constructor for call_data. Used for both client and server filters. static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, - grpc_call_element_args* args) { + const grpc_call_element_args* args) { // Note: size of call data is different between client and server.
memset(elem->call_data, 0, elem->filter->sizeof_call_data); grpc_deadline_state_init(exec_ctx, elem, args->call_stack); diff --git a/src/core/lib/channel/deadline_filter.h b/src/core/lib/channel/deadline_filter.h index bd2b84f79e..94717f6bc7 100644 --- a/src/core/lib/channel/deadline_filter.h +++ b/src/core/lib/channel/deadline_filter.h @@ -35,16 +35,18 @@ #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/iomgr/timer.h" +typedef enum grpc_deadline_timer_state { + GRPC_DEADLINE_STATE_INITIAL, + GRPC_DEADLINE_STATE_PENDING, + GRPC_DEADLINE_STATE_FINISHED +} grpc_deadline_timer_state; + // State used for filters that enforce call deadlines. // Must be the first field in the filter's call_data. typedef struct grpc_deadline_state { // We take a reference to the call stack for the timer callback. grpc_call_stack* call_stack; - // Guards access to timer_pending and timer. - gpr_mu timer_mu; - // True if the timer callback is currently pending. - bool timer_pending; - // The deadline timer. + gpr_atm timer_state; grpc_timer timer; grpc_closure timer_callback; // Closure to invoke when the call is complete. diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c index 49a2a980e0..c031533dd8 100644 --- a/src/core/lib/channel/http_client_filter.c +++ b/src/core/lib/channel/http_client_filter.c @@ -386,7 +386,7 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx, /* Constructor for call_data */ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_call_element_args *args) { + const grpc_call_element_args *args) { call_data *calld = elem->call_data; calld->on_done_recv_initial_metadata = NULL; calld->on_done_recv_trailing_metadata = NULL; diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c index bb185351a8..ce519f9c92 100644 --- a/src/core/lib/channel/http_server_filter.c +++ b/src/core/lib/channel/http_server_filter.c @@ -252,12 +252,11 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data, *calld->pp_recv_message = calld->payload_bin_delivered ? NULL : (grpc_byte_stream *)&calld->read_stream; - calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg, - err); + grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err)); calld->recv_message_ready = NULL; calld->payload_bin_delivered = true; } - calld->on_complete->cb(exec_ctx, calld->on_complete->cb_arg, err); + grpc_closure_run(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err)); } static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data, @@ -268,8 +267,7 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data, /* do nothing. This is probably a GET request, and payload will be returned in hs_on_complete callback. 
*/ } else { - calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg, - err); + grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err)); } } @@ -343,7 +341,7 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx, /* Constructor for call_data */ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_call_element_args *args) { + const grpc_call_element_args *args) { /* grab pointers to our data from the call element */ call_data *calld = elem->call_data; /* initialize members */ diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c index 5e22860cfb..22938c64b5 100644 --- a/src/core/lib/channel/message_size_filter.c +++ b/src/core/lib/channel/message_size_filter.c @@ -166,7 +166,7 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx, // Constructor for call_data. static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, - grpc_call_element_args* args) { + const grpc_call_element_args* args) { channel_data* chand = elem->channel_data; call_data* calld = elem->call_data; calld->next_recv_message_ready = NULL; diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h index ffacdac393..2613512acb 100644 --- a/src/core/lib/iomgr/error.h +++ b/src/core/lib/iomgr/error.h @@ -137,8 +137,8 @@ typedef enum { } grpc_error_times; /// The following "special" errors can be propagated without allocating memory. -/// They are always even so that other code (particularly combiner locks) can -/// safely use the lower bit for themselves. +/// They are always even so that other code (particularly combiner locks, +/// polling engines) can safely use the lower bit for themselves. #define GRPC_ERROR_NONE ((grpc_error *)NULL) #define GRPC_ERROR_OOM ((grpc_error *)2) diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c index fac3705142..11208b9ad1 100644 --- a/src/core/lib/iomgr/ev_epoll_linux.c +++ b/src/core/lib/iomgr/ev_epoll_linux.c @@ -68,7 +68,7 @@ static int grpc_polling_trace = 0; /* Disabled by default */ gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \ } -/* Uncomment the following enable extra checks on poll_object operations */ +/* Uncomment the following to enable extra checks on poll_object operations */ /* #define PO_DEBUG */ static int grpc_wakeup_signal = -1; @@ -140,24 +140,61 @@ struct grpc_fd { Ref/Unref by two to avoid altering the orphaned bit */ gpr_atm refst; - /* Indicates that the fd is shutdown and that any pending read/write closures - should fail */ - bool shutdown; - grpc_error *shutdown_error; /* reason for shutdown: set iff shutdown==true */ + /* Internally stores data of type (grpc_error *). If the FD is shutdown, this + contains the reason for shutdown (i.e a pointer to grpc_error) ORed with + FD_SHUTDOWN_BIT. Since address allocations are word-aligned, the lower bit + of (grpc_error *) addresses is guaranteed to be zero. Even if the + (grpc_error *) is of a special type like GRPC_ERROR_NONE, GRPC_ERROR_OOM + etc., the lower bit is guaranteed to be zero. - /* The fd is either closed or we relinquished control of it. In either cases, - this indicates that the 'fd' on this structure is no longer valid */ + Once an fd is shutdown, any pending or future read/write closures on the + fd should fail */ + gpr_atm shutdown_error; + + /* The fd is either closed or we relinquished control of it.
In either + case, this indicates that the 'fd' on this structure is no longer + valid */ bool orphaned; - /* TODO: sreek - Move this to a lockfree implementation */ - grpc_closure *read_closure; - grpc_closure *write_closure; + /* Closures to call when the fd is readable or writable respectively. These + fields contain one of the following values: + CLOSURE_READY : The fd has an I/O event of interest but there is no + closure yet to execute + + CLOSURE_NOT_READY : The fd has no I/O event of interest + + closure ptr : The closure to be executed when the fd has an I/O + event of interest + + shutdown_error | FD_SHUTDOWN_BIT : + 'shutdown_error' field ORed with FD_SHUTDOWN_BIT. + This indicates that the fd is shutdown. Since all + memory allocations are word-aligned, the lower two + bits of the shutdown_error pointer are always 0. So + it is safe to OR these with FD_SHUTDOWN_BIT + + Valid state transitions:
+
+      <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
+        |  |                         ^   |    ^                    |  |
+        |  |                         |   |    |                    |  |
+        |  +--------------4----------+   6    +---------2---------------+  |
+        |                                |                                 |
+        |                                v                                 |
+        +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+
+
+ For 1, 4 : See set_ready() function + For 2, 3 : See notify_on() function + For 5,6,7: See set_shutdown() function */ + gpr_atm read_closure; + gpr_atm write_closure; struct grpc_fd *freelist_next; grpc_closure *on_done_closure; - /* The pollset that last noticed that the fd is readable */ - grpc_pollset *read_notifier_pollset; + /* The pollset that last noticed that the fd is readable. The actual type + * stored in this is (grpc_pollset *) */ + gpr_atm read_notifier_pollset; grpc_iomgr_object iomgr_object; }; @@ -180,8 +217,10 @@ static void fd_unref(grpc_fd *fd); static void fd_global_init(void); static void fd_global_shutdown(void); -#define CLOSURE_NOT_READY ((grpc_closure *)0) -#define CLOSURE_READY ((grpc_closure *)1) +#define CLOSURE_NOT_READY ((gpr_atm)0) +#define CLOSURE_READY ((gpr_atm)2) + +#define FD_SHUTDOWN_BIT 1 /******************************************************************************* * Polling island Declarations @@ -908,7 +947,11 @@ static void unref_by(grpc_fd *fd, int n) { fd->freelist_next = fd_freelist; fd_freelist = fd; grpc_iomgr_unregister_object(&fd->iomgr_object); - if (fd->shutdown) GRPC_ERROR_UNREF(fd->shutdown_error); + + grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error); + /* Clear the least significant bit if it is set (in case fd was shutdown) */ + err = (grpc_error *)((intptr_t)err & ~FD_SHUTDOWN_BIT); + GRPC_ERROR_UNREF(err); gpr_mu_unlock(&fd_freelist_mu); } else { @@ -972,13 +1015,14 @@ static grpc_fd *fd_create(int fd, const char *name) { gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1); new_fd->fd = fd; - new_fd->shutdown = false; + gpr_atm_no_barrier_store(&new_fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE); new_fd->orphaned = false; - new_fd->read_closure = CLOSURE_NOT_READY; - new_fd->write_closure = CLOSURE_NOT_READY; + gpr_atm_no_barrier_store(&new_fd->read_closure, CLOSURE_NOT_READY); + gpr_atm_no_barrier_store(&new_fd->write_closure, CLOSURE_NOT_READY); + gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); + new_fd->freelist_next = NULL; new_fd->on_done_closure = NULL; - new_fd->read_notifier_pollset = NULL; gpr_mu_unlock(&new_fd->po.mu); @@ -1060,101 +1104,206 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, GRPC_ERROR_UNREF(error); } -static grpc_error *fd_shutdown_error(grpc_fd *fd) { - if (!fd->shutdown) { - return
GRPC_ERROR_NONE; - } else { - return GRPC_ERROR_CREATE_REFERENCING("FD shutdown", &fd->shutdown_error, 1); +static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state, + grpc_closure *closure) { + while (true) { + /* Fast-path: CLOSURE_NOT_READY -> <closure>. + The 'release' cas here matches the 'acquire' load in set_ready and + set_shutdown ensuring that the closure (scheduled by set_ready or + set_shutdown) happens-after the I/O event on the fd */ + if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) { + return; /* Fast-path successful. Return */ + } + + /* Slowpath. The 'acquire' load matches the 'release' cas in set_ready and + set_shutdown */ + gpr_atm curr = gpr_atm_acq_load(state); + switch (curr) { + case CLOSURE_NOT_READY: { + break; /* retry */ + } + + case CLOSURE_READY: { + /* Change the state to CLOSURE_NOT_READY. Schedule the closure if + successful. If not, the state most likely transitioned to shutdown. + We should retry. + + This can be a no-barrier cas since the state is being transitioned to + CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any + closure when transitioning out of CLOSURE_NOT_READY state (i.e there + is no other code that needs to 'happen-after' this) */ + if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) { + grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE); + return; /* Slow-path successful. Return */ + } + + break; /* retry */ + } + + default: { + /* 'curr' is either a closure or the fd is shutdown (in which case 'curr' + contains a pointer to the shutdown-error). If the fd is shutdown, + schedule the closure with the shutdown error */ + if ((curr & FD_SHUTDOWN_BIT) > 0) { + grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT); + grpc_closure_sched( + exec_ctx, closure, + GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1)); + return; + } + + /* There is already a closure! This indicates a bug in the code */ + gpr_log(GPR_ERROR, + "notify_on called with a previous callback still pending"); + abort(); + } + } } + + GPR_UNREACHABLE_CODE(return ); } -static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure **st, grpc_closure *closure) { - if (fd->shutdown) { - grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown")); - } else if (*st == CLOSURE_NOT_READY) { - /* not ready ==> switch to a waiting state by setting the closure */ - *st = closure; - } else if (*st == CLOSURE_READY) { - /* already ready ==> queue the closure to run immediately */ - *st = CLOSURE_NOT_READY; - grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd)); - } else { - /* upcallptr was set to a different closure. This is an error! */ - gpr_log(GPR_ERROR, - "User called a notify_on function with a previous callback still " - "pending"); - abort(); +static void set_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state, + grpc_error *shutdown_err) { + /* Try the fast-path first (i.e expect the current value to be + CLOSURE_NOT_READY) */ + gpr_atm curr = CLOSURE_NOT_READY; + gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT; + + while (true) { + /* The 'release' cas here matches the 'acquire' load in notify_on to ensure + that the closure it schedules 'happens-after' the set_shutdown is called + on the fd */ + if (gpr_atm_rel_cas(state, curr, new_state)) { + return; /* Fast-path successful. Return */ + } + + /* Fallback to slowpath.
This 'acquire' load matches the 'release' cas in + notify_on and set_ready */ curr = gpr_atm_acq_load(state); switch (curr) { case CLOSURE_READY: { break; /* retry */ } + + case CLOSURE_NOT_READY: { + break; /* retry */ + } + + default: { + /* 'curr' is either a closure or the fd is already shutdown */ + + /* If fd is already shutdown, we are done */ + if ((curr & FD_SHUTDOWN_BIT) > 0) { + return; + } + + /* Fd is not shutdown. Schedule the closure and move the state to + shutdown state. The 'release' cas here matches the 'acquire' load in + notify_on to ensure that the closure it schedules 'happens-after' + the set_shutdown is called on the fd */ + if (gpr_atm_rel_cas(state, curr, new_state)) { + grpc_closure_sched( + exec_ctx, (grpc_closure *)curr, + GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1)); + return; + } + + /* 'curr' was a closure but now changed to a different state. We will + have to retry */ + break; + } + } } + + GPR_UNREACHABLE_CODE(return ); } -/* returns 1 if state becomes not ready */ -static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure **st) { - if (*st == CLOSURE_READY) { - /* duplicate ready ==> ignore */ - return 0; - } else if (*st == CLOSURE_NOT_READY) { - /* not ready, and not waiting ==> flag ready */ - *st = CLOSURE_READY; - return 0; - } else { - /* waiting ==> queue closure */ - grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd)); - *st = CLOSURE_NOT_READY; - return 1; +static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state) { + /* Try an optimistic case first (i.e assume current state is + CLOSURE_NOT_READY). + + This 'release' cas matches the 'acquire' load in notify_on ensuring that + any closure (scheduled by notify_on) 'happens-after' the return from + epoll_pwait */ + if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) { + return; /* early out */ + } + + /* The 'acquire' load here matches the 'release' cas in notify_on and + set_shutdown */ + gpr_atm curr = gpr_atm_acq_load(state); + switch (curr) { + case CLOSURE_READY: { + /* Already ready. We are done here */ + break; + } + + case CLOSURE_NOT_READY: { + /* The state was not CLOSURE_NOT_READY when we checked initially at the + beginning of this function but now it is CLOSURE_NOT_READY again. + This is only possible if the state transitioned out of + CLOSURE_NOT_READY to either CLOSURE_READY or <some closure> and then + back to CLOSURE_NOT_READY again (i.e after we entered this function, + the fd became "ready" and the necessary actions were already done). + So there is no need to make the state CLOSURE_READY now */ + break; + } + + default: { + /* 'curr' is either a closure or the fd is shutdown */ + if ((curr & FD_SHUTDOWN_BIT) > 0) { + /* The fd is shutdown. Do nothing */ + } else if (gpr_atm_no_barrier_cas(state, curr, CLOSURE_NOT_READY)) { + /* The cas above was no-barrier since the state is being transitioned to + CLOSURE_NOT_READY; notify_on and set_shutdown do not schedule any + closures when transitioning out of CLOSURE_NOT_READY state (i.e there + is no other code that needs to 'happen-after' this) */ + + grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE); + } + /* else the state changed again (only possible by a racing + set_ready or set_shutdown). In both these cases, the closure + would have been scheduled for execution.
So we are done here */ + break; + } } } static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_pollset *notifier = NULL; - - gpr_mu_lock(&fd->po.mu); - notifier = fd->read_notifier_pollset; - gpr_mu_unlock(&fd->po.mu); - - return notifier; + gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); + return (grpc_pollset *)notifier; } static bool fd_is_shutdown(grpc_fd *fd) { - gpr_mu_lock(&fd->po.mu); - const bool r = fd->shutdown; - gpr_mu_unlock(&fd->po.mu); - return r; + grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error); + return (((intptr_t)err & FD_SHUTDOWN_BIT) > 0); } /* Might be called multiple times */ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - gpr_mu_lock(&fd->po.mu); - /* Do the actual shutdown only once */ - if (!fd->shutdown) { - fd->shutdown = true; - fd->shutdown_error = why; - + /* Store the shutdown error ORed with FD_SHUTDOWN_BIT in fd->shutdown_error */ + if (gpr_atm_rel_cas(&fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE, + (gpr_atm)why | FD_SHUTDOWN_BIT)) { shutdown(fd->fd, SHUT_RDWR); - /* Flush any pending read and write closures. Since fd->shutdown is 'true' - at this point, the closures would be called with 'success = false' */ - set_ready_locked(exec_ctx, fd, &fd->read_closure); - set_ready_locked(exec_ctx, fd, &fd->write_closure); + + set_shutdown(exec_ctx, fd, &fd->read_closure, why); + set_shutdown(exec_ctx, fd, &fd->write_closure, why); } else { + /* Shutdown already called */ GRPC_ERROR_UNREF(why); } - gpr_mu_unlock(&fd->po.mu); } static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - gpr_mu_lock(&fd->po.mu); - notify_on_locked(exec_ctx, fd, &fd->read_closure, closure); - gpr_mu_unlock(&fd->po.mu); + notify_on(exec_ctx, fd, &fd->read_closure, closure); } static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - gpr_mu_lock(&fd->po.mu); - notify_on_locked(exec_ctx, fd, &fd->write_closure, closure); - gpr_mu_unlock(&fd->po.mu); + notify_on(exec_ctx, fd, &fd->write_closure, closure); } static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { @@ -1343,18 +1492,19 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *notifier) { - /* Need the fd->po.mu since we might be racing with fd_notify_on_read */ - gpr_mu_lock(&fd->po.mu); - set_ready_locked(exec_ctx, fd, &fd->read_closure); - fd->read_notifier_pollset = notifier; - gpr_mu_unlock(&fd->po.mu); + set_ready(exec_ctx, fd, &fd->read_closure); + + /* Note, it is possible that fd_become_readable might be called twice with + different 'notifier's when an fd becomes readable and it is in two epoll + sets (This can happen briefly during polling island merges). 
In such cases + it does not really matter which notifer is set as the read_notifier_pollset + (They would both point to the same polling island anyway) */ + /* Use release store to match with acquire load in fd_get_read_notifier */ + gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); } static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - /* Need the fd->po.mu since we might be racing with fd_notify_on_write */ - gpr_mu_lock(&fd->po.mu); - set_ready_locked(exec_ctx, fd, &fd->write_closure); - gpr_mu_unlock(&fd->po.mu); + set_ready(exec_ctx, fd, &fd->write_closure); } static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, @@ -1737,7 +1887,7 @@ retry: (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type), (void *)bag); /* No need to lock 'pi_new' here since this is a new polling island - * and no one has a reference to it yet */ + and no one has a reference to it yet */ polling_island_remove_all_fds_locked(pi_new, true, &error); /* Ref and unref so that the polling island gets deleted during unref diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c index 9b5f3209f0..79ff910738 100644 --- a/src/core/lib/iomgr/resolve_address_uv.c +++ b/src/core/lib/iomgr/resolve_address_uv.c @@ -113,14 +113,15 @@ static grpc_error *try_split_host_port(const char *name, /* parse name, splitting it into host and port parts */ grpc_error *error; gpr_split_host_port(name, host, port); - if (host == NULL) { + if (*host == NULL) { char *msg; gpr_asprintf(&msg, "unparseable host:port: '%s'", name); error = GRPC_ERROR_CREATE(msg); gpr_free(msg); return error; } - if (port == NULL) { + if (*port == NULL) { + // TODO(murgatroid99): add tests for this case if (default_port == NULL) { char *msg; gpr_asprintf(&msg, "no port in name '%s'", name); diff --git a/src/core/lib/iomgr/socket_utils_windows.c b/src/core/lib/iomgr/socket_utils_windows.c index 628ad4a45b..5bbe8fa34c 100644 --- a/src/core/lib/iomgr/socket_utils_windows.c +++ b/src/core/lib/iomgr/socket_utils_windows.c @@ -41,8 +41,12 @@ #include <grpc/support/log.h> const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) { +#ifdef GPR_WIN_INET_NTOP + return inet_ntop(af, src, dst, size); +#else /* Windows InetNtopA wants a mutable ip pointer */ return InetNtopA(af, (void *)src, dst, size); +#endif /* GPR_WIN_INET_NTOP */ } #endif /* GRPC_WINDOWS_SOCKETUTILS */ diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c index 5225a5402b..3de0795187 100644 --- a/src/core/lib/iomgr/tcp_client_uv.c +++ b/src/core/lib/iomgr/tcp_client_uv.c @@ -46,6 +46,8 @@ #include "src/core/lib/iomgr/tcp_uv.h" #include "src/core/lib/iomgr/timer.h" +extern int grpc_tcp_trace; + typedef struct grpc_uv_tcp_connect { uv_connect_t connect_req; grpc_timer alarm; @@ -70,6 +72,12 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { int done; grpc_uv_tcp_connect *connect = acp; + if (grpc_tcp_trace) { + const char *str = grpc_error_string(error); + gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", + connect->addr_name, str); + grpc_error_free_string(str); + } if (error == GRPC_ERROR_NONE) { /* error == NONE implies that the timer ran out, and wasn't cancelled. 
If it was cancelled, then the handler that cancelled it also should close @@ -145,6 +153,12 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, connect->resource_quota = resource_quota; uv_tcp_init(uv_default_loop(), connect->tcp_handle); connect->connect_req.data = connect; + + if (grpc_tcp_trace) { + gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting", + connect->addr_name); + } + // TODO(murgatroid99): figure out what the return value here means uv_tcp_connect(&connect->connect_req, connect->tcp_handle, (const struct sockaddr *)resolved_addr->addr, diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index 8a5617e7c1..6d638bcbaa 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -180,25 +180,25 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, GPR_ASSERT(now.clock_type == g_clock_type); timer->closure = closure; timer->deadline = deadline; - timer->triggered = 0; if (!g_initialized) { - timer->triggered = 1; + timer->pending = false; grpc_closure_sched( exec_ctx, timer->closure, GRPC_ERROR_CREATE("Attempt to create timer before initialization")); return; } + gpr_mu_lock(&shard->mu); + timer->pending = true; if (gpr_time_cmp(deadline, now) <= 0) { - timer->triggered = 1; + timer->pending = false; grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE); + gpr_mu_unlock(&shard->mu); + /* early out */ return; } - /* TODO(ctiller): check deadline expired */ - - gpr_mu_lock(&shard->mu); grpc_time_averaged_stats_add_sample(&shard->stats, ts_to_dbl(gpr_time_sub(deadline, now))); if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) { @@ -243,9 +243,9 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]; gpr_mu_lock(&shard->mu); - if (!timer->triggered) { + if (timer->pending) { grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); - timer->triggered = 1; + timer->pending = false; if (timer->heap_index == INVALID_HEAP_INDEX) { list_remove(timer); } else { @@ -296,7 +296,7 @@ static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) { } timer = grpc_timer_heap_top(&shard->heap); if (gpr_time_cmp(timer->deadline, now) > 0) return NULL; - timer->triggered = 1; + timer->pending = false; grpc_timer_heap_pop(&shard->heap); return timer; } diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h index 9d901c7e68..1608dce9fb 100644 --- a/src/core/lib/iomgr/timer_generic.h +++ b/src/core/lib/iomgr/timer_generic.h @@ -40,7 +40,7 @@ struct grpc_timer { gpr_timespec deadline; uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */ - int triggered; + bool pending; struct grpc_timer *next; struct grpc_timer *prev; grpc_closure *closure; diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c index fa2cdee964..f28a14405d 100644 --- a/src/core/lib/iomgr/timer_uv.c +++ b/src/core/lib/iomgr/timer_uv.c @@ -53,8 +53,8 @@ static void stop_uv_timer(uv_timer_t *handle) { void run_expired_timer(uv_timer_t *handle) { grpc_timer *timer = (grpc_timer *)handle->data; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GPR_ASSERT(!timer->triggered); - timer->triggered = 1; + GPR_ASSERT(timer->pending); + timer->pending = 0; grpc_closure_sched(&exec_ctx, timer->closure, GRPC_ERROR_NONE); stop_uv_timer(handle); grpc_exec_ctx_finish(&exec_ctx); @@ -67,11 +67,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, uv_timer_t *uv_timer; 
timer->closure = closure; if (gpr_time_cmp(deadline, now) <= 0) { - timer->triggered = 1; + timer->pending = 0; grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE); return; } - timer->triggered = 0; + timer->pending = 1; timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now)); uv_timer = gpr_malloc(sizeof(uv_timer_t)); uv_timer_init(uv_default_loop(), uv_timer); @@ -81,8 +81,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, } void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { - if (!timer->triggered) { - timer->triggered = 1; + if (timer->pending) { + timer->pending = 0; grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); stop_uv_timer((uv_timer_t *)timer->uv_timer); } diff --git a/src/core/lib/iomgr/timer_uv.h b/src/core/lib/iomgr/timer_uv.h index 13cf8bd4fa..9870cd4a5c 100644 --- a/src/core/lib/iomgr/timer_uv.h +++ b/src/core/lib/iomgr/timer_uv.h @@ -41,7 +41,7 @@ struct grpc_timer { /* This is actually a uv_timer_t*, but we want to keep platform-specific types out of headers */ void *uv_timer; - int triggered; + int pending; }; #endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */ diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c index b9bbe1b304..a23082a866 100644 --- a/src/core/lib/security/transport/client_auth_filter.c +++ b/src/core/lib/security/transport/client_auth_filter.c @@ -302,7 +302,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx, /* Constructor for call_data */ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_call_element_args *args) { + const grpc_call_element_args *args) { call_data *calld = elem->call_data; memset(calld, 0, sizeof(*calld)); return GRPC_ERROR_NONE; diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c index 36e81d6501..14619d97ca 100644 --- a/src/core/lib/security/transport/server_auth_filter.c +++ b/src/core/lib/security/transport/server_auth_filter.c @@ -197,7 +197,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx, /* Constructor for call_data */ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_call_element_args *args) { + const grpc_call_element_args *args) { /* grab pointers to our data from the call element */ call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index 48a1e586e1..3c563bcc6f 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -101,6 +101,17 @@ typedef struct { grpc_error *error; } received_status; +static gpr_atm pack_received_status(received_status r) { + return r.is_set ? (1 | (gpr_atm)r.error) : 0; +} + +static received_status unpack_received_status(gpr_atm atm) { + return (atm & 1) == 0 + ? (received_status){.is_set = false, .error = GRPC_ERROR_NONE} + : (received_status){.is_set = true, + .error = (grpc_error *)(atm & ~(gpr_atm)1)}; +} + #define MAX_ERRORS_PER_BATCH 3 typedef struct batch_control { @@ -142,8 +153,6 @@ struct grpc_call { bool destroy_called; /** flag indicating that cancellation is inherited */ bool cancellation_is_inherited; - /** bitmask of live batches */ - uint8_t used_batches; /** which ops are in-flight */ bool sent_initial_metadata; bool sending_message; @@ -165,8 +174,8 @@ struct grpc_call { Element 0 is initial metadata, element 1 is trailing metadata. 
*/ grpc_metadata_array *buffered_metadata[2]; - /* Received call statuses from various sources */ - received_status status[STATUS_SOURCE_COUNT]; + /* Packed received call statuses from various sources */ + gpr_atm status[STATUS_SOURCE_COUNT]; /* Call data useful for reporting. Only valid after the call has * completed */ @@ -446,7 +455,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time); for (i = 0; i < STATUS_SOURCE_COUNT; i++) { - GRPC_ERROR_UNREF(c->status[i].error); + GRPC_ERROR_UNREF( + unpack_received_status(gpr_atm_no_barrier_load(&c->status[i])).error); } grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c); @@ -614,13 +624,12 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, */ static bool get_final_status_from( - grpc_call *call, status_source from_source, bool allow_ok_status, + grpc_call *call, grpc_error *error, bool allow_ok_status, void (*set_value)(grpc_status_code code, void *user_data), void *set_value_user_data, grpc_slice *details) { grpc_status_code code; const char *msg = NULL; - grpc_error_get_status(call->status[from_source].error, call->send_deadline, - &code, &msg, NULL); + grpc_error_get_status(error, call->send_deadline, &code, &msg, NULL); if (code == GRPC_STATUS_OK && !allow_ok_status) { return false; } @@ -638,12 +647,15 @@ static void get_final_status(grpc_call *call, void *user_data), void *set_value_user_data, grpc_slice *details) { int i; + received_status status[STATUS_SOURCE_COUNT]; + for (i = 0; i < STATUS_SOURCE_COUNT; i++) { + status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i])); + } if (grpc_call_error_trace) { gpr_log(GPR_DEBUG, "get_final_status %s", call->is_client ? "CLI" : "SVR"); for (i = 0; i < STATUS_SOURCE_COUNT; i++) { - if (call->status[i].is_set) { - gpr_log(GPR_DEBUG, " %d: %s", i, - grpc_error_string(call->status[i].error)); + if (status[i].is_set) { + gpr_log(GPR_DEBUG, " %d: %s", i, grpc_error_string(status[i].error)); } } } @@ -653,9 +665,9 @@ static void get_final_status(grpc_call *call, /* search for the best status we can present: ideally the error we use has a clearly defined grpc-status, and we'll prefer that.
*/ for (i = 0; i < STATUS_SOURCE_COUNT; i++) { - if (call->status[i].is_set && - grpc_error_has_clear_grpc_status(call->status[i].error)) { - if (get_final_status_from(call, (status_source)i, allow_ok_status != 0, + if (status[i].is_set && + grpc_error_has_clear_grpc_status(status[i].error)) { + if (get_final_status_from(call, status[i].error, allow_ok_status != 0, set_value, set_value_user_data, details)) { return; } @@ -663,8 +675,8 @@ static void get_final_status(grpc_call *call, } /* If no clearly defined status exists, search for 'anything' */ for (i = 0; i < STATUS_SOURCE_COUNT; i++) { - if (call->status[i].is_set) { - if (get_final_status_from(call, (status_source)i, allow_ok_status != 0, + if (status[i].is_set) { + if (get_final_status_from(call, status[i].error, allow_ok_status != 0, set_value, set_value_user_data, details)) { return; } @@ -681,12 +693,13 @@ static void get_final_status(grpc_call *call, static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call, status_source source, grpc_error *error) { - if (call->status[source].is_set) { + if (!gpr_atm_rel_cas(&call->status[source], + pack_received_status((received_status){ + .is_set = false, .error = GRPC_ERROR_NONE}), + pack_received_status((received_status){ + .is_set = true, .error = error}))) { GRPC_ERROR_UNREF(error); - return; } - call->status[source].is_set = true; - call->status[source].error = error; } /******************************************************************************* @@ -997,25 +1010,48 @@ static bool are_initial_metadata_flags_valid(uint32_t flags, bool is_client) { return !(flags & invalid_positions); } -static batch_control *allocate_batch_control(grpc_call *call) { - size_t i; - for (i = 0; i < MAX_CONCURRENT_BATCHES; i++) { - if ((call->used_batches & (1 << i)) == 0) { - call->used_batches = (uint8_t)(call->used_batches | (uint8_t)(1 << i)); - return &call->active_batches[i]; - } +static int batch_slot_for_op(grpc_op_type type) { + switch (type) { + case GRPC_OP_SEND_INITIAL_METADATA: + return 0; + case GRPC_OP_SEND_MESSAGE: + return 1; + case GRPC_OP_SEND_CLOSE_FROM_CLIENT: + case GRPC_OP_SEND_STATUS_FROM_SERVER: + return 2; + case GRPC_OP_RECV_INITIAL_METADATA: + return 3; + case GRPC_OP_RECV_MESSAGE: + return 4; + case GRPC_OP_RECV_CLOSE_ON_SERVER: + case GRPC_OP_RECV_STATUS_ON_CLIENT: + return 5; + } + GPR_UNREACHABLE_CODE(return 123456789); +} + +static batch_control *allocate_batch_control(grpc_call *call, + const grpc_op *ops, + size_t num_ops) { + int slot = batch_slot_for_op(ops[0].op); + for (size_t i = 1; i < num_ops; i++) { + int op_slot = batch_slot_for_op(ops[i].op); + slot = GPR_MIN(slot, op_slot); + } + batch_control *bctl = &call->active_batches[slot]; + if (bctl->call != NULL) { + return NULL; } - return NULL; + memset(bctl, 0, sizeof(*bctl)); + bctl->call = call; + return bctl; } static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data, grpc_cq_completion *storage) { batch_control *bctl = user_data; grpc_call *call = bctl->call; - gpr_mu_lock(&call->mu); - call->used_batches = (uint8_t)( - call->used_batches & ~(uint8_t)(1 << (bctl - call->active_batches))); - gpr_mu_unlock(&call->mu); + bctl->call = NULL; GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion"); } @@ -1098,12 +1134,8 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, if (bctl->is_notify_tag_closure) { /* unrefs bctl->error */ + bctl->call = NULL; grpc_closure_run(exec_ctx, bctl->notify_tag, error); - gpr_mu_lock(&call->mu); - bctl->call->used_batches = - 
(uint8_t)(bctl->call->used_batches & - ~(uint8_t)(1 << (bctl - bctl->call->active_batches))); - gpr_mu_unlock(&call->mu); GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion"); } else { /* unrefs bctl->error */ @@ -1315,6 +1347,11 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, finish_batch_step(exec_ctx, bctl); } +static void free_no_op_completion(grpc_exec_ctx *exec_ctx, void *p, + grpc_cq_completion *completion) { + gpr_free(completion); +} + static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, const grpc_op *ops, size_t nops, void *notify_tag, @@ -1329,32 +1366,34 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, grpc_metadata compression_md; GPR_TIMER_BEGIN("grpc_call_start_batch", 0); - GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag); - /* TODO(ctiller): this feels like it could be made lock-free */ - gpr_mu_lock(&call->mu); - bctl = allocate_batch_control(call); - memset(bctl, 0, sizeof(*bctl)); - bctl->call = call; - bctl->notify_tag = notify_tag; - bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0); - - grpc_transport_stream_op *stream_op = &bctl->op; - memset(stream_op, 0, sizeof(*stream_op)); - stream_op->covered_by_poller = true; - if (nops == 0) { - GRPC_CALL_INTERNAL_REF(call, "completion"); if (!is_notify_tag_closure) { grpc_cq_begin_op(call->cq, notify_tag); + grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE, + free_no_op_completion, NULL, + gpr_malloc(sizeof(grpc_cq_completion))); + } else { + grpc_closure_sched(exec_ctx, notify_tag, GRPC_ERROR_NONE); } - gpr_mu_unlock(&call->mu); - post_batch_completion(exec_ctx, bctl); error = GRPC_CALL_OK; goto done; } + /* TODO(ctiller): this feels like it could be made lock-free */ + bctl = allocate_batch_control(call, ops, nops); + if (bctl == NULL) { + return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS; + } + bctl->notify_tag = notify_tag; + bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0); + + gpr_mu_lock(&call->mu); + grpc_transport_stream_op *stream_op = &bctl->op; + memset(stream_op, 0, sizeof(*stream_op)); + stream_op->covered_by_poller = true; + /* rewrite batch ops into a transport op */ for (i = 0; i < nops; i++) { op = &ops[i]; diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c index 48de0e1d5b..49bc4c114b 100644 --- a/src/core/lib/surface/lame_client.c +++ b/src/core/lib/surface/lame_client.c @@ -122,7 +122,7 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx, static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_call_element_args *args) { + const grpc_call_element_args *args) { call_data *calld = elem->call_data; gpr_atm_no_barrier_store(&calld->filled_metadata, 0); return GRPC_ERROR_NONE; diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c index 6ab1c0d94d..7210c69fb0 100644 --- a/src/core/lib/surface/server.c +++ b/src/core/lib/surface/server.c @@ -879,7 +879,7 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd, static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_call_element_args *args) { + const grpc_call_element_args *args) { call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; memset(calld, 0, sizeof(call_data)); @@ -1198,7 +1198,9 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, crm->server_registered_method = rm; crm->flags = rm->flags; crm->has_host = has_host; - crm->host = 
host; + if (has_host) { + crm->host = host; + } crm->method = method; } GPR_ASSERT(slots <= UINT32_MAX);
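For reference, the lockless read/write-closure protocol that ev_epoll_linux.c adopts above (CLOSURE_NOT_READY, CLOSURE_READY, a parked closure pointer, or a shutdown bit, all in one word) can be exercised standalone. A minimal single-threaded sketch, assuming C11 atomics in place of gpr_atm and a plain function-pointer struct in place of grpc_closure; as the diff notes, the bit-packing relies on word-aligned allocations keeping the low pointer bit zero:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CLOSURE_NOT_READY ((uintptr_t)0)
#define CLOSURE_READY ((uintptr_t)2)
#define SHUTDOWN_BIT ((uintptr_t)1)

typedef struct closure {
  void (*cb)(void *arg, int ok);
  void *arg;
} closure; /* aligned to at least a word, so its low bit is always zero */

static _Atomic uintptr_t g_state = CLOSURE_NOT_READY;

static void notify_on(closure *c) {
  for (;;) {
    uintptr_t cur = CLOSURE_NOT_READY;
    /* Fast path: park the closure (CLOSURE_NOT_READY -> <closure ptr>) */
    if (atomic_compare_exchange_strong_explicit(&g_state, &cur, (uintptr_t)c,
                                                memory_order_acq_rel,
                                                memory_order_acquire)) {
      return;
    }
    if (cur == CLOSURE_READY) {
      /* Event already happened: consume it and run the closure now */
      if (atomic_compare_exchange_strong_explicit(
              &g_state, &cur, CLOSURE_NOT_READY, memory_order_relaxed,
              memory_order_relaxed)) {
        c->cb(c->arg, 1);
        return;
      }
    } else if (cur & SHUTDOWN_BIT) {
      c->cb(c->arg, 0); /* fd was shut down: fail the closure */
      return;
    }
    /* else: raced with a state change; retry (a second parked closure
       would be a caller bug, as in the real code) */
  }
}

static void set_ready(void) {
  uintptr_t cur = CLOSURE_NOT_READY;
  /* Fast path: flag the event (CLOSURE_NOT_READY -> CLOSURE_READY) */
  if (atomic_compare_exchange_strong_explicit(&g_state, &cur, CLOSURE_READY,
                                              memory_order_acq_rel,
                                              memory_order_acquire)) {
    return;
  }
  if (cur != CLOSURE_READY && !(cur & SHUTDOWN_BIT)) {
    /* A closure was parked: un-park it and run it */
    if (atomic_compare_exchange_strong_explicit(
            &g_state, &cur, CLOSURE_NOT_READY, memory_order_relaxed,
            memory_order_relaxed)) {
      ((closure *)cur)->cb(((closure *)cur)->arg, 1);
    }
  }
}

static void on_event(void *arg, int ok) {
  printf("%s (ok=%d)\n", (char *)arg, ok);
}

int main(void) {
  closure c = {on_event, "read closure ran"};
  notify_on(&c); /* parks the closure */
  set_ready();   /* I/O event arrives: runs the parked closure */
  set_ready();   /* no waiter: state just becomes CLOSURE_READY */
  notify_on(&c); /* runs immediately because the event already happened */
  return 0;
}

The memory orders mirror the diff's reasoning: the release cas that publishes a closure or an event pairs with the acquire load on the other side, while transitions back to CLOSURE_NOT_READY need no barrier because nothing is scheduled on that edge.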
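Similarly, the pack_received_status/unpack_received_status trick in call.c (is_set folded into the low bit of the error pointer, so a whole status fits in one gpr_atm and can be installed with a single release cas) can be shown with stand-in types. A small sketch; fake_error is a hypothetical placeholder for grpc_error, and uintptr_t plays the role of gpr_atm:

#include <stdint.h>
#include <stdio.h>

typedef struct { int code; } fake_error;

typedef struct {
  _Bool is_set;
  fake_error *error;
} received_status;

static uintptr_t pack_received_status(received_status r) {
  /* The low bit doubles as 'is_set'; valid because allocations are
     word-aligned, so a real pointer always has a zero low bit. */
  return r.is_set ? ((uintptr_t)r.error | 1u) : 0;
}

static received_status unpack_received_status(uintptr_t atm) {
  received_status r;
  r.is_set = (atm & 1) != 0;
  r.error = (fake_error *)(atm & ~(uintptr_t)1);
  return r;
}

int main(void) {
  static fake_error cancelled = {1};
  received_status in = {1, &cancelled};
  received_status out = unpack_received_status(pack_received_status(in));
  printf("is_set=%d code=%d\n", (int)out.is_set, out.error->code);
  /* An unset status packs to 0, which unpacks to {false, NULL} */
  out = unpack_received_status(0);
  printf("is_set=%d error=%p\n", (int)out.is_set, (void *)out.error);
  return 0;
}

This is why set_status_from_error above can use a single gpr_atm_rel_cas from the packed "unset" value: whichever source wins the cas owns the slot, and every loser simply unrefs its error.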