Diffstat (limited to 'src/core/lib/iomgr/combiner.c')
-rw-r--r-- | src/core/lib/iomgr/combiner.c | 245
1 file changed, 81 insertions, 164 deletions
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 4b8102ea54..f6976c5650 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -24,7 +24,7 @@
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 
-#include "src/core/lib/iomgr/workqueue.h"
+#include "src/core/lib/iomgr/executor.h"
 #include "src/core/lib/profiling/timers.h"
 
 grpc_tracer_flag grpc_combiner_trace = GRPC_TRACER_INITIALIZER(false);
@@ -41,93 +41,47 @@ grpc_tracer_flag grpc_combiner_trace = GRPC_TRACER_INITIALIZER(false);
 
 struct grpc_combiner {
   grpc_combiner *next_combiner_on_this_exec_ctx;
-  grpc_workqueue *optional_workqueue;
-  grpc_closure_scheduler uncovered_scheduler;
-  grpc_closure_scheduler covered_scheduler;
-  grpc_closure_scheduler uncovered_finally_scheduler;
-  grpc_closure_scheduler covered_finally_scheduler;
+  grpc_closure_scheduler scheduler;
+  grpc_closure_scheduler finally_scheduler;
   gpr_mpscq queue;
+  // either:
+  // a pointer to the initiating exec ctx if that is the only exec_ctx that has
+  // ever queued to this combiner, or NULL. If this is non-null, it's not
+  // dereferencable (since the initiating exec_ctx may have gone out of scope)
+  gpr_atm initiating_exec_ctx_or_null;
   // state is:
   // lower bit - zero if orphaned (STATE_UNORPHANED)
   // other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
   gpr_atm state;
-  // number of elements in the list that are covered by a poller: if >0, we can
-  // offload safely
-  gpr_atm elements_covered_by_poller;
   bool time_to_execute_final_list;
-  bool final_list_covered_by_poller;
   grpc_closure_list final_list;
   grpc_closure offload;
   gpr_refcount refs;
 };
 
-static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
-                                    grpc_closure *closure, grpc_error *error);
-static void combiner_exec_covered(grpc_exec_ctx *exec_ctx,
+static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                          grpc_error *error);
+static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
                                   grpc_closure *closure, grpc_error *error);
-static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
-                                            grpc_closure *closure,
-                                            grpc_error *error);
-static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
-                                          grpc_closure *closure,
-                                          grpc_error *error);
-
-static const grpc_closure_scheduler_vtable scheduler_uncovered = {
-    combiner_exec_uncovered, combiner_exec_uncovered,
-    "combiner:immediately:uncovered"};
-static const grpc_closure_scheduler_vtable scheduler_covered = {
-    combiner_exec_covered, combiner_exec_covered,
-    "combiner:immediately:covered"};
-static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
-    combiner_finally_exec_uncovered, combiner_finally_exec_uncovered,
-    "combiner:finally:uncovered"};
-static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
-    combiner_finally_exec_covered, combiner_finally_exec_covered,
-    "combiner:finally:covered"};
-static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-
-typedef struct {
-  grpc_error *error;
-  bool covered_by_poller;
-} error_data;
-
-static uintptr_t pack_error_data(error_data d) {
-  return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
-}
-
-static error_data unpack_error_data(uintptr_t p) {
-  return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
-}
+static const grpc_closure_scheduler_vtable scheduler = {
+    combiner_exec, combiner_exec, "combiner:immediately"};
+static const grpc_closure_scheduler_vtable finally_scheduler = {
+    combiner_finally_exec, combiner_finally_exec, "combiner:finally"};
 
-static bool is_covered_by_poller(grpc_combiner *lock) {
-  return lock->final_list_covered_by_poller ||
-         gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
-}
-
-#define IS_COVERED_BY_POLLER_FMT "(final=%d elems=%" PRIdPTR ")->%d"
-#define IS_COVERED_BY_POLLER_ARGS(lock)                      \
-  (lock)->final_list_covered_by_poller,                      \
-      gpr_atm_acq_load(&(lock)->elements_covered_by_poller), \
-      is_covered_by_poller((lock))
+static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
 
-grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
+grpc_combiner *grpc_combiner_create(void) {
   grpc_combiner *lock = gpr_malloc(sizeof(*lock));
   gpr_ref_init(&lock->refs, 1);
   lock->next_combiner_on_this_exec_ctx = NULL;
   lock->time_to_execute_final_list = false;
-  lock->optional_workqueue = optional_workqueue;
-  lock->final_list_covered_by_poller = false;
-  lock->uncovered_scheduler.vtable = &scheduler_uncovered;
-  lock->covered_scheduler.vtable = &scheduler_covered;
-  lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered;
-  lock->covered_finally_scheduler.vtable = &finally_scheduler_covered;
+  lock->scheduler.vtable = &scheduler;
+  lock->finally_scheduler.vtable = &finally_scheduler;
   gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
-  gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
   gpr_mpscq_init(&lock->queue);
   grpc_closure_list_init(&lock->final_list);
-  grpc_closure_init(&lock->offload, offload, lock,
-                    grpc_workqueue_scheduler(lock->optional_workqueue));
+  grpc_closure_init(&lock->offload, offload, lock, grpc_executor_scheduler);
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
   return lock;
 }
 
@@ -136,7 +90,6 @@ static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
   GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
   gpr_mpscq_destroy(&lock->queue);
-  GRPC_WORKQUEUE_UNREF(exec_ctx, lock->optional_workqueue, "combiner");
   gpr_free(lock);
 }
 
@@ -193,48 +146,40 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
   }
 }
 
-static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
-                          grpc_closure *cl, grpc_error *error,
-                          bool covered_by_poller) {
+#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
+  ((grpc_combiner *)(((char *)((closure)->scheduler)) -          \
+                     offsetof(grpc_combiner, scheduler_name)))
+
+static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
+                          grpc_error *error) {
   GPR_TIMER_BEGIN("combiner.execute", 0);
+  grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
   gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
-  GRPC_COMBINER_TRACE(gpr_log(
-      GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
-      cl, covered_by_poller, last));
-  GPR_ASSERT(last & STATE_UNORPHANED);  // ensure lock has not been destroyed
-  assert(cl->cb);
-  cl->error_data.scratch =
-      pack_error_data((error_data){error, covered_by_poller});
-  if (covered_by_poller) {
-    gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1);
-  }
-  gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
+  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
                               "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
+                              lock, cl, last));
   if (last == 1) {
+    gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
+                             (gpr_atm)exec_ctx);
     // first element on this list: add it to the list of combiner locks
     // executing within this exec_ctx
     push_last_on_exec_ctx(exec_ctx, lock);
+  } else {
+    // there may be a race with setting here: if that happens, we may delay
+    // offload for one or two actions, and that's fine
+    gpr_atm initiator =
+        gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null);
+    if (initiator != 0 && initiator != (gpr_atm)exec_ctx) {
+      gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0);
+    }
   }
+  GPR_ASSERT(last & STATE_UNORPHANED);  // ensure lock has not been destroyed
+  assert(cl->cb);
+  cl->error_data.error = error;
+  gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
   GPR_TIMER_END("combiner.execute", 0);
 }
 
-#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
-  ((grpc_combiner *)(((char *)((closure)->scheduler)) -          \
-                     offsetof(grpc_combiner, scheduler_name)))
-
-static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
-                                    grpc_error *error) {
-  combiner_exec(exec_ctx,
-                COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl,
-                error, false);
-}
-
-static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
-                                  grpc_error *error) {
-  combiner_exec(exec_ctx,
-                COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl,
-                error, true);
-}
-
 static void move_next(grpc_exec_ctx *exec_ctx) {
   exec_ctx->active_combiner =
       exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
@@ -250,8 +195,7 @@ static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
 
 static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
   move_next(exec_ctx);
-  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
-                              lock->optional_workqueue));
+  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
   grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
 }
 
@@ -263,22 +207,23 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
     return false;
   }
 
-  GRPC_COMBINER_TRACE(
-      gpr_log(GPR_DEBUG,
-              "C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
-              "is_covered_by_poller=" IS_COVERED_BY_POLLER_FMT
-              " exec_ctx_ready_to_finish=%d "
-              "time_to_execute_final_list=%d",
-              lock, lock->optional_workqueue, IS_COVERED_BY_POLLER_ARGS(lock),
-              grpc_exec_ctx_ready_to_finish(exec_ctx),
-              lock->time_to_execute_final_list));
-
-  if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
-      grpc_exec_ctx_ready_to_finish(exec_ctx)) {
+  bool contended =
+      gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0;
+
+  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+                              "C:%p grpc_combiner_continue_exec_ctx "
+                              "contended=%d "
+                              "exec_ctx_ready_to_finish=%d "
+                              "time_to_execute_final_list=%d",
+                              lock, contended,
+                              grpc_exec_ctx_ready_to_finish(exec_ctx),
+                              lock->time_to_execute_final_list));
+
+  if (contended && grpc_exec_ctx_ready_to_finish(exec_ctx) &&
+      grpc_executor_is_threaded()) {
     GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
-    // this execution context wants to move on, and we have a workqueue (and
-    // so can help the execution context out): schedule remaining work to be
-    // picked up on the workqueue
+    // this execution context wants to move on: schedule remaining work to be
+    // picked up on the executor
     queue_offload(exec_ctx, lock);
     GPR_TIMER_END("combiner.continue_exec_ctx", 0);
     return true;
@@ -295,29 +240,23 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
       // queue is in an inconsistent state: use this as a cue that we should
       // go off and do something else for a while (and come back later)
       GPR_TIMER_MARK("delay_busy", 0);
-      if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
-        queue_offload(exec_ctx, lock);
-      }
+      queue_offload(exec_ctx, lock);
       GPR_TIMER_END("combiner.continue_exec_ctx", 0);
       return true;
     }
    GPR_TIMER_BEGIN("combiner.exec1", 0);
    grpc_closure *cl = (grpc_closure *)n;
-    error_data err = unpack_error_data(cl->error_data.scratch);
+    grpc_error *cl_err = cl->error_data.error;
 #ifndef NDEBUG
    cl->scheduled = false;
 #endif
-    cl->cb(exec_ctx, cl->cb_arg, err.error);
-    if (err.covered_by_poller) {
-      gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
-    }
-    GRPC_ERROR_UNREF(err.error);
+    cl->cb(exec_ctx, cl->cb_arg, cl_err);
+    GRPC_ERROR_UNREF(cl_err);
    GPR_TIMER_END("combiner.exec1", 0);
  } else {
    grpc_closure *c = lock->final_list.head;
    GPR_ASSERT(c != NULL);
    grpc_closure_list_init(&lock->final_list);
-    lock->final_list_covered_by_poller = false;
    int loops = 0;
    while (c != NULL) {
      GPR_TIMER_BEGIN("combiner.exec_1final", 0);
@@ -383,20 +322,20 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
 static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
                             grpc_error *error);
 
-static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
-                                     grpc_combiner *lock, grpc_closure *closure,
-                                     grpc_error *error,
-                                     bool covered_by_poller) {
-  GRPC_COMBINER_TRACE(gpr_log(
-      GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
-      closure, exec_ctx->active_combiner, covered_by_poller));
+static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
+                                  grpc_closure *closure, grpc_error *error) {
+  grpc_combiner *lock =
+      COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
+  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
                              "C:%p grpc_combiner_execute_finally c=%p; ac=%p",
+                              lock, closure, exec_ctx->active_combiner));
   GPR_TIMER_BEGIN("combiner.execute_finally", 0);
   if (exec_ctx->active_combiner != lock) {
     GPR_TIMER_MARK("slowpath", 0);
-    grpc_closure_sched(
-        exec_ctx, grpc_closure_create(enqueue_finally, closure,
-                                      grpc_combiner_scheduler(lock, false)),
-        error);
+    grpc_closure_sched(exec_ctx,
+                       grpc_closure_create(enqueue_finally, closure,
+                                           grpc_combiner_scheduler(lock)),
+                       error);
     GPR_TIMER_END("combiner.execute_finally", 0);
     return;
   }
@@ -404,42 +343,20 @@ static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
   if (grpc_closure_list_empty(lock->final_list)) {
     gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
   }
-  if (covered_by_poller) {
-    lock->final_list_covered_by_poller = true;
-  }
   grpc_closure_list_append(&lock->final_list, closure, error);
   GPR_TIMER_END("combiner.execute_finally", 0);
 }
 
 static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
                             grpc_error *error) {
-  combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
-                           GRPC_ERROR_REF(error), false);
-}
-
-static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
-                                            grpc_closure *cl,
-                                            grpc_error *error) {
-  combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(
-                                         cl, uncovered_finally_scheduler),
-                           cl, error, false);
-}
-
-static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
-                                          grpc_closure *cl, grpc_error *error) {
-  combiner_execute_finally(
-      exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler),
-      cl, error, true);
+  combiner_finally_exec(exec_ctx, closure, GRPC_ERROR_REF(error));
 }
 
-grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner,
-                                                bool covered_by_poller) {
-  return covered_by_poller ? &combiner->covered_scheduler
-                           : &combiner->uncovered_scheduler;
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {
+  return &combiner->scheduler;
 }
 
 grpc_closure_scheduler *grpc_combiner_finally_scheduler(
-    grpc_combiner *combiner, bool covered_by_poller) {
-  return covered_by_poller ? &combiner->covered_finally_scheduler
-                           : &combiner->uncovered_finally_scheduler;
+    grpc_combiner *combiner) {
+  return &combiner->finally_scheduler;
 }
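Note on the new COMBINER_FROM_CLOSURE_SCHEDULER macro: each grpc_combiner now embeds its grpc_closure_scheduler members directly, and the scheduler callbacks recover the owning combiner from a closure's scheduler pointer with offsetof arithmetic, which is why combiner_exec and combiner_finally_exec no longer need an explicit lock argument. Below is a minimal standalone sketch of that container-of idiom; the scheduler/my_combiner/combiner_run names are illustrative placeholders, not gRPC types.

/* Standalone illustration of the container-of pattern used by
 * COMBINER_FROM_CLOSURE_SCHEDULER: a scheduler struct embedded in a larger
 * struct lets the callback recover the owning struct via offsetof, instead of
 * threading an extra argument through. Names are illustrative, not gRPC. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct scheduler {
  void (*run)(struct scheduler *sched); /* stands in for the closure vtable */
} scheduler;

typedef struct my_combiner {
  int queued_items;
  scheduler sched; /* embedded scheduler, like grpc_combiner::scheduler */
} my_combiner;

/* Recover the my_combiner that embeds `s` at member `member`. */
#define COMBINER_FROM_SCHEDULER(s, member) \
  ((my_combiner *)(((char *)(s)) - offsetof(my_combiner, member)))

static void combiner_run(scheduler *s) {
  my_combiner *lock = COMBINER_FROM_SCHEDULER(s, sched);
  lock->queued_items++;
  printf("combiner %p now has %d queued item(s)\n", (void *)lock,
         lock->queued_items);
}

int main(void) {
  my_combiner lock = {0, {combiner_run}};
  /* A caller holding only the embedded scheduler can still reach the lock. */
  lock.sched.run(&lock.sched);
  assert(lock.queued_items == 1);
  return 0;
}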
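For callers, the visible API change is that grpc_combiner_create() no longer takes an optional workqueue and grpc_combiner_scheduler()/grpc_combiner_finally_scheduler() drop the covered_by_poller flag. A rough sketch of what a call site might look like after this change; the my_state/do_work_locked/start_work names and the include path are assumptions for illustration, while the gRPC calls used are the ones appearing in the diff above.

/* Hypothetical call site after this change. my_state, do_work_locked and
 * start_work are illustrative names; grpc_combiner_scheduler,
 * grpc_closure_init and grpc_closure_sched are used as in the diff above. */
#include "src/core/lib/iomgr/combiner.h"

typedef struct my_state {
  grpc_combiner *combiner;
  grpc_closure do_work;
} my_state;

/* Runs serialized under st->combiner. */
static void do_work_locked(grpc_exec_ctx *exec_ctx, void *arg,
                           grpc_error *error) {
  my_state *st = (my_state *)arg;
  (void)exec_ctx;
  (void)st;
  (void)error;
}

static void start_work(grpc_exec_ctx *exec_ctx, my_state *st) {
  /* Before this change the lookup took a bool, e.g.
   * grpc_combiner_scheduler(st->combiner, false); the flag is now gone. */
  grpc_closure_init(&st->do_work, do_work_locked, st,
                    grpc_combiner_scheduler(st->combiner));
  grpc_closure_sched(exec_ctx, &st->do_work, GRPC_ERROR_NONE);
}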