Diffstat (limited to 'src/core/lib/iomgr/exec_ctx.h')
-rw-r--r-- | src/core/lib/iomgr/exec_ctx.h | 31
1 file changed, 3 insertions(+), 28 deletions(-)
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 7e50cb9825..e566f1b3e8 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -66,17 +66,6 @@ typedef struct grpc_combiner grpc_combiner;
 #ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
 struct grpc_exec_ctx {
   grpc_closure_list closure_list;
-  /** The workqueue we're stealing work from.
-      As items are queued to the execution context, we try to steal one
-      workqueue item and execute it inline (assuming the exec_ctx is not
-      finished) - doing so does not invalidate the workqueue's contract, and
-      provides a small latency win in cases where we get a hit */
-  grpc_workqueue *stealing_from_workqueue;
-  /** The workqueue item that was stolen from the workqueue above. When new
-      items are scheduled to be offloaded to that workqueue, we need to update
-      this like a 1-deep fifo to maintain the invariant that workqueue items
-      queued by one thread are started in order */
-  grpc_closure *stolen_closure;
   /** currently active combiner: updated only via combiner.c */
   grpc_combiner *active_combiner;
   /** last active combiner in the active combiner list */
@@ -89,10 +78,7 @@ struct grpc_exec_ctx {
 /* initializer for grpc_exec_ctx:
    prefer to use GRPC_EXEC_CTX_INIT whenever possible */
 #define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
-  {                                                                          \
-    GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
-    finish_check                                                             \
-  }
+  { GRPC_CLOSURE_LIST_INIT, NULL, NULL, false, finish_check_arg, finish_check }
 #else
 struct grpc_exec_ctx {
   bool cached_ready_to_finish;
@@ -108,6 +94,8 @@ struct grpc_exec_ctx {
 #define GRPC_EXEC_CTX_INIT \
   GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
 
+extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx;
+
 /** Flush any work that has been enqueued onto this grpc_exec_ctx.
  *  Caller must guarantee that no interfering locks are held.
  *  Returns true if work was performed, false otherwise. */
@@ -115,14 +103,6 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
 /** Finish any pending work for a grpc_exec_ctx. Must be called before
  *  the instance is destroyed, or work may be lost. */
 void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
-/** Add a closure to be executed in the future.
-    If \a offload_target_or_null is NULL, the closure will be executed at the
-    next exec_ctx.{finish,flush} point.
-    If \a offload_target_or_null is non-NULL, the closure will be scheduled
-    against the workqueue, and a reference to the workqueue will be consumed. */
-void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
-                         grpc_error *error,
-                         grpc_workqueue *offload_target_or_null);
 /** Returns true if we'd like to leave this execution context as soon as
     possible: useful for deciding whether to do something more or not
     depending on outside context */
@@ -131,11 +111,6 @@ bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx);
 bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
 /** A finish check that is always ready to finish */
 bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
-/** Add a list of closures to be executed at the next flush/finish point.
- *  Leaves \a list empty. */
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
-                                grpc_closure_list *list,
-                                grpc_workqueue *offload_target_or_null);
 void grpc_exec_ctx_global_init(void);
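For callers migrating off the removed entry points: with grpc_exec_ctx_sched and grpc_exec_ctx_enqueue_list gone, a closure's scheduler is chosen once at initialization time rather than at each scheduling call, with grpc_schedule_on_exec_ctx (added above) reproducing the old "run at the next flush/finish point" behavior. A minimal sketch of the new pattern, assuming the companion grpc_closure_init/grpc_closure_sched API from the same closure-scheduler refactor (not shown in this diff); the names on_done and schedule_example are illustrative only:

    #include "src/core/lib/iomgr/closure.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    /* Runs when the exec_ctx drains its closure list (flush/finish). */
    static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}

    static void schedule_example(void) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_closure done;
      /* Previously: grpc_exec_ctx_sched(&exec_ctx, &done, GRPC_ERROR_NONE, NULL);
         now the scheduler is bound to the closure at init time instead. */
      grpc_closure_init(&done, on_done, NULL, grpc_schedule_on_exec_ctx);
      grpc_closure_sched(&exec_ctx, &done, GRPC_ERROR_NONE);
      /* Must run before exec_ctx is destroyed; executes on_done here. */
      grpc_exec_ctx_finish(&exec_ctx);
    }

The workqueue offload previously requested via a non-NULL offload_target_or_null presumably follows the same pattern: initialize the closure with the target workqueue's scheduler instead of grpc_schedule_on_exec_ctx.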