Diffstat (limited to 'src/core/lib/iomgr')
-rw-r--r--  src/core/lib/iomgr/block_annotate.h             |  19
-rw-r--r--  src/core/lib/iomgr/call_combiner.cc             |  31
-rw-r--r--  src/core/lib/iomgr/call_combiner.h              |  40
-rw-r--r--  src/core/lib/iomgr/closure.h                    |  58
-rw-r--r--  src/core/lib/iomgr/combiner.cc                  | 128
-rw-r--r--  src/core/lib/iomgr/combiner.h                   |  12
-rw-r--r--  src/core/lib/iomgr/endpoint.cc                  |  36
-rw-r--r--  src/core/lib/iomgr/endpoint.h                   |  41
-rw-r--r--  src/core/lib/iomgr/endpoint_pair_posix.cc       |   7
-rw-r--r--  src/core/lib/iomgr/endpoint_pair_windows.cc     |  10
-rw-r--r--  src/core/lib/iomgr/error.cc                     |   6
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.cc           | 196
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.cc          | 238
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.cc         | 180
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.cc             | 199
-rw-r--r--  src/core/lib/iomgr/ev_posix.cc                  |  80
-rw-r--r--  src/core/lib/iomgr/ev_posix.h                   |  72
-rw-r--r--  src/core/lib/iomgr/exec_ctx.cc                  | 124
-rw-r--r--  src/core/lib/iomgr/exec_ctx.h                   | 182
-rw-r--r--  src/core/lib/iomgr/executor.cc                  |  58
-rw-r--r--  src/core/lib/iomgr/executor.h                   |   6
-rw-r--r--  src/core/lib/iomgr/fork_posix.cc                |  17
-rw-r--r--  src/core/lib/iomgr/iocp_windows.cc              |  30
-rw-r--r--  src/core/lib/iomgr/iocp_windows.h               |   3
-rw-r--r--  src/core/lib/iomgr/iomgr.cc                     | 115
-rw-r--r--  src/core/lib/iomgr/iomgr.h                      |   6
-rw-r--r--  src/core/lib/iomgr/iomgr_uv.cc                  |   5
-rw-r--r--  src/core/lib/iomgr/lockfree_event.cc            |  15
-rw-r--r--  src/core/lib/iomgr/lockfree_event.h             |   6
-rw-r--r--  src/core/lib/iomgr/polling_entity.cc            |  16
-rw-r--r--  src/core/lib/iomgr/polling_entity.h             |   6
-rw-r--r--  src/core/lib/iomgr/pollset.h                    |   9
-rw-r--r--  src/core/lib/iomgr/pollset_set.h                |  15
-rw-r--r--  src/core/lib/iomgr/pollset_set_uv.cc            |  15
-rw-r--r--  src/core/lib/iomgr/pollset_set_windows.cc       |  15
-rw-r--r--  src/core/lib/iomgr/pollset_uv.cc                |  17
-rw-r--r--  src/core/lib/iomgr/pollset_windows.cc           |  29
-rw-r--r--  src/core/lib/iomgr/resolve_address.h            |   3
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.cc     |  20
-rw-r--r--  src/core/lib/iomgr/resolve_address_uv.cc        |  15
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.cc   |  15
-rw-r--r--  src/core/lib/iomgr/resource_quota.cc            | 182
-rw-r--r--  src/core/lib/iomgr/resource_quota.h             |  25
-rw-r--r--  src/core/lib/iomgr/socket_factory_posix.cc      |   2
-rw-r--r--  src/core/lib/iomgr/socket_mutator.cc            |   2
-rw-r--r--  src/core/lib/iomgr/socket_windows.cc            |  19
-rw-r--r--  src/core/lib/iomgr/socket_windows.h             |   9
-rw-r--r--  src/core/lib/iomgr/tcp_client.h                 |   3
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.cc          |  64
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.h           |   3
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.cc             |  37
-rw-r--r--  src/core/lib/iomgr/tcp_client_windows.cc        |  45
-rw-r--r--  src/core/lib/iomgr/tcp_posix.cc                 | 262
-rw-r--r--  src/core/lib/iomgr/tcp_posix.h                  |   7
-rw-r--r--  src/core/lib/iomgr/tcp_server.h                 |  15
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.cc          |  64
-rw-r--r--  src/core/lib/iomgr/tcp_server_uv.cc             |  57
-rw-r--r--  src/core/lib/iomgr/tcp_server_windows.cc        |  58
-rw-r--r--  src/core/lib/iomgr/tcp_uv.cc                    |  99
-rw-r--r--  src/core/lib/iomgr/tcp_windows.cc               |  93
-rw-r--r--  src/core/lib/iomgr/tcp_windows.h                |   2
-rw-r--r--  src/core/lib/iomgr/timer.h                      |  13
-rw-r--r--  src/core/lib/iomgr/timer_generic.cc             |  47
-rw-r--r--  src/core/lib/iomgr/timer_manager.cc             |  30
-rw-r--r--  src/core/lib/iomgr/timer_uv.cc                  |  26
-rw-r--r--  src/core/lib/iomgr/udp_server.cc                |  83
-rw-r--r--  src/core/lib/iomgr/udp_server.h                 |  17
67 files changed, 1851 insertions(+), 1508 deletions(-)
diff --git a/src/core/lib/iomgr/block_annotate.h b/src/core/lib/iomgr/block_annotate.h
index a57873aabb..340ebcb1af 100644
--- a/src/core/lib/iomgr/block_annotate.h
+++ b/src/core/lib/iomgr/block_annotate.h
@@ -31,27 +31,26 @@ void gpr_thd_end_blocking_region();
do { \
gpr_thd_start_blocking_region(); \
} while (0)
-#define GRPC_SCHEDULING_END_BLOCKING_REGION \
- do { \
- gpr_thd_end_blocking_region(); \
- grpc_core::ExecCtx::Get()->InvalidateNow(); \
- } while (0)
#define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \
do { \
gpr_thd_end_blocking_region(); \
} while (0)
-
+#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \
+ do { \
+ gpr_thd_end_blocking_region(); \
+ grpc_exec_ctx_invalidate_now((ec)); \
+ } while (0)
#else
#define GRPC_SCHEDULING_START_BLOCKING_REGION \
do { \
} while (0)
-#define GRPC_SCHEDULING_END_BLOCKING_REGION \
- do { \
- grpc_core::ExecCtx::Get()->InvalidateNow(); \
- } while (0)
#define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \
do { \
} while (0)
+#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \
+ do { \
+ grpc_exec_ctx_invalidate_now((ec)); \
+ } while (0)
#endif
#endif /* GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H */
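The net effect of this hunk: the old implicit-ExecCtx end-of-region macro is replaced by a _WITH_EXEC_CTX(ec) variant that invalidates the given context's cached clock after a potential sleep. A minimal sketch of how a poller-style caller might use the pair, assuming it already holds an exec_ctx (do_blocking_poll() is illustrative, not part of this commit):

    #include <poll.h>
    #include "src/core/lib/iomgr/block_annotate.h"

    /* Hypothetical wrapper: annotate the region where the thread may block,
       then refresh the exec_ctx's cached "now" since real time advanced. */
    static int do_blocking_poll(grpc_exec_ctx* exec_ctx, struct pollfd* pfds,
                                nfds_t nfds, int timeout_ms) {
      GRPC_SCHEDULING_START_BLOCKING_REGION;
      int r = poll(pfds, nfds, timeout_ms); /* may sleep up to timeout_ms */
      GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
      return r;
    }

This mirrors the do_epoll_wait() change further down in this diff.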
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
index a9f48fb3c2..b5910b42e4 100644
--- a/src/core/lib/iomgr/call_combiner.cc
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -56,7 +56,8 @@ void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
#define DEBUG_FMT_ARGS
#endif
-void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
grpc_closure* closure,
grpc_error* error DEBUG_ARGS,
const char* reason) {
@@ -74,16 +75,15 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1);
}
- GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS();
+ GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
if (prev_size == 0) {
- GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED();
-
+ GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(exec_ctx);
GPR_TIMER_MARK("call_combiner_initiate", 0);
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY");
}
// Queue was empty, so execute this closure immediately.
- GRPC_CLOSURE_SCHED(closure, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
} else {
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " QUEUING");
@@ -95,7 +95,8 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
GPR_TIMER_END("call_combiner_start", 0);
}
-void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner DEBUG_ARGS,
const char* reason) {
GPR_TIMER_BEGIN("call_combiner_stop", 0);
if (grpc_call_combiner_trace.enabled()) {
@@ -130,7 +131,7 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure, grpc_error_string(closure->error_data.error));
}
- GRPC_CLOSURE_SCHED(closure, closure->error_data.error);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error);
break;
}
} else if (grpc_call_combiner_trace.enabled()) {
@@ -139,9 +140,10 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
GPR_TIMER_END("call_combiner_stop", 0);
}
-void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
grpc_closure* closure) {
- GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL();
+ GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL(exec_ctx);
while (true) {
// Decode original state.
gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
@@ -155,7 +157,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
"for pre-existing cancellation",
call_combiner, closure);
}
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_REF(original_error));
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error));
break;
} else {
if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
@@ -174,7 +176,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
"call_combiner=%p: scheduling old cancel callback=%p",
call_combiner, closure);
}
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
}
break;
}
@@ -183,9 +185,10 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
}
}
-void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
grpc_error* error) {
- GRPC_STATS_INC_CALL_COMBINER_CANCELLED();
+ GRPC_STATS_INC_CALL_COMBINER_CANCELLED(exec_ctx);
while (true) {
gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
grpc_error* original_error = decode_cancel_state_error(original_state);
@@ -202,7 +205,7 @@ void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
"call_combiner=%p: scheduling notify_on_cancel callback=%p",
call_combiner, notify_on_cancel);
}
- GRPC_CLOSURE_SCHED(notify_on_cancel, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error));
}
break;
}
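As elsewhere in this commit, the cancellation path now threads the exec_ctx through to every scheduled closure. A sketch of the intended call pattern, assuming the caller already owns an exec_ctx (on_cancel(), example(), and the filter state are illustrative names, not part of the diff):

    /* Hypothetical filter callback invoked if the call is cancelled. */
    static void on_cancel(grpc_exec_ctx* exec_ctx, void* arg,
                          grpc_error* error) {
      if (error != GRPC_ERROR_NONE) {
        /* abandon any work queued for this call; arg is filter state */
      }
    }

    /* Register interest in cancellation, then later cancel the call. */
    static void example(grpc_exec_ctx* exec_ctx, grpc_call_combiner* cc,
                        grpc_closure* on_cancel_closure) {
      grpc_call_combiner_set_notify_on_cancel(exec_ctx, cc, on_cancel_closure);
      /* ... */
      grpc_call_combiner_cancel(
          exec_ctx, cc, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Cancelled"));
    }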
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index 9f7e6ce1c9..c07af51c91 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -53,29 +53,37 @@ void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner);
#ifndef NDEBUG
-#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \
- grpc_call_combiner_start((call_combiner), (closure), (error), __FILE__, \
- __LINE__, (reason))
-#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \
- grpc_call_combiner_stop((call_combiner), __FILE__, __LINE__, (reason))
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ __FILE__, __LINE__, (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \
+ (reason))
/// Starts processing \a closure on \a call_combiner.
-void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
grpc_closure* closure, grpc_error* error,
const char* file, int line, const char* reason);
/// Yields the call combiner to the next closure in the queue, if any.
-void grpc_call_combiner_stop(grpc_call_combiner* call_combiner,
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
const char* file, int line, const char* reason);
#else
-#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \
- grpc_call_combiner_start((call_combiner), (closure), (error), (reason))
-#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \
- grpc_call_combiner_stop((call_combiner), (reason))
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason))
/// Starts processing \a closure on \a call_combiner.
-void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
grpc_closure* closure, grpc_error* error,
const char* reason);
/// Yields the call combiner to the next closure in the queue, if any.
-void grpc_call_combiner_stop(grpc_call_combiner* call_combiner,
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
const char* reason);
#endif
@@ -101,11 +109,13 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner,
/// cancellation; this effectively unregisters the previously set closure.
/// However, most filters will not need to explicitly unregister their
/// callbacks, as this is done automatically when the call is destroyed.
-void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
grpc_closure* closure);
/// Indicates that the call has been cancelled.
-void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
grpc_error* error);
#endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */
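Correspondingly, every START/STOP call site gains a leading exec_ctx argument. A sketch of the updated usage inside a filter; call_data, on_complete, and the reason strings are illustrative:

    static void example_start(grpc_exec_ctx* exec_ctx, call_data* calld) {
      GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
                               &calld->on_complete, GRPC_ERROR_NONE,
                               "continue processing");
    }

    /* Inside the closure, once the serialized work is done, yield the
       combiner so the next queued closure can run: */
    static void on_complete_cb(grpc_exec_ctx* exec_ctx, void* arg,
                               grpc_error* error) {
      call_data* calld = (call_data*)arg;
      GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
                              "done processing");
    }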
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 88af76006a..46793dd2c5 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -47,15 +47,18 @@ typedef struct grpc_closure_list {
* describing what went wrong.
* Error contract: it is not the cb's job to unref this error;
* the closure scheduler will do that after the cb returns */
-typedef void (*grpc_iomgr_cb_func)(void* arg, grpc_error* error);
+typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
typedef struct grpc_closure_scheduler grpc_closure_scheduler;
typedef struct grpc_closure_scheduler_vtable {
/* NOTE: for all these functions, closure->scheduler == the scheduler that was
used to find this vtable */
- void (*run)(grpc_closure* closure, grpc_error* error);
- void (*sched)(grpc_closure* closure, grpc_error* error);
+ void (*run)(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error);
+ void (*sched)(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error);
const char* name;
} grpc_closure_scheduler_vtable;
@@ -143,12 +146,13 @@ typedef struct {
grpc_closure wrapper;
} wrapped_closure;
-inline void closure_wrapper(void* arg, grpc_error* error) {
+inline void closure_wrapper(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
wrapped_closure* wc = (wrapped_closure*)arg;
grpc_iomgr_cb_func cb = wc->cb;
void* cb_arg = wc->cb_arg;
gpr_free(wc);
- cb(cb_arg, error);
+ cb(exec_ctx, cb_arg, error);
}
} // namespace closure_impl
@@ -243,10 +247,12 @@ inline bool grpc_closure_list_empty(grpc_closure_list closure_list) {
}
#ifndef NDEBUG
-inline void grpc_closure_run(const char* file, int line, grpc_closure* c,
+inline void grpc_closure_run(const char* file, int line,
+ grpc_exec_ctx* exec_ctx, grpc_closure* c,
grpc_error* error) {
#else
-inline void grpc_closure_run(grpc_closure* c, grpc_error* error) {
+inline void grpc_closure_run(grpc_exec_ctx* exec_ctx, grpc_closure* c,
+ grpc_error* error) {
#endif
GPR_TIMER_BEGIN("grpc_closure_run", 0);
if (c != nullptr) {
@@ -256,7 +262,7 @@ inline void grpc_closure_run(grpc_closure* c, grpc_error* error) {
c->run = true;
#endif
assert(c->cb);
- c->scheduler->vtable->run(c, error);
+ c->scheduler->vtable->run(exec_ctx, c, error);
} else {
GRPC_ERROR_UNREF(error);
}
@@ -267,17 +273,20 @@ inline void grpc_closure_run(grpc_closure* c, grpc_error* error) {
* Note that calling this at the end of a closure callback function itself is
* by definition safe. */
#ifndef NDEBUG
-#define GRPC_CLOSURE_RUN(closure, error) \
- grpc_closure_run(__FILE__, __LINE__, closure, error)
+#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
+ grpc_closure_run(__FILE__, __LINE__, exec_ctx, closure, error)
#else
-#define GRPC_CLOSURE_RUN(closure, error) grpc_closure_run(closure, error)
+#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
+ grpc_closure_run(exec_ctx, closure, error)
#endif
#ifndef NDEBUG
-inline void grpc_closure_sched(const char* file, int line, grpc_closure* c,
+inline void grpc_closure_sched(const char* file, int line,
+ grpc_exec_ctx* exec_ctx, grpc_closure* c,
grpc_error* error) {
#else
-inline void grpc_closure_sched(grpc_closure* c, grpc_error* error) {
+inline void grpc_closure_sched(grpc_exec_ctx* exec_ctx, grpc_closure* c,
+ grpc_error* error) {
#endif
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != nullptr) {
@@ -296,7 +305,7 @@ inline void grpc_closure_sched(grpc_closure* c, grpc_error* error) {
c->run = false;
#endif
assert(c->cb);
- c->scheduler->vtable->sched(c, error);
+ c->scheduler->vtable->sched(exec_ctx, c, error);
} else {
GRPC_ERROR_UNREF(error);
}
@@ -305,17 +314,20 @@ inline void grpc_closure_sched(grpc_closure* c, grpc_error* error) {
/** Schedule a closure to be run. Does not need to be run from a safe point. */
#ifndef NDEBUG
-#define GRPC_CLOSURE_SCHED(closure, error) \
- grpc_closure_sched(__FILE__, __LINE__, closure, error)
+#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
+ grpc_closure_sched(__FILE__, __LINE__, exec_ctx, closure, error)
#else
-#define GRPC_CLOSURE_SCHED(closure, error) grpc_closure_sched(closure, error)
+#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
+ grpc_closure_sched(exec_ctx, closure, error)
#endif
#ifndef NDEBUG
inline void grpc_closure_list_sched(const char* file, int line,
+ grpc_exec_ctx* exec_ctx,
grpc_closure_list* list) {
#else
-inline void grpc_closure_list_sched(grpc_closure_list* list) {
+inline void grpc_closure_list_sched(grpc_exec_ctx* exec_ctx,
+ grpc_closure_list* list) {
#endif
grpc_closure* c = list->head;
while (c != nullptr) {
@@ -335,7 +347,7 @@ inline void grpc_closure_list_sched(grpc_closure_list* list) {
c->run = false;
#endif
assert(c->cb);
- c->scheduler->vtable->sched(c, c->error_data.error);
+ c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error);
c = next;
}
list->head = list->tail = nullptr;
@@ -344,11 +356,11 @@ inline void grpc_closure_list_sched(grpc_closure_list* list) {
/** Schedule all closures in a list to be run. Does not need to be run from a
* safe point. */
#ifndef NDEBUG
-#define GRPC_CLOSURE_LIST_SCHED(closure_list) \
- grpc_closure_list_sched(__FILE__, __LINE__, closure_list)
+#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
+ grpc_closure_list_sched(__FILE__, __LINE__, exec_ctx, closure_list)
#else
-#define GRPC_CLOSURE_LIST_SCHED(closure_list) \
- grpc_closure_list_sched(closure_list)
+#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
+ grpc_closure_list_sched(exec_ctx, closure_list)
#endif
#endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
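The signature change above ripples into every closure in the tree: callbacks now take the exec_ctx as their first argument, and scheduling requires one. A minimal sketch against the new contract (my_cb, my_state, and example_sched are illustrative; grpc_schedule_on_exec_ctx is the stock scheduler from exec_ctx.h):

    typedef struct {
      grpc_closure closure;
      int value;
    } my_state;

    /* Callbacks receive the exec_ctx explicitly; the scheduler unrefs
       the error after the callback returns, per the contract above. */
    static void my_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
      my_state* s = (my_state*)arg;
      (void)s;
    }

    static void example_sched(grpc_exec_ctx* exec_ctx, my_state* s) {
      GRPC_CLOSURE_INIT(&s->closure, my_cb, s, grpc_schedule_on_exec_ctx);
      /* queued now, runs at the next exec_ctx flush */
      GRPC_CLOSURE_SCHED(exec_ctx, &s->closure, GRPC_ERROR_NONE);
    }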
diff --git a/src/core/lib/iomgr/combiner.cc b/src/core/lib/iomgr/combiner.cc
index e4d7a6abd8..15c009dd77 100644
--- a/src/core/lib/iomgr/combiner.cc
+++ b/src/core/lib/iomgr/combiner.cc
@@ -61,15 +61,17 @@ struct grpc_combiner {
gpr_refcount refs;
};
-static void combiner_exec(grpc_closure* closure, grpc_error* error);
-static void combiner_finally_exec(grpc_closure* closure, grpc_error* error);
+static void combiner_exec(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error);
+static void combiner_finally_exec(grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_error* error);
static const grpc_closure_scheduler_vtable scheduler = {
combiner_exec, combiner_exec, "combiner:immediately"};
static const grpc_closure_scheduler_vtable finally_scheduler = {
combiner_finally_exec, combiner_finally_exec, "combiner:finally"};
-static void offload(void* arg, grpc_error* error);
+static void offload(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error);
grpc_combiner* grpc_combiner_create(void) {
grpc_combiner* lock = (grpc_combiner*)gpr_zalloc(sizeof(*lock));
@@ -85,19 +87,19 @@ grpc_combiner* grpc_combiner_create(void) {
return lock;
}
-static void really_destroy(grpc_combiner* lock) {
+static void really_destroy(grpc_exec_ctx* exec_ctx, grpc_combiner* lock) {
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
gpr_mpscq_destroy(&lock->queue);
gpr_free(lock);
}
-static void start_destroy(grpc_combiner* lock) {
+static void start_destroy(grpc_exec_ctx* exec_ctx, grpc_combiner* lock) {
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
if (old_state == 1) {
- really_destroy(lock);
+ really_destroy(exec_ctx, lock);
}
}
@@ -113,10 +115,11 @@ static void start_destroy(grpc_combiner* lock) {
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)
#endif
-void grpc_combiner_unref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
+void grpc_combiner_unref(grpc_exec_ctx* exec_ctx,
+ grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
GRPC_COMBINER_DEBUG_SPAM("UNREF", -1);
if (gpr_unref(&lock->refs)) {
- start_destroy(lock);
+ start_destroy(exec_ctx, lock);
}
}
@@ -126,25 +129,23 @@ grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
return lock;
}
-static void push_last_on_exec_ctx(grpc_combiner* lock) {
+static void push_last_on_exec_ctx(grpc_exec_ctx* exec_ctx,
+ grpc_combiner* lock) {
lock->next_combiner_on_this_exec_ctx = nullptr;
- if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
- grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
- grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
+ if (exec_ctx->active_combiner == nullptr) {
+ exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
} else {
- grpc_core::ExecCtx::Get()
- ->combiner_data()
- ->last_combiner->next_combiner_on_this_exec_ctx = lock;
- grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
+ exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
+ exec_ctx->last_combiner = lock;
}
}
-static void push_first_on_exec_ctx(grpc_combiner* lock) {
- lock->next_combiner_on_this_exec_ctx =
- grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
- grpc_core::ExecCtx::Get()->combiner_data()->active_combiner = lock;
+static void push_first_on_exec_ctx(grpc_exec_ctx* exec_ctx,
+ grpc_combiner* lock) {
+ lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner;
+ exec_ctx->active_combiner = lock;
if (lock->next_combiner_on_this_exec_ctx == nullptr) {
- grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
+ exec_ctx->last_combiner = lock;
}
}
@@ -152,8 +153,9 @@ static void push_first_on_exec_ctx(grpc_combiner* lock) {
((grpc_combiner*)(((char*)((closure)->scheduler)) - \
offsetof(grpc_combiner, scheduler_name)))
-static void combiner_exec(grpc_closure* cl, grpc_error* error) {
- GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS();
+static void combiner_exec(grpc_exec_ctx* exec_ctx, grpc_closure* cl,
+ grpc_error* error) {
+ GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
GPR_TIMER_BEGIN("combiner.execute", 0);
grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
@@ -161,19 +163,19 @@ static void combiner_exec(grpc_closure* cl, grpc_error* error) {
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
lock, cl, last));
if (last == 1) {
- GRPC_STATS_INC_COMBINER_LOCKS_INITIATED();
+ GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx);
GPR_TIMER_MARK("combiner.initiated", 0);
gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
- (gpr_atm)grpc_core::ExecCtx::Get());
+ (gpr_atm)exec_ctx);
// first element on this list: add it to the list of combiner locks
// executing within this exec_ctx
- push_last_on_exec_ctx(lock);
+ push_last_on_exec_ctx(exec_ctx, lock);
} else {
// there may be a race with setting here: if that happens, we may delay
// offload for one or two actions, and that's fine
gpr_atm initiator =
gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null);
- if (initiator != 0 && initiator != (gpr_atm)grpc_core::ExecCtx::Get()) {
+ if (initiator != 0 && initiator != (gpr_atm)exec_ctx) {
gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0);
}
}
@@ -184,32 +186,29 @@ static void combiner_exec(grpc_closure* cl, grpc_error* error) {
GPR_TIMER_END("combiner.execute", 0);
}
-static void move_next() {
- grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
- grpc_core::ExecCtx::Get()
- ->combiner_data()
- ->active_combiner->next_combiner_on_this_exec_ctx;
- if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
- grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = nullptr;
+static void move_next(grpc_exec_ctx* exec_ctx) {
+ exec_ctx->active_combiner =
+ exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
+ if (exec_ctx->active_combiner == nullptr) {
+ exec_ctx->last_combiner = nullptr;
}
}
-static void offload(void* arg, grpc_error* error) {
+static void offload(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_combiner* lock = (grpc_combiner*)arg;
- push_last_on_exec_ctx(lock);
+ push_last_on_exec_ctx(exec_ctx, lock);
}
-static void queue_offload(grpc_combiner* lock) {
- GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED();
- move_next();
+static void queue_offload(grpc_exec_ctx* exec_ctx, grpc_combiner* lock) {
+ GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx);
+ move_next(exec_ctx);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
- GRPC_CLOSURE_SCHED(&lock->offload, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
}
-bool grpc_combiner_continue_exec_ctx() {
+bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx* exec_ctx) {
GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
- grpc_combiner* lock =
- grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
+ grpc_combiner* lock = exec_ctx->active_combiner;
if (lock == nullptr) {
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return false;
@@ -224,15 +223,15 @@ bool grpc_combiner_continue_exec_ctx() {
"exec_ctx_ready_to_finish=%d "
"time_to_execute_final_list=%d",
lock, contended,
- grpc_core::ExecCtx::Get()->IsReadyToFinish(),
+ grpc_exec_ctx_ready_to_finish(exec_ctx),
lock->time_to_execute_final_list));
- if (contended && grpc_core::ExecCtx::Get()->IsReadyToFinish() &&
+ if (contended && grpc_exec_ctx_ready_to_finish(exec_ctx) &&
grpc_executor_is_threaded()) {
GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
// this execution context wants to move on: schedule remaining work to be
// picked up on the executor
- queue_offload(lock);
+ queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
@@ -248,7 +247,7 @@ bool grpc_combiner_continue_exec_ctx() {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
GPR_TIMER_MARK("delay_busy", 0);
- queue_offload(lock);
+ queue_offload(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
@@ -258,7 +257,7 @@ bool grpc_combiner_continue_exec_ctx() {
#ifndef NDEBUG
cl->scheduled = false;
#endif
- cl->cb(cl->cb_arg, cl_err);
+ cl->cb(exec_ctx, cl->cb_arg, cl_err);
GRPC_ERROR_UNREF(cl_err);
GPR_TIMER_END("combiner.exec1", 0);
} else {
@@ -275,7 +274,7 @@ bool grpc_combiner_continue_exec_ctx() {
#ifndef NDEBUG
c->scheduled = false;
#endif
- c->cb(c->cb_arg, error);
+ c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
GPR_TIMER_END("combiner.exec_1final", 0);
@@ -283,7 +282,7 @@ bool grpc_combiner_continue_exec_ctx() {
}
GPR_TIMER_MARK("unref", 0);
- move_next();
+ move_next(exec_ctx);
lock->time_to_execute_final_list = false;
gpr_atm old_state =
gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
@@ -312,7 +311,7 @@ bool grpc_combiner_continue_exec_ctx() {
return true;
case OLD_STATE_WAS(true, 1):
// and one count, one orphaned --> unlocked and orphaned
- really_destroy(lock);
+ really_destroy(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
case OLD_STATE_WAS(false, 0):
@@ -322,24 +321,27 @@ bool grpc_combiner_continue_exec_ctx() {
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
GPR_UNREACHABLE_CODE(return true);
}
- push_first_on_exec_ctx(lock);
+ push_first_on_exec_ctx(exec_ctx, lock);
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return true;
}
-static void enqueue_finally(void* closure, grpc_error* error);
+static void enqueue_finally(grpc_exec_ctx* exec_ctx, void* closure,
+ grpc_error* error);
-static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
- GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS();
+static void combiner_finally_exec(grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_error* error) {
+ GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx);
grpc_combiner* lock =
COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
- GRPC_COMBINER_TRACE(gpr_log(
- GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock,
- closure, grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
+ "C:%p grpc_combiner_execute_finally c=%p; ac=%p",
+ lock, closure, exec_ctx->active_combiner));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
- if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
+ if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
- GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure,
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ GRPC_CLOSURE_CREATE(enqueue_finally, closure,
grpc_combiner_scheduler(lock)),
error);
GPR_TIMER_END("combiner.execute_finally", 0);
@@ -353,8 +355,10 @@ static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
GPR_TIMER_END("combiner.execute_finally", 0);
}
-static void enqueue_finally(void* closure, grpc_error* error) {
- combiner_finally_exec((grpc_closure*)closure, GRPC_ERROR_REF(error));
+static void enqueue_finally(grpc_exec_ctx* exec_ctx, void* closure,
+ grpc_error* error) {
+ combiner_finally_exec(exec_ctx, (grpc_closure*)closure,
+ GRPC_ERROR_REF(error));
}
grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* combiner) {
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index 46b9ac58be..0c05511331 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -40,24 +40,26 @@ grpc_combiner* grpc_combiner_create(void);
, const char *file, int line, const char *reason
#define GRPC_COMBINER_REF(combiner, reason) \
grpc_combiner_ref((combiner), __FILE__, __LINE__, (reason))
-#define GRPC_COMBINER_UNREF(combiner, reason) \
- grpc_combiner_unref((combiner), __FILE__, __LINE__, (reason))
+#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \
+ grpc_combiner_unref((exec_ctx), (combiner), __FILE__, __LINE__, (reason))
#else
#define GRPC_COMBINER_DEBUG_ARGS
#define GRPC_COMBINER_REF(combiner, reason) grpc_combiner_ref((combiner))
-#define GRPC_COMBINER_UNREF(combiner, reason) grpc_combiner_unref((combiner))
+#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \
+ grpc_combiner_unref((exec_ctx), (combiner))
#endif
// Ref/unref the lock, for when we're sharing the lock ownership
// Prefer to use the macros above
grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
-void grpc_combiner_unref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
+void grpc_combiner_unref(grpc_exec_ctx* exec_ctx,
+ grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
// Fetch a scheduler to schedule closures against
grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* lock);
// Scheduler to execute \a action within the lock just prior to unlocking.
grpc_closure_scheduler* grpc_combiner_finally_scheduler(grpc_combiner* lock);
-bool grpc_combiner_continue_exec_ctx();
+bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx* exec_ctx);
extern grpc_core::TraceFlag grpc_combiner_trace;
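A sketch of the combiner lifecycle under the explicit-exec_ctx API: closures scheduled against grpc_combiner_scheduler() are queued, and grpc_exec_ctx_flush() drains them by repeatedly calling grpc_combiner_continue_exec_ctx() until no active combiner remains (do_locked, state, and example_use_combiner are illustrative):

    static void do_locked(grpc_exec_ctx* exec_ctx, void* arg,
                          grpc_error* error);

    static void example_use_combiner(grpc_exec_ctx* exec_ctx, void* state) {
      grpc_combiner* lock = grpc_combiner_create();
      GRPC_CLOSURE_SCHED(exec_ctx,
                         GRPC_CLOSURE_CREATE(do_locked, state,
                                             grpc_combiner_scheduler(lock)),
                         GRPC_ERROR_NONE);
      /* do_locked runs serialized with every other closure scheduled on
         this combiner, during grpc_exec_ctx_flush(exec_ctx). */
      GRPC_COMBINER_UNREF(exec_ctx, lock, "example");
    }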
diff --git a/src/core/lib/iomgr/endpoint.cc b/src/core/lib/iomgr/endpoint.cc
index 9d4b102822..5eab1d3158 100644
--- a/src/core/lib/iomgr/endpoint.cc
+++ b/src/core/lib/iomgr/endpoint.cc
@@ -18,35 +18,41 @@
#include "src/core/lib/iomgr/endpoint.h"
-void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb) {
- ep->vtable->read(ep, slices, cb);
+void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb) {
+ ep->vtable->read(exec_ctx, ep, slices, cb);
}
-void grpc_endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb) {
- ep->vtable->write(ep, slices, cb);
+void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb) {
+ ep->vtable->write(exec_ctx, ep, slices, cb);
}
-void grpc_endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
- ep->vtable->add_to_pollset(ep, pollset);
+void grpc_endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset) {
+ ep->vtable->add_to_pollset(exec_ctx, ep, pollset);
}
-void grpc_endpoint_add_to_pollset_set(grpc_endpoint* ep,
+void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
grpc_pollset_set* pollset_set) {
- ep->vtable->add_to_pollset_set(ep, pollset_set);
+ ep->vtable->add_to_pollset_set(exec_ctx, ep, pollset_set);
}
-void grpc_endpoint_delete_from_pollset_set(grpc_endpoint* ep,
+void grpc_endpoint_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
grpc_pollset_set* pollset_set) {
- ep->vtable->delete_from_pollset_set(ep, pollset_set);
+ ep->vtable->delete_from_pollset_set(exec_ctx, ep, pollset_set);
}
-void grpc_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
- ep->vtable->shutdown(ep, why);
+void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why) {
+ ep->vtable->shutdown(exec_ctx, ep, why);
}
-void grpc_endpoint_destroy(grpc_endpoint* ep) { ep->vtable->destroy(ep); }
+void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+ ep->vtable->destroy(exec_ctx, ep);
+}
char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
return ep->vtable->get_peer(ep);
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index cd53099334..6ab0a6591c 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -33,13 +33,18 @@ typedef struct grpc_endpoint grpc_endpoint;
typedef struct grpc_endpoint_vtable grpc_endpoint_vtable;
struct grpc_endpoint_vtable {
- void (*read)(grpc_endpoint* ep, grpc_slice_buffer* slices, grpc_closure* cb);
- void (*write)(grpc_endpoint* ep, grpc_slice_buffer* slices, grpc_closure* cb);
- void (*add_to_pollset)(grpc_endpoint* ep, grpc_pollset* pollset);
- void (*add_to_pollset_set)(grpc_endpoint* ep, grpc_pollset_set* pollset);
- void (*delete_from_pollset_set)(grpc_endpoint* ep, grpc_pollset_set* pollset);
- void (*shutdown)(grpc_endpoint* ep, grpc_error* why);
- void (*destroy)(grpc_endpoint* ep);
+ void (*read)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb);
+ void (*write)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb);
+ void (*add_to_pollset)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset);
+ void (*add_to_pollset_set)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset_set* pollset);
+ void (*delete_from_pollset_set)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset_set* pollset);
+ void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, grpc_error* why);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep);
grpc_resource_user* (*get_resource_user)(grpc_endpoint* ep);
char* (*get_peer)(grpc_endpoint* ep);
int (*get_fd)(grpc_endpoint* ep);
@@ -50,8 +55,8 @@ struct grpc_endpoint_vtable {
indicates the endpoint is closed.
Valid slices may be placed into \a slices even when the callback is
invoked with error != GRPC_ERROR_NONE. */
-void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb);
+void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb);
char* grpc_endpoint_get_peer(grpc_endpoint* ep);
@@ -69,22 +74,26 @@ int grpc_endpoint_get_fd(grpc_endpoint* ep);
No guarantee is made to the content of slices after a write EXCEPT that
it is a valid slice buffer.
*/
-void grpc_endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb);
+void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb);
/* Causes any pending and future read/write callbacks to run immediately with
success==0 */
-void grpc_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why);
-void grpc_endpoint_destroy(grpc_endpoint* ep);
+void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why);
+void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep);
/* Add an endpoint to a pollset or pollset_set, so that when the pollset is
polled, events from this endpoint are considered */
-void grpc_endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset);
-void grpc_endpoint_add_to_pollset_set(grpc_endpoint* ep,
+void grpc_endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset);
+void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
grpc_pollset_set* pollset_set);
/* Delete an endpoint from a pollset_set */
-void grpc_endpoint_delete_from_pollset_set(grpc_endpoint* ep,
+void grpc_endpoint_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
grpc_pollset_set* pollset_set);
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* endpoint);
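A sketch of driving the updated endpoint API: the caller now supplies the exec_ctx for both polling registration and the asynchronous read (on_read and example_read are illustrative):

    /* Invoked once data (or an error) is available; the read slices were
       appended to the buffer passed to grpc_endpoint_read below. */
    static void on_read(grpc_exec_ctx* exec_ctx, void* arg,
                        grpc_error* error) {}

    static void example_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
                             grpc_pollset* pollset,
                             grpc_slice_buffer* incoming,
                             grpc_closure* on_read_closure) {
      grpc_endpoint_add_to_pollset(exec_ctx, ep, pollset);
      grpc_endpoint_read(exec_ctx, ep, incoming, on_read_closure);
    }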
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.cc b/src/core/lib/iomgr/endpoint_pair_posix.cc
index 0b4aefd1b7..f5f59f9917 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.cc
+++ b/src/core/lib/iomgr/endpoint_pair_posix.cc
@@ -54,17 +54,18 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
char* final_name;
create_sockets(sv);
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_asprintf(&final_name, "%s:client", name);
- p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), args,
+ p.client = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[1], final_name), args,
"socketpair-server");
gpr_free(final_name);
gpr_asprintf(&final_name, "%s:server", name);
- p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), args,
+ p.server = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[0], final_name), args,
"socketpair-client");
gpr_free(final_name);
+ grpc_exec_ctx_finish(&exec_ctx);
return p;
}
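This is the pattern the commit reinstates at every entry point that previously relied on the thread-local grpc_core::ExecCtx: a stack-allocated context initialized with GRPC_EXEC_CTX_INIT and explicitly finished before returning. Schematically (example_entry_point is illustrative):

    void example_entry_point(void) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      /* ... call iomgr APIs, passing &exec_ctx explicitly ... */
      grpc_exec_ctx_finish(&exec_ctx); /* flushes closures queued above */
    }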
diff --git a/src/core/lib/iomgr/endpoint_pair_windows.cc b/src/core/lib/iomgr/endpoint_pair_windows.cc
index cc07ac0708..afa995a1c7 100644
--- a/src/core/lib/iomgr/endpoint_pair_windows.cc
+++ b/src/core/lib/iomgr/endpoint_pair_windows.cc
@@ -72,12 +72,14 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
SOCKET sv[2];
grpc_endpoint_pair p;
create_sockets(sv);
- grpc_core::ExecCtx exec_ctx;
- p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"),
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ p.client = grpc_tcp_create(&exec_ctx,
+ grpc_winsocket_create(sv[1], "endpoint:client"),
channel_args, "endpoint:server");
- p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"),
+ p.server = grpc_tcp_create(&exec_ctx,
+ grpc_winsocket_create(sv[0], "endpoint:server"),
channel_args, "endpoint:client");
-
+ grpc_exec_ctx_finish(&exec_ctx);
return p;
}
diff --git a/src/core/lib/iomgr/error.cc b/src/core/lib/iomgr/error.cc
index 42cd7c455d..e6d640c106 100644
--- a/src/core/lib/iomgr/error.cc
+++ b/src/core/lib/iomgr/error.cc
@@ -156,7 +156,11 @@ static void unref_errs(grpc_error* err) {
}
}
-static void unref_slice(grpc_slice slice) { grpc_slice_unref_internal(slice); }
+static void unref_slice(grpc_slice slice) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_slice_unref_internal(&exec_ctx, slice);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
static void unref_strs(grpc_error* err) {
for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) {
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index d9e8a30f5e..0dda1d924c 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -299,29 +299,31 @@ static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }
/* if 'releasing_fd' is true, it means that we are going to detach the internal
 * fd from the grpc_fd structure (i.e., we should not call the shutdown()
 * syscall on that fd) */
-static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why,
- bool releasing_fd) {
- if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
+static void fd_shutdown_internal(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_error* why, bool releasing_fd) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
if (!releasing_fd) {
shutdown(fd->fd, SHUT_RDWR);
}
- fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
/* Might be called multiple times */
-static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
- fd_shutdown_internal(fd, why, false);
+static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
+ fd_shutdown_internal(exec_ctx, fd, why, false);
}
-static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
+static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* on_done, int* release_fd,
bool already_closed, const char* reason) {
grpc_error* error = GRPC_ERROR_NONE;
bool is_release_fd = (release_fd != nullptr);
if (!fd->read_closure->IsShutdown()) {
- fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
+ fd_shutdown_internal(exec_ctx, fd,
+ GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
is_release_fd);
}
@@ -333,7 +335,7 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
close(fd->fd);
}
- GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));
grpc_iomgr_unregister_object(&fd->iomgr_object);
fd->read_closure->DestroyEvent();
@@ -345,7 +347,8 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
gpr_mu_unlock(&fd_freelist_mu);
}
-static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
return (grpc_pollset*)notifier;
}
@@ -354,21 +357,26 @@ static bool fd_is_shutdown(grpc_fd* fd) {
return fd->read_closure->IsShutdown();
}
-static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
- fd->read_closure->NotifyOn(closure);
+static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
-static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
- fd->write_closure->NotifyOn(closure);
+static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
-static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
- fd->read_closure->SetReady();
+static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_pollset* notifier) {
+ fd->read_closure->SetReady(exec_ctx);
/* Use release store to match with acquire load in fd_get_read_notifier */
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
-static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
+static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
+ fd->write_closure->SetReady(exec_ctx);
+}
/*******************************************************************************
* Pollset Definitions
@@ -471,7 +479,7 @@ static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
pollset->next = pollset->prev = nullptr;
}
-static void pollset_destroy(grpc_pollset* pollset) {
+static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
pollset_neighborhood* neighborhood = pollset->neighborhood;
@@ -499,26 +507,27 @@ static void pollset_destroy(grpc_pollset* pollset) {
gpr_mu_destroy(&pollset->mu);
}
-static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
+static grpc_error* pollset_kick_all(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
GPR_TIMER_BEGIN("pollset_kick_all", 0);
grpc_error* error = GRPC_ERROR_NONE;
if (pollset->root_worker != nullptr) {
grpc_pollset_worker* worker = pollset->root_worker;
do {
- GRPC_STATS_INC_POLLSET_KICK();
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
switch (worker->state) {
case KICKED:
- GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
break;
case UNKICKED:
SET_KICK_STATE(worker, KICKED);
if (worker->initialized_cv) {
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->cv);
}
break;
case DESIGNATED_POLLER:
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
SET_KICK_STATE(worker, KICKED);
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
"pollset_kick_all");
@@ -534,29 +543,32 @@ static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
return error;
}
-static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
+static void pollset_maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
pollset->begin_refs == 0) {
GPR_TIMER_MARK("pollset_finish_shutdown", 0);
- GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
pollset->shutdown_closure = nullptr;
}
}
-static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_TIMER_BEGIN("pollset_shutdown", 0);
GPR_ASSERT(pollset->shutdown_closure == nullptr);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutdown_closure = closure;
pollset->shutting_down = true;
- GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
- pollset_maybe_finish_shutdown(pollset);
+ GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
+ pollset_maybe_finish_shutdown(exec_ctx, pollset);
GPR_TIMER_END("pollset_shutdown", 0);
}
-static int poll_deadline_to_millis_timeout(grpc_millis millis) {
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
+ grpc_millis millis) {
if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
- grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
+ grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
if (delta > INT_MAX) {
return INT_MAX;
} else if (delta < 0) {
@@ -574,7 +586,8 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) {
   NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
called by g_active_poller thread. So there is no need for synchronization
when accessing fields in g_epoll_set */
-static grpc_error* process_epoll_events(grpc_pollset* pollset) {
+static grpc_error* process_epoll_events(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
static const char* err_desc = "process_events";
grpc_error* error = GRPC_ERROR_NONE;
@@ -598,11 +611,11 @@ static grpc_error* process_epoll_events(grpc_pollset* pollset) {
bool write_ev = (ev->events & EPOLLOUT) != 0;
if (read_ev || cancel) {
- fd_become_readable(fd, pollset);
+ fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
- fd_become_writable(fd);
+ fd_become_writable(exec_ctx, fd);
}
}
}
@@ -618,26 +631,27 @@ static grpc_error* process_epoll_events(grpc_pollset* pollset) {
   NOTE ON SYNCHRONIZATION: At any point in time, only the g_active_poller
   (i.e., the designated poller thread) will be calling this function. So there is
   no need for any synchronization when accessing fields in g_epoll_set
-static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
+static grpc_error* do_epoll_wait(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
+ grpc_millis deadline) {
GPR_TIMER_BEGIN("do_epoll_wait", 0);
int r;
- int timeout = poll_deadline_to_millis_timeout(deadline);
+ int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
if (timeout != 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
}
do {
- GRPC_STATS_INC_SYSCALL_POLL();
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
timeout);
} while (r < 0 && errno == EINTR);
if (timeout != 0) {
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
}
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
- GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
+ GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
@@ -650,7 +664,8 @@ static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
return GRPC_ERROR_NONE;
}
-static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
+static bool begin_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
GPR_TIMER_BEGIN("begin_worker", 0);
@@ -745,7 +760,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
SET_KICK_STATE(worker, KICKED);
}
}
- grpc_core::ExecCtx::Get()->InvalidateNow();
+ grpc_exec_ctx_invalidate_now(exec_ctx);
}
if (grpc_polling_trace.enabled()) {
@@ -776,7 +791,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
}
static bool check_neighborhood_for_available_poller(
- pollset_neighborhood* neighborhood) {
+ grpc_exec_ctx* exec_ctx, pollset_neighborhood* neighborhood) {
GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
bool found_worker = false;
do {
@@ -800,7 +815,7 @@ static bool check_neighborhood_for_available_poller(
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
GPR_TIMER_MARK("signal worker", 0);
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&inspect_worker->cv);
}
} else {
@@ -840,7 +855,8 @@ static bool check_neighborhood_for_available_poller(
return found_worker;
}
-static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
+static void end_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl) {
GPR_TIMER_BEGIN("end_worker", 0);
if (grpc_polling_trace.enabled()) {
@@ -850,7 +866,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
/* Make sure we appear kicked */
SET_KICK_STATE(worker, KICKED);
grpc_closure_list_move(&worker->schedule_on_end_work,
- grpc_core::ExecCtx::Get()->closure_list());
+ &exec_ctx->closure_list);
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (worker->next != worker && worker->next->state == UNKICKED) {
if (grpc_polling_trace.enabled()) {
@@ -859,11 +875,11 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->next->cv);
- if (grpc_core::ExecCtx::Get()->HasWork()) {
+ if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu);
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
} else {
@@ -878,7 +894,8 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
&g_neighborhoods[(poller_neighborhood_idx + i) %
g_num_neighborhoods];
if (gpr_mu_trylock(&neighborhood->mu)) {
- found_worker = check_neighborhood_for_available_poller(neighborhood);
+ found_worker =
+ check_neighborhood_for_available_poller(exec_ctx, neighborhood);
gpr_mu_unlock(&neighborhood->mu);
scan_state[i] = true;
} else {
@@ -891,15 +908,16 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
&g_neighborhoods[(poller_neighborhood_idx + i) %
g_num_neighborhoods];
gpr_mu_lock(&neighborhood->mu);
- found_worker = check_neighborhood_for_available_poller(neighborhood);
+ found_worker =
+ check_neighborhood_for_available_poller(exec_ctx, neighborhood);
gpr_mu_unlock(&neighborhood->mu);
}
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
- } else if (grpc_core::ExecCtx::Get()->HasWork()) {
+ } else if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu);
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
if (worker->initialized_cv) {
@@ -909,7 +927,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_log(GPR_DEBUG, " .. remove worker");
}
if (EMPTIED == worker_remove(pollset, worker)) {
- pollset_maybe_finish_shutdown(pollset);
+ pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
GPR_TIMER_END("end_worker", 0);
@@ -919,7 +937,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
The function pollset_work() may temporarily release the lock (pollset->po.mu)
during the course of its execution but it will always re-acquire the lock and
ensure that it is held by the time the function returns */
-static grpc_error* pollset_work(grpc_pollset* ps,
+static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
grpc_pollset_worker worker;
@@ -932,7 +950,7 @@ static grpc_error* pollset_work(grpc_pollset* ps,
return GRPC_ERROR_NONE;
}
- if (begin_worker(ps, &worker, worker_hdl, deadline)) {
+ if (begin_worker(exec_ctx, ps, &worker, worker_hdl, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!ps->shutting_down);
@@ -950,14 +968,14 @@ static grpc_error* pollset_work(grpc_pollset* ps,
process_epoll_events() returns very quickly: It just queues the work on
     exec_ctx but does not execute it (the actual execution or more
- accurately grpc_core::ExecCtx::Get()->Flush() happens in end_worker()
- AFTER selecting a designated poller). So we are not waiting long periods
- without a designated poller */
+ accurately grpc_exec_ctx_flush() happens in end_worker() AFTER selecting
+ a designated poller). So we are not waiting long periods without a
+ designated poller */
if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
gpr_atm_acq_load(&g_epoll_set.num_events)) {
- append_error(&error, do_epoll_wait(ps, deadline), err_desc);
+ append_error(&error, do_epoll_wait(exec_ctx, ps, deadline), err_desc);
}
- append_error(&error, process_epoll_events(ps), err_desc);
+ append_error(&error, process_epoll_events(exec_ctx, ps), err_desc);
gpr_mu_lock(&ps->mu); /* lock */
@@ -965,17 +983,17 @@ static grpc_error* pollset_work(grpc_pollset* ps,
} else {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
}
- end_worker(ps, &worker, worker_hdl);
+ end_worker(exec_ctx, ps, &worker, worker_hdl);
gpr_tls_set(&g_current_thread_pollset, 0);
GPR_TIMER_END("pollset_work", 0);
return error;
}
-static grpc_error* pollset_kick(grpc_pollset* pollset,
+static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker* specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
- GRPC_STATS_INC_POLLSET_KICK();
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
grpc_error* ret_err = GRPC_ERROR_NONE;
if (grpc_polling_trace.enabled()) {
gpr_strvec log;
@@ -1008,7 +1026,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
grpc_pollset_worker* root_worker = pollset->root_worker;
if (root_worker == nullptr) {
- GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
+ GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
pollset->kicked_without_poller = true;
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kicked_without_poller");
@@ -1017,14 +1035,14 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
}
grpc_pollset_worker* next_worker = root_worker->next;
if (root_worker->state == KICKED) {
- GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
goto done;
} else if (next_worker->state == KICKED) {
- GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
}
@@ -1035,7 +1053,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
// there is no next worker
root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
&g_active_poller)) {
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
}
@@ -1043,7 +1061,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (next_worker->state == UNKICKED) {
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
}
@@ -1061,12 +1079,12 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
}
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&root_worker->cv);
}
goto done;
} else {
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
@@ -1076,13 +1094,13 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
goto done;
}
} else {
- GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
GPR_ASSERT(next_worker->state == KICKED);
SET_KICK_STATE(next_worker, KICKED);
goto done;
}
} else {
- GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kicked while waking up");
}
@@ -1099,7 +1117,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
- GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
}
@@ -1107,7 +1125,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
goto done;
} else if (specific_worker ==
(grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kick active poller");
}
@@ -1115,7 +1133,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (specific_worker->initialized_cv) {
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kick waiting worker");
}
@@ -1123,7 +1141,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
gpr_cv_signal(&specific_worker->cv);
goto done;
} else {
- GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kick non-waiting worker");
}
@@ -1135,7 +1153,8 @@ done:
return ret_err;
}
-static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_fd* fd) {}
/*******************************************************************************
* Pollset-set Definitions
@@ -1145,20 +1164,27 @@ static grpc_pollset_set* pollset_set_create(void) {
return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
}
-static void pollset_set_destroy(grpc_pollset_set* pss) {}
+static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss) {}
-static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {}
-static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
+static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {}
-static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
+static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {}
-static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
+static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {}
-static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
+static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {}
-static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
+static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {}
/*******************************************************************************
@@ -1234,7 +1260,7 @@ const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
gpr_log(GPR_ERROR,
"Skipping epoll1 becuase GRPC_LINUX_EPOLL is not defined.");
- return nullptr;
+ return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
#endif /* !defined(GRPC_LINUX_EPOLL) */
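Across all the engines touched by this diff the mechanical change is the same: entry points that read the execution context from a thread-local (grpc_core::ExecCtx::Get()) go back to taking it as an explicit first parameter. A declaration-only sketch of the resulting convention (ctx and vtable_model are illustrative stand-ins, not the real grpc_event_engine_vtable):

struct ctx;  // stand-in for grpc_exec_ctx

struct vtable_model {
  // before: void (*pollset_kick)(void* pollset, void* worker);
  void (*pollset_kick)(ctx* exec_ctx, void* pollset, void* worker);
  void (*pollset_add_fd)(ctx* exec_ctx, void* pollset, void* fd);
  void (*pollset_set_add_fd)(ctx* exec_ctx, void* pollset_set, void* fd);
  void (*fd_shutdown)(ctx* exec_ctx, void* fd, void* why);
};

Threading the context explicitly makes the dependency visible at every call site, which is why nearly every hunk below is a signature change plus the matching call-site updates.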
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index b2817156a8..62643df697 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -257,7 +257,8 @@ static gpr_mu fd_freelist_mu;
#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
+#define UNREF_BY(ec, fd, n, reason) \
+ unref_by(ec, fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (grpc_trace_fd_refcount.enabled()) {
@@ -268,13 +269,13 @@ static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
}
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n)
+#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
static void ref_by(grpc_fd* fd, int n) {
#endif
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
-static void fd_destroy(void* arg, grpc_error* error) {
+static void fd_destroy(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_fd* fd = (grpc_fd*)arg;
/* Add the fd to the freelist */
grpc_iomgr_unregister_object(&fd->iomgr_object);
@@ -292,8 +293,8 @@ static void fd_destroy(void* arg, grpc_error* error) {
}
#ifndef NDEBUG
-static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
- int line) {
+static void unref_by(grpc_exec_ctx* exec_ctx, grpc_fd* fd, int n,
+ const char* reason, const char* file, int line) {
if (grpc_trace_fd_refcount.enabled()) {
gpr_log(GPR_DEBUG,
"FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
@@ -301,11 +302,12 @@ static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
}
#else
-static void unref_by(grpc_fd* fd, int n) {
+static void unref_by(grpc_exec_ctx* exec_ctx, grpc_fd* fd, int n) {
#endif
gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
GRPC_CLOSURE_SCHED(
+ exec_ctx,
GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
} else {
@@ -371,7 +373,8 @@ static int fd_wrapped_fd(grpc_fd* fd) {
return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
}
-static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
+static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* on_done, int* release_fd,
bool already_closed, const char* reason) {
bool is_fd_closed = already_closed;
@@ -396,14 +399,15 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
to be alive (and not added to freelist) until the end of this function */
REF_BY(fd, 1, reason);
- GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
gpr_mu_unlock(&fd->orphan_mu);
- UNREF_BY(fd, 2, reason); /* Drop the reference */
+ UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
}
-static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
return (grpc_pollset*)notifier;
}
@@ -413,20 +417,22 @@ static bool fd_is_shutdown(grpc_fd* fd) {
}
/* Might be called multiple times */
-static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
- if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
+static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
- fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
-static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
- fd->read_closure->NotifyOn(closure);
+static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
-static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
- fd->write_closure->NotifyOn(closure);
+static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
/*******************************************************************************
@@ -550,7 +556,8 @@ static void pollset_global_shutdown(void) {
}
/* pollset->mu must be held while calling this function */
-static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
+static void pollset_maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
"PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
@@ -560,7 +567,7 @@ static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
}
if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
pollset->containing_pollset_set_count == 0) {
- GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
pollset->shutdown_closure = nullptr;
}
}
@@ -568,7 +575,8 @@ static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
/* pollset->mu must be held before calling this function,
* pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not be
* held */
-static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
+static grpc_error* kick_one_worker(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_worker* specific_worker) {
pollable* p = specific_worker->pollable_obj;
grpc_core::mu_guard lock(&p->mu);
GPR_ASSERT(specific_worker != nullptr);
@@ -576,19 +584,19 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p);
}
- GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
return GRPC_ERROR_NONE;
}
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p);
}
- GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
specific_worker->kicked = true;
return GRPC_ERROR_NONE;
}
if (specific_worker == p->root_worker) {
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
}
@@ -597,7 +605,7 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
return error;
}
if (specific_worker->initialized_cv) {
- GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p);
}
@@ -610,9 +618,9 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
return GRPC_ERROR_NONE;
}
-static grpc_error* pollset_kick(grpc_pollset* pollset,
+static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker* specific_worker) {
- GRPC_STATS_INC_POLLSET_KICK();
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
@@ -626,7 +634,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", pollset);
}
- GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
+ GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
pollset->kicked_without_poller = true;
return GRPC_ERROR_NONE;
} else {
@@ -646,28 +654,29 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
// so we take our chances and choose the SECOND worker enqueued against
// the pollset as a worker that's likely to be in cv_wait
return kick_one_worker(
- pollset->root_worker->links[PWLINK_POLLSET].next);
+ exec_ctx, pollset->root_worker->links[PWLINK_POLLSET].next);
}
} else {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", pollset);
}
- GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
return GRPC_ERROR_NONE;
}
} else {
- return kick_one_worker(specific_worker);
+ return kick_one_worker(exec_ctx, specific_worker);
}
}
-static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
+static grpc_error* pollset_kick_all(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
grpc_error* error = GRPC_ERROR_NONE;
const char* err_desc = "pollset_kick_all";
grpc_pollset_worker* w = pollset->root_worker;
if (w != nullptr) {
do {
- GRPC_STATS_INC_POLLSET_KICK();
- append_error(&error, kick_one_worker(w), err_desc);
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ append_error(&error, kick_one_worker(exec_ctx, w), err_desc);
w = w->links[PWLINK_POLLSET].next;
} while (w != pollset->root_worker);
}
@@ -680,9 +689,10 @@ static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
*mu = &pollset->mu;
}
-static int poll_deadline_to_millis_timeout(grpc_millis millis) {
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
+ grpc_millis millis) {
if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
- grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
+ grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
if (delta > INT_MAX)
return INT_MAX;
else if (delta < 0)
@@ -691,8 +701,9 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) {
return (int)delta;
}
-static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
- fd->read_closure->SetReady();
+static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_pollset* notifier) {
+ fd->read_closure->SetReady(exec_ctx);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@@ -703,7 +714,9 @@ static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
-static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
+static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
+ fd->write_closure->SetReady(exec_ctx);
+}
static grpc_error* fd_get_or_become_pollable(grpc_fd* fd, pollable** p) {
gpr_mu_lock(&fd->pollable_mu);
@@ -732,14 +745,16 @@ static grpc_error* fd_get_or_become_pollable(grpc_fd* fd, pollable** p) {
}
/* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_ASSERT(pollset->shutdown_closure == nullptr);
pollset->shutdown_closure = closure;
- GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
- pollset_maybe_finish_shutdown(pollset);
+ GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
+ pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
-static grpc_error* pollable_process_events(grpc_pollset* pollset,
+static grpc_error* pollable_process_events(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset,
pollable* pollable_obj, bool drain) {
static const char* err_desc = "pollset_process_events";
grpc_error* error = GRPC_ERROR_NONE;
@@ -769,10 +784,10 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
pollset, fd, cancel, read_ev, write_ev);
}
if (read_ev || cancel) {
- fd_become_readable(fd, pollset);
+ fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
- fd_become_writable(fd);
+ fd_become_writable(exec_ctx, fd);
}
}
}
@@ -781,13 +796,14 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
}
/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
-static void pollset_destroy(grpc_pollset* pollset) {
+static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
POLLABLE_UNREF(pollset->active_pollable, "pollset");
pollset->active_pollable = nullptr;
}
-static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
- int timeout = poll_deadline_to_millis_timeout(deadline);
+static grpc_error* pollable_epoll(grpc_exec_ctx* exec_ctx, pollable* p,
+ grpc_millis deadline) {
+ int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
if (grpc_polling_trace.enabled()) {
char* desc = pollable_desc(p);
@@ -800,11 +816,11 @@ static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
}
int r;
do {
- GRPC_STATS_INC_SYSCALL_POLL();
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
r = epoll_wait(p->epfd, p->events, MAX_EPOLL_EVENTS, timeout);
} while (r < 0 && errno == EINTR);
if (timeout != 0) {
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
}
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
@@ -859,7 +875,8 @@ static worker_remove_result worker_remove(grpc_pollset_worker** root_worker,
}
/* Return true if this thread should poll */
-static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
+static bool begin_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
bool do_poll = (pollset->shutdown_closure == nullptr);
@@ -880,7 +897,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
worker->pollable_obj, worker,
- poll_deadline_to_millis_timeout(deadline));
+ poll_deadline_to_millis_timeout(exec_ctx, deadline));
}
while (do_poll && worker->pollable_obj->root_worker != worker) {
if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
@@ -902,7 +919,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
worker->pollable_obj, worker);
}
}
- grpc_core::ExecCtx::Get()->InvalidateNow();
+ grpc_exec_ctx_invalidate_now(exec_ctx);
} else {
gpr_mu_unlock(&pollset->mu);
}
@@ -911,7 +928,8 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
return do_poll;
}
-static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
+static void end_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl) {
gpr_mu_lock(&pollset->mu);
gpr_mu_lock(&worker->pollable_obj->mu);
@@ -927,7 +945,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
case WRR_EMPTIED:
if (pollset->active_pollable != worker->pollable_obj) {
// pollable no longer being polled: flush events
- pollable_process_events(pollset, worker->pollable_obj, true);
+ pollable_process_events(exec_ctx, pollset, worker->pollable_obj, true);
}
break;
case WRR_REMOVED:
@@ -937,7 +955,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
POLLABLE_UNREF(worker->pollable_obj, "pollset_worker");
if (worker_remove(&pollset->root_worker, worker, PWLINK_POLLSET) ==
WRR_EMPTIED) {
- pollset_maybe_finish_shutdown(pollset);
+ pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
@@ -952,7 +970,7 @@ static long gettid(void) { return syscall(__NR_gettid); }
The function pollset_work() may temporarily release the lock (pollset->po.mu)
during the course of its execution but it will always re-acquire the lock and
ensure that it is held by the time the function returns */
-static grpc_error* pollset_work(grpc_pollset* pollset,
+static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
@@ -970,7 +988,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
gpr_log(GPR_DEBUG,
"PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR
" kwp=%d pollable=%p",
- pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
+ pollset, worker_hdl, WORKER_PTR, grpc_exec_ctx_now(exec_ctx),
deadline, pollset->kicked_without_poller, pollset->active_pollable);
}
static const char* err_desc = "pollset_work";
@@ -978,23 +996,25 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
if (pollset->kicked_without_poller) {
pollset->kicked_without_poller = false;
} else {
- if (begin_worker(pollset, WORKER_PTR, worker_hdl, deadline)) {
+ if (begin_worker(exec_ctx, pollset, WORKER_PTR, worker_hdl, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
if (WORKER_PTR->pollable_obj->event_cursor ==
WORKER_PTR->pollable_obj->event_count) {
- append_error(&error, pollable_epoll(WORKER_PTR->pollable_obj, deadline),
- err_desc);
+ append_error(
+ &error,
+ pollable_epoll(exec_ctx, WORKER_PTR->pollable_obj, deadline),
+ err_desc);
}
- append_error(
- &error,
- pollable_process_events(pollset, WORKER_PTR->pollable_obj, false),
- err_desc);
- grpc_core::ExecCtx::Get()->Flush();
+ append_error(&error,
+ pollable_process_events(exec_ctx, pollset,
+ WORKER_PTR->pollable_obj, false),
+ err_desc);
+ grpc_exec_ctx_flush(exec_ctx);
gpr_tls_set(&g_current_thread_pollset, 0);
gpr_tls_set(&g_current_thread_worker, 0);
}
- end_worker(pollset, WORKER_PTR, worker_hdl);
+ end_worker(exec_ctx, pollset, WORKER_PTR, worker_hdl);
}
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
gpr_free(worker);
@@ -1004,7 +1024,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
}
static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
- grpc_pollset* pollset, grpc_fd* fd) {
+ grpc_exec_ctx* exec_ctx, grpc_pollset* pollset, grpc_fd* fd) {
static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
grpc_error* error = GRPC_ERROR_NONE;
if (grpc_polling_trace.enabled()) {
@@ -1012,7 +1032,7 @@ static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
"PS:%p add fd %p (%d); transition pollable from empty to fd",
pollset, fd, fd->fd);
}
- append_error(&error, pollset_kick_all(pollset), err_desc);
+ append_error(&error, pollset_kick_all(exec_ctx, pollset), err_desc);
POLLABLE_UNREF(pollset->active_pollable, "pollset");
append_error(&error, fd_get_or_become_pollable(fd, &pollset->active_pollable),
err_desc);
@@ -1020,7 +1040,7 @@ static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
}
static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
- grpc_pollset* pollset, grpc_fd* and_add_fd) {
+ grpc_exec_ctx* exec_ctx, grpc_pollset* pollset, grpc_fd* and_add_fd) {
static const char* err_desc = "pollset_transition_pollable_from_fd_to_multi";
grpc_error* error = GRPC_ERROR_NONE;
if (grpc_polling_trace.enabled()) {
@@ -1030,7 +1050,7 @@ static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
pollset->active_pollable->owner_fd);
}
- append_error(&error, pollset_kick_all(pollset), err_desc);
+ append_error(&error, pollset_kick_all(exec_ctx, pollset), err_desc);
grpc_fd* initial_fd = pollset->active_pollable->owner_fd;
POLLABLE_UNREF(pollset->active_pollable, "pollset");
pollset->active_pollable = nullptr;
@@ -1048,25 +1068,27 @@ static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
}
/* expects pollsets locked, flag whether fd is locked or not */
-static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
+static grpc_error* pollset_add_fd_locked(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset, grpc_fd* fd) {
grpc_error* error = GRPC_ERROR_NONE;
pollable* po_at_start =
POLLABLE_REF(pollset->active_pollable, "pollset_add_fd");
switch (pollset->active_pollable->type) {
case PO_EMPTY:
/* empty pollable --> single fd pollable */
- error = pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
+ error = pollset_transition_pollable_from_empty_to_fd_locked(exec_ctx,
+ pollset, fd);
break;
case PO_FD:
gpr_mu_lock(&po_at_start->owner_fd->orphan_mu);
if ((gpr_atm_no_barrier_load(&pollset->active_pollable->owner_fd->refst) &
1) == 0) {
- error =
- pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
+ error = pollset_transition_pollable_from_empty_to_fd_locked(
+ exec_ctx, pollset, fd);
} else {
/* fd --> multipoller */
- error =
- pollset_transition_pollable_from_fd_to_multi_locked(pollset, fd);
+ error = pollset_transition_pollable_from_fd_to_multi_locked(
+ exec_ctx, pollset, fd);
}
gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu);
break;
@@ -1083,7 +1105,8 @@ static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
return error;
}
-static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
+static grpc_error* pollset_as_multipollable_locked(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset,
pollable** pollable_obj) {
grpc_error* error = GRPC_ERROR_NONE;
pollable* po_at_start =
@@ -1100,8 +1123,8 @@ static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
POLLABLE_UNREF(pollset->active_pollable, "pollset");
error = pollable_create(PO_MULTI, &pollset->active_pollable);
} else {
- error = pollset_transition_pollable_from_fd_to_multi_locked(pollset,
- nullptr);
+ error = pollset_transition_pollable_from_fd_to_multi_locked(
+ exec_ctx, pollset, nullptr);
}
gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu);
break;
@@ -1119,9 +1142,10 @@ static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
return error;
}
-static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_fd* fd) {
gpr_mu_lock(&pollset->mu);
- grpc_error* error = pollset_add_fd_locked(pollset, fd);
+ grpc_error* error = pollset_add_fd_locked(exec_ctx, pollset, fd);
gpr_mu_unlock(&pollset->mu);
GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}
@@ -1147,27 +1171,28 @@ static grpc_pollset_set* pollset_set_create(void) {
return pss;
}
-static void pollset_set_unref(grpc_pollset_set* pss) {
+static void pollset_set_unref(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss) {
if (pss == nullptr) return;
if (!gpr_unref(&pss->refs)) return;
- pollset_set_unref(pss->parent);
+ pollset_set_unref(exec_ctx, pss->parent);
gpr_mu_destroy(&pss->mu);
for (size_t i = 0; i < pss->pollset_count; i++) {
gpr_mu_lock(&pss->pollsets[i]->mu);
if (0 == --pss->pollsets[i]->containing_pollset_set_count) {
- pollset_maybe_finish_shutdown(pss->pollsets[i]);
+ pollset_maybe_finish_shutdown(exec_ctx, pss->pollsets[i]);
}
gpr_mu_unlock(&pss->pollsets[i]->mu);
}
for (size_t i = 0; i < pss->fd_count; i++) {
- UNREF_BY(pss->fds[i], 2, "pollset_set");
+ UNREF_BY(exec_ctx, pss->fds[i], 2, "pollset_set");
}
gpr_free(pss->pollsets);
gpr_free(pss->fds);
gpr_free(pss);
}
-static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
}
@@ -1190,7 +1215,8 @@ static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
GRPC_LOG_IF_ERROR(err_desc, error);
}
-static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
+static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: del fd %p", pss, fd);
}
@@ -1198,7 +1224,7 @@ static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
size_t i;
for (i = 0; i < pss->fd_count; i++) {
if (pss->fds[i] == fd) {
- UNREF_BY(fd, 2, "pollset_set");
+ UNREF_BY(exec_ctx, fd, 2, "pollset_set");
break;
}
}
@@ -1210,7 +1236,8 @@ static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
gpr_mu_unlock(&pss->mu);
}
-static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
+static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: del pollset %p", pss, ps);
}
@@ -1229,15 +1256,15 @@ static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
gpr_mu_unlock(&pss->mu);
gpr_mu_lock(&ps->mu);
if (0 == --ps->containing_pollset_set_count) {
- pollset_maybe_finish_shutdown(ps);
+ pollset_maybe_finish_shutdown(exec_ctx, ps);
}
gpr_mu_unlock(&ps->mu);
}
// add all fds to pollables, and output a new array of unorphaned out_fds
// assumes pollsets are multipollable
-static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
- grpc_pollset** pollsets,
+static grpc_error* add_fds_to_pollsets(grpc_exec_ctx* exec_ctx, grpc_fd** fds,
+ size_t fd_count, grpc_pollset** pollsets,
size_t pollset_count,
const char* err_desc, grpc_fd** out_fds,
size_t* out_fd_count) {
@@ -1246,7 +1273,7 @@ static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
gpr_mu_lock(&fds[i]->orphan_mu);
if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) {
gpr_mu_unlock(&fds[i]->orphan_mu);
- UNREF_BY(fds[i], 2, "pollset_set");
+ UNREF_BY(exec_ctx, fds[i], 2, "pollset_set");
} else {
for (size_t j = 0; j < pollset_count; j++) {
append_error(&error,
@@ -1260,7 +1287,8 @@ static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
return error;
}
-static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
+static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: add pollset %p", pss, ps);
}
@@ -1268,8 +1296,8 @@ static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
static const char* err_desc = "pollset_set_add_pollset";
pollable* pollable_obj = nullptr;
gpr_mu_lock(&ps->mu);
- if (!GRPC_LOG_IF_ERROR(err_desc,
- pollset_as_multipollable_locked(ps, &pollable_obj))) {
+ if (!GRPC_LOG_IF_ERROR(err_desc, pollset_as_multipollable_locked(
+ exec_ctx, ps, &pollable_obj))) {
GPR_ASSERT(pollable_obj == nullptr);
gpr_mu_unlock(&ps->mu);
return;
@@ -1280,8 +1308,8 @@ static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
size_t initial_fd_count = pss->fd_count;
pss->fd_count = 0;
append_error(&error,
- add_fds_to_pollsets(pss->fds, initial_fd_count, &ps, 1, err_desc,
- pss->fds, &pss->fd_count),
+ add_fds_to_pollsets(exec_ctx, pss->fds, initial_fd_count, &ps, 1,
+ err_desc, pss->fds, &pss->fd_count),
err_desc);
if (pss->pollset_count == pss->pollset_capacity) {
pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
@@ -1295,7 +1323,8 @@ static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
GRPC_LOG_IF_ERROR(err_desc, error);
}
-static void pollset_set_add_pollset_set(grpc_pollset_set* a,
+static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* a,
grpc_pollset_set* b) {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS: merge (%p, %p)", a, b);
@@ -1344,13 +1373,13 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
a->fd_count = 0;
append_error(
&error,
- add_fds_to_pollsets(a->fds, initial_a_fd_count, b->pollsets,
+ add_fds_to_pollsets(exec_ctx, a->fds, initial_a_fd_count, b->pollsets,
b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
err_desc);
append_error(
&error,
- add_fds_to_pollsets(b->fds, b->fd_count, a->pollsets, a->pollset_count,
- "merge_b2a", a->fds, &a->fd_count),
+ add_fds_to_pollsets(exec_ctx, b->fds, b->fd_count, a->pollsets,
+ a->pollset_count, "merge_b2a", a->fds, &a->fd_count),
err_desc);
if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
a->pollset_capacity =
@@ -1372,7 +1401,8 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
gpr_mu_unlock(&b->mu);
}
-static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
+static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {}
/*******************************************************************************
@@ -1451,7 +1481,7 @@ const grpc_event_engine_vtable* grpc_init_epollex_linux(
bool explicitly_requested) {
gpr_log(GPR_ERROR,
"Skipping epollex becuase GRPC_LINUX_EPOLL is not defined.");
- return nullptr;
+ return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
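fd_notify_on_read() and fd_become_readable() above delegate to a lock-free event object whose NotifyOn/SetReady/SetShutdown methods now receive the exec_ctx, so whichever side arrives second can schedule the pending closure there. A simplified single-threaded model of that handshake (EventModel and Ctx are invented names; the real object uses atomics and schedules closures via GRPC_CLOSURE_SCHED rather than a plain vector):

#include <functional>
#include <utility>
#include <vector>

struct Ctx { std::vector<std::function<void()>> run_list; };

struct EventModel {
  std::function<void()> closure;  // set by NotifyOn, waiting for readiness
  bool ready = false;             // set by SetReady, waiting for a closure

  void NotifyOn(Ctx* ctx, std::function<void()> cb) {
    if (ready) {                  // readiness arrived first: schedule now
      ready = false;
      ctx->run_list.push_back(std::move(cb));
    } else {
      closure = std::move(cb);    // park until SetReady
    }
  }
  void SetReady(Ctx* ctx) {
    if (closure) {                // a closure was parked: schedule it
      ctx->run_list.push_back(std::move(closure));
      closure = nullptr;
    } else {
      ready = true;               // remember readiness for the next NotifyOn
    }
  }
};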
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index 7a8962f4a8..12c8483b8e 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -165,12 +165,13 @@ static void fd_global_shutdown(void);
#ifndef NDEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
-#define PI_UNREF(p, r) pi_unref_dbg((p), (r), __FILE__, __LINE__)
+#define PI_UNREF(exec_ctx, p, r) \
+ pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
#else
#define PI_ADD_REF(p, r) pi_add_ref((p))
-#define PI_UNREF(p, r) pi_unref((p))
+#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
#endif
@@ -269,7 +270,7 @@ static grpc_wakeup_fd polling_island_wakeup_fd;
static __thread polling_island* g_current_thread_polling_island;
/* Forward declaration */
-static void polling_island_delete(polling_island* pi);
+static void polling_island_delete(grpc_exec_ctx* exec_ctx, polling_island* pi);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -283,7 +284,7 @@ gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
static void pi_add_ref(polling_island* pi);
-static void pi_unref(polling_island* pi);
+static void pi_unref(grpc_exec_ctx* exec_ctx, polling_island* pi);
#ifndef NDEBUG
static void pi_add_ref_dbg(polling_island* pi, const char* reason,
@@ -298,8 +299,8 @@ static void pi_add_ref_dbg(polling_island* pi, const char* reason,
pi_add_ref(pi);
}
-static void pi_unref_dbg(polling_island* pi, const char* reason,
- const char* file, int line) {
+static void pi_unref_dbg(grpc_exec_ctx* exec_ctx, polling_island* pi,
+ const char* reason, const char* file, int line) {
if (grpc_polling_trace.enabled()) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
gpr_log(GPR_DEBUG,
@@ -307,7 +308,7 @@ static void pi_unref_dbg(polling_island* pi, const char* reason,
" (%s) - (%s, %d)",
pi, old_cnt, (old_cnt - 1), reason, file, line);
}
- pi_unref(pi);
+ pi_unref(exec_ctx, pi);
}
#endif
@@ -315,7 +316,7 @@ static void pi_add_ref(polling_island* pi) {
gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1);
}
-static void pi_unref(polling_island* pi) {
+static void pi_unref(grpc_exec_ctx* exec_ctx, polling_island* pi) {
/* If ref count went to zero, delete the polling island.
Note that this deletion need not be done under a lock. Once the ref count goes
to zero, we are guaranteed that no one else holds a reference to the
@@ -326,9 +327,9 @@ static void pi_unref(polling_island* pi) {
*/
if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
polling_island* next = (polling_island*)gpr_atm_acq_load(&pi->merged_to);
- polling_island_delete(pi);
+ polling_island_delete(exec_ctx, pi);
if (next != nullptr) {
- PI_UNREF(next, "pi_delete"); /* Recursive call */
+ PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
}
}
}
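pi_unref() above deletes the island outside any lock once the count reaches zero, then drops the reference it held on the island it was merged into, which may cascade down the merge chain. An iterative, non-atomic sketch of that chain (island_model and unref_island are illustrative; the real code uses gpr_atm counters and expresses the cascade as a recursive PI_UNREF):

struct island_model {
  int refs;
  island_model* merged_to;  // set when this island was merged into another
};

static void unref_island(island_model* pi) {
  while (pi != nullptr && --pi->refs == 0) {
    island_model* next = pi->merged_to;  // read before freeing
    delete pi;  // safe without a lock: no one else holds a reference
    pi = next;  // continue the cascade down the merge chain
  }
}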
@@ -464,7 +465,8 @@ static void polling_island_remove_fd_locked(polling_island* pi, grpc_fd* fd,
}
/* Might return NULL in case of an error */
-static polling_island* polling_island_create(grpc_fd* initial_fd,
+static polling_island* polling_island_create(grpc_exec_ctx* exec_ctx,
+ grpc_fd* initial_fd,
grpc_error** error) {
polling_island* pi = nullptr;
const char* err_desc = "polling_island_create";
@@ -480,7 +482,7 @@ static polling_island* polling_island_create(grpc_fd* initial_fd,
gpr_atm_rel_store(&pi->ref_count, 0);
gpr_atm_rel_store(&pi->poller_count, 0);
- gpr_atm_rel_store(&pi->merged_to, (gpr_atm) nullptr);
+ gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
@@ -495,13 +497,13 @@ static polling_island* polling_island_create(grpc_fd* initial_fd,
done:
if (*error != GRPC_ERROR_NONE) {
- polling_island_delete(pi);
+ polling_island_delete(exec_ctx, pi);
pi = nullptr;
}
return pi;
}
-static void polling_island_delete(polling_island* pi) {
+static void polling_island_delete(grpc_exec_ctx* exec_ctx, polling_island* pi) {
GPR_ASSERT(pi->fd_cnt == 0);
if (pi->epoll_fd >= 0) {
@@ -860,7 +862,8 @@ static int fd_wrapped_fd(grpc_fd* fd) {
return ret_fd;
}
-static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
+static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* on_done, int* release_fd,
bool already_closed, const char* reason) {
grpc_error* error = GRPC_ERROR_NONE;
polling_island* unref_pi = nullptr;
@@ -899,7 +902,7 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
fd->orphaned = true;
- GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
gpr_mu_unlock(&fd->po.mu);
UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -908,7 +911,7 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
The polling island owns a workqueue which owns an fd, and unreffing
inside the lock can cause an eventual lock loop that makes TSAN very
unhappy. */
- PI_UNREF(unref_pi, "fd_orphan");
+ PI_UNREF(exec_ctx, unref_pi, "fd_orphan");
}
if (error != GRPC_ERROR_NONE) {
const char* msg = grpc_error_string(error);
@@ -917,7 +920,8 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
GRPC_ERROR_UNREF(error);
}
-static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
return (grpc_pollset*)notifier;
}
@@ -927,20 +931,22 @@ static bool fd_is_shutdown(grpc_fd* fd) {
}
/* Might be called multiple times */
-static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
- if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
+static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
+ if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
- fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
+ fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
}
GRPC_ERROR_UNREF(why);
}
-static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
- fd->read_closure->NotifyOn(closure);
+static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
+ fd->read_closure->NotifyOn(exec_ctx, closure);
}
-static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
- fd->write_closure->NotifyOn(closure);
+static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
+ fd->write_closure->NotifyOn(exec_ctx, closure);
}
/*******************************************************************************
@@ -1022,11 +1028,11 @@ static void push_front_worker(grpc_pollset* p, grpc_pollset_worker* worker) {
}
/* p->mu must be held before calling this function */
-static grpc_error* pollset_kick(grpc_pollset* p,
+static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
grpc_pollset_worker* specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
grpc_error* error = GRPC_ERROR_NONE;
- GRPC_STATS_INC_POLLSET_KICK();
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
const char* err_desc = "Kick Failure";
grpc_pollset_worker* worker = specific_worker;
if (worker != nullptr) {
@@ -1090,9 +1096,10 @@ static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
pollset->shutdown_done = nullptr;
}
-static int poll_deadline_to_millis_timeout(grpc_millis millis) {
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
+ grpc_millis millis) {
if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
- grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
+ grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
if (delta > INT_MAX)
return INT_MAX;
else if (delta < 0)
@@ -1101,8 +1108,9 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) {
return (int)delta;
}
-static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
- fd->read_closure->SetReady();
+static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_pollset* notifier) {
+ fd->read_closure->SetReady(exec_ctx);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@@ -1113,34 +1121,39 @@ static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
-static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
+static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
+ fd->write_closure->SetReady(exec_ctx);
+}
-static void pollset_release_polling_island(grpc_pollset* ps,
+static void pollset_release_polling_island(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* ps,
const char* reason) {
if (ps->po.pi != nullptr) {
- PI_UNREF(ps->po.pi, reason);
+ PI_UNREF(exec_ctx, ps->po.pi, reason);
}
ps->po.pi = nullptr;
}
-static void finish_shutdown_locked(grpc_pollset* pollset) {
+static void finish_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
/* The pollset cannot have any workers if we are at this stage */
GPR_ASSERT(!pollset_has_workers(pollset));
pollset->finish_shutdown_called = true;
/* Release the ref and set pollset->po.pi to NULL */
- pollset_release_polling_island(pollset, "ps_shutdown");
- GRPC_CLOSURE_SCHED(pollset->shutdown_done, GRPC_ERROR_NONE);
+ pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
/* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_TIMER_BEGIN("pollset_shutdown", 0);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = true;
pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
/* If the pollset has any workers, we cannot call finish_shutdown_locked()
because it would release the underlying polling island. In such a case, we
@@ -1148,7 +1161,7 @@ static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
if (!pollset_has_workers(pollset)) {
GPR_ASSERT(!pollset->finish_shutdown_called);
GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
- finish_shutdown_locked(pollset);
+ finish_shutdown_locked(exec_ctx, pollset);
}
GPR_TIMER_END("pollset_shutdown", 0);
}
@@ -1156,14 +1169,15 @@ static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
* than destroying the mutexes, there is nothing special that needs to be done
* here */
-static void pollset_destroy(grpc_pollset* pollset) {
+static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GPR_ASSERT(!pollset_has_workers(pollset));
gpr_mu_destroy(&pollset->po.mu);
}
#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
-static void pollset_work_and_unlock(grpc_pollset* pollset,
+static void pollset_work_and_unlock(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset,
grpc_pollset_worker* worker, int timeout_ms,
sigset_t* sig_mask, grpc_error** error) {
struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
@@ -1185,7 +1199,7 @@ static void pollset_work_and_unlock(grpc_pollset* pollset,
this function (i.e pollset_work_and_unlock()) is called */
if (pollset->po.pi == nullptr) {
- pollset->po.pi = polling_island_create(nullptr, error);
+ pollset->po.pi = polling_island_create(exec_ctx, nullptr, error);
if (pollset->po.pi == nullptr) {
GPR_TIMER_END("pollset_work_and_unlock", 0);
return; /* Fatal error. We cannot continue */
@@ -1205,7 +1219,7 @@ static void pollset_work_and_unlock(grpc_pollset* pollset,
/* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
polling island to be deleted */
PI_ADD_REF(pi, "ps");
- PI_UNREF(pollset->po.pi, "ps");
+ PI_UNREF(exec_ctx, pollset->po.pi, "ps");
pollset->po.pi = pi;
}
@@ -1219,10 +1233,10 @@ static void pollset_work_and_unlock(grpc_pollset* pollset,
g_current_thread_polling_island = pi;
GRPC_SCHEDULING_START_BLOCKING_REGION;
- GRPC_STATS_INC_SYSCALL_POLL();
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
ep_rv =
epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
if (ep_rv < 0) {
if (errno != EINTR) {
gpr_asprintf(&err_msg,
@@ -1260,10 +1274,10 @@ static void pollset_work_and_unlock(grpc_pollset* pollset,
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
if (read_ev || cancel) {
- fd_become_readable(fd, pollset);
+ fd_become_readable(exec_ctx, fd, pollset);
}
if (write_ev || cancel) {
- fd_become_writable(fd);
+ fd_become_writable(exec_ctx, fd);
}
}
}
@@ -1278,7 +1292,7 @@ static void pollset_work_and_unlock(grpc_pollset* pollset,
that we got before releasing the polling island lock). This is because
pollset->po.pi pointer might get updated in other parts of the
code when there is an island merge while we are doing epoll_wait() above */
- PI_UNREF(pi, "ps_work");
+ PI_UNREF(exec_ctx, pi, "ps_work");
GPR_TIMER_END("pollset_work_and_unlock", 0);
}
@@ -1287,12 +1301,12 @@ static void pollset_work_and_unlock(grpc_pollset* pollset,
The function pollset_work() may temporarily release the lock (pollset->po.mu)
during the course of its execution but it will always re-acquire the lock and
ensure that it is held by the time the function returns */
-static grpc_error* pollset_work(grpc_pollset* pollset,
+static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
GPR_TIMER_BEGIN("pollset_work", 0);
grpc_error* error = GRPC_ERROR_NONE;
- int timeout_ms = poll_deadline_to_millis_timeout(deadline);
+ int timeout_ms = poll_deadline_to_millis_timeout(exec_ctx, deadline);
sigset_t new_mask;
@@ -1350,9 +1364,9 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
push_front_worker(pollset, &worker); /* Add worker to pollset */
- pollset_work_and_unlock(pollset, &worker, timeout_ms, &g_orig_sigmask,
- &error);
- grpc_core::ExecCtx::Get()->Flush();
+ pollset_work_and_unlock(exec_ctx, pollset, &worker, timeout_ms,
+ &g_orig_sigmask, &error);
+ grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->po.mu);
@@ -1372,10 +1386,10 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
if (pollset->shutting_down && !pollset_has_workers(pollset) &&
!pollset->finish_shutdown_called) {
GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
- finish_shutdown_locked(pollset);
+ finish_shutdown_locked(exec_ctx, pollset);
gpr_mu_unlock(&pollset->po.mu);
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->po.mu);
}
@@ -1390,8 +1404,9 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
return error;
}
-static void add_poll_object(poll_obj* bag, poll_obj_type bag_type,
- poll_obj* item, poll_obj_type item_type) {
+static void add_poll_object(grpc_exec_ctx* exec_ctx, poll_obj* bag,
+ poll_obj_type bag_type, poll_obj* item,
+ poll_obj_type item_type) {
GPR_TIMER_BEGIN("add_poll_object", 0);
#ifndef NDEBUG
@@ -1441,7 +1456,7 @@ retry:
keeping TSAN happy outweigh any performance advantage we might have
by keeping the lock held. */
gpr_mu_unlock(&item->mu);
- pi_new = polling_island_create(FD_FROM_PO(item), &error);
+ pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error);
gpr_mu_lock(&item->mu);
/* Need to reverify any assumptions made between the initial lock and
@@ -1460,11 +1475,11 @@ retry:
/* Ref and unref so that the polling island gets deleted during unref
*/
PI_ADD_REF(pi_new, "dance_of_destruction");
- PI_UNREF(pi_new, "dance_of_destruction");
+ PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
goto retry;
}
} else {
- pi_new = polling_island_create(nullptr, &error);
+ pi_new = polling_island_create(exec_ctx, nullptr, &error);
}
GRPC_POLLING_TRACE(
@@ -1518,7 +1533,7 @@ retry:
if (item->pi != pi_new) {
PI_ADD_REF(pi_new, poll_obj_string(item_type));
if (item->pi != nullptr) {
- PI_UNREF(item->pi, poll_obj_string(item_type));
+ PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type));
}
item->pi = pi_new;
}
@@ -1526,7 +1541,7 @@ retry:
if (bag->pi != pi_new) {
PI_ADD_REF(pi_new, poll_obj_string(bag_type));
if (bag->pi != nullptr) {
- PI_UNREF(bag->pi, poll_obj_string(bag_type));
+ PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type));
}
bag->pi = pi_new;
}
@@ -1538,8 +1553,10 @@ retry:
GPR_TIMER_END("add_poll_object", 0);
}
-static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
- add_poll_object(&pollset->po, POLL_OBJ_POLLSET, &fd->po, POLL_OBJ_FD);
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_fd* fd) {
+ add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po,
+ POLL_OBJ_FD);
}
/*******************************************************************************
@@ -1556,39 +1573,48 @@ static grpc_pollset_set* pollset_set_create(void) {
return pss;
}
-static void pollset_set_destroy(grpc_pollset_set* pss) {
+static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss) {
gpr_mu_destroy(&pss->po.mu);
if (pss->po.pi != nullptr) {
- PI_UNREF(pss->po.pi, "pss_destroy");
+ PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy");
}
gpr_free(pss);
}
-static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
- add_poll_object(&pss->po, POLL_OBJ_POLLSET_SET, &fd->po, POLL_OBJ_FD);
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {
+ add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po,
+ POLL_OBJ_FD);
}
-static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
+static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {
/* Nothing to do */
}
-static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
- add_poll_object(&pss->po, POLL_OBJ_POLLSET_SET, &ps->po, POLL_OBJ_POLLSET);
+static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {
+ add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po,
+ POLL_OBJ_POLLSET);
}
-static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
+static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {
/* Nothing to do */
}
-static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
+static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {
- add_poll_object(&bag->po, POLL_OBJ_POLLSET_SET, &item->po,
+ add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po,
POLL_OBJ_POLLSET_SET);
}
-static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
+static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {
/* Nothing to do */
}
@@ -1734,7 +1760,7 @@ const grpc_event_engine_vtable* grpc_init_epollsig_linux(
bool explicit_request) {
gpr_log(GPR_ERROR,
"Skipping epollsig becuase GRPC_LINUX_EPOLL is not defined.");
- return nullptr;
+ return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 006e3ddd2f..e32e1ba42a 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -128,7 +128,8 @@ static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset,
MUST NOT be called with a pollset lock taken
if got_read or got_write are 1, also does the become_{readable,writable} as
appropriate. */
-static void fd_end_poll(grpc_fd_watcher* rec, int got_read, int got_write,
+static void fd_end_poll(grpc_exec_ctx* exec_ctx, grpc_fd_watcher* rec,
+ int got_read, int got_write,
grpc_pollset* read_notifier_pollset);
/* Return 1 if this fd is orphaned, 0 otherwise */
@@ -185,9 +186,11 @@ struct grpc_pollset {
};
/* Add an fd to a pollset */
-static void pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd);
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ struct grpc_fd* fd);
-static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
/* Convert a deadline (grpc_millis) to a poll timeout in milliseconds:
- very small or negative poll times are clamped to zero to do a
@@ -196,7 +199,8 @@ static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
- longer than a millisecond polls are rounded up to the next nearest
millisecond to avoid spinning
- infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(grpc_millis deadline);
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
+ grpc_millis deadline);
/* Allow kick to wakeup the currently polling worker */
#define GRPC_POLLSET_CAN_KICK_SELF 1
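As a standalone illustration of the conversion rules in the comment above, matching the poll_deadline_to_millis_timeout bodies shown earlier for the epoll engines (deadline_to_millis is an invented name; the real function obtains "now" via grpc_exec_ctx_now(exec_ctx)):

#include <climits>

static int deadline_to_millis(long long deadline_ms, long long now_ms) {
  if (deadline_ms == LLONG_MAX) return -1;  // infinite deadline: block forever
  long long delta = deadline_ms - now_ms;
  if (delta <= 0) return 0;                 // past or tiny: non-blocking poll
  if (delta > INT_MAX) return INT_MAX;      // clamp to what epoll_wait accepts
  return (int)delta;
}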
@@ -204,7 +208,7 @@ static int poll_deadline_to_millis_timeout(grpc_millis deadline);
#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
/* As per pollset_kick, with an extended set of flags (defined above)
-- mostly for fd_posix's use. */
-static grpc_error* pollset_kick_ext(grpc_pollset* p,
+static grpc_error* pollset_kick_ext(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
grpc_pollset_worker* specific_worker,
uint32_t flags) GRPC_MUST_USE_RESULT;
@@ -349,7 +353,8 @@ static bool fd_is_orphaned(grpc_fd* fd) {
}
/* Return the read-notifier pollset */
-static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
grpc_pollset* notifier = nullptr;
gpr_mu_lock(&fd->mu);
@@ -359,36 +364,39 @@ static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
return notifier;
}
-static grpc_error* pollset_kick_locked(grpc_fd_watcher* watcher) {
+static grpc_error* pollset_kick_locked(grpc_exec_ctx* exec_ctx,
+ grpc_fd_watcher* watcher) {
gpr_mu_lock(&watcher->pollset->mu);
GPR_ASSERT(watcher->worker);
- grpc_error* err = pollset_kick_ext(watcher->pollset, watcher->worker,
- GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
+ grpc_error* err =
+ pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker,
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(&watcher->pollset->mu);
return err;
}
-static void maybe_wake_one_watcher_locked(grpc_fd* fd) {
+static void maybe_wake_one_watcher_locked(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
- pollset_kick_locked(fd->inactive_watcher_root.next);
+ pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next);
} else if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
+ pollset_kick_locked(exec_ctx, fd->read_watcher);
} else if (fd->write_watcher) {
- pollset_kick_locked(fd->write_watcher);
+ pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
-static void wake_all_watchers_locked(grpc_fd* fd) {
+static void wake_all_watchers_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
grpc_fd_watcher* watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
- pollset_kick_locked(watcher);
+ pollset_kick_locked(exec_ctx, watcher);
}
if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
+ pollset_kick_locked(exec_ctx, fd->read_watcher);
}
if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
- pollset_kick_locked(fd->write_watcher);
+ pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
@@ -397,12 +405,12 @@ static int has_watchers(grpc_fd* fd) {
fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
}
-static void close_fd_locked(grpc_fd* fd) {
+static void close_fd_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
fd->closed = 1;
if (!fd->released) {
close(fd->fd);
}
- GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
}
static int fd_wrapped_fd(grpc_fd* fd) {
@@ -413,7 +421,8 @@ static int fd_wrapped_fd(grpc_fd* fd) {
}
}
-static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
+static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* on_done, int* release_fd,
bool already_closed, const char* reason) {
fd->on_done_closure = on_done;
fd->released = release_fd != nullptr;
@@ -426,9 +435,9 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
gpr_mu_lock(&fd->mu);
REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
if (!has_watchers(fd)) {
- close_fd_locked(fd);
+ close_fd_locked(exec_ctx, fd);
} else {
- wake_all_watchers_locked(fd);
+ wake_all_watchers_locked(exec_ctx, fd);
}
gpr_mu_unlock(&fd->mu);
UNREF_BY(fd, 2, reason); /* drop the reference */
@@ -460,10 +469,10 @@ static grpc_error* fd_shutdown_error(grpc_fd* fd) {
}
}
-static void notify_on_locked(grpc_fd* fd, grpc_closure** st,
- grpc_closure* closure) {
+static void notify_on_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure** st, grpc_closure* closure) {
if (fd->shutdown) {
- GRPC_CLOSURE_SCHED(closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
/* not ready ==> switch to a waiting state by setting the closure */
@@ -471,8 +480,8 @@ static void notify_on_locked(grpc_fd* fd, grpc_closure** st,
} else if (*st == CLOSURE_READY) {
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
- GRPC_CLOSURE_SCHED(closure, fd_shutdown_error(fd));
- maybe_wake_one_watcher_locked(fd);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd));
+ maybe_wake_one_watcher_locked(exec_ctx, fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
gpr_log(GPR_ERROR,
@@ -483,7 +492,8 @@ static void notify_on_locked(grpc_fd* fd, grpc_closure** st,
}
/* returns 1 if state becomes not ready */
-static int set_ready_locked(grpc_fd* fd, grpc_closure** st) {
+static int set_ready_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure** st) {
if (*st == CLOSURE_READY) {
/* duplicate ready ==> ignore */
return 0;
@@ -493,18 +503,18 @@ static int set_ready_locked(grpc_fd* fd, grpc_closure** st) {
return 0;
} else {
/* waiting ==> queue closure */
- GRPC_CLOSURE_SCHED(*st, fd_shutdown_error(fd));
+ GRPC_CLOSURE_SCHED(exec_ctx, *st, fd_shutdown_error(fd));
*st = CLOSURE_NOT_READY;
return 1;
}
}
static void set_read_notifier_pollset_locked(
- grpc_fd* fd, grpc_pollset* read_notifier_pollset) {
+ grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_pollset* read_notifier_pollset) {
fd->read_notifier_pollset = read_notifier_pollset;
}
-static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
+static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
gpr_mu_lock(&fd->mu);
/* only shutdown once */
if (!fd->shutdown) {
@@ -512,8 +522,8 @@ static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
fd->shutdown_error = why;
/* signal read/write closed to OS so that future operations fail */
shutdown(fd->fd, SHUT_RDWR);
- set_ready_locked(fd, &fd->read_closure);
- set_ready_locked(fd, &fd->write_closure);
+ set_ready_locked(exec_ctx, fd, &fd->read_closure);
+ set_ready_locked(exec_ctx, fd, &fd->write_closure);
} else {
GRPC_ERROR_UNREF(why);
}
@@ -527,15 +537,17 @@ static bool fd_is_shutdown(grpc_fd* fd) {
return r;
}
-static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
+static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
gpr_mu_lock(&fd->mu);
- notify_on_locked(fd, &fd->read_closure, closure);
+ notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
gpr_mu_unlock(&fd->mu);
}
-static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
+static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
gpr_mu_lock(&fd->mu);
- notify_on_locked(fd, &fd->write_closure, closure);
+ notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
gpr_mu_unlock(&fd->mu);
}
@@ -590,7 +602,8 @@ static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset,
return mask;
}
-static void fd_end_poll(grpc_fd_watcher* watcher, int got_read, int got_write,
+static void fd_end_poll(grpc_exec_ctx* exec_ctx, grpc_fd_watcher* watcher,
+ int got_read, int got_write,
grpc_pollset* read_notifier_pollset) {
int was_polling = 0;
int kick = 0;
@@ -624,23 +637,23 @@ static void fd_end_poll(grpc_fd_watcher* watcher, int got_read, int got_write,
watcher->prev->next = watcher->next;
}
if (got_read) {
- if (set_ready_locked(fd, &fd->read_closure)) {
+ if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
kick = 1;
}
if (read_notifier_pollset != nullptr) {
- set_read_notifier_pollset_locked(fd, read_notifier_pollset);
+ set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
}
}
if (got_write) {
- if (set_ready_locked(fd, &fd->write_closure)) {
+ if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
kick = 1;
}
}
if (kick) {
- maybe_wake_one_watcher_locked(fd);
+ maybe_wake_one_watcher_locked(exec_ctx, fd);
}
if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
- close_fd_locked(fd);
+ close_fd_locked(exec_ctx, fd);
}
gpr_mu_unlock(&fd->mu);
@@ -701,12 +714,12 @@ static void kick_append_error(grpc_error** composite, grpc_error* error) {
*composite = grpc_error_add_child(*composite, error);
}
-static grpc_error* pollset_kick_ext(grpc_pollset* p,
+static grpc_error* pollset_kick_ext(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
grpc_pollset_worker* specific_worker,
uint32_t flags) {
GPR_TIMER_BEGIN("pollset_kick_ext", 0);
grpc_error* error = GRPC_ERROR_NONE;
- GRPC_STATS_INC_POLLSET_KICK();
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
/* pollset->mu already held */
if (specific_worker != nullptr) {
@@ -772,9 +785,9 @@ static grpc_error* pollset_kick_ext(grpc_pollset* p,
return error;
}
-static grpc_error* pollset_kick(grpc_pollset* p,
+static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
grpc_pollset_worker* specific_worker) {
- return pollset_kick_ext(p, specific_worker, 0);
+ return pollset_kick_ext(exec_ctx, p, specific_worker, 0);
}
/* global state management */
@@ -808,7 +821,7 @@ static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
pollset->pollset_set_count = 0;
}
-static void pollset_destroy(grpc_pollset* pollset) {
+static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GPR_ASSERT(!pollset_has_workers(pollset));
GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
while (pollset->local_wakeup_cache) {
@@ -821,7 +834,8 @@ static void pollset_destroy(grpc_pollset* pollset) {
gpr_mu_destroy(&pollset->mu);
}
-static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_fd* fd) {
gpr_mu_lock(&pollset->mu);
size_t i;
/* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */
@@ -836,19 +850,19 @@ static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
}
pollset->fds[pollset->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
- pollset_kick(pollset, nullptr);
+ pollset_kick(exec_ctx, pollset, nullptr);
exit:
gpr_mu_unlock(&pollset->mu);
}
-static void finish_shutdown(grpc_pollset* pollset) {
+static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
size_t i;
for (i = 0; i < pollset->fd_count; i++) {
GRPC_FD_UNREF(pollset->fds[i], "multipoller");
}
pollset->fd_count = 0;
- GRPC_CLOSURE_SCHED(pollset->shutdown_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
static void work_combine_error(grpc_error** composite, grpc_error* error) {
@@ -859,7 +873,7 @@ static void work_combine_error(grpc_error** composite, grpc_error* error) {
*composite = grpc_error_add_child(*composite, error);
}
-static grpc_error* pollset_work(grpc_pollset* pollset,
+static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
grpc_pollset_worker worker;
@@ -898,7 +912,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
if (!pollset_has_workers(pollset) &&
!grpc_closure_list_empty(pollset->idle_jobs)) {
GPR_TIMER_MARK("pollset_work.idle_jobs", 0);
- GRPC_CLOSURE_LIST_SCHED(&pollset->idle_jobs);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
goto done;
}
/* If we're shutting down then we don't execute any extended work */
@@ -930,7 +944,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
grpc_fd_watcher* watchers;
struct pollfd* pfds;
- timeout = poll_deadline_to_millis_timeout(deadline);
+ timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
if (pollset->fd_count + 2 <= inline_elements) {
pfds = pollfd_space;
@@ -974,9 +988,9 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
even going into the blocking annotation if possible */
GRPC_SCHEDULING_START_BLOCKING_REGION;
- GRPC_STATS_INC_SYSCALL_POLL();
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
r = grpc_poll_function(pfds, pfd_count, timeout);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
@@ -989,16 +1003,16 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
for (i = 1; i < pfd_count; i++) {
if (watchers[i].fd == nullptr) {
- fd_end_poll(&watchers[i], 0, 0, nullptr);
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, nullptr);
} else {
// Wake up all the file descriptors, if we have an invalid one
// we can identify it on the next pollset_work()
- fd_end_poll(&watchers[i], 1, 1, pollset);
+ fd_end_poll(exec_ctx, &watchers[i], 1, 1, pollset);
}
}
} else if (r == 0) {
for (i = 1; i < pfd_count; i++) {
- fd_end_poll(&watchers[i], 0, 0, nullptr);
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, nullptr);
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
@@ -1010,14 +1024,14 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
}
for (i = 1; i < pfd_count; i++) {
if (watchers[i].fd == nullptr) {
- fd_end_poll(&watchers[i], 0, 0, nullptr);
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, nullptr);
} else {
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}
- fd_end_poll(&watchers[i], pfds[i].revents & POLLIN_CHECK,
+ fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
}
@@ -1040,7 +1054,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
worker list, which means nobody could ask us to re-evaluate polling). */
done:
if (!locked) {
- queued_work |= grpc_core::ExecCtx::Get()->Flush();
+ queued_work |= grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
locked = 1;
}
@@ -1069,21 +1083,21 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
/* check shutdown conditions */
if (pollset->shutting_down) {
if (pollset_has_workers(pollset)) {
- pollset_kick(pollset, nullptr);
+ pollset_kick(exec_ctx, pollset, nullptr);
} else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
- finish_shutdown(pollset);
- grpc_core::ExecCtx::Get()->Flush();
+ finish_shutdown(exec_ctx, pollset);
+ grpc_exec_ctx_flush(exec_ctx);
/* Continuing to access pollset here is safe -- it is the caller's
* responsibility to not destroy when it has outstanding calls to
* pollset_work.
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
} else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
- GRPC_CLOSURE_LIST_SCHED(&pollset->idle_jobs);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
gpr_mu_unlock(&pollset->mu);
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
}
}
@@ -1093,24 +1107,26 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
return error;
}
-static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset_has_workers(pollset)) {
- GRPC_CLOSURE_LIST_SCHED(&pollset->idle_jobs);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
}
if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
- finish_shutdown(pollset);
+ finish_shutdown(exec_ctx, pollset);
}
}
-static int poll_deadline_to_millis_timeout(grpc_millis deadline) {
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
+ grpc_millis deadline) {
if (deadline == GRPC_MILLIS_INF_FUTURE) return -1;
if (deadline == 0) return 0;
- grpc_millis n = deadline - grpc_core::ExecCtx::Get()->Now();
+ grpc_millis n = deadline - grpc_exec_ctx_now(exec_ctx);
if (n < 0) return 0;
if (n > INT_MAX) return -1;
return (int)n;
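
To make the clamping above concrete, a worked set of inputs and the resulting poll() timeouts (illustrative numbers, not taken from the diff):

    /* assume grpc_exec_ctx_now(exec_ctx) == 1000 (ms since g_start_time)       */
    /* deadline == GRPC_MILLIS_INF_FUTURE -> -1  (poll() blocks indefinitely)   */
    /* deadline == 0                      ->  0  (non-blocking poll)            */
    /* deadline == 990  (already passed)  -> n == -10, clamped to 0             */
    /* deadline == 1250                   -> n == 250, a 250ms poll() timeout   */
    /* n > INT_MAX                        -> -1  (treated as infinite)          */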
@@ -1127,7 +1143,8 @@ static grpc_pollset_set* pollset_set_create(void) {
return pollset_set;
}
-static void pollset_set_destroy(grpc_pollset_set* pollset_set) {
+static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set) {
size_t i;
gpr_mu_destroy(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
@@ -1142,7 +1159,7 @@ static void pollset_set_destroy(grpc_pollset_set* pollset_set) {
!pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
- finish_shutdown(pollset);
+ finish_shutdown(exec_ctx, pollset);
} else {
gpr_mu_unlock(&pollset->mu);
}
@@ -1153,7 +1170,8 @@ static void pollset_set_destroy(grpc_pollset_set* pollset_set) {
gpr_free(pollset_set);
}
-static void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
+static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {
size_t i, j;
gpr_mu_lock(&pollset->mu);
@@ -1172,7 +1190,7 @@ static void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
if (fd_is_orphaned(pollset_set->fds[i])) {
GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
} else {
- pollset_add_fd(pollset, pollset_set->fds[i]);
+ pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
pollset_set->fds[j++] = pollset_set->fds[i];
}
}
@@ -1180,7 +1198,8 @@ static void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
gpr_mu_unlock(&pollset_set->mu);
}
-static void pollset_set_del_pollset(grpc_pollset_set* pollset_set,
+static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
@@ -1200,13 +1219,14 @@ static void pollset_set_del_pollset(grpc_pollset_set* pollset_set,
!pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
- finish_shutdown(pollset);
+ finish_shutdown(exec_ctx, pollset);
} else {
gpr_mu_unlock(&pollset->mu);
}
}
-static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
+static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {
size_t i, j;
gpr_mu_lock(&bag->mu);
@@ -1221,7 +1241,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
if (fd_is_orphaned(bag->fds[i])) {
GRPC_FD_UNREF(bag->fds[i], "pollset_set");
} else {
- pollset_set_add_fd(item, bag->fds[i]);
+ pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
bag->fds[j++] = bag->fds[i];
}
}
@@ -1229,7 +1249,8 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
gpr_mu_unlock(&bag->mu);
}
-static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
+static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {
size_t i;
gpr_mu_lock(&bag->mu);
@@ -1244,7 +1265,8 @@ static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
gpr_mu_unlock(&bag->mu);
}
-static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
@@ -1255,15 +1277,16 @@ static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
GRPC_FD_REF(fd, "pollset_set");
pollset_set->fds[pollset_set->fd_count++] = fd;
for (i = 0; i < pollset_set->pollset_count; i++) {
- pollset_add_fd(pollset_set->pollsets[i], fd);
+ pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
}
for (i = 0; i < pollset_set->pollset_set_count; i++) {
- pollset_set_add_fd(pollset_set->pollset_sets[i], fd);
+ pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
}
gpr_mu_unlock(&pollset_set->mu);
}
-static void pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
+static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
@@ -1276,7 +1299,7 @@ static void pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
}
}
for (i = 0; i < pollset_set->pollset_set_count; i++) {
- pollset_set_del_fd(pollset_set->pollset_sets[i], fd);
+ pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
}
gpr_mu_unlock(&pollset_set->mu);
}
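
That closes out ev_poll_posix.cc: every entry point now receives the exec_ctx explicitly instead of reading a thread-local. A minimal caller-side sketch of the resulting pattern (assuming an initialized iomgr; the pollset, its mutex, and the deadline are placeholders):

    #include "src/core/lib/iomgr/error.h"
    #include "src/core/lib/iomgr/exec_ctx.h"
    #include "src/core/lib/iomgr/pollset.h"

    static void poll_once(grpc_pollset* pollset, gpr_mu* mu,
                          grpc_millis deadline) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_pollset_worker* worker = nullptr;
      gpr_mu_lock(mu); /* pollset_work is called with the pollset mutex held */
      GRPC_LOG_IF_ERROR(
          "pollset_work",
          grpc_pollset_work(&exec_ctx, pollset, &worker, deadline));
      gpr_mu_unlock(mu);
      grpc_exec_ctx_finish(&exec_ctx); /* drain closures queued while polling */
    }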
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index b516f93058..031c97564a 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -46,7 +46,7 @@ grpc_poll_function_type grpc_poll_function = poll;
grpc_wakeup_fd grpc_global_wakeup_fd;
-static const grpc_event_engine_vtable* g_event_engine = nullptr;
+static const grpc_event_engine_vtable* g_event_engine;
static const char* g_poll_strategy_name = nullptr;
typedef const grpc_event_engine_vtable* (*event_engine_factory_fn)(
@@ -184,25 +184,28 @@ int grpc_fd_wrapped_fd(grpc_fd* fd) {
return g_event_engine->fd_wrapped_fd(fd);
}
-void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason) {
- g_event_engine->fd_orphan(fd, on_done, release_fd, already_closed, reason);
+void grpc_fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_closure* on_done,
+ int* release_fd, bool already_closed, const char* reason) {
+ g_event_engine->fd_orphan(exec_ctx, fd, on_done, release_fd, already_closed,
+ reason);
}
-void grpc_fd_shutdown(grpc_fd* fd, grpc_error* why) {
- g_event_engine->fd_shutdown(fd, why);
+void grpc_fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
+ g_event_engine->fd_shutdown(exec_ctx, fd, why);
}
bool grpc_fd_is_shutdown(grpc_fd* fd) {
return g_event_engine->fd_is_shutdown(fd);
}
-void grpc_fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
- g_event_engine->fd_notify_on_read(fd, closure);
+void grpc_fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
+ g_event_engine->fd_notify_on_read(exec_ctx, fd, closure);
}
-void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
- g_event_engine->fd_notify_on_write(fd, closure);
+void grpc_fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
+ g_event_engine->fd_notify_on_write(exec_ctx, fd, closure);
}
size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; }
@@ -211,63 +214,72 @@ void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
g_event_engine->pollset_init(pollset, mu);
}
-void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
- g_event_engine->pollset_shutdown(pollset, closure);
+void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
+ g_event_engine->pollset_shutdown(exec_ctx, pollset, closure);
}
-void grpc_pollset_destroy(grpc_pollset* pollset) {
- g_event_engine->pollset_destroy(pollset);
+void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
+ g_event_engine->pollset_destroy(exec_ctx, pollset);
}
-grpc_error* grpc_pollset_work(grpc_pollset* pollset,
+grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker** worker,
grpc_millis deadline) {
- return g_event_engine->pollset_work(pollset, worker, deadline);
+ return g_event_engine->pollset_work(exec_ctx, pollset, worker, deadline);
}
-grpc_error* grpc_pollset_kick(grpc_pollset* pollset,
+grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker* specific_worker) {
- return g_event_engine->pollset_kick(pollset, specific_worker);
+ return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker);
}
-void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd) {
- g_event_engine->pollset_add_fd(pollset, fd);
+void grpc_pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ struct grpc_fd* fd) {
+ g_event_engine->pollset_add_fd(exec_ctx, pollset, fd);
}
grpc_pollset_set* grpc_pollset_set_create(void) {
return g_event_engine->pollset_set_create();
}
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {
- g_event_engine->pollset_set_destroy(pollset_set);
+void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set) {
+ g_event_engine->pollset_set_destroy(exec_ctx, pollset_set);
}
-void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {
- g_event_engine->pollset_set_add_pollset(pollset_set, pollset);
+ g_event_engine->pollset_set_add_pollset(exec_ctx, pollset_set, pollset);
}
-void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {
- g_event_engine->pollset_set_del_pollset(pollset_set, pollset);
+ g_event_engine->pollset_set_del_pollset(exec_ctx, pollset_set, pollset);
}
-void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {
- g_event_engine->pollset_set_add_pollset_set(bag, item);
+ g_event_engine->pollset_set_add_pollset_set(exec_ctx, bag, item);
}
-void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {
- g_event_engine->pollset_set_del_pollset_set(bag, item);
+ g_event_engine->pollset_set_del_pollset_set(exec_ctx, bag, item);
}
-void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
- g_event_engine->pollset_set_add_fd(pollset_set, fd);
+void grpc_pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd) {
+ g_event_engine->pollset_set_add_fd(exec_ctx, pollset_set, fd);
}
-void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
- g_event_engine->pollset_set_del_fd(pollset_set, fd);
+void grpc_pollset_set_del_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd) {
+ g_event_engine->pollset_set_del_fd(exec_ctx, pollset_set, fd);
}
#endif // GRPC_POSIX_SOCKET
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 62f1162a23..16fa10ca56 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -36,36 +36,48 @@ typedef struct grpc_event_engine_vtable {
grpc_fd* (*fd_create)(int fd, const char* name);
int (*fd_wrapped_fd)(grpc_fd* fd);
- void (*fd_orphan)(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason);
- void (*fd_shutdown)(grpc_fd* fd, grpc_error* why);
- void (*fd_notify_on_read)(grpc_fd* fd, grpc_closure* closure);
- void (*fd_notify_on_write)(grpc_fd* fd, grpc_closure* closure);
+ void (*fd_orphan)(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_closure* on_done,
+ int* release_fd, bool already_closed, const char* reason);
+ void (*fd_shutdown)(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why);
+ void (*fd_notify_on_read)(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure);
+ void (*fd_notify_on_write)(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure);
bool (*fd_is_shutdown)(grpc_fd* fd);
- grpc_pollset* (*fd_get_read_notifier_pollset)(grpc_fd* fd);
+ grpc_pollset* (*fd_get_read_notifier_pollset)(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd);
void (*pollset_init)(grpc_pollset* pollset, gpr_mu** mu);
- void (*pollset_shutdown)(grpc_pollset* pollset, grpc_closure* closure);
- void (*pollset_destroy)(grpc_pollset* pollset);
- grpc_error* (*pollset_work)(grpc_pollset* pollset,
+ void (*pollset_shutdown)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure);
+ void (*pollset_destroy)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset);
+ grpc_error* (*pollset_work)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker** worker,
grpc_millis deadline);
- grpc_error* (*pollset_kick)(grpc_pollset* pollset,
+ grpc_error* (*pollset_kick)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker* specific_worker);
- void (*pollset_add_fd)(grpc_pollset* pollset, struct grpc_fd* fd);
+ void (*pollset_add_fd)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ struct grpc_fd* fd);
grpc_pollset_set* (*pollset_set_create)(void);
- void (*pollset_set_destroy)(grpc_pollset_set* pollset_set);
- void (*pollset_set_add_pollset)(grpc_pollset_set* pollset_set,
+ void (*pollset_set_destroy)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set);
+ void (*pollset_set_add_pollset)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset);
- void (*pollset_set_del_pollset)(grpc_pollset_set* pollset_set,
+ void (*pollset_set_del_pollset)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset);
- void (*pollset_set_add_pollset_set)(grpc_pollset_set* bag,
+ void (*pollset_set_add_pollset_set)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item);
- void (*pollset_set_del_pollset_set)(grpc_pollset_set* bag,
+ void (*pollset_set_del_pollset_set)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item);
- void (*pollset_set_add_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
- void (*pollset_set_del_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
+ void (*pollset_set_add_fd)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
+ void (*pollset_set_del_fd)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
void (*shutdown_engine)(void);
} grpc_event_engine_vtable;
@@ -91,14 +103,14 @@ int grpc_fd_wrapped_fd(grpc_fd* fd);
Requires: *fd initialized; no outstanding notify_on_read or
notify_on_write.
MUST NOT be called with a pollset lock taken */
-void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
- bool already_closed, const char* reason);
+void grpc_fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_closure* on_done,
+ int* release_fd, bool already_closed, const char* reason);
/* Has grpc_fd_shutdown been called on an fd? */
bool grpc_fd_is_shutdown(grpc_fd* fd);
/* Cause any current and future callbacks to fail. */
-void grpc_fd_shutdown(grpc_fd* fd, grpc_error* why);
+void grpc_fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why);
/* Register read interest, causing read_cb to be called once when fd becomes
readable, on deadline specified by deadline, or on shutdown triggered by
@@ -113,23 +125,29 @@ void grpc_fd_shutdown(grpc_fd* fd, grpc_error* why);
underlying platform. This means that users must drain fd in read_cb before
calling notify_on_read again. Users are also expected to handle spurious
events, i.e read_cb is called while nothing can be readable from fd */
-void grpc_fd_notify_on_read(grpc_fd* fd, grpc_closure* closure);
+void grpc_fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure);
/* Exactly the same semantics as above, except based on writable events. */
-void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure);
+void grpc_fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure);
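
Because readability is signaled edge-style as the comment above notes, the usual consumer drains the fd and only then re-arms. A simplified sketch (the closure and its wiring are hypothetical; EOF and error handling elided):

    #include <errno.h>
    #include <unistd.h>

    static grpc_closure read_closure; /* hypothetical, initialized elsewhere */

    static void on_readable(grpc_exec_ctx* exec_ctx, void* arg,
                            grpc_error* error) {
      grpc_fd* fd = (grpc_fd*)arg;
      if (error != GRPC_ERROR_NONE) return; /* fd shut down or orphaned */
      char buf[4096];
      ssize_t n;
      do { /* drain until EAGAIN so the next edge is not missed */
        n = read(grpc_fd_wrapped_fd(fd), buf, sizeof(buf));
      } while (n > 0 || (n < 0 && errno == EINTR));
      grpc_fd_notify_on_read(exec_ctx, fd, &read_closure); /* re-arm */
    }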
/* Return the read notifier pollset from the fd */
-grpc_pollset* grpc_fd_get_read_notifier_pollset(grpc_fd* fd);
+grpc_pollset* grpc_fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd);
/* pollset_posix functions */
/* Add an fd to a pollset */
-void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd);
+void grpc_pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ struct grpc_fd* fd);
/* pollset_set_posix functions */
-void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
-void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
+void grpc_pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
+void grpc_pollset_set_del_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
/* override to allow tests to hook poll() usage */
typedef int (*grpc_poll_function_type)(struct pollfd*, nfds_t, int);
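
A sketch of using that hook from a test: wrap the real syscall and record each invocation (names are illustrative):

    #include <poll.h>

    static int g_poll_calls = 0;

    static int counting_poll(struct pollfd* fds, nfds_t nfds, int timeout) {
      ++g_poll_calls;                  /* observe every poll the engine makes */
      return poll(fds, nfds, timeout); /* defer to the real syscall */
    }

    /* in test setup, before any polling starts: */
    /*   grpc_poll_function = counting_poll;     */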
diff --git a/src/core/lib/iomgr/exec_ctx.cc b/src/core/lib/iomgr/exec_ctx.cc
index e005437e0a..1777456342 100644
--- a/src/core/lib/iomgr/exec_ctx.cc
+++ b/src/core/lib/iomgr/exec_ctx.cc
@@ -25,7 +25,39 @@
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/profiling/timers.h"
-static void exec_ctx_run(grpc_closure* closure, grpc_error* error) {
+bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx* exec_ctx) {
+ if ((exec_ctx->flags & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) {
+ if (exec_ctx->check_ready_to_finish(exec_ctx,
+ exec_ctx->check_ready_to_finish_arg)) {
+ exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
+ return true;
+ }
+ return false;
+ } else {
+ return true;
+ }
+}
+
+bool grpc_never_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored) {
+ return false;
+}
+
+bool grpc_always_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored) {
+ return true;
+}
+
+bool grpc_exec_ctx_has_work(grpc_exec_ctx* exec_ctx) {
+ return exec_ctx->active_combiner != nullptr ||
+ !grpc_closure_list_empty(exec_ctx->closure_list);
+}
+
+void grpc_exec_ctx_finish(grpc_exec_ctx* exec_ctx) {
+ exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
+ grpc_exec_ctx_flush(exec_ctx);
+}
+
+static void exec_ctx_run(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error) {
#ifndef NDEBUG
closure->scheduled = false;
if (grpc_trace_closure.enabled()) {
@@ -35,7 +67,7 @@ static void exec_ctx_run(grpc_closure* closure, grpc_error* error) {
closure->line_initiated);
}
#endif
- closure->cb(closure->cb_arg, error);
+ closure->cb(exec_ctx, closure->cb_arg, error);
#ifndef NDEBUG
if (grpc_trace_closure.enabled()) {
gpr_log(GPR_DEBUG, "closure %p finished", closure);
@@ -44,13 +76,42 @@ static void exec_ctx_run(grpc_closure* closure, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
-static void exec_ctx_sched(grpc_closure* closure, grpc_error* error) {
- grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(), closure,
- error);
+bool grpc_exec_ctx_flush(grpc_exec_ctx* exec_ctx) {
+ bool did_something = 0;
+ GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
+ for (;;) {
+ if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ grpc_closure* c = exec_ctx->closure_list.head;
+ exec_ctx->closure_list.head = exec_ctx->closure_list.tail = nullptr;
+ while (c != nullptr) {
+ grpc_closure* next = c->next_data.next;
+ grpc_error* error = c->error_data.error;
+ did_something = true;
+ exec_ctx_run(exec_ctx, c, error);
+ c = next;
+ }
+ } else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) {
+ break;
+ }
+ }
+ GPR_ASSERT(exec_ctx->active_combiner == nullptr);
+ GPR_TIMER_END("grpc_exec_ctx_flush", 0);
+ return did_something;
+}
+
+static void exec_ctx_sched(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error) {
+ grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
}
static gpr_timespec g_start_time;
+void grpc_exec_ctx_global_init(void) {
+ g_start_time = gpr_now(GPR_CLOCK_MONOTONIC);
+}
+
+void grpc_exec_ctx_global_shutdown(void) {}
+
static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
ts = gpr_time_sub(ts, g_start_time);
double x =
@@ -70,6 +131,18 @@ static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
return (gpr_atm)x;
}
+grpc_millis grpc_exec_ctx_now(grpc_exec_ctx* exec_ctx) {
+ if (!exec_ctx->now_is_valid) {
+ exec_ctx->now = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
+ exec_ctx->now_is_valid = true;
+ }
+ return exec_ctx->now;
+}
+
+void grpc_exec_ctx_invalidate_now(grpc_exec_ctx* exec_ctx) {
+ exec_ctx->now_is_valid = false;
+}
+
gpr_timespec grpc_millis_to_timespec(grpc_millis millis,
gpr_clock_type clock_type) {
// special-case infinities as grpc_millis can be 32bit on some platforms
@@ -102,44 +175,3 @@ static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = {
exec_ctx_run, exec_ctx_sched, "exec_ctx"};
static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
grpc_closure_scheduler* grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
-
-namespace grpc_core {
-GPR_TLS_CLASS_DEF(ExecCtx::exec_ctx_);
-
-void ExecCtx::GlobalInit(void) {
- g_start_time = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_tls_init(&exec_ctx_);
-}
-
-bool ExecCtx::Flush() {
- bool did_something = 0;
- GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
- for (;;) {
- if (!grpc_closure_list_empty(closure_list_)) {
- grpc_closure* c = closure_list_.head;
- closure_list_.head = closure_list_.tail = nullptr;
- while (c != nullptr) {
- grpc_closure* next = c->next_data.next;
- grpc_error* error = c->error_data.error;
- did_something = true;
- exec_ctx_run(c, error);
- c = next;
- }
- } else if (!grpc_combiner_continue_exec_ctx()) {
- break;
- }
- }
- GPR_ASSERT(combiner_data_.active_combiner == nullptr);
- GPR_TIMER_END("grpc_exec_ctx_flush", 0);
- return did_something;
-}
-
-grpc_millis ExecCtx::Now() {
- if (!now_is_valid_) {
- now_ = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
- now_is_valid_ = true;
- }
- return now_;
-}
-
-} // namespace grpc_core
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index b0c1740155..b415d2c255 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -21,8 +21,6 @@
#include <grpc/support/atm.h>
#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
-#include <grpc/support/tls.h>
#include "src/core/lib/iomgr/closure.h"
@@ -43,13 +41,6 @@ typedef struct grpc_combiner grpc_combiner;
should be given to not delete said call/channel from this exec_ctx */
#define GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP 2
-extern grpc_closure_scheduler* grpc_schedule_on_exec_ctx;
-
-gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock);
-grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec);
-grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec);
-
-namespace grpc_core {
/** Execution context.
* A bag of data that collects information along a callstack.
* Generally created at public API entry points, and passed down as
@@ -70,130 +61,63 @@ namespace grpc_core {
* - Instances are always passed as the first argument to a function that
* takes it, and always as a pointer (grpc_exec_ctx is never copied).
*/
-class ExecCtx {
- public:
- /** Default Constructor */
-
- ExecCtx() : flags_(GRPC_EXEC_CTX_FLAG_IS_FINISHED) { Set(this); }
-
- /** Parameterised Constructor */
- ExecCtx(uintptr_t fl) : flags_(fl) { Set(this); }
-
- /** Destructor */
- ~ExecCtx() {
- flags_ |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
- Flush();
- Set(last_exec_ctx_);
- }
-
- /** Disallow copy and assignment operators */
- ExecCtx(const ExecCtx&) = delete;
- ExecCtx& operator=(const ExecCtx&) = delete;
-
- /** Return starting_cpu */
- unsigned starting_cpu() const { return starting_cpu_; }
-
- struct CombinerData {
- /* currently active combiner: updated only via combiner.c */
- grpc_combiner* active_combiner;
- /* last active combiner in the active combiner list */
- grpc_combiner* last_combiner;
- };
-
- /** Only to be used by grpc-combiner code */
- CombinerData* combiner_data() { return &combiner_data_; }
-
- /** Return pointer to grpc_closure_list */
- grpc_closure_list* closure_list() { return &closure_list_; }
-
- /** Return flags */
- uintptr_t flags() { return flags_; }
-
- /** Checks if there is work to be done */
- bool HasWork() {
- return combiner_data_.active_combiner != NULL ||
- !grpc_closure_list_empty(closure_list_);
- }
-
- /** Flush any work that has been enqueued onto this grpc_exec_ctx.
- * Caller must guarantee that no interfering locks are held.
- * Returns true if work was performed, false otherwise. */
- bool Flush();
-
- /** Returns true if we'd like to leave this execution context as soon as
-possible: useful for deciding whether to do something more or not depending
-on outside context */
- bool IsReadyToFinish() {
- if ((flags_ & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) {
- if (CheckReadyToFinish()) {
- flags_ |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
- return true;
- }
- return false;
- } else {
- return true;
- }
- }
-
- /** Returns the stored current time relative to start if valid,
- * otherwise refreshes the stored time, sets it valid and returns the new
- * value */
- grpc_millis Now();
-
- /** Invalidates the stored time value. A new time value will be set on calling
- * Now() */
- void InvalidateNow() { now_is_valid_ = false; }
-
- /** To be used only by shutdown code in iomgr */
- void SetNowIomgrShutdown() {
- now_ = GRPC_MILLIS_INF_FUTURE;
- now_is_valid_ = true;
- }
-
- /** To be used only for testing.
- * Sets the now value
- */
- void TestOnlySetNow(grpc_millis new_val) {
- now_ = new_val;
- now_is_valid_ = true;
- }
-
- /** Finish any pending work for a grpc_exec_ctx. Must be called before
- * the instance is destroyed, or work may be lost. */
- void Finish();
-
- /** Global initialization for ExecCtx. Called by iomgr */
- static void GlobalInit(void);
-
- /** Global shutdown for ExecCtx. Called by iomgr */
- static void GlobalShutdown(void) { gpr_tls_destroy(&exec_ctx_); }
-
- /** Gets pointer to current exec_ctx */
- static ExecCtx* Get() {
- return reinterpret_cast<ExecCtx*>(gpr_tls_get(&exec_ctx_));
- }
-
- protected:
- /** Check if ready to finish */
- virtual bool CheckReadyToFinish() { return false; }
+struct grpc_exec_ctx {
+ grpc_closure_list closure_list;
+ /** currently active combiner: updated only via combiner.c */
+ grpc_combiner* active_combiner;
+ /** last active combiner in the active combiner list */
+ grpc_combiner* last_combiner;
+ uintptr_t flags;
+ unsigned starting_cpu;
+ void* check_ready_to_finish_arg;
+ bool (*check_ready_to_finish)(grpc_exec_ctx* exec_ctx, void* arg);
+
+ bool now_is_valid;
+ grpc_millis now;
+};
- private:
- /** Set exec_ctx_ to exec_ctx */
- void Set(ExecCtx* exec_ctx) {
- gpr_tls_set(&exec_ctx_, reinterpret_cast<intptr_t>(exec_ctx));
+/* initializer for grpc_exec_ctx:
+ prefer to use GRPC_EXEC_CTX_INIT whenever possible */
+#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \
+ { \
+ GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, gpr_cpu_current_cpu(), \
+ finish_check_arg, finish_check, false, 0 \
+  }
- grpc_closure_list closure_list_ = GRPC_CLOSURE_LIST_INIT;
- CombinerData combiner_data_ = {nullptr, nullptr};
- uintptr_t flags_;
- unsigned starting_cpu_ = gpr_cpu_current_cpu();
+/* initialize an execution context at the top level of an API call into grpc
+ (this is safe to use elsewhere, though possibly not as efficient) */
+#define GRPC_EXEC_CTX_INIT \
+ GRPC_EXEC_CTX_INITIALIZER(GRPC_EXEC_CTX_FLAG_IS_FINISHED, NULL, NULL)
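
These macros restore the C idiom that the rest of this diff converts call sites back to: stack-allocate the context at an API boundary, pass the pointer down, finish before returning. In sketch form (the entry point and closure are placeholders):

    void my_entry_point(void) { /* hypothetical API entry point */
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      /* callees queue work onto &exec_ctx, e.g. (some_closure: hypothetical) */
      GRPC_CLOSURE_SCHED(&exec_ctx, some_closure, GRPC_ERROR_NONE);
      grpc_exec_ctx_finish(&exec_ctx); /* drains closure_list before return */
    }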
- bool now_is_valid_ = false;
- grpc_millis now_ = 0;
+extern grpc_closure_scheduler* grpc_schedule_on_exec_ctx;
- GPR_TLS_CLASS_DECL(exec_ctx_);
- ExecCtx* last_exec_ctx_ = Get();
-};
-} // namespace grpc_core
+bool grpc_exec_ctx_has_work(grpc_exec_ctx* exec_ctx);
+
+/** Flush any work that has been enqueued onto this grpc_exec_ctx.
+ * Caller must guarantee that no interfering locks are held.
+ * Returns true if work was performed, false otherwise. */
+bool grpc_exec_ctx_flush(grpc_exec_ctx* exec_ctx);
+/** Finish any pending work for a grpc_exec_ctx. Must be called before
+ * the instance is destroyed, or work may be lost. */
+void grpc_exec_ctx_finish(grpc_exec_ctx* exec_ctx);
+/** Returns true if we'd like to leave this execution context as soon as
+ possible: useful for deciding whether to do something more or not depending
+ on outside context */
+bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx* exec_ctx);
+/** A finish check that is never ready to finish */
+bool grpc_never_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored);
+/** A finish check that is always ready to finish */
+bool grpc_always_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored);
+
+void grpc_exec_ctx_global_init(void);
+void grpc_exec_ctx_global_shutdown(void);
+
+grpc_millis grpc_exec_ctx_now(grpc_exec_ctx* exec_ctx);
+void grpc_exec_ctx_invalidate_now(grpc_exec_ctx* exec_ctx);
+gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock);
+grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec);
+grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec);
#endif /* GRPC_CORE_LIB_IOMGR_EXEC_CTX_H */
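
The finish-check hooks above let long-running threads decide when to unwind (executor.cc below installs grpc_never_ready_to_finish). A custom check is just a predicate over the context; a sketch with an illustrative deadline-based check:

    static bool deadline_passed(grpc_exec_ctx* exec_ctx, void* arg) {
      return grpc_exec_ctx_now(exec_ctx) >= *(grpc_millis*)arg;
    }

    static void work_until(grpc_millis deadline) {
      grpc_exec_ctx exec_ctx =
          GRPC_EXEC_CTX_INITIALIZER(0, deadline_passed, &deadline);
      while (!grpc_exec_ctx_ready_to_finish(&exec_ctx)) {
        /* ... perform one unit of work ... */
        grpc_exec_ctx_invalidate_now(&exec_ctx); /* force a fresh clock read */
      }
      grpc_exec_ctx_finish(&exec_ctx);
    }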
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
index b45223ce16..fabdbdf934 100644
--- a/src/core/lib/iomgr/executor.cc
+++ b/src/core/lib/iomgr/executor.cc
@@ -55,7 +55,7 @@ grpc_core::TraceFlag executor_trace(false, "executor");
static void executor_thread(void* arg);
-static size_t run_closures(grpc_closure_list list) {
+static size_t run_closures(grpc_exec_ctx* exec_ctx, grpc_closure_list list) {
size_t n = 0;
grpc_closure* c = list.head;
@@ -73,11 +73,11 @@ static size_t run_closures(grpc_closure_list list) {
#ifndef NDEBUG
c->scheduled = false;
#endif
- c->cb(c->cb_arg, error);
+ c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
n++;
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_exec_ctx_flush(exec_ctx);
}
return n;
@@ -87,7 +87,7 @@ bool grpc_executor_is_threaded() {
return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
}
-void grpc_executor_set_threading(bool threading) {
+void grpc_executor_set_threading(grpc_exec_ctx* exec_ctx, bool threading) {
gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
if (threading) {
if (cur_threads > 0) return;
@@ -125,25 +125,28 @@ void grpc_executor_set_threading(bool threading) {
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_destroy(&g_thread_state[i].mu);
gpr_cv_destroy(&g_thread_state[i].cv);
- run_closures(g_thread_state[i].elems);
+ run_closures(exec_ctx, g_thread_state[i].elems);
}
gpr_free(g_thread_state);
gpr_tls_destroy(&g_this_thread_state);
}
}
-void grpc_executor_init() {
+void grpc_executor_init(grpc_exec_ctx* exec_ctx) {
gpr_atm_no_barrier_store(&g_cur_threads, 0);
- grpc_executor_set_threading(true);
+ grpc_executor_set_threading(exec_ctx, true);
}
-void grpc_executor_shutdown() { grpc_executor_set_threading(false); }
+void grpc_executor_shutdown(grpc_exec_ctx* exec_ctx) {
+ grpc_executor_set_threading(exec_ctx, false);
+}
static void executor_thread(void* arg) {
thread_state* ts = (thread_state*)arg;
gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
- grpc_core::ExecCtx exec_ctx(0);
+ grpc_exec_ctx exec_ctx =
+ GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, nullptr);
size_t subtract_depth = 0;
for (;;) {
@@ -165,7 +168,7 @@ static void executor_thread(void* arg) {
gpr_mu_unlock(&ts->mu);
break;
}
- GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED();
+ GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
grpc_closure_list exec = ts->elems;
ts->elems = GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
@@ -173,18 +176,19 @@ static void executor_thread(void* arg) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
}
- grpc_core::ExecCtx::Get()->InvalidateNow();
- subtract_depth = run_closures(exec);
+ grpc_exec_ctx_invalidate_now(&exec_ctx);
+ subtract_depth = run_closures(&exec_ctx, exec);
}
+ grpc_exec_ctx_finish(&exec_ctx);
}
-static void executor_push(grpc_closure* closure, grpc_error* error,
- bool is_short) {
+static void executor_push(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error, bool is_short) {
bool retry_push;
if (is_short) {
- GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS();
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
} else {
- GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS();
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
}
do {
retry_push = false;
@@ -198,16 +202,14 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
#endif
}
- grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
- closure, error);
+ grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
return;
}
thread_state* ts = (thread_state*)gpr_tls_get(&g_this_thread_state);
if (ts == nullptr) {
- ts = &g_thread_state[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
- cur_thread_count)];
+ ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
} else {
- GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF();
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
}
thread_state* orig_ts = ts;
@@ -243,7 +245,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
continue;
}
if (grpc_closure_list_empty(ts->elems)) {
- GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED();
+ GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
gpr_cv_signal(&ts->cv);
}
grpc_closure_list_append(&ts->elems, closure, error);
@@ -267,17 +269,19 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
gpr_spinlock_unlock(&g_adding_thread_lock);
}
if (retry_push) {
- GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES();
+ GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
}
} while (retry_push);
}
-static void executor_push_short(grpc_closure* closure, grpc_error* error) {
- executor_push(closure, error, true);
+static void executor_push_short(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error) {
+ executor_push(exec_ctx, closure, error, true);
}
-static void executor_push_long(grpc_closure* closure, grpc_error* error) {
- executor_push(closure, error, false);
+static void executor_push_long(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error) {
+ executor_push(exec_ctx, closure, error, false);
}
static const grpc_closure_scheduler_vtable executor_vtable_short = {
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index e16f11aa21..d349083eeb 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -31,18 +31,18 @@ typedef enum {
* This mechanism is meant to outsource work (grpc_closure instances) to a
* thread, for those cases where blocking isn't an option but there isn't a
* non-blocking solution available. */
-void grpc_executor_init();
+void grpc_executor_init(grpc_exec_ctx* exec_ctx);
grpc_closure_scheduler* grpc_executor_scheduler(grpc_executor_job_length);
/** Shutdown the executor, running all pending work as part of the call */
-void grpc_executor_shutdown();
+void grpc_executor_shutdown(grpc_exec_ctx* exec_ctx);
/** Is the executor multi-threaded? */
bool grpc_executor_is_threaded();
/* enable/disable threading - must be called after grpc_executor_init and before
grpc_executor_shutdown */
-void grpc_executor_set_threading(bool enable);
+void grpc_executor_set_threading(grpc_exec_ctx* exec_ctx, bool enable);
#endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */
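
Scheduling onto the executor under these signatures looks roughly as follows; the job-length value GRPC_EXECUTOR_SHORT is assumed from the (elided) enum above, and the callback and closure names are illustrative:

    static void my_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
      /* runs on an executor thread with that thread's exec_ctx */
    }

    static grpc_closure my_closure;

    static void schedule_on_executor(grpc_exec_ctx* exec_ctx) {
      GRPC_CLOSURE_INIT(&my_closure, my_cb, nullptr,
                        grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
      GRPC_CLOSURE_SCHED(exec_ctx, &my_closure, GRPC_ERROR_NONE);
    }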
diff --git a/src/core/lib/iomgr/fork_posix.cc b/src/core/lib/iomgr/fork_posix.cc
index cc131408af..f3cfd141b6 100644
--- a/src/core/lib/iomgr/fork_posix.cc
+++ b/src/core/lib/iomgr/fork_posix.cc
@@ -49,10 +49,10 @@ void grpc_prefork() {
return;
}
if (grpc_is_initialized()) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_timer_manager_set_threading(false);
- grpc_executor_set_threading(false);
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_executor_set_threading(&exec_ctx, false);
+ grpc_exec_ctx_finish(&exec_ctx);
if (!gpr_await_threads(
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_seconds(3, GPR_TIMESPAN)))) {
@@ -64,17 +64,18 @@ void grpc_prefork() {
void grpc_postfork_parent() {
if (grpc_is_initialized()) {
grpc_timer_manager_set_threading(true);
- grpc_core::ExecCtx exec_ctx;
- grpc_executor_set_threading(true);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_executor_set_threading(&exec_ctx, true);
+ grpc_exec_ctx_finish(&exec_ctx);
}
}
void grpc_postfork_child() {
if (grpc_is_initialized()) {
grpc_timer_manager_set_threading(true);
- grpc_core::ExecCtx exec_ctx;
- grpc_executor_set_threading(true);
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_executor_set_threading(&exec_ctx, true);
+ grpc_exec_ctx_finish(&exec_ctx);
}
}
diff --git a/src/core/lib/iomgr/iocp_windows.cc b/src/core/lib/iomgr/iocp_windows.cc
index 0b6e6823b3..6bbe5669c7 100644
--- a/src/core/lib/iomgr/iocp_windows.cc
+++ b/src/core/lib/iomgr/iocp_windows.cc
@@ -42,18 +42,20 @@ static gpr_atm g_custom_events = 0;
static HANDLE g_iocp;
-static DWORD deadline_to_millis_timeout(grpc_millis deadline) {
+static DWORD deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
+ grpc_millis deadline) {
if (deadline == GRPC_MILLIS_INF_FUTURE) {
return INFINITE;
}
- grpc_millis now = grpc_core::ExecCtx::Get()->Now();
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
if (deadline < now) return 0;
grpc_millis timeout = deadline - now;
if (timeout > std::numeric_limits<DWORD>::max()) return INFINITE;
return static_cast<DWORD>(deadline - now);
}
-grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) {
+grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx* exec_ctx,
+ grpc_millis deadline) {
BOOL success;
DWORD bytes = 0;
DWORD flags = 0;
@@ -61,11 +63,11 @@ grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) {
LPOVERLAPPED overlapped;
grpc_winsocket* socket;
grpc_winsocket_callback_info* info;
- GRPC_STATS_INC_SYSCALL_POLL();
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
success =
GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key, &overlapped,
- deadline_to_millis_timeout(deadline));
- grpc_core::ExecCtx::Get()->InvalidateNow();
+ deadline_to_millis_timeout(exec_ctx, deadline));
+ grpc_exec_ctx_invalidate_now(exec_ctx);
if (success == 0 && overlapped == NULL) {
return GRPC_IOCP_WORK_TIMEOUT;
}
@@ -93,7 +95,7 @@ grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) {
info->bytes_transfered = bytes;
info->wsa_error = success ? 0 : WSAGetLastError();
GPR_ASSERT(overlapped == &info->overlapped);
- grpc_socket_become_ready(socket, info);
+ grpc_socket_become_ready(exec_ctx, socket, info);
return GRPC_IOCP_WORK_WORK;
}
@@ -113,22 +115,22 @@ void grpc_iocp_kick(void) {
}
void grpc_iocp_flush(void) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iocp_work_status work_status;
do {
- work_status = grpc_iocp_work(GRPC_MILLIS_INF_PAST);
+ work_status = grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_PAST);
} while (work_status == GRPC_IOCP_WORK_KICK ||
- grpc_core::ExecCtx::Get()->Flush());
+ grpc_exec_ctx_flush(&exec_ctx));
}
void grpc_iocp_shutdown(void) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (gpr_atm_acq_load(&g_custom_events)) {
- grpc_iocp_work(GRPC_MILLIS_INF_FUTURE);
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_FUTURE);
+ grpc_exec_ctx_flush(&exec_ctx);
}
-
+ grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(CloseHandle(g_iocp));
}
diff --git a/src/core/lib/iomgr/iocp_windows.h b/src/core/lib/iomgr/iocp_windows.h
index 75b0ff4a92..0e9c3481f7 100644
--- a/src/core/lib/iomgr/iocp_windows.h
+++ b/src/core/lib/iomgr/iocp_windows.h
@@ -33,7 +33,8 @@ typedef enum {
GRPC_IOCP_WORK_KICK
} grpc_iocp_work_status;
-grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline);
+grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx* exec_ctx,
+ grpc_millis deadline);
void grpc_iocp_init(void);
void grpc_iocp_kick(void);
void grpc_iocp_flush(void);
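
grpc_iocp_work reports one of the three statuses above per call; a Windows-side pump under the new convention is roughly (a sketch; shutdown handling elided):

    static void pump_iocp(grpc_exec_ctx* exec_ctx, grpc_millis deadline) {
      for (;;) {
        switch (grpc_iocp_work(exec_ctx, deadline)) {
          case GRPC_IOCP_WORK_WORK: /* a completion arrived; closures queued */
            grpc_exec_ctx_flush(exec_ctx);
            break;
          case GRPC_IOCP_WORK_KICK:    /* woken deliberately */
          case GRPC_IOCP_WORK_TIMEOUT: /* deadline hit with no completions */
            return;
        }
      }
    }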
diff --git a/src/core/lib/iomgr/iomgr.cc b/src/core/lib/iomgr/iomgr.cc
index dacf08ea9e..e077b35014 100644
--- a/src/core/lib/iomgr/iomgr.cc
+++ b/src/core/lib/iomgr/iomgr.cc
@@ -45,20 +45,20 @@ static gpr_cv g_rcv;
static int g_shutdown;
static grpc_iomgr_object g_root_object;
-void grpc_iomgr_init() {
- grpc_core::ExecCtx exec_ctx;
+void grpc_iomgr_init(grpc_exec_ctx* exec_ctx) {
g_shutdown = 0;
gpr_mu_init(&g_mu);
gpr_cv_init(&g_rcv);
- grpc_executor_init();
- grpc_timer_list_init();
+ grpc_exec_ctx_global_init();
+ grpc_executor_init(exec_ctx);
+ grpc_timer_list_init(exec_ctx);
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = (char*)"root";
grpc_network_status_init();
grpc_iomgr_platform_init();
}
-void grpc_iomgr_start() { grpc_timer_manager_init(); }
+void grpc_iomgr_start(grpc_exec_ctx* exec_ctx) { grpc_timer_manager_init(); }
static size_t count_objects(void) {
grpc_iomgr_object* obj;
@@ -76,76 +76,75 @@ static void dump_objects(const char* kind) {
}
}
-void grpc_iomgr_shutdown() {
+void grpc_iomgr_shutdown(grpc_exec_ctx* exec_ctx) {
gpr_timespec shutdown_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
- {
- grpc_timer_manager_shutdown();
- grpc_iomgr_platform_flush();
- grpc_executor_shutdown();
-
- gpr_mu_lock(&g_mu);
- g_shutdown = 1;
- while (g_root_object.next != &g_root_object) {
- if (gpr_time_cmp(
- gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
- gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
- if (g_root_object.next != &g_root_object) {
- gpr_log(GPR_DEBUG,
- "Waiting for %" PRIuPTR " iomgr objects to be destroyed",
- count_objects());
- }
- last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
+ grpc_timer_manager_shutdown();
+ grpc_iomgr_platform_flush();
+ grpc_executor_shutdown(exec_ctx);
+
+ gpr_mu_lock(&g_mu);
+ g_shutdown = 1;
+ while (g_root_object.next != &g_root_object) {
+ if (gpr_time_cmp(
+ gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
+ gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
+ if (g_root_object.next != &g_root_object) {
+ gpr_log(GPR_DEBUG,
+ "Waiting for %" PRIuPTR " iomgr objects to be destroyed",
+ count_objects());
}
- grpc_core::ExecCtx::Get()->SetNowIomgrShutdown();
- if (grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED) {
- gpr_mu_unlock(&g_mu);
- grpc_core::ExecCtx::Get()->Flush();
- grpc_iomgr_platform_flush();
- gpr_mu_lock(&g_mu);
- continue;
+ last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
+ }
+ exec_ctx->now_is_valid = true;
+ exec_ctx->now = GRPC_MILLIS_INF_FUTURE;
+ if (grpc_timer_check(exec_ctx, nullptr) == GRPC_TIMERS_FIRED) {
+ gpr_mu_unlock(&g_mu);
+ grpc_exec_ctx_flush(exec_ctx);
+ grpc_iomgr_platform_flush();
+ gpr_mu_lock(&g_mu);
+ continue;
+ }
+ if (g_root_object.next != &g_root_object) {
+ if (grpc_iomgr_abort_on_leaks()) {
+ gpr_log(GPR_DEBUG,
+ "Failed to free %" PRIuPTR
+ " iomgr objects before shutdown deadline: "
+ "memory leaks are likely",
+ count_objects());
+ dump_objects("LEAKED");
+ abort();
}
- if (g_root_object.next != &g_root_object) {
- if (grpc_iomgr_abort_on_leaks()) {
- gpr_log(GPR_DEBUG,
- "Failed to free %" PRIuPTR
- " iomgr objects before shutdown deadline: "
- "memory leaks are likely",
- count_objects());
- dump_objects("LEAKED");
- abort();
- }
- gpr_timespec short_deadline =
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(100, GPR_TIMESPAN));
- if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
- if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) >
- 0) {
- if (g_root_object.next != &g_root_object) {
- gpr_log(GPR_DEBUG,
- "Failed to free %" PRIuPTR
- " iomgr objects before shutdown deadline: "
- "memory leaks are likely",
- count_objects());
- dump_objects("LEAKED");
- }
- break;
+ gpr_timespec short_deadline = gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
+ if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
+ if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
+ if (g_root_object.next != &g_root_object) {
+ gpr_log(GPR_DEBUG,
+ "Failed to free %" PRIuPTR
+ " iomgr objects before shutdown deadline: "
+ "memory leaks are likely",
+ count_objects());
+ dump_objects("LEAKED");
}
+ break;
}
}
}
- gpr_mu_unlock(&g_mu);
- grpc_timer_list_shutdown();
- grpc_core::ExecCtx::Get()->Flush();
}
+ gpr_mu_unlock(&g_mu);
+
+ grpc_timer_list_shutdown(exec_ctx);
+ grpc_exec_ctx_flush(exec_ctx);
/* ensure all threads have left g_mu */
gpr_mu_lock(&g_mu);
gpr_mu_unlock(&g_mu);
grpc_iomgr_platform_shutdown();
+ grpc_exec_ctx_global_shutdown();
grpc_network_status_shutdown();
gpr_mu_destroy(&g_mu);
gpr_cv_destroy(&g_rcv);
diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h
index 3f238c660a..2f00c0343d 100644
--- a/src/core/lib/iomgr/iomgr.h
+++ b/src/core/lib/iomgr/iomgr.h
@@ -23,13 +23,13 @@
#include "src/core/lib/iomgr/port.h"
/** Initializes the iomgr. */
-void grpc_iomgr_init();
+void grpc_iomgr_init(grpc_exec_ctx* exec_ctx);
/** Starts any background threads for iomgr. */
-void grpc_iomgr_start();
+void grpc_iomgr_start(grpc_exec_ctx* exec_ctx);
/** Signals the intention to shut down the iomgr. Expects to be able to flush
* exec_ctx. */
-void grpc_iomgr_shutdown();
+void grpc_iomgr_shutdown(grpc_exec_ctx* exec_ctx);
#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_H */
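
The three declarations above restore the pre-C++-class lifecycle in which the caller owns an on-stack execution context and explicitly flushes it. A minimal sketch of a caller driving that lifecycle, assuming only the functions declared here plus the GRPC_EXEC_CTX_INIT / grpc_exec_ctx_finish pair this patch reintroduces (grpc_init plumbing and error handling elided):

  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_iomgr_init(&exec_ctx);      /* timers, executor, platform pollers */
  grpc_iomgr_start(&exec_ctx);     /* background timer-manager threads */
  /* ... application runs ... */
  grpc_iomgr_shutdown(&exec_ctx);  /* drains iomgr objects; may flush exec_ctx */
  grpc_exec_ctx_finish(&exec_ctx);
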
diff --git a/src/core/lib/iomgr/iomgr_uv.cc b/src/core/lib/iomgr/iomgr_uv.cc
index 9614c2e664..b8a10f2ae8 100644
--- a/src/core/lib/iomgr/iomgr_uv.cc
+++ b/src/core/lib/iomgr/iomgr_uv.cc
@@ -29,11 +29,12 @@
gpr_thd_id g_init_thread;
void grpc_iomgr_platform_init(void) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_global_init();
- grpc_executor_set_threading(false);
+ grpc_executor_set_threading(&exec_ctx, false);
g_init_thread = gpr_thd_currentid();
+ grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_iomgr_platform_flush(void) {}
void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }
diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc
index 7b194e3db5..f0e798e8d8 100644
--- a/src/core/lib/iomgr/lockfree_event.cc
+++ b/src/core/lib/iomgr/lockfree_event.cc
@@ -85,7 +85,7 @@ void LockfreeEvent::DestroyEvent() {
kShutdownBit /* shutdown, no error */));
}
-void LockfreeEvent::NotifyOn(grpc_closure* closure) {
+void LockfreeEvent::NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (grpc_polling_trace.enabled()) {
@@ -118,7 +118,7 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
closure when transitioning out of CLOSURE_NOT_READY state (i.e. there
is no other code that needs to 'happen-after' this) */
if (gpr_atm_no_barrier_cas(&state_, kClosureReady, kClosureNotReady)) {
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
return; /* Successful. Return */
}
@@ -131,7 +131,7 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
schedule the closure with the shutdown error */
if ((curr & kShutdownBit) > 0) {
grpc_error* shutdown_err = (grpc_error*)(curr & ~kShutdownBit);
- GRPC_CLOSURE_SCHED(closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
return;
@@ -149,7 +149,8 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
GPR_UNREACHABLE_CODE(return );
}
-bool LockfreeEvent::SetShutdown(grpc_error* shutdown_err) {
+bool LockfreeEvent::SetShutdown(grpc_exec_ctx* exec_ctx,
+ grpc_error* shutdown_err) {
gpr_atm new_state = (gpr_atm)shutdown_err | kShutdownBit;
while (true) {
@@ -183,7 +184,7 @@ bool LockfreeEvent::SetShutdown(grpc_error* shutdown_err) {
happens-after on that edge), and a release to pair with anything
loading the shutdown state. */
if (gpr_atm_full_cas(&state_, curr, new_state)) {
- GRPC_CLOSURE_SCHED((grpc_closure*)curr,
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
return true;
@@ -199,7 +200,7 @@ bool LockfreeEvent::SetShutdown(grpc_error* shutdown_err) {
GPR_UNREACHABLE_CODE(return false);
}
-void LockfreeEvent::SetReady() {
+void LockfreeEvent::SetReady(grpc_exec_ctx* exec_ctx) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(&state_);
@@ -233,7 +234,7 @@ void LockfreeEvent::SetReady() {
spurious set_ready; release pairs with this or the acquire in
notify_on (or set_shutdown) */
else if (gpr_atm_full_cas(&state_, curr, kClosureNotReady)) {
- GRPC_CLOSURE_SCHED((grpc_closure*)curr, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr, GRPC_ERROR_NONE);
return;
}
/* else the state changed again (only possible by either a racing
diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h
index 3bd3fd72f1..aec67a3399 100644
--- a/src/core/lib/iomgr/lockfree_event.h
+++ b/src/core/lib/iomgr/lockfree_event.h
@@ -44,9 +44,9 @@ class LockfreeEvent {
return (gpr_atm_no_barrier_load(&state_) & kShutdownBit) != 0;
}
- void NotifyOn(grpc_closure* closure);
- bool SetShutdown(grpc_error* error);
- void SetReady();
+ void NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure);
+ bool SetShutdown(grpc_exec_ctx* exec_ctx, grpc_error* error);
+ void SetReady(grpc_exec_ctx* exec_ctx);
private:
enum State { kClosureNotReady = 0, kClosureReady = 2, kShutdownBit = 1 };
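
The enum above carries the whole design: grpc_closure pointers are word-aligned, so their two low bits are never 1 or 2, which lets kShutdownBit tag a grpc_error* and kClosureReady mean "event fired, nobody waiting" in the same atomic word. A self-contained sketch of the two-sided CAS handoff this encodes (shutdown path omitted; an illustration using simplified std::atomic types, not the gRPC class):

  #include <atomic>
  #include <cstdint>
  #include <cstdlib>

  struct Closure {
    void (*cb)(void* arg);
    void* arg;
  };

  constexpr uintptr_t kNotReady = 0; /* no event, no waiter */
  constexpr uintptr_t kReady = 2;    /* event fired, no waiter yet */
  static std::atomic<uintptr_t> g_state{kNotReady};

  void NotifyOn(Closure* c) { /* consumer: register a waiter */
    uintptr_t curr = g_state.load(std::memory_order_acquire);
    for (;;) {
      if (curr == kNotReady) {
        /* park the closure; a later SetReady claims and runs it */
        if (g_state.compare_exchange_weak(curr, reinterpret_cast<uintptr_t>(c)))
          return;
      } else if (curr == kReady) {
        /* event already fired: consume it, run the closure inline */
        if (g_state.compare_exchange_weak(curr, kNotReady)) {
          c->cb(c->arg);
          return;
        }
      } else {
        abort(); /* two waiters at once is a caller bug, as in the real type */
      }
    }
  }

  void SetReady() { /* producer: signal one event */
    uintptr_t curr = g_state.load(std::memory_order_acquire);
    for (;;) {
      if (curr == kNotReady) {
        if (g_state.compare_exchange_weak(curr, kReady)) return; /* latch */
      } else if (curr == kReady) {
        return; /* spurious second SetReady is a no-op */
      } else {
        /* a closure is parked: claim it, reset, run it exactly once */
        if (g_state.compare_exchange_weak(curr, kNotReady)) {
          reinterpret_cast<Closure*>(curr)->cb(
              reinterpret_cast<Closure*>(curr)->arg);
          return;
        }
      }
    }
  }
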
diff --git a/src/core/lib/iomgr/polling_entity.cc b/src/core/lib/iomgr/polling_entity.cc
index 126f6f45d6..0ee4ea1255 100644
--- a/src/core/lib/iomgr/polling_entity.cc
+++ b/src/core/lib/iomgr/polling_entity.cc
@@ -56,28 +56,32 @@ bool grpc_polling_entity_is_empty(const grpc_polling_entity* pollent) {
return pollent->tag == GRPC_POLLS_NONE;
}
-void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent,
+void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_polling_entity* pollent,
grpc_pollset_set* pss_dst) {
if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != nullptr);
- grpc_pollset_set_add_pollset(pss_dst, pollent->pollent.pollset);
+ grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != nullptr);
- grpc_pollset_set_add_pollset_set(pss_dst, pollent->pollent.pollset_set);
+ grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst,
+ pollent->pollent.pollset_set);
} else {
gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag);
abort();
}
}
-void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent,
+void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_polling_entity* pollent,
grpc_pollset_set* pss_dst) {
if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != nullptr);
- grpc_pollset_set_del_pollset(pss_dst, pollent->pollent.pollset);
+ grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
} else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != nullptr);
- grpc_pollset_set_del_pollset_set(pss_dst, pollent->pollent.pollset_set);
+ grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst,
+ pollent->pollent.pollset_set);
} else {
gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag);
abort();
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
index 0102d32c11..dbe579e60d 100644
--- a/src/core/lib/iomgr/polling_entity.h
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -55,12 +55,14 @@ bool grpc_polling_entity_is_empty(const grpc_polling_entity* pollent);
/** Add the pollset or pollset_set in \a pollent to the destination pollset_set
* \a * pss_dst */
-void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent,
+void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_polling_entity* pollent,
grpc_pollset_set* pss_dst);
/** Delete the pollset or pollset_set in \a pollent from the destination
* pollset_set \a * pss_dst */
-void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent,
+void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_polling_entity* pollent,
grpc_pollset_set* pss_dst);
#endif /* GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H */
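
A hedged usage sketch of the pair above; grpc_polling_entity_create_from_pollset is assumed from the earlier, unchanged part of this header, and exec_ctx, pollset, and interested_parties come from the caller:

  grpc_polling_entity pollent =
      grpc_polling_entity_create_from_pollset(pollset);
  grpc_polling_entity_add_to_pollset_set(exec_ctx, &pollent,
                                         interested_parties);
  /* ... pollers of interested_parties now cover the wrapped pollset ... */
  grpc_polling_entity_del_from_pollset_set(exec_ctx, &pollent,
                                           interested_parties);
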
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index 6bb3cd3e0c..d5d78f3101 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -42,8 +42,9 @@ size_t grpc_pollset_size(void);
void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu);
/* Begin shutting down the pollset, and call closure when done.
* pollset's mutex must be held */
-void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure);
-void grpc_pollset_destroy(grpc_pollset* pollset);
+void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure);
+void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset);
/* Do some work on a pollset.
May involve invoking asynchronous callbacks, or actually polling file
@@ -67,13 +68,13 @@ void grpc_pollset_destroy(grpc_pollset* pollset);
May call grpc_closure_list_run on grpc_closure_list, without holding the
pollset
lock */
-grpc_error* grpc_pollset_work(grpc_pollset* pollset,
+grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker** worker,
grpc_millis deadline) GRPC_MUST_USE_RESULT;
/* Break one polling thread out of polling work for this pollset.
If specific_worker is non-NULL, then kick that worker. */
-grpc_error* grpc_pollset_kick(grpc_pollset* pollset,
+grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker* specific_worker)
GRPC_MUST_USE_RESULT;
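
A sketch of the canonical single-shot work loop under these signatures, and why grpc_pollset_work is GRPC_MUST_USE_RESULT: the caller must consume or log the returned error. gpr_zalloc and GRPC_LOG_IF_ERROR are assumed from elsewhere in the tree, deadline is a caller-supplied grpc_millis, and the shutdown/destroy tail is elided:

  grpc_pollset* ps = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
  gpr_mu* mu;
  grpc_pollset_init(ps, &mu);

  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_mu_lock(mu);
  grpc_pollset_worker* worker = nullptr;
  /* may drop and re-take mu, and may run closures queued on exec_ctx */
  GRPC_LOG_IF_ERROR("pollset_work",
                    grpc_pollset_work(&exec_ctx, ps, &worker, deadline));
  gpr_mu_unlock(mu);
  grpc_exec_ctx_finish(&exec_ctx);
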
diff --git a/src/core/lib/iomgr/pollset_set.h b/src/core/lib/iomgr/pollset_set.h
index a94d0afe75..089c15cc94 100644
--- a/src/core/lib/iomgr/pollset_set.h
+++ b/src/core/lib/iomgr/pollset_set.h
@@ -29,14 +29,19 @@
typedef struct grpc_pollset_set grpc_pollset_set;
grpc_pollset_set* grpc_pollset_set_create(void);
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set);
-void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
+void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set);
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset);
-void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset);
-void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item);
-void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item);
#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_SET_H */
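
The same threading applied to a pollset_set's life cycle; a minimal sketch using only the declarations above, with pollset supplied by the caller:

  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_set* pss = grpc_pollset_set_create();
  grpc_pollset_set_add_pollset(&exec_ctx, pss, pollset);
  /* ... pollers of pss now also cover 'pollset' ... */
  grpc_pollset_set_del_pollset(&exec_ctx, pss, pollset);
  grpc_pollset_set_destroy(&exec_ctx, pss);
  grpc_exec_ctx_finish(&exec_ctx);
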
diff --git a/src/core/lib/iomgr/pollset_set_uv.cc b/src/core/lib/iomgr/pollset_set_uv.cc
index ac5dade8a5..90186edbb7 100644
--- a/src/core/lib/iomgr/pollset_set_uv.cc
+++ b/src/core/lib/iomgr/pollset_set_uv.cc
@@ -26,18 +26,23 @@ grpc_pollset_set* grpc_pollset_set_create(void) {
return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
}
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set) {}
-void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {}
-void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {}
-void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {}
-void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {}
#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/pollset_set_windows.cc b/src/core/lib/iomgr/pollset_set_windows.cc
index 85edc9dee1..2105a47ad4 100644
--- a/src/core/lib/iomgr/pollset_set_windows.cc
+++ b/src/core/lib/iomgr/pollset_set_windows.cc
@@ -27,18 +27,23 @@ grpc_pollset_set* grpc_pollset_set_create(void) {
return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
}
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set) {}
-void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {}
-void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {}
-void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {}
-void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
grpc_pollset_set* item) {}
#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/pollset_uv.cc b/src/core/lib/iomgr/pollset_uv.cc
index d9e5ad81be..16132f3a80 100644
--- a/src/core/lib/iomgr/pollset_uv.cc
+++ b/src/core/lib/iomgr/pollset_uv.cc
@@ -88,7 +88,8 @@ void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
pollset->shutting_down = 0;
}
-void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_ASSERT(!pollset->shutting_down);
GRPC_UV_ASSERT_SAME_THREAD();
pollset->shutting_down = 1;
@@ -99,10 +100,10 @@ void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
// kick the loop once
uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
}
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
}
-void grpc_pollset_destroy(grpc_pollset* pollset) {
+void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GRPC_UV_ASSERT_SAME_THREAD();
uv_close((uv_handle_t*)pollset->timer, timer_close_cb);
// timer.data is a boolean indicating that the timer has finished closing
@@ -114,14 +115,14 @@ void grpc_pollset_destroy(grpc_pollset* pollset) {
}
}
-grpc_error* grpc_pollset_work(grpc_pollset* pollset,
+grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
uint64_t timeout;
GRPC_UV_ASSERT_SAME_THREAD();
gpr_mu_unlock(&grpc_polling_mu);
if (grpc_pollset_work_run_loop) {
- grpc_millis now = grpc_core::ExecCtx::Get()->Now();
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
if (deadline >= now) {
timeout = deadline - now;
} else {
@@ -139,14 +140,14 @@ grpc_error* grpc_pollset_work(grpc_pollset* pollset,
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
}
}
- if (!grpc_closure_list_empty(*grpc_core::ExecCtx::Get()->closure_list())) {
- grpc_core::ExecCtx::Get()->Flush();
+ if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ grpc_exec_ctx_flush(exec_ctx);
}
gpr_mu_lock(&grpc_polling_mu);
return GRPC_ERROR_NONE;
}
-grpc_error* grpc_pollset_kick(grpc_pollset* pollset,
+grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker* specific_worker) {
GRPC_UV_ASSERT_SAME_THREAD();
uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
diff --git a/src/core/lib/iomgr/pollset_windows.cc b/src/core/lib/iomgr/pollset_windows.cc
index 6ef949aad7..95dd7d7ddd 100644
--- a/src/core/lib/iomgr/pollset_windows.cc
+++ b/src/core/lib/iomgr/pollset_windows.cc
@@ -92,19 +92,20 @@ void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
&pollset->root_worker;
}
-void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
pollset->shutting_down = 1;
- grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
pollset->on_shutdown = closure;
}
}
-void grpc_pollset_destroy(grpc_pollset* pollset) {}
+void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {}
-grpc_error* grpc_pollset_work(grpc_pollset* pollset,
+grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
grpc_pollset_worker worker;
@@ -125,8 +126,8 @@ grpc_error* grpc_pollset_work(grpc_pollset* pollset,
pollset->is_iocp_worker = 1;
g_active_poller = &worker;
gpr_mu_unlock(&grpc_polling_mu);
- grpc_iocp_work(deadline);
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_iocp_work(exec_ctx, deadline);
+ grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&grpc_polling_mu);
pollset->is_iocp_worker = 0;
g_active_poller = NULL;
@@ -144,7 +145,7 @@ grpc_error* grpc_pollset_work(grpc_pollset* pollset,
}
if (pollset->shutting_down && pollset->on_shutdown != NULL) {
- GRPC_CLOSURE_SCHED(pollset->on_shutdown, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE);
pollset->on_shutdown = NULL;
}
goto done;
@@ -157,18 +158,18 @@ grpc_error* grpc_pollset_work(grpc_pollset* pollset,
while (!worker.kicked) {
if (gpr_cv_wait(&worker.cv, &grpc_polling_mu,
grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
- grpc_core::ExecCtx::Get()->InvalidateNow();
+ grpc_exec_ctx_invalidate_now(exec_ctx);
break;
}
- grpc_core::ExecCtx::Get()->InvalidateNow();
+ grpc_exec_ctx_invalidate_now(exec_ctx);
}
} else {
pollset->kicked_without_pollers = 0;
}
done:
- if (!grpc_closure_list_empty(*grpc_core::ExecCtx::Get()->closure_list())) {
+ if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
gpr_mu_unlock(&grpc_polling_mu);
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&grpc_polling_mu);
}
if (added_worker) {
@@ -180,7 +181,7 @@ done:
return GRPC_ERROR_NONE;
}
-grpc_error* grpc_pollset_kick(grpc_pollset* p,
+grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
grpc_pollset_worker* specific_worker) {
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
@@ -208,7 +209,7 @@ grpc_error* grpc_pollset_kick(grpc_pollset* p,
specific_worker =
pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
if (specific_worker != NULL) {
- grpc_pollset_kick(p, specific_worker);
+ grpc_pollset_kick(exec_ctx, p, specific_worker);
} else if (p->is_iocp_worker) {
grpc_iocp_kick();
} else {
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index 12fc2ed088..5105020404 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -38,7 +38,8 @@ typedef struct {
/* Asynchronously resolve addr. Use default_port if a port isn't designated
in addr, otherwise use the port in addr. */
/* TODO(ctiller): add a timeout here */
-extern void (*grpc_resolve_address)(const char* addr, const char* default_port,
+extern void (*grpc_resolve_address)(grpc_exec_ctx* exec_ctx, const char* addr,
+ const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
grpc_resolved_addresses** addresses);
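
A sketch of a resolver caller under the new signature. With this revert, iomgr callbacks again take the exec_ctx as their first argument; the address and default port below are illustrative, and interested_parties comes from the caller:

  static void on_resolved(grpc_exec_ctx* exec_ctx, void* arg,
                          grpc_error* error) {
    grpc_resolved_addresses* addrs = *(grpc_resolved_addresses**)arg;
    if (error == GRPC_ERROR_NONE && addrs != nullptr) {
      /* use addrs->addrs[0 .. addrs->naddrs) */
      grpc_resolved_addresses_destroy(addrs);
    }
  }

  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_resolved_addresses* addrs = nullptr;
  grpc_closure on_done;
  GRPC_CLOSURE_INIT(&on_done, on_resolved, &addrs, grpc_schedule_on_exec_ctx);
  grpc_resolve_address(&exec_ctx, "example.org:50051", "443",
                       interested_parties, &on_done, &addrs);
  grpc_exec_ctx_finish(&exec_ctx); /* on_resolved fires later, from the
                                      resolving thread's own exec_ctx */
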
diff --git a/src/core/lib/iomgr/resolve_address_posix.cc b/src/core/lib/iomgr/resolve_address_posix.cc
index cc3d4fd7cf..fb5fa9d422 100644
--- a/src/core/lib/iomgr/resolve_address_posix.cc
+++ b/src/core/lib/iomgr/resolve_address_posix.cc
@@ -42,7 +42,6 @@
static grpc_error* blocking_resolve_address_impl(
const char* name, const char* default_port,
grpc_resolved_addresses** addresses) {
- grpc_core::ExecCtx exec_ctx;
struct addrinfo hints;
struct addrinfo *result = nullptr, *resp;
char* host;
@@ -82,7 +81,7 @@ static grpc_error* blocking_resolve_address_impl(
GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, port, &hints, &result);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
if (s != 0) {
/* Retry if well-known service name is recognized */
@@ -91,7 +90,7 @@ static grpc_error* blocking_resolve_address_impl(
if (strcmp(port, svc[i][0]) == 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, svc[i][1], &hints, &result);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
break;
}
}
@@ -153,10 +152,12 @@ typedef struct {
/* Callback to be passed to grpc_executor to asynchronously run
* grpc_blocking_resolve_address */
-static void do_request_thread(void* rp, grpc_error* error) {
+static void do_request_thread(grpc_exec_ctx* exec_ctx, void* rp,
+ grpc_error* error) {
request* r = (request*)rp;
- GRPC_CLOSURE_SCHED(r->on_done, grpc_blocking_resolve_address(
- r->name, r->default_port, r->addrs_out));
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, r->on_done,
+ grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
gpr_free(r->name);
gpr_free(r->default_port);
gpr_free(r);
@@ -169,7 +170,8 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
gpr_free(addrs);
}
-static void resolve_address_impl(const char* name, const char* default_port,
+static void resolve_address_impl(grpc_exec_ctx* exec_ctx, const char* name,
+ const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
grpc_resolved_addresses** addrs) {
@@ -180,11 +182,11 @@ static void resolve_address_impl(const char* name, const char* default_port,
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
r->addrs_out = addrs;
- GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
}
void (*grpc_resolve_address)(
- const char* name, const char* default_port,
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_resolved_addresses** addrs) = resolve_address_impl;
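
The scheduler attached to request_closure is what makes this asynchronous: the closure is initialized, in lines elided from this hunk, against the executor rather than grpc_schedule_on_exec_ctx, so the GRPC_CLOSURE_SCHED above hands the blocking getaddrinfo to an executor thread. A sketch of that wiring; the grpc_executor_scheduler(GRPC_EXECUTOR_SHORT) spelling is an assumption from this tree, not shown in this hunk:

  /* assumption: executor scheduler wiring as elsewhere in this tree */
  GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
  GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
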
diff --git a/src/core/lib/iomgr/resolve_address_uv.cc b/src/core/lib/iomgr/resolve_address_uv.cc
index 3eab04f3de..6d09fd1d02 100644
--- a/src/core/lib/iomgr/resolve_address_uv.cc
+++ b/src/core/lib/iomgr/resolve_address_uv.cc
@@ -114,7 +114,7 @@ static grpc_error* handle_addrinfo_result(int status, struct addrinfo* result,
static void getaddrinfo_callback(uv_getaddrinfo_t* req, int status,
struct addrinfo* res) {
request* r = (request*)req->data;
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_error* error;
int retry_status;
char* port = r->port;
@@ -130,8 +130,8 @@ static void getaddrinfo_callback(uv_getaddrinfo_t* req, int status,
/* Either no retry was attempted, or the retry failed. Either way, the
original error probably has more interesting information */
error = handle_addrinfo_result(status, res, r->addresses);
- GRPC_CLOSURE_SCHED(r->on_done, error);
-
+ GRPC_CLOSURE_SCHED(&exec_ctx, r->on_done, error);
+ grpc_exec_ctx_finish(&exec_ctx);
gpr_free(r->hints);
gpr_free(r->host);
gpr_free(r->port);
@@ -224,7 +224,8 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
gpr_free(addrs);
}
-static void resolve_address_impl(const char* name, const char* default_port,
+static void resolve_address_impl(grpc_exec_ctx* exec_ctx, const char* name,
+ const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
grpc_resolved_addresses** addrs) {
@@ -238,7 +239,7 @@ static void resolve_address_impl(const char* name, const char* default_port,
GRPC_UV_ASSERT_SAME_THREAD();
err = try_split_host_port(name, default_port, &host, &port);
if (err != GRPC_ERROR_NONE) {
- GRPC_CLOSURE_SCHED(on_done, err);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, err);
gpr_free(host);
gpr_free(port);
return;
@@ -267,7 +268,7 @@ static void resolve_address_impl(const char* name, const char* default_port,
err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR,
grpc_slice_from_static_string(uv_strerror(s)));
- GRPC_CLOSURE_SCHED(on_done, err);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, err);
gpr_free(r);
gpr_free(req);
gpr_free(hints);
@@ -277,7 +278,7 @@ static void resolve_address_impl(const char* name, const char* default_port,
}
void (*grpc_resolve_address)(
- const char* name, const char* default_port,
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_resolved_addresses** addrs) = resolve_address_impl;
diff --git a/src/core/lib/iomgr/resolve_address_windows.cc b/src/core/lib/iomgr/resolve_address_windows.cc
index ccb1dae689..d9fc17a9f4 100644
--- a/src/core/lib/iomgr/resolve_address_windows.cc
+++ b/src/core/lib/iomgr/resolve_address_windows.cc
@@ -51,7 +51,6 @@ typedef struct {
static grpc_error* blocking_resolve_address_impl(
const char* name, const char* default_port,
grpc_resolved_addresses** addresses) {
- grpc_core::ExecCtx exec_ctx;
struct addrinfo hints;
struct addrinfo *result = NULL, *resp;
char* host;
@@ -88,7 +87,7 @@ static grpc_error* blocking_resolve_address_impl(
GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, port, &hints, &result);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
if (s != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "getaddrinfo");
goto done;
@@ -133,7 +132,8 @@ grpc_error* (*grpc_blocking_resolve_address)(
/* Callback to be passed to grpc_executor to asynchronously run
* grpc_blocking_resolve_address */
-static void do_request_thread(void* rp, grpc_error* error) {
+static void do_request_thread(grpc_exec_ctx* exec_ctx, void* rp,
+ grpc_error* error) {
request* r = (request*)rp;
if (error == GRPC_ERROR_NONE) {
error =
@@ -141,7 +141,7 @@ static void do_request_thread(void* rp, grpc_error* error) {
} else {
GRPC_ERROR_REF(error);
}
- GRPC_CLOSURE_SCHED(r->on_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, error);
gpr_free(r->name);
gpr_free(r->default_port);
gpr_free(r);
@@ -154,7 +154,8 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
gpr_free(addrs);
}
-static void resolve_address_impl(const char* name, const char* default_port,
+static void resolve_address_impl(grpc_exec_ctx* exec_ctx, const char* name,
+ const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
grpc_resolved_addresses** addresses) {
@@ -165,11 +166,11 @@ static void resolve_address_impl(const char* name, const char* default_port,
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
r->addresses = addresses;
- GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
}
void (*grpc_resolve_address)(
- const char* name, const char* default_port,
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_resolved_addresses** addresses) = resolve_address_impl;
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index cabe28e4e6..ccd8d9f379 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -154,7 +154,8 @@ struct grpc_resource_quota {
char* name;
};
-static void ru_unref_by(grpc_resource_user* resource_user, gpr_atm amount);
+static void ru_unref_by(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, gpr_atm amount);
/*******************************************************************************
* list management
@@ -238,31 +239,35 @@ static void rulist_remove(grpc_resource_user* resource_user, grpc_rulist list) {
* resource quota state machine
*/
-static bool rq_alloc(grpc_resource_quota* resource_quota);
+static bool rq_alloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota);
static bool rq_reclaim_from_per_user_free_pool(
- grpc_resource_quota* resource_quota);
-static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive);
+ grpc_exec_ctx* exec_ctx, grpc_resource_quota* resource_quota);
+static bool rq_reclaim(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota, bool destructive);
-static void rq_step(void* rq, grpc_error* error) {
+static void rq_step(grpc_exec_ctx* exec_ctx, void* rq, grpc_error* error) {
grpc_resource_quota* resource_quota = (grpc_resource_quota*)rq;
resource_quota->step_scheduled = false;
do {
- if (rq_alloc(resource_quota)) goto done;
- } while (rq_reclaim_from_per_user_free_pool(resource_quota));
+ if (rq_alloc(exec_ctx, resource_quota)) goto done;
+ } while (rq_reclaim_from_per_user_free_pool(exec_ctx, resource_quota));
- if (!rq_reclaim(resource_quota, false)) {
- rq_reclaim(resource_quota, true);
+ if (!rq_reclaim(exec_ctx, resource_quota, false)) {
+ rq_reclaim(exec_ctx, resource_quota, true);
}
done:
- grpc_resource_quota_unref_internal(resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
}
-static void rq_step_sched(grpc_resource_quota* resource_quota) {
+static void rq_step_sched(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota) {
if (resource_quota->step_scheduled) return;
resource_quota->step_scheduled = true;
grpc_resource_quota_ref_internal(resource_quota);
- GRPC_CLOSURE_SCHED(&resource_quota->rq_step_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &resource_quota->rq_step_closure,
+ GRPC_ERROR_NONE);
}
/* update the atomically available resource estimate - use no barriers since
@@ -281,7 +286,8 @@ static void rq_update_estimate(grpc_resource_quota* resource_quota) {
}
/* returns true if all allocations are completed */
-static bool rq_alloc(grpc_resource_quota* resource_quota) {
+static bool rq_alloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota) {
grpc_resource_user* resource_user;
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION))) {
@@ -301,9 +307,9 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
int64_t aborted_allocations = resource_user->outstanding_allocations;
resource_user->outstanding_allocations = 0;
resource_user->free_pool += aborted_allocations;
- GRPC_CLOSURE_LIST_SCHED(&resource_user->on_allocated);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
gpr_mu_unlock(&resource_user->mu);
- ru_unref_by(resource_user, (gpr_atm)aborted_allocations);
+ ru_unref_by(exec_ctx, resource_user, (gpr_atm)aborted_allocations);
continue;
}
if (resource_user->free_pool < 0 &&
@@ -327,7 +333,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
if (resource_user->free_pool >= 0) {
resource_user->allocating = false;
resource_user->outstanding_allocations = 0;
- GRPC_CLOSURE_LIST_SCHED(&resource_user->on_allocated);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
gpr_mu_unlock(&resource_user->mu);
} else {
rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
@@ -340,7 +346,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
/* returns true if any memory could be reclaimed from buffers */
static bool rq_reclaim_from_per_user_free_pool(
- grpc_resource_quota* resource_quota) {
+ grpc_exec_ctx* exec_ctx, grpc_resource_quota* resource_quota) {
grpc_resource_user* resource_user;
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_NON_EMPTY_FREE_POOL))) {
@@ -367,7 +373,8 @@ static bool rq_reclaim_from_per_user_free_pool(
}
/* returns true if reclamation is proceeding */
-static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive) {
+static bool rq_reclaim(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota, bool destructive) {
if (resource_quota->reclaiming) return true;
grpc_rulist list = destructive ? GRPC_RULIST_RECLAIMER_DESTRUCTIVE
: GRPC_RULIST_RECLAIMER_BENIGN;
@@ -385,7 +392,7 @@ static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive) {
resource_quota->debug_only_last_reclaimer_resource_user = resource_user;
resource_quota->debug_only_last_initiated_reclaimer = c;
resource_user->reclaimers[destructive] = nullptr;
- GRPC_CLOSURE_RUN(c, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_NONE);
return true;
}
@@ -405,10 +412,10 @@ static void ru_slice_ref(void* p) {
gpr_ref(&rc->refs);
}
-static void ru_slice_unref(void* p) {
+static void ru_slice_unref(grpc_exec_ctx* exec_ctx, void* p) {
ru_slice_refcount* rc = (ru_slice_refcount*)p;
if (gpr_unref(&rc->refs)) {
- grpc_resource_user_free(rc->resource_user, rc->size);
+ grpc_resource_user_free(exec_ctx, rc->resource_user, rc->size);
gpr_free(rc);
}
}
@@ -438,57 +445,61 @@ static grpc_slice ru_slice_create(grpc_resource_user* resource_user,
* the combiner
*/
-static void ru_allocate(void* ru, grpc_error* error) {
+static void ru_allocate(grpc_exec_ctx* exec_ctx, void* ru, grpc_error* error) {
grpc_resource_user* resource_user = (grpc_resource_user*)ru;
if (rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION)) {
- rq_step_sched(resource_user->resource_quota);
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
}
rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
}
-static void ru_add_to_free_pool(void* ru, grpc_error* error) {
+static void ru_add_to_free_pool(grpc_exec_ctx* exec_ctx, void* ru,
+ grpc_error* error) {
grpc_resource_user* resource_user = (grpc_resource_user*)ru;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
GRPC_RULIST_NON_EMPTY_FREE_POOL)) {
- rq_step_sched(resource_user->resource_quota);
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
}
rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL);
}
-static bool ru_post_reclaimer(grpc_resource_user* resource_user,
+static bool ru_post_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user,
bool destructive) {
grpc_closure* closure = resource_user->new_reclaimers[destructive];
GPR_ASSERT(closure != nullptr);
resource_user->new_reclaimers[destructive] = nullptr;
GPR_ASSERT(resource_user->reclaimers[destructive] == nullptr);
if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CANCELLED);
return false;
}
resource_user->reclaimers[destructive] = closure;
return true;
}
-static void ru_post_benign_reclaimer(void* ru, grpc_error* error) {
+static void ru_post_benign_reclaimer(grpc_exec_ctx* exec_ctx, void* ru,
+ grpc_error* error) {
grpc_resource_user* resource_user = (grpc_resource_user*)ru;
- if (!ru_post_reclaimer(resource_user, false)) return;
+ if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
GRPC_RULIST_NON_EMPTY_FREE_POOL) &&
rulist_empty(resource_user->resource_quota,
GRPC_RULIST_RECLAIMER_BENIGN)) {
- rq_step_sched(resource_user->resource_quota);
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
}
rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
}
-static void ru_post_destructive_reclaimer(void* ru, grpc_error* error) {
+static void ru_post_destructive_reclaimer(grpc_exec_ctx* exec_ctx, void* ru,
+ grpc_error* error) {
grpc_resource_user* resource_user = (grpc_resource_user*)ru;
- if (!ru_post_reclaimer(resource_user, true)) return;
+ if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
@@ -497,46 +508,51 @@ static void ru_post_destructive_reclaimer(void* ru, grpc_error* error) {
GRPC_RULIST_RECLAIMER_BENIGN) &&
rulist_empty(resource_user->resource_quota,
GRPC_RULIST_RECLAIMER_DESTRUCTIVE)) {
- rq_step_sched(resource_user->resource_quota);
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
}
rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
}
-static void ru_shutdown(void* ru, grpc_error* error) {
+static void ru_shutdown(grpc_exec_ctx* exec_ctx, void* ru, grpc_error* error) {
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
}
grpc_resource_user* resource_user = (grpc_resource_user*)ru;
- GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED);
- GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
+ GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
+ GRPC_ERROR_CANCELLED);
resource_user->reclaimers[0] = nullptr;
resource_user->reclaimers[1] = nullptr;
rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
if (resource_user->allocating) {
- rq_step_sched(resource_user->resource_quota);
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
}
}
-static void ru_destroy(void* ru, grpc_error* error) {
+static void ru_destroy(grpc_exec_ctx* exec_ctx, void* ru, grpc_error* error) {
grpc_resource_user* resource_user = (grpc_resource_user*)ru;
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, (grpc_rulist)i);
}
- GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED);
- GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
+ GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
+ GRPC_ERROR_CANCELLED);
if (resource_user->free_pool != 0) {
resource_user->resource_quota->free_pool += resource_user->free_pool;
- rq_step_sched(resource_user->resource_quota);
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
}
- grpc_resource_quota_unref_internal(resource_user->resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_user->resource_quota);
gpr_mu_destroy(&resource_user->mu);
gpr_free(resource_user->name);
gpr_free(resource_user);
}
-static void ru_allocated_slices(void* arg, grpc_error* error) {
+static void ru_allocated_slices(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
grpc_resource_user_slice_allocator* slice_allocator =
(grpc_resource_user_slice_allocator*)arg;
if (error == GRPC_ERROR_NONE) {
@@ -546,7 +562,7 @@ static void ru_allocated_slices(void* arg, grpc_error* error) {
slice_allocator->length));
}
}
- GRPC_CLOSURE_RUN(&slice_allocator->on_done, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_RUN(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
}
/*******************************************************************************
@@ -560,22 +576,23 @@ typedef struct {
grpc_closure closure;
} rq_resize_args;
-static void rq_resize(void* args, grpc_error* error) {
+static void rq_resize(grpc_exec_ctx* exec_ctx, void* args, grpc_error* error) {
rq_resize_args* a = (rq_resize_args*)args;
int64_t delta = a->size - a->resource_quota->size;
a->resource_quota->size += delta;
a->resource_quota->free_pool += delta;
rq_update_estimate(a->resource_quota);
- rq_step_sched(a->resource_quota);
- grpc_resource_quota_unref_internal(a->resource_quota);
+ rq_step_sched(exec_ctx, a->resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, a->resource_quota);
gpr_free(a);
}
-static void rq_reclamation_done(void* rq, grpc_error* error) {
+static void rq_reclamation_done(grpc_exec_ctx* exec_ctx, void* rq,
+ grpc_error* error) {
grpc_resource_quota* resource_quota = (grpc_resource_quota*)rq;
resource_quota->reclaiming = false;
- rq_step_sched(resource_quota);
- grpc_resource_quota_unref_internal(resource_quota);
+ rq_step_sched(exec_ctx, resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
}
/*******************************************************************************
@@ -611,9 +628,10 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
return resource_quota;
}
-void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) {
+void grpc_resource_quota_unref_internal(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota) {
if (gpr_unref(&resource_quota->refs)) {
- GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota");
+ GRPC_COMBINER_UNREF(exec_ctx, resource_quota->combiner, "resource_quota");
gpr_free(resource_quota->name);
gpr_free(resource_quota);
}
@@ -621,8 +639,9 @@ void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) {
/* Public API */
void grpc_resource_quota_unref(grpc_resource_quota* resource_quota) {
- grpc_core::ExecCtx exec_ctx;
- grpc_resource_quota_unref_internal(resource_quota);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
+ grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota* grpc_resource_quota_ref_internal(
@@ -646,14 +665,15 @@ double grpc_resource_quota_get_memory_pressure(
/* Public API */
void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
size_t size) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
rq_resize_args* a = (rq_resize_args*)gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
a->size = (int64_t)size;
gpr_atm_no_barrier_store(&resource_quota->last_size,
(gpr_atm)GPR_MIN((size_t)GPR_ATM_MAX, size));
GRPC_CLOSURE_INIT(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_SCHED(&a->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
+ grpc_exec_ctx_finish(&exec_ctx);
}
size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota) {
@@ -684,8 +704,8 @@ static void* rq_copy(void* rq) {
return rq;
}
-static void rq_destroy(void* rq) {
- grpc_resource_quota_unref_internal((grpc_resource_quota*)rq);
+static void rq_destroy(grpc_exec_ctx* exec_ctx, void* rq) {
+ grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota*)rq);
}
static int rq_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
@@ -753,12 +773,14 @@ static void ru_ref_by(grpc_resource_user* resource_user, gpr_atm amount) {
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount) != 0);
}
-static void ru_unref_by(grpc_resource_user* resource_user, gpr_atm amount) {
+static void ru_unref_by(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, gpr_atm amount) {
GPR_ASSERT(amount > 0);
gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
GPR_ASSERT(old >= amount);
if (old == amount) {
- GRPC_CLOSURE_SCHED(&resource_user->destroy_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->destroy_closure,
+ GRPC_ERROR_NONE);
}
}
@@ -766,13 +788,16 @@ void grpc_resource_user_ref(grpc_resource_user* resource_user) {
ru_ref_by(resource_user, 1);
}
-void grpc_resource_user_unref(grpc_resource_user* resource_user) {
- ru_unref_by(resource_user, 1);
+void grpc_resource_user_unref(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user) {
+ ru_unref_by(exec_ctx, resource_user, 1);
}
-void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
+void grpc_resource_user_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user) {
if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
GRPC_CLOSURE_SCHED(
+ exec_ctx,
GRPC_CLOSURE_CREATE(
ru_shutdown, resource_user,
grpc_combiner_scheduler(resource_user->resource_quota->combiner)),
@@ -780,7 +805,8 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
}
}
-void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
+void grpc_resource_user_alloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, size_t size,
grpc_closure* optional_on_done) {
gpr_mu_lock(&resource_user->mu);
ru_ref_by(resource_user, (gpr_atm)size);
@@ -796,16 +822,18 @@ void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
GRPC_ERROR_NONE);
if (!resource_user->allocating) {
resource_user->allocating = true;
- GRPC_CLOSURE_SCHED(&resource_user->allocate_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->allocate_closure,
+ GRPC_ERROR_NONE);
}
} else {
resource_user->outstanding_allocations -= (int64_t)size;
- GRPC_CLOSURE_SCHED(optional_on_done, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
}
-void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
+void grpc_resource_user_free(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, size_t size) {
gpr_mu_lock(&resource_user->mu);
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += (int64_t)size;
@@ -818,29 +846,32 @@ void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
if (is_bigger_than_zero && was_zero_or_negative &&
!resource_user->added_to_free_pool) {
resource_user->added_to_free_pool = true;
- GRPC_CLOSURE_SCHED(&resource_user->add_to_free_pool_closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->add_to_free_pool_closure,
GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
- ru_unref_by(resource_user, (gpr_atm)size);
+ ru_unref_by(exec_ctx, resource_user, (gpr_atm)size);
}
-void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user,
bool destructive,
grpc_closure* closure) {
GPR_ASSERT(resource_user->new_reclaimers[destructive] == nullptr);
resource_user->new_reclaimers[destructive] = closure;
- GRPC_CLOSURE_SCHED(&resource_user->post_reclaimer_closure[destructive],
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ &resource_user->post_reclaimer_closure[destructive],
GRPC_ERROR_NONE);
}
-void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) {
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user) {
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name, resource_user->name);
}
GRPC_CLOSURE_SCHED(
- &resource_user->resource_quota->rq_reclamation_done_closure,
+ exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure,
GRPC_ERROR_NONE);
}
@@ -855,11 +886,12 @@ void grpc_resource_user_slice_allocator_init(
}
void grpc_resource_user_alloc_slices(
+ grpc_exec_ctx* exec_ctx,
grpc_resource_user_slice_allocator* slice_allocator, size_t length,
size_t count, grpc_slice_buffer* dest) {
slice_allocator->length = length;
slice_allocator->count = count;
slice_allocator->dest = dest;
- grpc_resource_user_alloc(slice_allocator->resource_user, count * length,
- &slice_allocator->on_allocated);
+ grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user,
+ count * length, &slice_allocator->on_allocated);
}
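
Seen from the resource user's side, rq_reclaim and rq_reclamation_done above define a strict contract: a posted reclaimer runs at most once, must hand control back via grpc_resource_user_finish_reclamation when it actually ran, and is instead scheduled with GRPC_ERROR_CANCELLED at shutdown. A hedged sketch (ru and the surrounding exec_ctx come from the caller; the scheduler choice is illustrative):

  static void benign_reclaimer(grpc_exec_ctx* exec_ctx, void* arg,
                               grpc_error* error) {
    grpc_resource_user* ru = (grpc_resource_user*)arg;
    if (error != GRPC_ERROR_NONE) return; /* cancelled at shutdown */
    /* release caches here, returning bytes via grpc_resource_user_free(...) */
    grpc_resource_user_finish_reclamation(exec_ctx, ru); /* mandatory */
  }

  grpc_closure reclaimer;
  GRPC_CLOSURE_INIT(&reclaimer, benign_reclaimer, ru,
                    grpc_schedule_on_exec_ctx);
  grpc_resource_user_post_reclaimer(&exec_ctx, ru, false /* benign */,
                                    &reclaimer);
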
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 39e3aabf18..787370307a 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -65,7 +65,8 @@ extern grpc_core::TraceFlag grpc_resource_quota_trace;
grpc_resource_quota* grpc_resource_quota_ref_internal(
grpc_resource_quota* resource_quota);
-void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota);
+void grpc_resource_quota_unref_internal(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota);
grpc_resource_quota* grpc_resource_quota_from_channel_args(
const grpc_channel_args* channel_args);
@@ -88,26 +89,32 @@ grpc_resource_quota* grpc_resource_user_quota(
grpc_resource_user* resource_user);
void grpc_resource_user_ref(grpc_resource_user* resource_user);
-void grpc_resource_user_unref(grpc_resource_user* resource_user);
-void grpc_resource_user_shutdown(grpc_resource_user* resource_user);
+void grpc_resource_user_unref(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user);
+void grpc_resource_user_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user);
/* Allocate from the resource user (and its quota).
If optional_on_done is NULL, then allocate immediately. This may push the
quota over-limit, at which point reclamation will kick in.
If optional_on_done is non-NULL, it will be scheduled when the allocation has
been granted by the quota. */
-void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
+void grpc_resource_user_alloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, size_t size,
grpc_closure* optional_on_done);
/* Release memory back to the quota */
-void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size);
+void grpc_resource_user_free(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, size_t size);
/* Post a memory reclaimer to the resource user. Only one benign and one
destructive reclaimer can be posted at once. When executed, the reclaimer
MUST call grpc_resource_user_finish_reclamation before it completes, to
return control to the resource quota. */
-void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user,
bool destructive, grpc_closure* closure);
/* Finish a reclamation step */
-void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user);
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user);
/* Helper to allocate slices from a resource user */
typedef struct grpc_resource_user_slice_allocator {
@@ -134,11 +141,13 @@ void grpc_resource_user_slice_allocator_init(
/* Allocate \a count slices of length \a length into \a dest. Only one request
can be outstanding at a time. */
void grpc_resource_user_alloc_slices(
+ grpc_exec_ctx* exec_ctx,
grpc_resource_user_slice_allocator* slice_allocator, size_t length,
size_t count, grpc_slice_buffer* dest);
/* Allocate one slice of length \a size synchronously. */
-grpc_slice grpc_resource_user_slice_malloc(grpc_resource_user* resource_user,
+grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user,
size_t size);
#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
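
A sketch of the two allocation modes the grpc_resource_user_alloc comment above distinguishes; ru, the exec_ctx, and the 64 KiB figure are illustrative:

  static void on_mem_granted(grpc_exec_ctx* exec_ctx, void* arg,
                             grpc_error* error) {
    /* the 64 KiB requested below has now been debited from the quota */
  }

  /* async: the debit is granted later, once the quota admits it */
  grpc_closure on_granted;
  GRPC_CLOSURE_INIT(&on_granted, on_mem_granted, nullptr,
                    grpc_schedule_on_exec_ctx);
  grpc_resource_user_alloc(&exec_ctx, ru, 64 * 1024, &on_granted);

  /* sync: debit immediately; may push the quota over-limit and
     trigger reclamation in the background */
  grpc_resource_user_alloc(&exec_ctx, ru, 64 * 1024, nullptr);

  /* every successful alloc is eventually matched by a free of equal size */
  grpc_resource_user_free(&exec_ctx, ru, 64 * 1024);
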
diff --git a/src/core/lib/iomgr/socket_factory_posix.cc b/src/core/lib/iomgr/socket_factory_posix.cc
index bc7d0b12f3..40bfecd4c2 100644
--- a/src/core/lib/iomgr/socket_factory_posix.cc
+++ b/src/core/lib/iomgr/socket_factory_posix.cc
@@ -72,7 +72,7 @@ static void* socket_factory_arg_copy(void* p) {
return grpc_socket_factory_ref((grpc_socket_factory*)p);
}
-static void socket_factory_arg_destroy(void* p) {
+static void socket_factory_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
grpc_socket_factory_unref((grpc_socket_factory*)p);
}
diff --git a/src/core/lib/iomgr/socket_mutator.cc b/src/core/lib/iomgr/socket_mutator.cc
index 9d30e46b6b..ff6c0c70d8 100644
--- a/src/core/lib/iomgr/socket_mutator.cc
+++ b/src/core/lib/iomgr/socket_mutator.cc
@@ -63,7 +63,7 @@ static void* socket_mutator_arg_copy(void* p) {
return grpc_socket_mutator_ref((grpc_socket_mutator*)p);
}
-static void socket_mutator_arg_destroy(void* p) {
+static void socket_mutator_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
grpc_socket_mutator_unref((grpc_socket_mutator*)p);
}
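
These destroy callbacks grow an exec_ctx because they sit in a grpc_arg_pointer_vtable, whose destroy hook now threads the context. A sketch of the vtable this function belongs to; the cmp entry's name is an assumption, the other two are defined in this file:

  static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
      socket_mutator_arg_copy, socket_mutator_arg_destroy,
      socket_mutator_cmp /* assumed name */};
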
diff --git a/src/core/lib/iomgr/socket_windows.cc b/src/core/lib/iomgr/socket_windows.cc
index 9bb6a75dd8..aee80f4b4c 100644
--- a/src/core/lib/iomgr/socket_windows.cc
+++ b/src/core/lib/iomgr/socket_windows.cc
@@ -109,34 +109,37 @@ void grpc_winsocket_destroy(grpc_winsocket* winsocket) {
-) The IOCP already completed in the background, and we need to call
the callback now.
-) The IOCP hasn't completed yet, and we're queuing it for later. */
-static void socket_notify_on_iocp(grpc_winsocket* socket, grpc_closure* closure,
+static void socket_notify_on_iocp(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* socket, grpc_closure* closure,
grpc_winsocket_callback_info* info) {
GPR_ASSERT(info->closure == NULL);
gpr_mu_lock(&socket->state_mu);
if (info->has_pending_iocp) {
info->has_pending_iocp = 0;
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
info->closure = closure;
}
gpr_mu_unlock(&socket->state_mu);
}
-void grpc_socket_notify_on_write(grpc_winsocket* socket,
+void grpc_socket_notify_on_write(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* socket,
grpc_closure* closure) {
- socket_notify_on_iocp(socket, closure, &socket->write_info);
+ socket_notify_on_iocp(exec_ctx, socket, closure, &socket->write_info);
}
-void grpc_socket_notify_on_read(grpc_winsocket* socket, grpc_closure* closure) {
- socket_notify_on_iocp(socket, closure, &socket->read_info);
+void grpc_socket_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
+ grpc_closure* closure) {
+ socket_notify_on_iocp(exec_ctx, socket, closure, &socket->read_info);
}
-void grpc_socket_become_ready(grpc_winsocket* socket,
+void grpc_socket_become_ready(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
grpc_winsocket_callback_info* info) {
GPR_ASSERT(!info->has_pending_iocp);
gpr_mu_lock(&socket->state_mu);
if (info->closure) {
- GRPC_CLOSURE_SCHED(info->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, info->closure, GRPC_ERROR_NONE);
info->closure = NULL;
} else {
info->has_pending_iocp = 1;
diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h
index cb28f2b8df..04e0a89d70 100644
--- a/src/core/lib/iomgr/socket_windows.h
+++ b/src/core/lib/iomgr/socket_windows.h
@@ -98,13 +98,16 @@ void grpc_winsocket_shutdown(grpc_winsocket* socket);
/* Destroy a socket. Should only be called if there's no pending operation. */
void grpc_winsocket_destroy(grpc_winsocket* socket);
-void grpc_socket_notify_on_write(grpc_winsocket* winsocket,
+void grpc_socket_notify_on_write(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* winsocket,
grpc_closure* closure);
-void grpc_socket_notify_on_read(grpc_winsocket* winsocket,
+void grpc_socket_notify_on_read(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* winsocket,
grpc_closure* closure);
-void grpc_socket_become_ready(grpc_winsocket* winsocket,
+void grpc_socket_become_ready(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* winsocket,
grpc_winsocket_callback_info* ci);
#endif
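
A sketch of how the two halves pair up under the new signatures: the transport registers interest, the IOCP poller reports completion, and whichever side arrives second runs the closure (the has_pending_iocp logic in socket_windows.cc above). winsocket and tcp->on_read are illustrative caller state:

  /* transport side: run on_read once the pending WSARecv completes */
  grpc_socket_notify_on_read(exec_ctx, winsocket, &tcp->on_read);

  /* poller side (iocp_windows.cc): after GetQueuedCompletionStatus()
     returns a packet for this socket, mark the read half ready */
  grpc_socket_become_ready(exec_ctx, winsocket, &winsocket->read_info);
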
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index 5f55d30955..75e2fe0f36 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -30,7 +30,8 @@
NULL on failure).
interested_parties points to a set of pollsets that would be interested
in this connection being established (in order to continue their work) */
-void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
+void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* on_connect,
+ grpc_endpoint** endpoint,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 15062a52cd..4cb2ac49d5 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -96,7 +96,7 @@ done:
return err;
}
-static void tc_on_alarm(void* acp, grpc_error* error) {
+static void tc_on_alarm(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
int done;
async_connect* ac = (async_connect*)acp;
if (grpc_tcp_trace.enabled()) {
@@ -107,24 +107,26 @@ static void tc_on_alarm(void* acp, grpc_error* error) {
gpr_mu_lock(&ac->mu);
if (ac->fd != nullptr) {
grpc_fd_shutdown(
- ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect() timed out"));
+ exec_ctx, ac->fd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect() timed out"));
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
- grpc_channel_args_destroy(ac->channel_args);
+ grpc_channel_args_destroy(exec_ctx, ac->channel_args);
gpr_free(ac);
}
}
grpc_endpoint* grpc_tcp_client_create_from_fd(
- grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str) {
- return grpc_tcp_create(fd, channel_args, addr_str);
+ grpc_exec_ctx* exec_ctx, grpc_fd* fd, const grpc_channel_args* channel_args,
+ const char* addr_str) {
+ return grpc_tcp_create(exec_ctx, fd, channel_args, addr_str);
}
-static void on_writable(void* acp, grpc_error* error) {
+static void on_writable(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
async_connect* ac = (async_connect*)acp;
int so_error = 0;
socklen_t so_error_size;
@@ -148,7 +150,7 @@ static void on_writable(void* acp, grpc_error* error) {
ac->fd = nullptr;
gpr_mu_unlock(&ac->mu);
- grpc_timer_cancel(&ac->alarm);
+ grpc_timer_cancel(exec_ctx, &ac->alarm);
gpr_mu_lock(&ac->mu);
if (error != GRPC_ERROR_NONE) {
@@ -170,8 +172,9 @@ static void on_writable(void* acp, grpc_error* error) {
switch (so_error) {
case 0:
- grpc_pollset_set_del_fd(ac->interested_parties, fd);
- *ep = grpc_tcp_client_create_from_fd(fd, ac->channel_args, ac->addr_str);
+ grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
+ *ep = grpc_tcp_client_create_from_fd(exec_ctx, fd, ac->channel_args,
+ ac->addr_str);
fd = nullptr;
break;
case ENOBUFS:
@@ -191,7 +194,7 @@ static void on_writable(void* acp, grpc_error* error) {
don't do that! */
gpr_log(GPR_ERROR, "kernel out of buffers");
gpr_mu_unlock(&ac->mu);
- grpc_fd_notify_on_write(fd, &ac->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, fd, &ac->write_closure);
return;
case ECONNREFUSED:
/* This error shouldn't happen for anything other than connect(). */
@@ -206,8 +209,8 @@ static void on_writable(void* acp, grpc_error* error) {
finish:
if (fd != nullptr) {
- grpc_pollset_set_del_fd(ac->interested_parties, fd);
- grpc_fd_orphan(fd, nullptr, nullptr, false /* already_closed */,
+ grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
+ grpc_fd_orphan(exec_ctx, fd, nullptr, nullptr, false /* already_closed */,
"tcp_client_orphan");
fd = nullptr;
}
@@ -230,13 +233,14 @@ finish:
if (done) {
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
- grpc_channel_args_destroy(ac->channel_args);
+ grpc_channel_args_destroy(exec_ctx, ac->channel_args);
gpr_free(ac);
}
- GRPC_CLOSURE_SCHED(closure, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
}
-static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
+static void tcp_client_connect_impl(grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_endpoint** ep,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
@@ -261,7 +265,7 @@ static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
if (error != GRPC_ERROR_NONE) {
- GRPC_CLOSURE_SCHED(closure, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
return;
}
if (dsmode == GRPC_DSMODE_IPV4) {
@@ -270,7 +274,7 @@ static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
addr = &addr4_copy;
}
if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
- GRPC_CLOSURE_SCHED(closure, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
return;
}
@@ -285,19 +289,20 @@ static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
fdobj = grpc_fd_create(fd, name);
if (err >= 0) {
- *ep = grpc_tcp_client_create_from_fd(fdobj, channel_args, addr_str);
- GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
+ *ep =
+ grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
goto done;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
- grpc_fd_orphan(fdobj, nullptr, nullptr, false /* already_closed */,
- "tcp_client_connect_error");
- GRPC_CLOSURE_SCHED(closure, GRPC_OS_ERROR(errno, "connect"));
+ grpc_fd_orphan(exec_ctx, fdobj, nullptr, nullptr,
+ false /* already_closed */, "tcp_client_connect_error");
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
goto done;
}
- grpc_pollset_set_add_fd(interested_parties, fdobj);
+ grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
ac = (async_connect*)gpr_malloc(sizeof(async_connect));
ac->closure = closure;
@@ -319,8 +324,8 @@ static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
gpr_mu_lock(&ac->mu);
GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);
- grpc_timer_init(&ac->alarm, deadline, &ac->on_alarm);
- grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
+ grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm);
+ grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
gpr_mu_unlock(&ac->mu);
done:
@@ -330,18 +335,19 @@ done:
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
- grpc_closure* closure, grpc_endpoint** ep,
+ grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
grpc_millis deadline) = tcp_client_connect_impl;
-void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
+void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_endpoint** ep,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
grpc_millis deadline) {
- grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args,
- addr, deadline);
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+ channel_args, addr, deadline);
}
#endif
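
A minimal call-site sketch for the revised connect API, assuming the hypothetical
names on_connected/start_connect (the iomgr calls match the signatures above;
GRPC_CLOSURE_CREATE heap-allocates, so the closure outlives this frame):

/* hypothetical sketch -- not part of this patch */
static void on_connected(grpc_exec_ctx* exec_ctx, void* arg,
                         grpc_error* error) {
  grpc_endpoint** ep = (grpc_endpoint**)arg;
  /* on GRPC_ERROR_NONE, *ep holds the connected endpoint */
}

static void start_connect(grpc_endpoint** ep,
                          grpc_pollset_set* interested_parties,
                          const grpc_channel_args* channel_args,
                          const grpc_resolved_address* addr,
                          grpc_millis deadline) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_tcp_client_connect(
      &exec_ctx,
      GRPC_CLOSURE_CREATE(on_connected, ep, grpc_schedule_on_exec_ctx), ep,
      interested_parties, channel_args, addr, deadline);
  grpc_exec_ctx_finish(&exec_ctx); /* drains anything scheduled synchronously */
}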
diff --git a/src/core/lib/iomgr/tcp_client_posix.h b/src/core/lib/iomgr/tcp_client_posix.h
index 7d0f133a6b..2b1fe79e90 100644
--- a/src/core/lib/iomgr/tcp_client_posix.h
+++ b/src/core/lib/iomgr/tcp_client_posix.h
@@ -24,6 +24,7 @@
#include "src/core/lib/iomgr/tcp_client.h"
grpc_endpoint* grpc_tcp_client_create_from_fd(
- grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str);
+ grpc_exec_ctx* exec_ctx, grpc_fd* fd, const grpc_channel_args* channel_args,
+ const char* addr_str);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */
diff --git a/src/core/lib/iomgr/tcp_client_uv.cc b/src/core/lib/iomgr/tcp_client_uv.cc
index 4e9c7cc11d..5cca0c9936 100644
--- a/src/core/lib/iomgr/tcp_client_uv.cc
+++ b/src/core/lib/iomgr/tcp_client_uv.cc
@@ -46,15 +46,17 @@ typedef struct grpc_uv_tcp_connect {
grpc_resource_quota* resource_quota;
} grpc_uv_tcp_connect;
-static void uv_tcp_connect_cleanup(grpc_uv_tcp_connect* connect) {
- grpc_resource_quota_unref_internal(connect->resource_quota);
+static void uv_tcp_connect_cleanup(grpc_exec_ctx* exec_ctx,
+ grpc_uv_tcp_connect* connect) {
+ grpc_resource_quota_unref_internal(exec_ctx, connect->resource_quota);
gpr_free(connect->addr_name);
gpr_free(connect);
}
static void tcp_close_callback(uv_handle_t* handle) { gpr_free(handle); }
-static void uv_tc_on_alarm(void* acp, grpc_error* error) {
+static void uv_tc_on_alarm(grpc_exec_ctx* exec_ctx, void* acp,
+ grpc_error* error) {
int done;
grpc_uv_tcp_connect* connect = (grpc_uv_tcp_connect*)acp;
if (grpc_tcp_trace.enabled()) {
@@ -70,17 +72,17 @@ static void uv_tc_on_alarm(void* acp, grpc_error* error) {
}
done = (--connect->refs == 0);
if (done) {
- uv_tcp_connect_cleanup(connect);
+ uv_tcp_connect_cleanup(exec_ctx, connect);
}
}
static void uv_tc_on_connect(uv_connect_t* req, int status) {
grpc_uv_tcp_connect* connect = (grpc_uv_tcp_connect*)req->data;
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_error* error = GRPC_ERROR_NONE;
int done;
grpc_closure* closure = connect->closure;
- grpc_timer_cancel(&connect->alarm);
+ grpc_timer_cancel(&exec_ctx, &connect->alarm);
if (status == 0) {
*connect->endpoint = grpc_tcp_create(
connect->tcp_handle, connect->resource_quota, connect->addr_name);
@@ -105,13 +107,15 @@ static void uv_tc_on_connect(uv_connect_t* req, int status) {
}
done = (--connect->refs == 0);
if (done) {
- grpc_core::ExecCtx::Get()->Flush();
- uv_tcp_connect_cleanup(connect);
+ grpc_exec_ctx_flush(&exec_ctx);
+ uv_tcp_connect_cleanup(&exec_ctx, connect);
}
- GRPC_CLOSURE_SCHED(closure, error);
+ GRPC_CLOSURE_SCHED(&exec_ctx, closure, error);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
+static void tcp_client_connect_impl(grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_endpoint** ep,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* resolved_addr,
@@ -126,7 +130,7 @@ static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
if (channel_args != NULL) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
- grpc_resource_quota_unref_internal(resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
(grpc_resource_quota*)channel_args->args[i].value.pointer.p);
}
@@ -153,23 +157,24 @@ static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
(const struct sockaddr*)resolved_addr->addr, uv_tc_on_connect);
GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
grpc_schedule_on_exec_ctx);
- grpc_timer_init(&connect->alarm, deadline, &connect->on_alarm);
+ grpc_timer_init(exec_ctx, &connect->alarm, deadline, &connect->on_alarm);
}
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
- grpc_closure* closure, grpc_endpoint** ep,
+ grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
grpc_millis deadline) = tcp_client_connect_impl;
-void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
+void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_endpoint** ep,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
grpc_millis deadline) {
- grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args,
- addr, deadline);
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+ channel_args, addr, deadline);
}
#endif /* GRPC_UV */
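
The uv file above replaces the RAII grpc_core::ExecCtx with an explicit context
that each libuv callback builds and finishes itself, since libuv carries no
ambient context. A minimal sketch of that pattern (the callback name is
hypothetical):

/* hypothetical sketch -- not part of this patch */
static void my_uv_callback(uv_handle_t* handle) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  /* ... call into iomgr with &exec_ctx: cancel timers, SCHED closures ... */
  grpc_exec_ctx_finish(&exec_ctx); /* runs everything queued above */
}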
diff --git a/src/core/lib/iomgr/tcp_client_windows.cc b/src/core/lib/iomgr/tcp_client_windows.cc
index 5521a0a9ae..5e30725e90 100644
--- a/src/core/lib/iomgr/tcp_client_windows.cc
+++ b/src/core/lib/iomgr/tcp_client_windows.cc
@@ -52,12 +52,13 @@ typedef struct {
grpc_channel_args* channel_args;
} async_connect;
-static void async_connect_unlock_and_cleanup(async_connect* ac,
+static void async_connect_unlock_and_cleanup(grpc_exec_ctx* exec_ctx,
+ async_connect* ac,
grpc_winsocket* socket) {
int done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
- grpc_channel_args_destroy(ac->channel_args);
+ grpc_channel_args_destroy(exec_ctx, ac->channel_args);
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_name);
gpr_free(ac);
@@ -65,7 +66,7 @@ static void async_connect_unlock_and_cleanup(async_connect* ac,
if (socket != NULL) grpc_winsocket_destroy(socket);
}
-static void on_alarm(void* acp, grpc_error* error) {
+static void on_alarm(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
async_connect* ac = (async_connect*)acp;
gpr_mu_lock(&ac->mu);
grpc_winsocket* socket = ac->socket;
@@ -73,10 +74,10 @@ static void on_alarm(void* acp, grpc_error* error) {
if (socket != NULL) {
grpc_winsocket_shutdown(socket);
}
- async_connect_unlock_and_cleanup(ac, socket);
+ async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
}
-static void on_connect(void* acp, grpc_error* error) {
+static void on_connect(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
async_connect* ac = (async_connect*)acp;
grpc_endpoint** ep = ac->endpoint;
GPR_ASSERT(*ep == NULL);
@@ -89,7 +90,7 @@ static void on_connect(void* acp, grpc_error* error) {
ac->socket = NULL;
gpr_mu_unlock(&ac->mu);
- grpc_timer_cancel(&ac->alarm);
+ grpc_timer_cancel(exec_ctx, &ac->alarm);
gpr_mu_lock(&ac->mu);
@@ -104,7 +105,8 @@ static void on_connect(void* acp, grpc_error* error) {
if (!wsa_success) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
} else {
- *ep = grpc_tcp_create(socket, ac->channel_args, ac->addr_name);
+ *ep =
+ grpc_tcp_create(exec_ctx, socket, ac->channel_args, ac->addr_name);
socket = NULL;
}
} else {
@@ -112,20 +114,18 @@ static void on_connect(void* acp, grpc_error* error) {
}
}
- async_connect_unlock_and_cleanup(ac, socket);
+ async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
/* If the connection was aborted, the callback was already called when
the deadline was met. */
- GRPC_CLOSURE_SCHED(on_done, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
}
/* Tries to issue one async connection, then schedules both an IOCP
notification request for the connection, and one timeout alert. */
-static void tcp_client_connect_impl(grpc_closure* on_done,
- grpc_endpoint** endpoint,
- grpc_pollset_set* interested_parties,
- const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) {
+static void tcp_client_connect_impl(
+ grpc_exec_ctx* exec_ctx, grpc_closure* on_done, grpc_endpoint** endpoint,
+ grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr, grpc_millis deadline) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
int status;
@@ -205,8 +205,8 @@ static void tcp_client_connect_impl(grpc_closure* on_done,
GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx);
- grpc_timer_init(&ac->alarm, deadline, &ac->on_alarm);
- grpc_socket_notify_on_write(socket, &ac->on_connect);
+ grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm);
+ grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect);
return;
failure:
@@ -222,23 +222,24 @@ failure:
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
- GRPC_CLOSURE_SCHED(on_done, final_error);
+ GRPC_CLOSURE_SCHED(exec_ctx, on_done, final_error);
}
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
- grpc_closure* closure, grpc_endpoint** ep,
+ grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
grpc_millis deadline) = tcp_client_connect_impl;
-void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
+void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_endpoint** ep,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
grpc_millis deadline) {
- grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args,
- addr, deadline);
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+ channel_args, addr, deadline);
}
#endif /* GRPC_WINSOCK_SOCKET */
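
grpc_tcp_client_connect_impl stays a plain function pointer precisely so a test
can interpose its own connector (the comment above notes api_fuzzer.c does). A
hypothetical sketch of such an override, matching the pointer's signature:

/* hypothetical sketch -- not part of this patch */
static void fake_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
                         grpc_endpoint** ep,
                         grpc_pollset_set* interested_parties,
                         const grpc_channel_args* channel_args,
                         const grpc_resolved_address* addr,
                         grpc_millis deadline) {
  *ep = NULL; /* simulate an immediate connect failure */
  GRPC_CLOSURE_SCHED(exec_ctx, closure,
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("fake connect"));
}

static void install_fake_connector(void) {
  grpc_tcp_client_connect_impl = fake_connect;
}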
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index 155329d2e8..d09cfca9af 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -108,31 +108,36 @@ typedef struct backup_poller {
static gpr_atm g_uncovered_notifications_pending;
static gpr_atm g_backup_poller; /* backup_poller* */
-static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
-static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
-static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
+static void tcp_handle_read(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error);
+static void tcp_handle_write(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error);
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx* exec_ctx,
+ void* arg /* grpc_tcp */,
grpc_error* error);
-static void done_poller(void* bp, grpc_error* error_ignored) {
+static void done_poller(grpc_exec_ctx* exec_ctx, void* bp,
+ grpc_error* error_ignored) {
backup_poller* p = (backup_poller*)bp;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
}
- grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
+ grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p));
gpr_free(p);
}
-static void run_poller(void* bp, grpc_error* error_ignored) {
+static void run_poller(grpc_exec_ctx* exec_ctx, void* bp,
+ grpc_error* error_ignored) {
backup_poller* p = (backup_poller*)bp;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
}
gpr_mu_lock(p->pollset_mu);
- grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 13 * GPR_MS_PER_SEC;
- GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS();
+ grpc_millis deadline = grpc_exec_ctx_now(exec_ctx) + 13 * GPR_MS_PER_SEC;
+ GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
GRPC_LOG_IF_ERROR(
"backup_poller:pollset_work",
- grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
+ grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), nullptr, deadline));
gpr_mu_unlock(p->pollset_mu);
/* last "uncovered" notification is the ref that keeps us polling, if we get
* there try a cas to release it */
@@ -147,18 +152,18 @@ static void run_poller(void* bp, grpc_error* error_ignored) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
}
- grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
+ grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx));
} else {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
}
- GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE);
}
}
-static void drop_uncovered(grpc_tcp* tcp) {
+static void drop_uncovered(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller);
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
@@ -169,7 +174,7 @@ static void drop_uncovered(grpc_tcp* tcp) {
GPR_ASSERT(old_count != 1);
}
-static void cover_self(grpc_tcp* tcp) {
+static void cover_self(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
backup_poller* p;
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
@@ -178,7 +183,7 @@ static void cover_self(grpc_tcp* tcp) {
2 + (int)old_count);
}
if (old_count == 0) {
- GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED();
+ GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
p = (backup_poller*)gpr_zalloc(sizeof(*p) + grpc_pollset_size());
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
@@ -186,6 +191,7 @@ static void cover_self(grpc_tcp* tcp) {
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
GRPC_CLOSURE_SCHED(
+ exec_ctx,
GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
GRPC_ERROR_NONE);
@@ -198,38 +204,39 @@ static void cover_self(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
}
- grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
+ grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd);
if (old_count != 0) {
- drop_uncovered(tcp);
+ drop_uncovered(exec_ctx, tcp);
}
}
-static void notify_on_read(grpc_tcp* tcp) {
+static void notify_on_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
}
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
- grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
+ grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
}
-static void notify_on_write(grpc_tcp* tcp) {
+static void notify_on_write(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
}
- cover_self(tcp);
+ cover_self(exec_ctx, tcp);
GRPC_CLOSURE_INIT(&tcp->write_done_closure,
tcp_drop_uncovered_then_handle_write, tcp,
grpc_schedule_on_exec_ctx);
- grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
+ grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure);
}
-static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) {
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
}
- drop_uncovered((grpc_tcp*)arg);
- tcp_handle_write(arg, error);
+ drop_uncovered(exec_ctx, (grpc_tcp*)arg);
+ tcp_handle_write(exec_ctx, arg, error);
}
static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
@@ -275,29 +282,33 @@ static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
grpc_slice_from_copied_string(tcp->peer_string));
}
-static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
-static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
+static void tcp_handle_read(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error);
+static void tcp_handle_write(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error);
-static void tcp_shutdown(grpc_endpoint* ep, grpc_error* why) {
+static void tcp_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why) {
grpc_tcp* tcp = (grpc_tcp*)ep;
- grpc_fd_shutdown(tcp->em_fd, why);
- grpc_resource_user_shutdown(tcp->resource_user);
+ grpc_fd_shutdown(exec_ctx, tcp->em_fd, why);
+ grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
}
-static void tcp_free(grpc_tcp* tcp) {
- grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
+static void tcp_free(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+ grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
false /* already_closed */, "tcp_unref_orphan");
- grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
- grpc_resource_user_unref(tcp->resource_user);
+ grpc_slice_buffer_destroy_internal(exec_ctx, &tcp->last_read_buffer);
+ grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp->peer_string);
gpr_free(tcp);
}
#ifndef NDEBUG
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_UNREF(cl, tcp, reason) \
+ tcp_unref((cl), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
- int line) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ const char* reason, const char* file, int line) {
if (grpc_tcp_trace.enabled()) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -305,7 +316,7 @@ static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
val - 1);
}
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -320,25 +331,26 @@ static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file,
gpr_ref(&tcp->refcount);
}
#else
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_UNREF(cl, tcp, reason) tcp_unref((cl), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_tcp* tcp) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
#endif
-static void tcp_destroy(grpc_endpoint* ep) {
+static void tcp_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp* tcp = (grpc_tcp*)ep;
- grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
- TCP_UNREF(tcp, "destroy");
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &tcp->last_read_buffer);
+ TCP_UNREF(exec_ctx, tcp, "destroy");
}
-static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
+static void call_read_cb(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ grpc_error* error) {
grpc_closure* cb = tcp->read_cb;
if (grpc_tcp_trace.enabled()) {
@@ -357,11 +369,11 @@ static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
tcp->read_cb = nullptr;
tcp->incoming_buffer = nullptr;
- GRPC_CLOSURE_RUN(cb, error);
+ GRPC_CLOSURE_RUN(exec_ctx, cb, error);
}
#define MAX_READ_IOVEC 4
-static void tcp_do_read(grpc_tcp* tcp) {
+static void tcp_do_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@@ -384,12 +396,12 @@ static void tcp_do_read(grpc_tcp* tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
- GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length);
- GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);
+ GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, tcp->incoming_buffer->length);
+ GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, tcp->incoming_buffer->count);
GPR_TIMER_BEGIN("recvmsg", 0);
do {
- GRPC_STATS_INC_SYSCALL_READ();
+ GRPC_STATS_INC_SYSCALL_READ(exec_ctx);
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
GPR_TIMER_END("recvmsg", read_bytes >= 0);
@@ -400,22 +412,24 @@ static void tcp_do_read(grpc_tcp* tcp) {
if (errno == EAGAIN) {
finish_estimate(tcp);
/* We've consumed the edge, request a new one */
- notify_on_read(tcp);
+ notify_on_read(exec_ctx, tcp);
} else {
- grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
- call_read_cb(tcp,
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
+ tcp->incoming_buffer);
+ call_read_cb(exec_ctx, tcp,
tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
- TCP_UNREF(tcp, "read");
+ TCP_UNREF(exec_ctx, tcp, "read");
}
} else if (read_bytes == 0) {
/* 0 read size ==> end of stream */
- grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
call_read_cb(
- tcp, tcp_annotate_error(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
- TCP_UNREF(tcp, "read");
+ exec_ctx, tcp,
+ tcp_annotate_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
+ TCP_UNREF(exec_ctx, tcp, "read");
} else {
- GRPC_STATS_INC_TCP_READ_SIZE(read_bytes);
+ GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, read_bytes);
add_to_estimate(tcp, (size_t)read_bytes);
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
if ((size_t)read_bytes < tcp->incoming_buffer->length) {
@@ -425,47 +439,50 @@ static void tcp_do_read(grpc_tcp* tcp) {
&tcp->last_read_buffer);
}
GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
- call_read_cb(tcp, GRPC_ERROR_NONE);
- TCP_UNREF(tcp, "read");
+ call_read_cb(exec_ctx, tcp, GRPC_ERROR_NONE);
+ TCP_UNREF(exec_ctx, tcp, "read");
}
GPR_TIMER_END("tcp_continue_read", 0);
}
-static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
+static void tcp_read_allocation_done(grpc_exec_ctx* exec_ctx, void* tcpp,
+ grpc_error* error) {
grpc_tcp* tcp = (grpc_tcp*)tcpp;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
- grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
- grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
- call_read_cb(tcp, GRPC_ERROR_REF(error));
- TCP_UNREF(tcp, "read");
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
+ &tcp->last_read_buffer);
+ call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
+ TCP_UNREF(exec_ctx, tcp, "read");
} else {
- tcp_do_read(tcp);
+ tcp_do_read(exec_ctx, tcp);
}
}
-static void tcp_continue_read(grpc_tcp* tcp) {
+static void tcp_continue_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
}
- grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1,
- tcp->incoming_buffer);
+ grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
+ target_read_size, 1, tcp->incoming_buffer);
} else {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
}
- tcp_do_read(tcp);
+ tcp_do_read(exec_ctx, tcp);
}
}
-static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
+static void tcp_handle_read(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error) {
grpc_tcp* tcp = (grpc_tcp*)arg;
GPR_ASSERT(!tcp->finished_edge);
if (grpc_tcp_trace.enabled()) {
@@ -473,35 +490,37 @@ static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
}
if (error != GRPC_ERROR_NONE) {
- grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
- grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
- call_read_cb(tcp, GRPC_ERROR_REF(error));
- TCP_UNREF(tcp, "read");
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
+ &tcp->last_read_buffer);
+ call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
+ TCP_UNREF(exec_ctx, tcp, "read");
} else {
- tcp_continue_read(tcp);
+ tcp_continue_read(exec_ctx, tcp);
}
}
-static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
- grpc_closure* cb) {
+static void tcp_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* incoming_buffer, grpc_closure* cb) {
grpc_tcp* tcp = (grpc_tcp*)ep;
GPR_ASSERT(tcp->read_cb == nullptr);
tcp->read_cb = cb;
tcp->incoming_buffer = incoming_buffer;
- grpc_slice_buffer_reset_and_unref_internal(incoming_buffer);
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, incoming_buffer);
grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = false;
- notify_on_read(tcp);
+ notify_on_read(exec_ctx, tcp);
} else {
- GRPC_CLOSURE_SCHED(&tcp->read_done_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE);
}
}
/* returns true if done, false if pending; if returning true, *error is set */
#define MAX_WRITE_IOVEC 1000
-static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
+static bool tcp_flush(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ grpc_error** error) {
struct msghdr msg;
struct iovec iov[MAX_WRITE_IOVEC];
msg_iovlen_type iov_size;
@@ -543,13 +562,13 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
- GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
- GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);
+ GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, sending_length);
+ GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_size);
GPR_TIMER_BEGIN("sendmsg", 1);
do {
/* TODO(klempner): Cork if this is a partial write */
- GRPC_STATS_INC_SYSCALL_WRITE();
+ GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx);
sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS);
} while (sent_length < 0 && errno == EINTR);
GPR_TIMER_END("sendmsg", 0);
@@ -561,18 +580,20 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
// point
for (size_t idx = 0; idx < unwind_slice_idx; ++idx) {
grpc_slice_unref_internal(
- grpc_slice_buffer_take_first(tcp->outgoing_buffer));
+ exec_ctx, grpc_slice_buffer_take_first(tcp->outgoing_buffer));
}
return false;
} else if (errno == EPIPE) {
*error = grpc_error_set_int(GRPC_OS_ERROR(errno, "sendmsg"),
GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAVAILABLE);
- grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
+ tcp->outgoing_buffer);
return true;
} else {
*error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
- grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
+ tcp->outgoing_buffer);
return true;
}
}
@@ -595,29 +616,31 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
if (outgoing_slice_idx == tcp->outgoing_buffer->count) {
*error = GRPC_ERROR_NONE;
- grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
+ tcp->outgoing_buffer);
return true;
}
}
}
-static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
+static void tcp_handle_write(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error) {
grpc_tcp* tcp = (grpc_tcp*)arg;
grpc_closure* cb;
if (error != GRPC_ERROR_NONE) {
cb = tcp->write_cb;
tcp->write_cb = nullptr;
- cb->cb(cb->cb_arg, error);
- TCP_UNREF(tcp, "write");
+ cb->cb(exec_ctx, cb->cb_arg, error);
+ TCP_UNREF(exec_ctx, tcp, "write");
return;
}
- if (!tcp_flush(tcp, &error)) {
+ if (!tcp_flush(exec_ctx, tcp, &error)) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- notify_on_write(tcp);
+ notify_on_write(exec_ctx, tcp);
} else {
cb = tcp->write_cb;
tcp->write_cb = nullptr;
@@ -626,13 +649,13 @@ static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
gpr_log(GPR_DEBUG, "write: %s", str);
}
- GRPC_CLOSURE_RUN(cb, error);
- TCP_UNREF(tcp, "write");
+ GRPC_CLOSURE_RUN(exec_ctx, cb, error);
+ TCP_UNREF(exec_ctx, tcp, "write");
}
}
-static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
- grpc_closure* cb) {
+static void tcp_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* buf, grpc_closure* cb) {
grpc_tcp* tcp = (grpc_tcp*)ep;
grpc_error* error = GRPC_ERROR_NONE;
@@ -653,48 +676,51 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
if (buf->length == 0) {
GPR_TIMER_END("tcp_write", 0);
GRPC_CLOSURE_SCHED(
- cb, grpc_fd_is_shutdown(tcp->em_fd)
- ? tcp_annotate_error(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp)
- : GRPC_ERROR_NONE);
+ exec_ctx, cb,
+ grpc_fd_is_shutdown(tcp->em_fd)
+ ? tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"),
+ tcp)
+ : GRPC_ERROR_NONE);
return;
}
tcp->outgoing_buffer = buf;
tcp->outgoing_byte_idx = 0;
- if (!tcp_flush(tcp, &error)) {
+ if (!tcp_flush(exec_ctx, tcp, &error)) {
TCP_REF(tcp, "write");
tcp->write_cb = cb;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- notify_on_write(tcp);
+ notify_on_write(exec_ctx, tcp);
} else {
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
}
- GRPC_CLOSURE_SCHED(cb, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
GPR_TIMER_END("tcp_write", 0);
}
-static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
+static void tcp_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset) {
grpc_tcp* tcp = (grpc_tcp*)ep;
- grpc_pollset_add_fd(pollset, tcp->em_fd);
+ grpc_pollset_add_fd(exec_ctx, pollset, tcp->em_fd);
}
-static void tcp_add_to_pollset_set(grpc_endpoint* ep,
+static void tcp_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
grpc_pollset_set* pollset_set) {
grpc_tcp* tcp = (grpc_tcp*)ep;
- grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
+ grpc_pollset_set_add_fd(exec_ctx, pollset_set, tcp->em_fd);
}
-static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
+static void tcp_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
grpc_pollset_set* pollset_set) {
grpc_tcp* tcp = (grpc_tcp*)ep;
- grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
+ grpc_pollset_set_del_fd(exec_ctx, pollset_set, tcp->em_fd);
}
static char* tcp_get_peer(grpc_endpoint* ep) {
@@ -725,7 +751,7 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
#define MAX_CHUNK_SIZE 32 * 1024 * 1024
-grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
+grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_fd* em_fd,
const grpc_channel_args* channel_args,
const char* peer_string) {
int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
@@ -754,7 +780,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
grpc_channel_arg_get_integer(&channel_args->args[i], options);
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
- grpc_resource_quota_unref_internal(resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
(grpc_resource_quota*)channel_args->args[i].value.pointer.p);
}
@@ -791,7 +817,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
&tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
/* Tell network status tracker about new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
- grpc_resource_quota_unref_internal(resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
return &tcp->base;
}
@@ -802,15 +828,15 @@ int grpc_tcp_fd(grpc_endpoint* ep) {
return grpc_fd_wrapped_fd(tcp->em_fd);
}
-void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
- grpc_closure* done) {
+void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ int* fd, grpc_closure* done) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp* tcp = (grpc_tcp*)ep;
GPR_ASSERT(ep->vtable == &vtable);
tcp->release_fd = fd;
tcp->release_fd_cb = done;
- grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
- TCP_UNREF(tcp, "destroy");
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &tcp->last_read_buffer);
+ TCP_UNREF(exec_ctx, tcp, "destroy");
}
#endif
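
The TCP_REF/TCP_UNREF rewrite above is mechanical, but it encodes a rule worth
stating: each pending async operation pins one ref (tagged with a reason string
the NDEBUG build drops), and the completion path releases it on the exec_ctx it
ran under. A sketch in terms of this file's statics (the wrapper name is
hypothetical):

/* hypothetical sketch -- not part of this patch */
static void arm_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
  TCP_REF(tcp, "read");          /* pin the endpoint while the edge is armed */
  notify_on_read(exec_ctx, tcp); /* wires tcp_handle_read, as above */
}
/* every exit path of tcp_handle_read() then ends with
   TCP_UNREF(exec_ctx, tcp, "read"); the final unref runs tcp_free(). */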
diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h
index 4529c02beb..09051b7ed6 100644
--- a/src/core/lib/iomgr/tcp_posix.h
+++ b/src/core/lib/iomgr/tcp_posix.h
@@ -37,7 +37,8 @@ extern grpc_core::TraceFlag grpc_tcp_trace;
 /* Create a tcp endpoint given a file descriptor and a read slice size.

Takes ownership of fd. */
-grpc_endpoint* grpc_tcp_create(grpc_fd* fd, const grpc_channel_args* args,
+grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ const grpc_channel_args* args,
const char* peer_string);
/* Return the tcp endpoint's fd, or -1 if this is not available. Does not
@@ -49,7 +50,7 @@ int grpc_tcp_fd(grpc_endpoint* ep);
/* Destroy the tcp endpoint without closing its fd. *fd will be set and done
* will be called when the endpoint is destroyed.
* Requires: ep must be a tcp endpoint and fd must not be NULL. */
-void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
- grpc_closure* done);
+void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ int* fd, grpc_closure* done);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_POSIX_H */
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index 038c765c6c..a1757a2b3e 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -39,20 +39,22 @@ typedef struct grpc_tcp_server_acceptor {
/* Called for newly connected TCP connections.
Takes ownership of acceptor. */
-typedef void (*grpc_tcp_server_cb)(void* arg, grpc_endpoint* ep,
+typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_endpoint* ep,
grpc_pollset* accepting_pollset,
grpc_tcp_server_acceptor* acceptor);
/* Create a server, initially not bound to any ports. The caller owns one ref.
If shutdown_complete is not NULL, it will be used by
grpc_tcp_server_unref() when the ref count reaches zero. */
-grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
+grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
+ grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server);
/* Start listening to bound ports */
-void grpc_tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
- size_t pollset_count,
+void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* server,
+ grpc_pollset** pollsets, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb, void* cb_arg);
/* Add a port to the server, returning the newly allocated port on success, or
@@ -90,9 +92,10 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
/* If the refcount drops to zero, enqueue calls on exec_ctx to
shutdown_listeners and delete s. */
-void grpc_tcp_server_unref(grpc_tcp_server* s);
+void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s);
/* Shutdown the fds of listeners. */
-void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s);
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_H */
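
The lifecycle this header now defines threads the exec_ctx through every call.
A minimal sketch, assuming the hypothetical names on_conn/serve (create/start/
unref signatures match the declarations above; shutdown_complete may be NULL
per the comment):

/* hypothetical sketch -- not part of this patch */
static void on_conn(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* ep,
                    grpc_pollset* accepting_pollset,
                    grpc_tcp_server_acceptor* acceptor) {
  /* takes ownership of `acceptor`; hand `ep` off to a transport here */
  gpr_free(acceptor);
}

static void serve(grpc_exec_ctx* exec_ctx, const grpc_channel_args* args,
                  grpc_pollset** pollsets, size_t pollset_count) {
  grpc_tcp_server* server = NULL;
  GPR_ASSERT(GRPC_LOG_IF_ERROR(
      "create", grpc_tcp_server_create(exec_ctx, NULL, args, &server)));
  /* ... grpc_tcp_server_add_port() for each listening address ... */
  grpc_tcp_server_start(exec_ctx, server, pollsets, pollset_count, on_conn,
                        NULL);
  /* ... later: */
  grpc_tcp_server_unref(exec_ctx, server); /* shuts listeners at zero refs */
}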
diff --git a/src/core/lib/iomgr/tcp_server_posix.cc b/src/core/lib/iomgr/tcp_server_posix.cc
index 99e1c6cd06..6fed13c6c7 100644
--- a/src/core/lib/iomgr/tcp_server_posix.cc
+++ b/src/core/lib/iomgr/tcp_server_posix.cc
@@ -68,7 +68,8 @@ static void init(void) {
#endif
}
-grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
+grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
+ grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server) {
gpr_once_init(&check_init, init);
@@ -115,12 +116,12 @@ grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
return GRPC_ERROR_NONE;
}
-static void finish_shutdown(grpc_tcp_server* s) {
+static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
gpr_mu_lock(&s->mu);
GPR_ASSERT(s->shutdown);
gpr_mu_unlock(&s->mu);
if (s->shutdown_complete != nullptr) {
- GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
gpr_mu_destroy(&s->mu);
@@ -130,18 +131,19 @@ static void finish_shutdown(grpc_tcp_server* s) {
s->head = sp->next;
gpr_free(sp);
}
- grpc_channel_args_destroy(s->channel_args);
+ grpc_channel_args_destroy(exec_ctx, s->channel_args);
gpr_free(s);
}
-static void destroyed_port(void* server, grpc_error* error) {
+static void destroyed_port(grpc_exec_ctx* exec_ctx, void* server,
+ grpc_error* error) {
grpc_tcp_server* s = (grpc_tcp_server*)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
} else {
GPR_ASSERT(s->destroyed_ports < s->nports);
gpr_mu_unlock(&s->mu);
@@ -151,7 +153,7 @@ static void destroyed_port(void* server, grpc_error* error) {
/* called when all listening endpoints have been shutdown, so no further
events will be received on them - at this point it's safe to destroy
things */
-static void deactivated_all_ports(grpc_tcp_server* s) {
+static void deactivated_all_ports(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
/* delete ALL the things */
gpr_mu_lock(&s->mu);
@@ -163,17 +165,17 @@ static void deactivated_all_ports(grpc_tcp_server* s) {
grpc_unlink_if_unix_domain_socket(&sp->addr);
GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
grpc_schedule_on_exec_ctx);
- grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, nullptr,
+ grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, nullptr,
false /* already_closed */, "tcp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
}
}
-static void tcp_server_destroy(grpc_tcp_server* s) {
+static void tcp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
@@ -184,17 +186,18 @@ static void tcp_server_destroy(grpc_tcp_server* s) {
grpc_tcp_listener* sp;
for (sp = s->head; sp; sp = sp->next) {
grpc_fd_shutdown(
- sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server destroyed"));
+ exec_ctx, sp->emfd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server destroyed"));
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- deactivated_all_ports(s);
+ deactivated_all_ports(exec_ctx, s);
}
}
/* event manager callback when reads are ready */
-static void on_read(void* arg, grpc_error* err) {
+static void on_read(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* err) {
grpc_tcp_listener* sp = (grpc_tcp_listener*)arg;
grpc_pollset* read_notifier_pollset;
if (err != GRPC_ERROR_NONE) {
@@ -220,7 +223,7 @@ static void on_read(void* arg, grpc_error* err) {
case EINTR:
continue;
case EAGAIN:
- grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
return;
default:
gpr_mu_lock(&sp->server->mu);
@@ -246,7 +249,7 @@ static void on_read(void* arg, grpc_error* err) {
grpc_fd* fdobj = grpc_fd_create(fd, name);
- grpc_pollset_add_fd(read_notifier_pollset, fdobj);
+ grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);
// Create acceptor.
grpc_tcp_server_acceptor* acceptor =
@@ -256,8 +259,8 @@ static void on_read(void* arg, grpc_error* err) {
acceptor->fd_index = sp->fd_index;
sp->server->on_accept_cb(
- sp->server->on_accept_cb_arg,
- grpc_tcp_create(fdobj, sp->server->channel_args, addr_str),
+ exec_ctx, sp->server->on_accept_cb_arg,
+ grpc_tcp_create(exec_ctx, fdobj, sp->server->channel_args, addr_str),
read_notifier_pollset, acceptor);
gpr_free(name);
@@ -270,7 +273,7 @@ error:
gpr_mu_lock(&sp->server->mu);
if (0 == --sp->server->active_ports && sp->server->shutdown) {
gpr_mu_unlock(&sp->server->mu);
- deactivated_all_ports(sp->server);
+ deactivated_all_ports(exec_ctx, sp->server);
} else {
gpr_mu_unlock(&sp->server->mu);
}
@@ -480,8 +483,8 @@ int grpc_tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index,
return -1;
}
-void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollsets,
- size_t pollset_count,
+void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s,
+ grpc_pollset** pollsets, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb,
void* on_accept_cb_arg) {
size_t i;
@@ -501,20 +504,20 @@ void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollsets,
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
for (i = 0; i < pollset_count; i++) {
- grpc_pollset_add_fd(pollsets[i], sp->emfd);
+ grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
grpc_schedule_on_exec_ctx);
- grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
sp = sp->next;
}
} else {
for (i = 0; i < pollset_count; i++) {
- grpc_pollset_add_fd(pollsets[i], sp->emfd);
+ grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
grpc_schedule_on_exec_ctx);
- grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
s->active_ports++;
sp = sp->next;
}
@@ -535,24 +538,25 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
gpr_mu_unlock(&s->mu);
}
-void grpc_tcp_server_unref(grpc_tcp_server* s) {
+void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
if (gpr_unref(&s->refs)) {
- grpc_tcp_server_shutdown_listeners(s);
+ grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
- GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting);
gpr_mu_unlock(&s->mu);
- tcp_server_destroy(s);
+ tcp_server_destroy(exec_ctx, s);
}
}
-void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s) {
gpr_mu_lock(&s->mu);
s->shutdown_listeners = true;
/* shutdown all fd's */
if (s->active_ports) {
grpc_tcp_listener* sp;
for (sp = s->head; sp; sp = sp->next) {
- grpc_fd_shutdown(sp->emfd,
+ grpc_fd_shutdown(exec_ctx, sp->emfd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"));
}
}
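
The posix teardown above cascades unref -> shutdown listeners -> schedule the
shutdown_starting list -> destroy. A caller can observe the start of that
cascade via grpc_tcp_server_shutdown_starting_add; a sketch follows, in which
the closure storage and names are hypothetical and the two-argument signature
is assumed from the partial declarations shown:

/* hypothetical sketch -- not part of this patch */
static void on_shutdown_starting(grpc_exec_ctx* exec_ctx, void* arg,
                                 grpc_error* error) {
  /* stop feeding new work to the server referenced by `arg` */
}

static void watch_shutdown(grpc_tcp_server* s, grpc_closure* c, void* arg) {
  GRPC_CLOSURE_INIT(c, on_shutdown_starting, arg, grpc_schedule_on_exec_ctx);
  grpc_tcp_server_shutdown_starting_add(s, c); /* scheduled at last unref */
}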
diff --git a/src/core/lib/iomgr/tcp_server_uv.cc b/src/core/lib/iomgr/tcp_server_uv.cc
index 1ac49190fb..ffadf0b1ab 100644
--- a/src/core/lib/iomgr/tcp_server_uv.cc
+++ b/src/core/lib/iomgr/tcp_server_uv.cc
@@ -73,7 +73,8 @@ struct grpc_tcp_server {
grpc_resource_quota* resource_quota;
};
-grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
+grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
+ grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server) {
grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
@@ -81,11 +82,11 @@ grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_POINTER) {
- grpc_resource_quota_unref_internal(s->resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
s->resource_quota = grpc_resource_quota_ref_internal(
(grpc_resource_quota*)args->args[i].value.pointer.p);
} else {
- grpc_resource_quota_unref_internal(s->resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
@@ -118,10 +119,10 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
GRPC_ERROR_NONE);
}
-static void finish_shutdown(grpc_tcp_server* s) {
+static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
GPR_ASSERT(s->shutdown);
if (s->shutdown_complete != NULL) {
- GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
while (s->head) {
@@ -131,17 +132,18 @@ static void finish_shutdown(grpc_tcp_server* s) {
gpr_free(sp->handle);
gpr_free(sp);
}
- grpc_resource_quota_unref_internal(s->resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
}
static void handle_close_callback(uv_handle_t* handle) {
grpc_tcp_listener* sp = (grpc_tcp_listener*)handle->data;
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
sp->server->open_ports--;
if (sp->server->open_ports == 0 && sp->server->shutdown) {
- finish_shutdown(sp->server);
+ finish_shutdown(&exec_ctx, sp->server);
}
+ grpc_exec_ctx_finish(&exec_ctx);
}
static void close_listener(grpc_tcp_listener* sp) {
@@ -151,7 +153,7 @@ static void close_listener(grpc_tcp_listener* sp) {
}
}
-static void tcp_server_destroy(grpc_tcp_server* s) {
+static void tcp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
int immediately_done = 0;
grpc_tcp_listener* sp;
@@ -166,22 +168,28 @@ static void tcp_server_destroy(grpc_tcp_server* s) {
}
if (immediately_done) {
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
}
}
-void grpc_tcp_server_unref(grpc_tcp_server* s) {
+void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
GRPC_UV_ASSERT_SAME_THREAD();
if (gpr_unref(&s->refs)) {
/* Complete shutdown_starting work before destroying. */
- grpc_core::ExecCtx exec_ctx;
- GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting);
- grpc_core::ExecCtx::Get()->Flush();
- tcp_server_destroy(s);
+ grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_CLOSURE_LIST_SCHED(&local_exec_ctx, &s->shutdown_starting);
+ if (exec_ctx == NULL) {
+ grpc_exec_ctx_flush(&local_exec_ctx);
+ tcp_server_destroy(&local_exec_ctx, s);
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ } else {
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ tcp_server_destroy(exec_ctx, s);
+ }
}
}
-static void finish_accept(grpc_tcp_listener* sp) {
+static void finish_accept(grpc_exec_ctx* exec_ctx, grpc_tcp_listener* sp) {
grpc_tcp_server_acceptor* acceptor =
(grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor));
uv_tcp_t* client = NULL;
@@ -217,13 +225,14 @@ static void finish_accept(grpc_tcp_listener* sp) {
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = 0;
- sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, NULL, acceptor);
+ sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
+ acceptor);
gpr_free(peer_name_string);
}
static void on_connect(uv_stream_t* server, int status) {
grpc_tcp_listener* sp = (grpc_tcp_listener*)server->data;
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (status < 0) {
switch (status) {
@@ -244,10 +253,11 @@ static void on_connect(uv_stream_t* server, int status) {
// Create acceptor.
if (sp->server->on_accept_cb) {
- finish_accept(sp);
+ finish_accept(&exec_ctx, sp);
} else {
sp->has_pending_connection = true;
}
+ grpc_exec_ctx_finish(&exec_ctx);
}
static grpc_error* add_addr_to_server(grpc_tcp_server* s,
@@ -444,8 +454,8 @@ grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
return error;
}
-void grpc_tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
- size_t pollset_count,
+void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* server,
+ grpc_pollset** pollsets, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb, void* cb_arg) {
grpc_tcp_listener* sp;
(void)pollsets;
@@ -460,12 +470,13 @@ void grpc_tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
server->on_accept_cb_arg = cb_arg;
for (sp = server->head; sp; sp = sp->next) {
if (sp->has_pending_connection) {
- finish_accept(sp);
+ finish_accept(exec_ctx, sp);
sp->has_pending_connection = false;
}
}
}
-void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {}
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s) {}
#endif /* GRPC_UV */
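
The uv unref above explicitly tolerates exec_ctx == NULL, because raw libuv
callbacks have no ambient context; in that case it builds, flushes, and
finishes a local one. Both call shapes are therefore legal; a sketch of the
NULL-context shape (the wrapper name is hypothetical):

/* hypothetical sketch -- not part of this patch */
static void release_from_uv_callback(grpc_tcp_server* s) {
  /* no ambient context inside a raw libuv callback: pass NULL and let
     grpc_tcp_server_unref create and drain its own */
  grpc_tcp_server_unref(NULL, s);
}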
diff --git a/src/core/lib/iomgr/tcp_server_windows.cc b/src/core/lib/iomgr/tcp_server_windows.cc
index 8a30dfde43..f538194895 100644
--- a/src/core/lib/iomgr/tcp_server_windows.cc
+++ b/src/core/lib/iomgr/tcp_server_windows.cc
@@ -94,7 +94,8 @@ struct grpc_tcp_server {
/* Public function. Allocates the proper data structures to hold a
grpc_tcp_server. */
-grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
+grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
+ grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server) {
grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
@@ -113,7 +114,8 @@ grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
return GRPC_ERROR_NONE;
}
-static void destroy_server(void* arg, grpc_error* error) {
+static void destroy_server(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
grpc_tcp_server* s = (grpc_tcp_server*)arg;
/* Now that the accepts have been aborted, we can destroy the sockets.
@@ -126,16 +128,18 @@ static void destroy_server(void* arg, grpc_error* error) {
grpc_winsocket_destroy(sp->socket);
gpr_free(sp);
}
- grpc_channel_args_destroy(s->channel_args);
+ grpc_channel_args_destroy(exec_ctx, s->channel_args);
gpr_free(s);
}
-static void finish_shutdown_locked(grpc_tcp_server* s) {
+static void finish_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s) {
if (s->shutdown_complete != NULL) {
- GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
GRPC_CLOSURE_SCHED(
+ exec_ctx,
GRPC_CLOSURE_CREATE(destroy_server, s, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
}
@@ -153,14 +157,14 @@ void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
gpr_mu_unlock(&s->mu);
}
-static void tcp_server_destroy(grpc_tcp_server* s) {
+static void tcp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
grpc_tcp_listener* sp;
gpr_mu_lock(&s->mu);
/* First, shutdown all fd's. This will queue abortion calls for all
of the pending accepts due to the normal operation mechanism. */
if (s->active_ports == 0) {
- finish_shutdown_locked(s);
+ finish_shutdown_locked(exec_ctx, s);
} else {
for (sp = s->head; sp; sp = sp->next) {
sp->shutting_down = 1;
@@ -170,13 +174,13 @@ static void tcp_server_destroy(grpc_tcp_server* s) {
gpr_mu_unlock(&s->mu);
}
-void grpc_tcp_server_unref(grpc_tcp_server* s) {
+void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
if (gpr_unref(&s->refs)) {
- grpc_tcp_server_shutdown_listeners(s);
+ grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
- GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting);
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting);
gpr_mu_unlock(&s->mu);
- tcp_server_destroy(s);
+ tcp_server_destroy(exec_ctx, s);
}
}
@@ -230,17 +234,19 @@ failure:
return error;
}
-static void decrement_active_ports_and_notify_locked(grpc_tcp_listener* sp) {
+static void decrement_active_ports_and_notify_locked(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_listener* sp) {
sp->shutting_down = 0;
GPR_ASSERT(sp->server->active_ports > 0);
if (0 == --sp->server->active_ports) {
- finish_shutdown_locked(sp->server);
+ finish_shutdown_locked(exec_ctx, sp->server);
}
}
/* In order to do an async accept, we need to create a socket first which
will be the one assigned to the new incoming connection. */
-static grpc_error* start_accept_locked(grpc_tcp_listener* port) {
+static grpc_error* start_accept_locked(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_listener* port) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
DWORD addrlen = sizeof(struct sockaddr_in6) + 16;
@@ -279,7 +285,7 @@ static grpc_error* start_accept_locked(grpc_tcp_listener* port) {
/* We're ready to do the accept. Calling grpc_socket_notify_on_read may
immediately process an accept that happened in the meantime. */
port->new_socket = sock;
- grpc_socket_notify_on_read(port->socket, &port->on_accept);
+ grpc_socket_notify_on_read(exec_ctx, port->socket, &port->on_accept);
port->outstanding_calls++;
return error;
@@ -290,7 +296,7 @@ failure:
}
/* Event manager callback when reads are ready. */
-static void on_accept(void* arg, grpc_error* error) {
+static void on_accept(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_tcp_listener* sp = (grpc_tcp_listener*)arg;
SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info* info = &sp->socket->read_info;
@@ -351,7 +357,7 @@ static void on_accept(void* arg, grpc_error* error) {
gpr_free(utf8_message);
}
gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string);
- ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name),
+ ep = grpc_tcp_create(exec_ctx, grpc_winsocket_create(sock, fd_name),
sp->server->channel_args, peer_name_string);
gpr_free(fd_name);
gpr_free(peer_name_string);
@@ -369,15 +375,17 @@ static void on_accept(void* arg, grpc_error* error) {
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = 0;
- sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, NULL, acceptor);
+ sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
+ acceptor);
}
/* As we were notified by the IOCP of exactly one accept,
the former socket we created has now either been destroyed or assigned
to the new connection. We need to create a new one for the next
connection. */
- GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(sp)));
+ GPR_ASSERT(
+ GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
if (0 == --sp->outstanding_calls) {
- decrement_active_ports_and_notify_locked(sp);
+ decrement_active_ports_and_notify_locked(exec_ctx, sp);
}
gpr_mu_unlock(&sp->server->mu);
}
@@ -514,8 +522,8 @@ done:
return error;
}
-void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollset,
- size_t pollset_count,
+void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s,
+ grpc_pollset** pollset, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb,
void* on_accept_cb_arg) {
grpc_tcp_listener* sp;
@@ -526,12 +534,14 @@ void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollset,
s->on_accept_cb = on_accept_cb;
s->on_accept_cb_arg = on_accept_cb_arg;
for (sp = s->head; sp; sp = sp->next) {
- GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(sp)));
+ GPR_ASSERT(
+ GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
}
-void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {}
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s) {}
#endif /* GRPC_WINSOCK_SOCKET */
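
The change above is mechanical: every function that may schedule a closure now receives the execution context from its caller instead of reaching for ambient state. A minimal standalone sketch of that threading pattern, making no assumptions about gRPC internals (exec_ctx, exec_ctx_finish, and finish_shutdown_locked below are illustrative stand-ins, not the gRPC types):

#include <cstdio>
#include <functional>
#include <vector>

struct exec_ctx {  // stand-in for grpc_exec_ctx
  std::vector<std::function<void()>> closures;
};

static void exec_ctx_finish(exec_ctx* ctx) {  // stand-in for grpc_exec_ctx_finish
  while (!ctx->closures.empty()) {  // closures may queue further closures
    auto closure = ctx->closures.front();
    ctx->closures.erase(ctx->closures.begin());
    closure();
  }
}

// The callee no longer reaches for ambient state; the caller hands the
// context down, just as the diff does for finish_shutdown_locked.
static void finish_shutdown_locked(exec_ctx* ctx) {
  ctx->closures.push_back([] { std::puts("shutdown_complete closure ran"); });
}

int main() {
  exec_ctx ctx;                  // analogous to GRPC_EXEC_CTX_INIT
  finish_shutdown_locked(&ctx);  // work is only queued here...
  exec_ctx_finish(&ctx);         // ...and drained at one well-defined point
}

The payoff of the explicit parameter is that closures queued anywhere in the call tree drain at a single point the caller controls, rather than whenever a thread-local happens to be flushed.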
diff --git a/src/core/lib/iomgr/tcp_uv.cc b/src/core/lib/iomgr/tcp_uv.cc
index 2c26b60511..40f4006203 100644
--- a/src/core/lib/iomgr/tcp_uv.cc
+++ b/src/core/lib/iomgr/tcp_uv.cc
@@ -65,18 +65,19 @@ typedef struct {
grpc_pollset* pollset;
} grpc_tcp;
-static void tcp_free(grpc_tcp* tcp) {
- grpc_resource_user_unref(tcp->resource_user);
+static void tcp_free(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+ grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp->handle);
gpr_free(tcp->peer_string);
gpr_free(tcp);
}
#ifndef NDEBUG
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_UNREF(exec_ctx, tcp, reason) \
+ tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
- int line) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ const char* reason, const char* file, int line) {
if (grpc_tcp_trace.enabled()) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -84,7 +85,7 @@ static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
val - 1);
}
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -99,11 +100,11 @@ static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file,
gpr_ref(&tcp->refcount);
}
#else
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_tcp* tcp) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -111,14 +112,15 @@ static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
#endif
static void uv_close_callback(uv_handle_t* handle) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp* tcp = (grpc_tcp*)handle->data;
- TCP_UNREF(tcp, "destroy");
+ TCP_UNREF(&exec_ctx, tcp, "destroy");
+ grpc_exec_ctx_finish(&exec_ctx);
}
static void alloc_uv_buf(uv_handle_t* handle, size_t suggested_size,
uv_buf_t* buf) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp* tcp = (grpc_tcp*)handle->data;
(void)suggested_size;
/* Before calling uv_read_start, we allocate a buffer with exactly one slice
@@ -126,9 +128,11 @@ static void alloc_uv_buf(uv_handle_t* handle, size_t suggested_size,
* allocation was successful. So slices[0] should always exist here */
buf->base = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[0]);
buf->len = GRPC_SLICE_LENGTH(tcp->read_slices->slices[0]);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
+static void call_read_cb(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ grpc_error* error) {
grpc_closure* cb = tcp->read_cb;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
@@ -145,25 +149,25 @@ static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
}
tcp->read_slices = NULL;
tcp->read_cb = NULL;
- GRPC_CLOSURE_RUN(cb, error);
+ GRPC_CLOSURE_RUN(exec_ctx, cb, error);
}
static void read_callback(uv_stream_t* stream, ssize_t nread,
const uv_buf_t* buf) {
grpc_error* error;
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp* tcp = (grpc_tcp*)stream->data;
grpc_slice_buffer garbage;
if (nread == 0) {
// Nothing happened. Wait for the next callback
return;
}
- TCP_UNREF(tcp, "read");
+ TCP_UNREF(&exec_ctx, tcp, "read");
// TODO(murgatroid99): figure out what the return value here means
uv_read_stop(stream);
if (nread == UV_EOF) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
- grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
+ grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, tcp->read_slices);
} else if (nread > 0) {
// Successful read
error = GRPC_ERROR_NONE;
@@ -173,17 +177,19 @@ static void read_callback(uv_stream_t* stream, ssize_t nread,
grpc_slice_buffer_init(&garbage);
grpc_slice_buffer_trim_end(
tcp->read_slices, tcp->read_slices->length - (size_t)nread, &garbage);
- grpc_slice_buffer_reset_and_unref_internal(&garbage);
+ grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, &garbage);
}
} else {
// nread < 0: Error
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed");
- grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
+ grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, tcp->read_slices);
}
- call_read_cb(tcp, error);
+ call_read_cb(&exec_ctx, tcp, error);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
+static void tcp_read_allocation_done(grpc_exec_ctx* exec_ctx, void* tcpp,
+ grpc_error* error) {
int status;
grpc_tcp* tcp = (grpc_tcp*)tcpp;
if (grpc_tcp_trace.enabled()) {
@@ -201,9 +207,9 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
}
}
if (error != GRPC_ERROR_NONE) {
- grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
- call_read_cb(tcp, GRPC_ERROR_REF(error));
- TCP_UNREF(tcp, "read");
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->read_slices);
+ call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
+ TCP_UNREF(exec_ctx, tcp, "read");
}
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
@@ -211,16 +217,16 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
}
}
-static void uv_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
- grpc_closure* cb) {
+static void uv_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* read_slices, grpc_closure* cb) {
grpc_tcp* tcp = (grpc_tcp*)ep;
GRPC_UV_ASSERT_SAME_THREAD();
GPR_ASSERT(tcp->read_cb == NULL);
tcp->read_cb = cb;
tcp->read_slices = read_slices;
- grpc_slice_buffer_reset_and_unref_internal(read_slices);
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices);
TCP_REF(tcp, "read");
- grpc_resource_user_alloc_slices(&tcp->slice_allocator,
+ grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
tcp->read_slices);
}
@@ -228,10 +234,10 @@ static void uv_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
static void write_callback(uv_write_t* req, int status) {
grpc_tcp* tcp = (grpc_tcp*)req->data;
grpc_error* error;
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure* cb = tcp->write_cb;
tcp->write_cb = NULL;
- TCP_UNREF(tcp, "write");
+ TCP_UNREF(&exec_ctx, tcp, "write");
if (status == 0) {
error = GRPC_ERROR_NONE;
} else {
@@ -242,10 +248,11 @@ static void write_callback(uv_write_t* req, int status) {
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
}
gpr_free(tcp->write_buffers);
- GRPC_CLOSURE_SCHED(cb, error);
+ GRPC_CLOSURE_SCHED(&exec_ctx, cb, error);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-static void uv_endpoint_write(grpc_endpoint* ep,
+static void uv_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
grpc_slice_buffer* write_slices,
grpc_closure* cb) {
grpc_tcp* tcp = (grpc_tcp*)ep;
@@ -268,8 +275,9 @@ static void uv_endpoint_write(grpc_endpoint* ep,
}
if (tcp->shutting_down) {
- GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "TCP socket is shutting down"));
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, cb,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP socket is shutting down"));
return;
}
@@ -279,7 +287,7 @@ static void uv_endpoint_write(grpc_endpoint* ep,
if (tcp->write_slices->count == 0) {
// No slices means we don't have to do anything,
// and libuv doesn't like empty writes
- GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
return;
}
@@ -300,31 +308,37 @@ static void uv_endpoint_write(grpc_endpoint* ep,
write_callback);
}
-static void uv_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
+static void uv_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset) {
// No-op. We're ignoring pollsets currently
+ (void)exec_ctx;
(void)ep;
(void)pollset;
grpc_tcp* tcp = (grpc_tcp*)ep;
tcp->pollset = pollset;
}
-static void uv_add_to_pollset_set(grpc_endpoint* ep,
+static void uv_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
grpc_pollset_set* pollset) {
// No-op. We're ignoring pollsets currently
+ (void)exec_ctx;
(void)ep;
(void)pollset;
}
-static void uv_delete_from_pollset_set(grpc_endpoint* ep,
+static void uv_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
grpc_pollset_set* pollset) {
// No-op. We're ignoring pollsets currently
+ (void)exec_ctx;
(void)ep;
(void)pollset;
}
static void shutdown_callback(uv_shutdown_t* req, int status) {}
-static void uv_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
+static void uv_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why) {
grpc_tcp* tcp = (grpc_tcp*)ep;
if (!tcp->shutting_down) {
if (grpc_tcp_trace.enabled()) {
@@ -334,12 +348,12 @@ static void uv_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
tcp->shutting_down = true;
uv_shutdown_t* req = &tcp->shutdown_req;
uv_shutdown(req, (uv_stream_t*)tcp->handle, shutdown_callback);
- grpc_resource_user_shutdown(tcp->resource_user);
+ grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
}
GRPC_ERROR_UNREF(why);
}
-static void uv_destroy(grpc_endpoint* ep) {
+static void uv_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp* tcp = (grpc_tcp*)ep;
uv_close((uv_handle_t*)tcp->handle, uv_close_callback);
@@ -372,7 +386,7 @@ grpc_endpoint* grpc_tcp_create(uv_tcp_t* handle,
grpc_resource_quota* resource_quota,
char* peer_string) {
grpc_tcp* tcp = (grpc_tcp*)gpr_malloc(sizeof(grpc_tcp));
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
@@ -399,6 +413,7 @@ grpc_endpoint* grpc_tcp_create(uv_tcp_t* handle,
uv_unref((uv_handle_t*)handle);
#endif
+ grpc_exec_ctx_finish(&exec_ctx);
return &tcp->base;
}
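
In tcp_uv.cc the pattern inverts at the libuv boundary: libuv invokes callbacks with no gRPC state in scope, so each callback constructs a local context (GRPC_EXEC_CTX_INIT), does its work, and drains it (grpc_exec_ctx_finish) before returning to the loop. A runnable sketch of that boundary discipline, assuming libuv is installed; the exec_ctx type below is a stand-in, not gRPC's:

#include <uv.h>
#include <cstdio>
#include <functional>
#include <vector>

struct exec_ctx { std::vector<std::function<void()>> closures; };
static void exec_ctx_finish(exec_ctx* ctx) {
  for (auto& c : ctx->closures) c();
  ctx->closures.clear();
}

// libuv invokes this with no context in scope, so the callback builds a
// fresh one and drains it before returning control to the event loop.
static void on_timer(uv_timer_t* handle) {
  exec_ctx ctx;  // analogous to GRPC_EXEC_CTX_INIT at a uv boundary
  ctx.closures.push_back([] { std::puts("closure scheduled by callback"); });
  exec_ctx_finish(&ctx);  // analogous to grpc_exec_ctx_finish
  uv_timer_stop(handle);
}

int main() {
  uv_timer_t timer;
  uv_timer_init(uv_default_loop(), &timer);
  uv_timer_start(&timer, on_timer, 10 /* ms */, 0 /* no repeat */);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}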
diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc
index 6d091b77bb..33868cdc7a 100644
--- a/src/core/lib/iomgr/tcp_windows.cc
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -109,20 +109,21 @@ typedef struct grpc_tcp {
char* peer_string;
} grpc_tcp;
-static void tcp_free(grpc_tcp* tcp) {
+static void tcp_free(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
grpc_winsocket_destroy(tcp->socket);
gpr_mu_destroy(&tcp->mu);
gpr_free(tcp->peer_string);
- grpc_resource_user_unref(tcp->resource_user);
+ grpc_resource_user_unref(exec_ctx, tcp->resource_user);
if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error);
gpr_free(tcp);
}
#ifndef NDEBUG
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_UNREF(exec_ctx, tcp, reason) \
+ tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
- int line) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ const char* reason, const char* file, int line) {
if (grpc_tcp_trace.enabled()) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -130,7 +131,7 @@ static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
val - 1);
}
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -145,11 +146,11 @@ static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file,
gpr_ref(&tcp->refcount);
}
#else
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_tcp* tcp) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (gpr_unref(&tcp->refcount)) {
- tcp_free(tcp);
+ tcp_free(exec_ctx, tcp);
}
}
@@ -157,7 +158,7 @@ static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
#endif
/* Asynchronous callback from the IOCP, or the background thread. */
-static void on_read(void* tcpp, grpc_error* error) {
+static void on_read(grpc_exec_ctx* exec_ctx, void* tcpp, grpc_error* error) {
grpc_tcp* tcp = (grpc_tcp*)tcpp;
grpc_closure* cb = tcp->read_cb;
grpc_winsocket* socket = tcp->socket;
@@ -171,13 +172,13 @@ static void on_read(void* tcpp, grpc_error* error) {
char* utf8_message = gpr_format_message(info->wsa_error);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
gpr_free(utf8_message);
- grpc_slice_unref_internal(tcp->read_slice);
+ grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
} else {
if (info->bytes_transfered != 0 && !tcp->shutting_down) {
sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
grpc_slice_buffer_add(tcp->read_slices, sub);
} else {
- grpc_slice_unref_internal(tcp->read_slice);
+ grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
error = tcp->shutting_down
? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"TCP stream shutting down", &tcp->shutdown_error, 1)
@@ -187,12 +188,12 @@ static void on_read(void* tcpp, grpc_error* error) {
}
tcp->read_cb = NULL;
- TCP_UNREF(tcp, "read");
- GRPC_CLOSURE_SCHED(cb, error);
+ TCP_UNREF(exec_ctx, tcp, "read");
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
-static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
- grpc_closure* cb) {
+static void win_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* read_slices, grpc_closure* cb) {
grpc_tcp* tcp = (grpc_tcp*)ep;
grpc_winsocket* handle = tcp->socket;
grpc_winsocket_callback_info* info = &handle->read_info;
@@ -203,14 +204,15 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
if (tcp->shutting_down) {
GRPC_CLOSURE_SCHED(
- cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "TCP socket is shutting down", &tcp->shutdown_error, 1));
+ exec_ctx, cb,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "TCP socket is shutting down", &tcp->shutdown_error, 1));
return;
}
tcp->read_cb = cb;
tcp->read_slices = read_slices;
- grpc_slice_buffer_reset_and_unref_internal(read_slices);
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices);
tcp->read_slice = GRPC_SLICE_MALLOC(8192);
@@ -228,7 +230,7 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
/* Did we get data immediately? Yay. */
if (info->wsa_error != WSAEWOULDBLOCK) {
info->bytes_transfered = bytes_read;
- GRPC_CLOSURE_SCHED(&tcp->on_read, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
return;
}
@@ -241,17 +243,17 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
info->wsa_error = wsa_error;
- GRPC_CLOSURE_SCHED(&tcp->on_read,
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read,
GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
return;
}
}
- grpc_socket_notify_on_read(tcp->socket, &tcp->on_read);
+ grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read);
}
/* Asynchronous callback from the IOCP, or the background thread. */
-static void on_write(void* tcpp, grpc_error* error) {
+static void on_write(grpc_exec_ctx* exec_ctx, void* tcpp, grpc_error* error) {
grpc_tcp* tcp = (grpc_tcp*)tcpp;
grpc_winsocket* handle = tcp->socket;
grpc_winsocket_callback_info* info = &handle->write_info;
@@ -272,13 +274,13 @@ static void on_write(void* tcpp, grpc_error* error) {
}
}
- TCP_UNREF(tcp, "write");
- GRPC_CLOSURE_SCHED(cb, error);
+ TCP_UNREF(exec_ctx, tcp, "write");
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
/* Initiates a write. */
-static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb) {
+static void win_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb) {
grpc_tcp* tcp = (grpc_tcp*)ep;
grpc_winsocket* socket = tcp->socket;
grpc_winsocket_callback_info* info = &socket->write_info;
@@ -292,8 +294,9 @@ static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
if (tcp->shutting_down) {
GRPC_CLOSURE_SCHED(
- cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "TCP socket is shutting down", &tcp->shutdown_error, 1));
+ exec_ctx, cb,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "TCP socket is shutting down", &tcp->shutdown_error, 1));
return;
}
@@ -324,7 +327,7 @@ static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_error* error = status == 0
? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(info->wsa_error, "WSASend");
- GRPC_CLOSURE_SCHED(cb, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
if (allocated) gpr_free(allocated);
return;
}
@@ -341,32 +344,35 @@ static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
if (status != 0) {
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
- TCP_UNREF(tcp, "write");
- GRPC_CLOSURE_SCHED(cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
+ TCP_UNREF(exec_ctx, tcp, "write");
+ GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
return;
}
}
/* Now that everything is set up, we can ask for the IOCP notification. It
may trigger the callback immediately, but no matter. */
- grpc_socket_notify_on_write(socket, &tcp->on_write);
+ grpc_socket_notify_on_write(exec_ctx, socket, &tcp->on_write);
}
-static void win_add_to_pollset(grpc_endpoint* ep, grpc_pollset* ps) {
+static void win_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* ps) {
grpc_tcp* tcp;
(void)ps;
tcp = (grpc_tcp*)ep;
grpc_iocp_add_socket(tcp->socket);
}
-static void win_add_to_pollset_set(grpc_endpoint* ep, grpc_pollset_set* pss) {
+static void win_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset_set* pss) {
grpc_tcp* tcp;
(void)pss;
tcp = (grpc_tcp*)ep;
grpc_iocp_add_socket(tcp->socket);
}
-static void win_delete_from_pollset_set(grpc_endpoint* ep,
+static void win_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
grpc_pollset_set* pss) {}
/* Initiates a shutdown of the TCP endpoint. This will queue abort callbacks
@@ -375,7 +381,8 @@ static void win_delete_from_pollset_set(grpc_endpoint* ep,
we're not going to protect against these. However, the IO Completion Port
callback will happen from another thread, so we need to protect against
concurrent access of the data structure in that regard. */
-static void win_shutdown(grpc_endpoint* ep, grpc_error* why) {
+static void win_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why) {
grpc_tcp* tcp = (grpc_tcp*)ep;
gpr_mu_lock(&tcp->mu);
/* At that point, what may happen is that we're already inside the IOCP
@@ -388,13 +395,13 @@ static void win_shutdown(grpc_endpoint* ep, grpc_error* why) {
}
grpc_winsocket_shutdown(tcp->socket);
gpr_mu_unlock(&tcp->mu);
- grpc_resource_user_shutdown(tcp->resource_user);
+ grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
}
-static void win_destroy(grpc_endpoint* ep) {
+static void win_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp* tcp = (grpc_tcp*)ep;
- TCP_UNREF(tcp, "destroy");
+ TCP_UNREF(exec_ctx, tcp, "destroy");
}
static char* win_get_peer(grpc_endpoint* ep) {
@@ -420,14 +427,14 @@ static grpc_endpoint_vtable vtable = {win_read,
win_get_peer,
win_get_fd};
-grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
+grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
grpc_channel_args* channel_args,
const char* peer_string) {
grpc_resource_quota* resource_quota = grpc_resource_quota_create(NULL);
if (channel_args != NULL) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
- grpc_resource_quota_unref_internal(resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
(grpc_resource_quota*)channel_args->args[i].value.pointer.p);
}
@@ -445,7 +452,7 @@ grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
- grpc_resource_quota_unref_internal(resource_quota);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
return &tcp->base;
}
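
The TCP_REF/TCP_UNREF macros above keep release builds free of tracing arguments while debug builds log the reason, file, and line. A standalone sketch of that debug/release macro split (tcp, tcp_free, and tcp_unref here are illustrative, not the gRPC definitions):

#include <atomic>
#include <cstdio>

struct tcp {
  std::atomic<int> refs{1};
};

static void tcp_free(tcp* t) { delete t; }

#ifndef NDEBUG
#define TCP_UNREF(t, reason) tcp_unref((t), (reason), __FILE__, __LINE__)
static void tcp_unref(tcp* t, const char* reason, const char* file, int line) {
  std::fprintf(stderr, "%s:%d unref %p for %s\n", file, line, (void*)t, reason);
  if (t->refs.fetch_sub(1) == 1) tcp_free(t);  // last ref gone: free
}
#else
#define TCP_UNREF(t, reason) tcp_unref((t))  // reason compiles away entirely
static void tcp_unref(tcp* t) {
  if (t->refs.fetch_sub(1) == 1) tcp_free(t);
}
#endif

int main() {
  tcp* t = new tcp;
  TCP_UNREF(t, "demo");  // reason/file/line only survive in debug builds
}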
diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h
index 8578a358ea..28287e2795 100644
--- a/src/core/lib/iomgr/tcp_windows.h
+++ b/src/core/lib/iomgr/tcp_windows.h
@@ -38,7 +38,7 @@
/* Create a tcp endpoint given a winsock handle.
* Takes ownership of the handle.
*/
-grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
+grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
grpc_channel_args* channel_args,
const char* peer_string);
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index 82049859c5..b9acce229e 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -40,8 +40,8 @@ typedef struct grpc_timer grpc_timer;
application code should check the error to determine how it was invoked. The
application callback is also responsible for maintaining information about
when to free up any user-level state. */
-void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
- grpc_closure* closure);
+void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
+ grpc_millis deadline, grpc_closure* closure);
/* Initialize *timer without setting it. This can later be passed through
the regular init or cancel */
@@ -73,7 +73,7 @@ void grpc_timer_init_unset(grpc_timer* timer);
matches this aim.
Requires: cancel() must happen after init() on a given timer */
-void grpc_timer_cancel(grpc_timer* timer);
+void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer);
/* iomgr internal api for dealing with timers */
@@ -90,9 +90,10 @@ typedef enum {
*next is never guaranteed to be updated on any given execution; however,
with high probability at least one thread in the system will see an update
at any time slice. */
-grpc_timer_check_result grpc_timer_check(grpc_millis* next);
-void grpc_timer_list_init();
-void grpc_timer_list_shutdown();
+grpc_timer_check_result grpc_timer_check(grpc_exec_ctx* exec_ctx,
+ grpc_millis* next);
+void grpc_timer_list_init(grpc_exec_ctx* exec_ctx);
+void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx);
/* Consume a kick issued by grpc_kick_poller */
void grpc_timer_consume_kick(void);
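
The header spells out the contract: a timer whose deadline has already passed runs its closure immediately, and cancel after init fires the closure exactly once with a cancelled status. A small sketch of that contract, with illustrative types rather than the gRPC ones:

#include <cstdint>
#include <cstdio>

enum class status { ok, cancelled };

struct timer {
  bool pending = false;
  void (*cb)(status) = nullptr;
};

static void timer_init(timer* t, int64_t deadline, int64_t now,
                       void (*cb)(status)) {
  t->cb = cb;
  if (deadline <= now) {  // past deadline: run the closure inline
    t->pending = false;
    cb(status::ok);
    return;
  }
  t->pending = true;  // a real implementation would enqueue it in a shard
}

static void timer_cancel(timer* t) {
  if (t->pending) {  // the closure runs exactly once, here or on expiry
    t->pending = false;
    t->cb(status::cancelled);
  }
}

int main() {
  auto print = [](status s) {
    std::puts(s == status::ok ? "fired" : "cancelled");
  };
  timer t1, t2;
  timer_init(&t1, /*deadline=*/40, /*now=*/50, print);   // prints "fired"
  timer_init(&t2, /*deadline=*/100, /*now=*/50, print);  // stays pending
  timer_cancel(&t2);                                     // prints "cancelled"
}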
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index 103144eb3b..fa95c43dbe 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -225,7 +225,8 @@ static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
return a + b;
}
-static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
+static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx* exec_ctx,
+ gpr_atm now,
gpr_atm* next,
grpc_error* error);
@@ -235,7 +236,7 @@ static gpr_atm compute_min_deadline(timer_shard* shard) {
: grpc_timer_heap_top(&shard->heap)->deadline;
}
-void grpc_timer_list_init() {
+void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {
uint32_t i;
g_num_shards = GPR_MAX(1, 2 * gpr_cpu_num_cores());
@@ -246,7 +247,7 @@ void grpc_timer_list_init() {
g_shared_mutables.initialized = true;
g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mu_init(&g_shared_mutables.mu);
- g_shared_mutables.min_timer = grpc_core::ExecCtx::Get()->Now();
+ g_shared_mutables.min_timer = grpc_exec_ctx_now(exec_ctx);
gpr_tls_init(&g_last_seen_min_timer);
gpr_tls_set(&g_last_seen_min_timer, 0);
@@ -266,10 +267,10 @@ void grpc_timer_list_init() {
INIT_TIMER_HASH_TABLE();
}
-void grpc_timer_list_shutdown() {
+void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx) {
size_t i;
run_some_expired_timers(
- GPR_ATM_MAX, nullptr,
+ exec_ctx, GPR_ATM_MAX, nullptr,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
for (i = 0; i < g_num_shards; i++) {
timer_shard* shard = &g_shards[i];
@@ -322,8 +323,8 @@ static void note_deadline_change(timer_shard* shard) {
void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = false; }
-void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
- grpc_closure* closure) {
+void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
+ grpc_millis deadline, grpc_closure* closure) {
int is_first_timer = 0;
timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
timer->closure = closure;
@@ -336,12 +337,12 @@ void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG,
"TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
- deadline, grpc_core::ExecCtx::Get()->Now(), closure, closure->cb);
+ deadline, grpc_exec_ctx_now(exec_ctx), closure, closure->cb);
}
if (!g_shared_mutables.initialized) {
timer->pending = false;
- GRPC_CLOSURE_SCHED(timer->closure,
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Attempt to create timer before initialization"));
return;
@@ -349,10 +350,10 @@ void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
gpr_mu_lock(&shard->mu);
timer->pending = true;
- grpc_millis now = grpc_core::ExecCtx::Get()->Now();
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
if (deadline <= now) {
timer->pending = false;
- GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
gpr_mu_unlock(&shard->mu);
/* early out */
return;
@@ -413,7 +414,7 @@ void grpc_timer_consume_kick(void) {
gpr_tls_set(&g_last_seen_min_timer, 0);
}
-void grpc_timer_cancel(grpc_timer* timer) {
+void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer) {
if (!g_shared_mutables.initialized) {
/* must have already been cancelled, also the shard mutex is invalid */
return;
@@ -429,7 +430,7 @@ void grpc_timer_cancel(grpc_timer* timer) {
if (timer->pending) {
REMOVE_FROM_HASH_TABLE(timer);
- GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
@@ -515,14 +516,15 @@ static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
}
/* REQUIRES: shard->mu unlocked */
-static size_t pop_timers(timer_shard* shard, gpr_atm now,
- gpr_atm* new_min_deadline, grpc_error* error) {
+static size_t pop_timers(grpc_exec_ctx* exec_ctx, timer_shard* shard,
+ gpr_atm now, gpr_atm* new_min_deadline,
+ grpc_error* error) {
size_t n = 0;
grpc_timer* timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
REMOVE_FROM_HASH_TABLE(timer);
- GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
n++;
}
*new_min_deadline = compute_min_deadline(shard);
@@ -534,7 +536,8 @@ static size_t pop_timers(timer_shard* shard, gpr_atm now,
return n;
}
-static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
+static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx* exec_ctx,
+ gpr_atm now,
gpr_atm* next,
grpc_error* error) {
grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED;
@@ -563,7 +566,8 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
/* For efficiency, we pop as many available timers as we can from the
shard. This may violate perfect timer deadline ordering, but that
shouldn't be a big deal because we don't make ordering guarantees. */
- if (pop_timers(g_shard_queue[0], now, &new_min_deadline, error) > 0) {
+ if (pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline,
+ error) > 0) {
result = GRPC_TIMERS_FIRED;
}
@@ -600,9 +604,10 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
return result;
}
-grpc_timer_check_result grpc_timer_check(grpc_millis* next) {
+grpc_timer_check_result grpc_timer_check(grpc_exec_ctx* exec_ctx,
+ grpc_millis* next) {
// prelude
- grpc_millis now = grpc_core::ExecCtx::Get()->Now();
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
/* fetch from a thread-local first: this avoids contention on a globally
mutable cacheline in the common case */
@@ -641,7 +646,7 @@ grpc_timer_check_result grpc_timer_check(grpc_millis* next) {
}
// actual code
grpc_timer_check_result r =
- run_some_expired_timers(now, next, shutdown_error);
+ run_some_expired_timers(exec_ctx, now, next, shutdown_error);
// tracing
if (grpc_timer_check_trace.enabled()) {
char* next_str;
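
pop_timers above drains every expired entry from one shard's min-heap and then recomputes that shard's minimum deadline. A standalone sketch of the same loop over a std::priority_queue min-heap (shard and pop_timers are illustrative names; gRPC's shards also hold a mutex and run each popped timer's closure):

#include <cstdint>
#include <cstdio>
#include <functional>
#include <queue>
#include <vector>

using millis = int64_t;

struct shard {  // one of g_num_shards
  std::priority_queue<millis, std::vector<millis>, std::greater<millis>> heap;
};

static size_t pop_timers(shard* s, millis now, millis* new_min_deadline) {
  size_t n = 0;
  while (!s->heap.empty() && s->heap.top() <= now) {
    s->heap.pop();  // gRPC would GRPC_CLOSURE_SCHED the timer's closure here
    ++n;
  }
  *new_min_deadline = s->heap.empty() ? INT64_MAX : s->heap.top();
  return n;
}

int main() {
  shard s;
  s.heap.push(5);
  s.heap.push(20);
  s.heap.push(7);
  s.heap.push(40);
  millis next;
  size_t fired = pop_timers(&s, /*now=*/10, &next);  // pops 5 and 7
  std::printf("fired %zu, next deadline %lld\n", fired, (long long)next);
}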
diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc
index 8ca6a3c23e..87ed0e05dc 100644
--- a/src/core/lib/iomgr/timer_manager.cc
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -98,12 +98,13 @@ static void start_timer_thread_and_unlock(void) {
}
void grpc_timer_manager_tick() {
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_millis next = GRPC_MILLIS_INF_FUTURE;
- grpc_timer_check(&next);
+ grpc_timer_check(&exec_ctx, &next);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-static void run_some_timers() {
+static void run_some_timers(grpc_exec_ctx* exec_ctx) {
// if there's something to execute...
gpr_mu_lock(&g_mu);
// remove a waiter from the pool, and start another thread if necessary
@@ -125,7 +126,7 @@ static void run_some_timers() {
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "flush exec_ctx");
}
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&g_mu);
// garbage collect any threads hanging out that are dead
gc_completed_threads();
@@ -137,7 +138,7 @@ static void run_some_timers() {
// wait until 'next' (or forever if there is already a timed waiter in the pool)
// returns true if the thread should continue executing (false if it should
// shut down)
-static bool wait_until(grpc_millis next) {
+static bool wait_until(grpc_exec_ctx* exec_ctx, grpc_millis next) {
gpr_mu_lock(&g_mu);
// if we're not threaded anymore, leave
if (!g_threaded) {
@@ -178,7 +179,7 @@ static bool wait_until(grpc_millis next) {
g_timed_waiter_deadline = next;
if (grpc_timer_check_trace.enabled()) {
- grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now();
+ grpc_millis wait_time = next - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds",
wait_time);
}
@@ -219,15 +220,15 @@ static bool wait_until(grpc_millis next) {
return true;
}
-static void timer_main_loop() {
+static void timer_main_loop(grpc_exec_ctx* exec_ctx) {
for (;;) {
grpc_millis next = GRPC_MILLIS_INF_FUTURE;
- grpc_core::ExecCtx::Get()->InvalidateNow();
+ grpc_exec_ctx_invalidate_now(exec_ctx);
// check timer state, updates next to the next time to run a check
- switch (grpc_timer_check(&next)) {
+ switch (grpc_timer_check(exec_ctx, &next)) {
case GRPC_TIMERS_FIRED:
- run_some_timers();
+ run_some_timers(exec_ctx);
break;
case GRPC_TIMERS_NOT_CHECKED:
/* This case only happens under contention, meaning more than one timer
@@ -245,7 +246,7 @@ static void timer_main_loop() {
next = GRPC_MILLIS_INF_FUTURE;
/* fall through */
case GRPC_TIMERS_CHECKED_AND_EMPTY:
- if (!wait_until(next)) {
+ if (!wait_until(exec_ctx, next)) {
return;
}
break;
@@ -273,9 +274,10 @@ static void timer_thread_cleanup(completed_thread* ct) {
static void timer_thread(void* completed_thread_ptr) {
// this thread's exec_ctx: we try to run things through to completion here
// since it's easy to spin up new threads
- grpc_core::ExecCtx exec_ctx(0);
- timer_main_loop();
-
+ grpc_exec_ctx exec_ctx =
+ GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, nullptr);
+ timer_main_loop(&exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
timer_thread_cleanup((completed_thread*)completed_thread_ptr);
}
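
timer_main_loop reduces to a three-way switch: timers fired (run them and flush), not checked (retry), or checked and empty (sleep until the next deadline). A compressed sketch of that loop shape, with a fake timer_check standing in for grpc_timer_check and the NOT_CHECKED contention case omitted:

#include <chrono>
#include <cstdio>
#include <thread>

enum check_result { FIRED, CHECKED_AND_EMPTY };

// Fake check: "fires" three times, then reports an empty timer list with
// a pretend next-deadline 10ms out.
static check_result timer_check(int* remaining, long* next_ms) {
  if (*remaining > 0) {
    --*remaining;
    return FIRED;
  }
  *next_ms = 10;
  return CHECKED_AND_EMPTY;
}

int main() {
  int remaining = 3;
  for (int iter = 0; iter < 5; ++iter) {
    long next_ms = 0;
    switch (timer_check(&remaining, &next_ms)) {
      case FIRED:
        std::puts("run_some_timers()");  // execute expired closures, flush ctx
        break;
      case CHECKED_AND_EMPTY:
        std::printf("wait_until(+%ldms)\n", next_ms);
        std::this_thread::sleep_for(std::chrono::milliseconds(next_ms));
        break;
    }
  }
}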
diff --git a/src/core/lib/iomgr/timer_uv.cc b/src/core/lib/iomgr/timer_uv.cc
index 5d238da089..fac2026fa9 100644
--- a/src/core/lib/iomgr/timer_uv.cc
+++ b/src/core/lib/iomgr/timer_uv.cc
@@ -42,27 +42,28 @@ static void stop_uv_timer(uv_timer_t* handle) {
void run_expired_timer(uv_timer_t* handle) {
grpc_timer* timer = (grpc_timer*)handle->data;
- grpc_core::ExecCtx exec_ctx;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_UV_ASSERT_SAME_THREAD();
GPR_ASSERT(timer->pending);
timer->pending = 0;
- GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
stop_uv_timer(handle);
+ grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
- grpc_closure* closure) {
+void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
+ grpc_millis deadline, grpc_closure* closure) {
uint64_t timeout;
uv_timer_t* uv_timer;
GRPC_UV_ASSERT_SAME_THREAD();
timer->closure = closure;
- if (deadline <= grpc_core::ExecCtx::Get()->Now()) {
+ if (deadline <= grpc_exec_ctx_now(exec_ctx)) {
timer->pending = 0;
- GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
return;
}
timer->pending = 1;
- timeout = (uint64_t)(deadline - grpc_core::ExecCtx::Get()->Now());
+ timeout = (uint64_t)(deadline - grpc_exec_ctx_now(exec_ctx));
uv_timer = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), uv_timer);
uv_timer->data = timer;
@@ -76,21 +77,22 @@ void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = 0; }
-void grpc_timer_cancel(grpc_timer* timer) {
+void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer) {
GRPC_UV_ASSERT_SAME_THREAD();
if (timer->pending) {
timer->pending = 0;
- GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
stop_uv_timer((uv_timer_t*)timer->uv_timer);
}
}
-grpc_timer_check_result grpc_timer_check(grpc_millis* next) {
+grpc_timer_check_result grpc_timer_check(grpc_exec_ctx* exec_ctx,
+ grpc_millis* next) {
return GRPC_TIMERS_NOT_CHECKED;
}
-void grpc_timer_list_init() {}
-void grpc_timer_list_shutdown() {}
+void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {}
+void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx) {}
void grpc_timer_consume_kick(void) {}
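
The libuv timer backend converts gRPC's absolute deadlines into the relative timeouts uv_timer_start expects, firing inline when the deadline has already passed. A sketch of just that conversion (schedule is an illustrative helper, neither gRPC nor libuv API):

#include <cstdint>
#include <cstdio>

static void schedule(int64_t deadline_ms, int64_t now_ms) {
  if (deadline_ms <= now_ms) {
    std::puts("deadline already passed: run the closure inline");
    return;
  }
  uint64_t timeout = (uint64_t)(deadline_ms - now_ms);  // relative, as uv wants
  std::printf("uv_timer_start with relative timeout %llu ms\n",
              (unsigned long long)timeout);
}

int main() {
  schedule(/*deadline=*/150, /*now=*/100);  // 50 ms relative timeout
  schedule(/*deadline=*/90, /*now=*/100);   // fires immediately
}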
diff --git a/src/core/lib/iomgr/udp_server.cc b/src/core/lib/iomgr/udp_server.cc
index 55e0b165ec..7b7d6946b1 100644
--- a/src/core/lib/iomgr/udp_server.cc
+++ b/src/core/lib/iomgr/udp_server.cc
@@ -150,30 +150,31 @@ grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args) {
return s;
}
-static void shutdown_fd(void* args, grpc_error* error) {
+static void shutdown_fd(grpc_exec_ctx* exec_ctx, void* args,
+ grpc_error* error) {
struct shutdown_fd_args* shutdown_args = (struct shutdown_fd_args*)args;
grpc_udp_listener* sp = shutdown_args->sp;
gpr_log(GPR_DEBUG, "shutdown fd %d", sp->fd);
gpr_mu_lock(shutdown_args->server_mu);
- grpc_fd_shutdown(sp->emfd, GRPC_ERROR_REF(error));
+ grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_REF(error));
sp->already_shutdown = true;
if (!sp->notify_on_write_armed) {
// Re-arm write notification to notify listener with error. This is
// necessary to decrement active_ports.
sp->notify_on_write_armed = true;
- grpc_fd_notify_on_write(sp->emfd, &sp->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
}
gpr_mu_unlock(shutdown_args->server_mu);
gpr_free(shutdown_args);
}
-static void dummy_cb(void* arg, grpc_error* error) {
+static void dummy_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
// No-op.
}
-static void finish_shutdown(grpc_udp_server* s) {
+static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_udp_server* s) {
if (s->shutdown_complete != nullptr) {
- GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
gpr_mu_destroy(&s->mu);
@@ -192,13 +193,14 @@ static void finish_shutdown(grpc_udp_server* s) {
gpr_free(s);
}
-static void destroyed_port(void* server, grpc_error* error) {
+static void destroyed_port(grpc_exec_ctx* exec_ctx, void* server,
+ grpc_error* error) {
grpc_udp_server* s = (grpc_udp_server*)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
} else {
gpr_mu_unlock(&s->mu);
}
@@ -207,7 +209,7 @@ static void destroyed_port(void* server, grpc_error* error) {
/* called when all listening endpoints have been shut down, so no further
events will be received on them - at this point it's safe to destroy
things */
-static void deactivated_all_ports(grpc_udp_server* s) {
+static void deactivated_all_ports(grpc_exec_ctx* exec_ctx, grpc_udp_server* s) {
/* delete ALL the things */
gpr_mu_lock(&s->mu);
@@ -228,19 +230,21 @@ static void deactivated_all_ports(grpc_udp_server* s) {
grpc_schedule_on_exec_ctx);
GPR_ASSERT(sp->orphan_cb);
gpr_log(GPR_DEBUG, "Orphan fd %d", sp->fd);
- sp->orphan_cb(sp->emfd, &sp->orphan_fd_closure, sp->server->user_data);
+ sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
+ sp->server->user_data);
}
- grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, nullptr,
+ grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, nullptr,
false /* already_closed */, "udp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- finish_shutdown(s);
+ finish_shutdown(exec_ctx, s);
}
}
-void grpc_udp_server_destroy(grpc_udp_server* s, grpc_closure* on_done) {
+void grpc_udp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_udp_server* s,
+ grpc_closure* on_done) {
grpc_udp_listener* sp;
gpr_mu_lock(&s->mu);
@@ -260,13 +264,14 @@ void grpc_udp_server_destroy(grpc_udp_server* s, grpc_closure* on_done) {
args->server_mu = &s->mu;
GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
grpc_schedule_on_exec_ctx);
- sp->orphan_cb(sp->emfd, &sp->orphan_fd_closure, sp->server->user_data);
+ sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
+ sp->server->user_data);
sp->orphan_notified = true;
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
- deactivated_all_ports(s);
+ deactivated_all_ports(exec_ctx, s);
}
}
@@ -345,7 +350,7 @@ error:
return -1;
}
-static void do_read(void* arg, grpc_error* error) {
+static void do_read(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_udp_listener* sp = reinterpret_cast<grpc_udp_listener*>(arg);
GPR_ASSERT(sp->read_cb && error == GRPC_ERROR_NONE);
/* TODO: the reason we hold server->mu here is merely to prevent fd
@@ -353,28 +358,29 @@ static void do_read(void* arg, grpc_error* error) {
* read lock if available. */
gpr_mu_lock(&sp->server->mu);
/* Tell the registered callback that data is available to read. */
- if (!sp->already_shutdown && sp->read_cb(sp->emfd, sp->server->user_data)) {
+ if (!sp->already_shutdown &&
+ sp->read_cb(exec_ctx, sp->emfd, sp->server->user_data)) {
/* There may be more packets to read. Schedule read_more_cb_ closure to run
* after finishing this event loop. */
- GRPC_CLOSURE_SCHED(&sp->do_read_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &sp->do_read_closure, GRPC_ERROR_NONE);
} else {
/* Finished reading all the packets; re-arm the notification event so we can
* get another chance to read. Or the fd is already shut down; re-arm to get a
* notification with the shutdown error. */
- grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
}
gpr_mu_unlock(&sp->server->mu);
}
/* event manager callback when reads are ready */
-static void on_read(void* arg, grpc_error* error) {
+static void on_read(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_udp_listener* sp = (grpc_udp_listener*)arg;
gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
if (0 == --sp->server->active_ports && sp->server->shutdown) {
gpr_mu_unlock(&sp->server->mu);
- deactivated_all_ports(sp->server);
+ deactivated_all_ports(exec_ctx, sp->server);
} else {
gpr_mu_unlock(&sp->server->mu);
}
@@ -383,57 +389,59 @@ static void on_read(void* arg, grpc_error* error) {
/* Read once. If there is more data to read, offload the work to another
* thread to finish. */
GPR_ASSERT(sp->read_cb);
- if (sp->read_cb(sp->emfd, sp->server->user_data)) {
+ if (sp->read_cb(exec_ctx, sp->emfd, sp->server->user_data)) {
/* There may be more packets to read. Schedule read_more_cb_ closure to run
* after finishing this event loop. */
GRPC_CLOSURE_INIT(&sp->do_read_closure, do_read, arg,
grpc_executor_scheduler(GRPC_EXECUTOR_LONG));
- GRPC_CLOSURE_SCHED(&sp->do_read_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &sp->do_read_closure, GRPC_ERROR_NONE);
} else {
/* Finished reading all the packets; re-arm the notification event so we can
* get another chance to read. Or the fd is already shut down; re-arm to get a
* notification with the shutdown error. */
- grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
}
gpr_mu_unlock(&sp->server->mu);
}
// Wrapper of grpc_fd_notify_on_write() with a grpc_closure callback interface.
-void fd_notify_on_write_wrapper(void* arg, grpc_error* error) {
+void fd_notify_on_write_wrapper(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
grpc_udp_listener* sp = reinterpret_cast<grpc_udp_listener*>(arg);
gpr_mu_lock(&sp->server->mu);
if (!sp->notify_on_write_armed) {
- grpc_fd_notify_on_write(sp->emfd, &sp->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
sp->notify_on_write_armed = true;
}
gpr_mu_unlock(&sp->server->mu);
}
-static void do_write(void* arg, grpc_error* error) {
+static void do_write(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_udp_listener* sp = reinterpret_cast<grpc_udp_listener*>(arg);
gpr_mu_lock(&(sp->server->mu));
if (sp->already_shutdown) {
// If the fd has been shut down, don't write any more and re-arm notification.
- grpc_fd_notify_on_write(sp->emfd, &sp->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
} else {
sp->notify_on_write_armed = false;
/* Tell the registered callback that the socket is writeable. */
GPR_ASSERT(sp->write_cb && error == GRPC_ERROR_NONE);
GRPC_CLOSURE_INIT(&sp->notify_on_write_closure, fd_notify_on_write_wrapper,
arg, grpc_schedule_on_exec_ctx);
- sp->write_cb(sp->emfd, sp->server->user_data, &sp->notify_on_write_closure);
+ sp->write_cb(exec_ctx, sp->emfd, sp->server->user_data,
+ &sp->notify_on_write_closure);
}
gpr_mu_unlock(&sp->server->mu);
}
-static void on_write(void* arg, grpc_error* error) {
+static void on_write(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_udp_listener* sp = (grpc_udp_listener*)arg;
gpr_mu_lock(&(sp->server->mu));
if (error != GRPC_ERROR_NONE) {
if (0 == --sp->server->active_ports && sp->server->shutdown) {
gpr_mu_unlock(&sp->server->mu);
- deactivated_all_ports(sp->server);
+ deactivated_all_ports(exec_ctx, sp->server);
} else {
gpr_mu_unlock(&sp->server->mu);
}
@@ -444,7 +452,7 @@ static void on_write(void* arg, grpc_error* error) {
GRPC_CLOSURE_INIT(&sp->do_write_closure, do_write, arg,
grpc_executor_scheduler(GRPC_EXECUTOR_LONG));
- GRPC_CLOSURE_SCHED(&sp->do_write_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &sp->do_write_closure, GRPC_ERROR_NONE);
gpr_mu_unlock(&sp->server->mu);
}
@@ -585,8 +593,9 @@ int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index) {
return sp->fd;
}
-void grpc_udp_server_start(grpc_udp_server* s, grpc_pollset** pollsets,
- size_t pollset_count, void* user_data) {
+void grpc_udp_server_start(grpc_exec_ctx* exec_ctx, grpc_udp_server* s,
+ grpc_pollset** pollsets, size_t pollset_count,
+ void* user_data) {
size_t i;
gpr_mu_lock(&s->mu);
grpc_udp_listener* sp;
@@ -597,16 +606,16 @@ void grpc_udp_server_start(grpc_udp_server* s, grpc_pollset** pollsets,
sp = s->head;
while (sp != nullptr) {
for (i = 0; i < pollset_count; i++) {
- grpc_pollset_add_fd(pollsets[i], sp->emfd);
+ grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
grpc_schedule_on_exec_ctx);
- grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
GRPC_CLOSURE_INIT(&sp->write_closure, on_write, sp,
grpc_schedule_on_exec_ctx);
sp->notify_on_write_armed = true;
- grpc_fd_notify_on_write(sp->emfd, &sp->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
/* Registered for both read and write callbacks: increment active_ports
* twice to account for this, and delay freeing of memory until both
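
The read path above reads one burst inline and, when the callback reports more data pending, reschedules itself as a closure rather than looping, so other fds get a turn in between. A standalone sketch of that re-arm-or-offload decision (executor, read_cb, and do_read below are illustrative; the deque stands in for gRPC's executor):

#include <cstdio>
#include <deque>
#include <functional>

static std::deque<std::function<void()>> executor;  // fake work queue

static bool read_cb(int* packets_left) {  // returns "is there more to read?"
  std::printf("read one packet, %d left\n", --*packets_left);
  return *packets_left > 0;
}

static void do_read(int* packets_left) {
  if (read_cb(packets_left)) {
    // More packets pending: reschedule instead of looping, so other
    // closures get a turn before we read again.
    executor.push_back([packets_left] { do_read(packets_left); });
  } else {
    std::puts("re-arm notify_on_read and wait for the next burst");
  }
}

int main() {
  int packets_left = 3;
  do_read(&packets_left);
  while (!executor.empty()) {  // drain the fake executor
    auto closure = executor.front();
    executor.pop_front();
    closure();
  }
}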
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index 02e3acb7f5..1bd6922de6 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -32,15 +32,18 @@ typedef struct grpc_udp_server grpc_udp_server;
/* Called when data is available to read from the socket.
* Return true if there is more data to read from fd. */
-typedef bool (*grpc_udp_server_read_cb)(grpc_fd* emfd, void* user_data);
+typedef bool (*grpc_udp_server_read_cb)(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
+ void* user_data);
/* Called when the socket is writeable. The given closure should be scheduled
* when the socket becomes blocked next time. */
-typedef void (*grpc_udp_server_write_cb)(grpc_fd* emfd, void* user_data,
+typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
+ void* user_data,
grpc_closure* notify_on_write_closure);
/* Called when the grpc_fd is about to be orphaned (and the FD closed). */
-typedef void (*grpc_udp_server_orphan_cb)(grpc_fd* emfd,
+typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx* exec_ctx,
+ grpc_fd* emfd,
grpc_closure* shutdown_fd_callback,
void* user_data);
@@ -48,8 +51,9 @@ typedef void (*grpc_udp_server_orphan_cb)(grpc_fd* emfd,
grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args);
/* Start listening to bound ports. user_data is passed to callbacks. */
-void grpc_udp_server_start(grpc_udp_server* udp_server, grpc_pollset** pollsets,
- size_t pollset_count, void* user_data);
+void grpc_udp_server_start(grpc_exec_ctx* exec_ctx, grpc_udp_server* udp_server,
+ grpc_pollset** pollsets, size_t pollset_count,
+ void* user_data);
int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index);
@@ -69,6 +73,7 @@ int grpc_udp_server_add_port(grpc_udp_server* s,
grpc_udp_server_write_cb write_cb,
grpc_udp_server_orphan_cb orphan_cb);
-void grpc_udp_server_destroy(grpc_udp_server* server, grpc_closure* on_done);
+void grpc_udp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_udp_server* server,
+ grpc_closure* on_done);
#endif /* GRPC_CORE_LIB_IOMGR_UDP_SERVER_H */
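
The net effect on this header is that every callback typedef gains the execution context as its first parameter, so user callbacks can schedule closures on the caller's context. A sketch of that signature shape with illustrative types (exec_ctx and fd_handle here are stand-ins, not the gRPC types):

#include <cstdio>

struct exec_ctx { int scheduled = 0; };  // stand-in for grpc_exec_ctx
struct fd_handle { int fd; };            // stand-in for grpc_fd

// After this diff, user-facing callback typedefs carry the context too.
typedef bool (*read_cb)(exec_ctx* ctx, fd_handle* emfd, void* user_data);

static bool on_read(exec_ctx* ctx, fd_handle* emfd, void* /*user_data*/) {
  ++ctx->scheduled;  // a real callback would schedule closures on ctx
  std::printf("read on fd %d, %d closure(s) queued\n", emfd->fd,
              ctx->scheduled);
  return false;  // no more data pending
}

int main() {
  exec_ctx ctx;
  fd_handle fd{3};
  read_cb cb = on_read;
  cb(&ctx, &fd, nullptr);
}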