about | summary | refs | log | tree | commit | diff | homepage
path: root/src/core/lib/iomgr/timer_generic.cc
diff options
context:
space:
mode:
author: Yash Tibrewal <yashkt@google.com>, 2017-10-13 16:07:13 -0700
committer: Yash Tibrewal <yashkt@google.com>, 2017-10-18 17:12:19 -0700
commit0ee7574732a06e8cace4e099a678f4bd5dbff679 (patch)
treee43d5de442fdcc3d39cd5af687f319fa39612d3f /src/core/lib/iomgr/timer_generic.cc
parent6bf5f833efe2cb9e2ecc14358dd9699cd5d05263 (diff)
Removing instances of exec_ctx being passed around in functions in
src/core. exec_ctx is now a thread_local pointer of type ExecCtx instead of grpc_exec_ctx which is initialized whenever ExecCtx is instantiated. ExecCtx also keeps track of the previous exec_ctx so that nesting of exec_ctx is allowed. This means that there is only one exec_ctx being used at any time. Also, grpc_exec_ctx_finish is called in the destructor of the object, and the previous exec_ctx is restored to avoid breaking current functionality. The code still explicitly calls grpc_exec_ctx_finish because removing all such instances causes the code to break.
Diffstat (limited to 'src/core/lib/iomgr/timer_generic.cc')
-rw-r--r--  src/core/lib/iomgr/timer_generic.cc  (47 lines shown in diff)
1 file changed, 21 insertions, 26 deletions
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index b8e895de6f..c8cbd42a0c 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -227,8 +227,7 @@ static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
return a + b;
}
-static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
- gpr_atm now,
+static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
gpr_atm *next,
grpc_error *error);
@@ -238,13 +237,13 @@ static gpr_atm compute_min_deadline(timer_shard *shard) {
: grpc_timer_heap_top(&shard->heap)->deadline;
}
-void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {
+void grpc_timer_list_init() {
uint32_t i;
g_shared_mutables.initialized = true;
g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mu_init(&g_shared_mutables.mu);
- g_shared_mutables.min_timer = grpc_exec_ctx_now(exec_ctx);
+ g_shared_mutables.min_timer = grpc_exec_ctx_now();
gpr_tls_init(&g_last_seen_min_timer);
gpr_tls_set(&g_last_seen_min_timer, 0);
grpc_register_tracer(&grpc_timer_trace);
@@ -266,10 +265,10 @@ void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {
INIT_TIMER_HASH_TABLE();
}
-void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
+void grpc_timer_list_shutdown() {
int i;
run_some_expired_timers(
- exec_ctx, GPR_ATM_MAX, NULL,
+ GPR_ATM_MAX, NULL,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
for (i = 0; i < NUM_SHARDS; i++) {
timer_shard *shard = &g_shards[i];
@@ -320,8 +319,8 @@ static void note_deadline_change(timer_shard *shard) {
void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; }
-void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- grpc_millis deadline, grpc_closure *closure) {
+void grpc_timer_init(grpc_timer *timer, grpc_millis deadline,
+ grpc_closure *closure) {
int is_first_timer = 0;
timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
timer->closure = closure;
@@ -334,12 +333,12 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
if (GRPC_TRACER_ON(grpc_timer_trace)) {
gpr_log(GPR_DEBUG,
"TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
- deadline, grpc_exec_ctx_now(exec_ctx), closure, closure->cb);
+ deadline, grpc_exec_ctx_now(), closure, closure->cb);
}
if (!g_shared_mutables.initialized) {
timer->pending = false;
- GRPC_CLOSURE_SCHED(exec_ctx, timer->closure,
+ GRPC_CLOSURE_SCHED(timer->closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Attempt to create timer before initialization"));
return;
@@ -347,10 +346,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_mu_lock(&shard->mu);
timer->pending = true;
- grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ grpc_millis now = grpc_exec_ctx_now();
if (deadline <= now) {
timer->pending = false;
- GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
gpr_mu_unlock(&shard->mu);
/* early out */
return;
@@ -410,7 +409,7 @@ void grpc_timer_consume_kick(void) {
gpr_tls_set(&g_last_seen_min_timer, 0);
}
-void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
+void grpc_timer_cancel(grpc_timer *timer) {
if (!g_shared_mutables.initialized) {
/* must have already been cancelled, also the shard mutex is invalid */
return;
@@ -426,7 +425,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
if (timer->pending) {
REMOVE_FROM_HASH_TABLE(timer);
- GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
+ GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED);
timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
@@ -512,15 +511,14 @@ static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
}
/* REQUIRES: shard->mu unlocked */
-static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
- gpr_atm now, gpr_atm *new_min_deadline,
- grpc_error *error) {
+static size_t pop_timers(timer_shard *shard, gpr_atm now,
+ gpr_atm *new_min_deadline, grpc_error *error) {
size_t n = 0;
grpc_timer *timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
REMOVE_FROM_HASH_TABLE(timer);
- GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_REF(error));
n++;
}
*new_min_deadline = compute_min_deadline(shard);
@@ -532,8 +530,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
return n;
}
-static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
- gpr_atm now,
+static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
gpr_atm *next,
grpc_error *error) {
grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED;
@@ -562,8 +559,7 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
/* For efficiency, we pop as many available timers as we can from the
shard. This may violate perfect timer deadline ordering, but that
shouldn't be a big deal because we don't make ordering guarantees. */
- if (pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline,
- error) > 0) {
+ if (pop_timers(g_shard_queue[0], now, &new_min_deadline, error) > 0) {
result = GRPC_TIMERS_FIRED;
}
@@ -600,10 +596,9 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
return result;
}
-grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- grpc_millis *next) {
+grpc_timer_check_result grpc_timer_check(grpc_millis *next) {
// prelude
- grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ grpc_millis now = grpc_exec_ctx_now();
/* fetch from a thread-local first: this avoids contention on a globally
mutable cacheline in the common case */
@@ -641,7 +636,7 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
}
// actual code
grpc_timer_check_result r =
- run_some_expired_timers(exec_ctx, now, next, shutdown_error);
+ run_some_expired_timers(now, next, shutdown_error);
// tracing
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
char *next_str;