Diffstat (limited to 'src/core/lib/iomgr')
-rw-r--r--  src/core/lib/iomgr/call_combiner.cc | 31
-rw-r--r--  src/core/lib/iomgr/call_combiner.h | 2
-rw-r--r--  src/core/lib/iomgr/closure.cc | 4
-rw-r--r--  src/core/lib/iomgr/closure.h | 2
-rw-r--r--  src/core/lib/iomgr/combiner.cc | 17
-rw-r--r--  src/core/lib/iomgr/combiner.h | 2
-rw-r--r--  src/core/lib/iomgr/error.cc | 16
-rw-r--r--  src/core/lib/iomgr/error.h | 2
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.cc | 52
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.cc | 72
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.cc | 14
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.cc | 10
-rw-r--r--  src/core/lib/iomgr/ev_posix.cc | 9
-rw-r--r--  src/core/lib/iomgr/ev_posix.h | 2
-rw-r--r--  src/core/lib/iomgr/ev_windows.cc | 4
-rw-r--r--  src/core/lib/iomgr/exec_ctx.cc | 4
-rw-r--r--  src/core/lib/iomgr/executor.cc | 16
-rw-r--r--  src/core/lib/iomgr/iomgr_posix.cc | 1
-rw-r--r--  src/core/lib/iomgr/iomgr_uv.cc | 2
-rw-r--r--  src/core/lib/iomgr/lockfree_event.cc | 8
-rw-r--r--  src/core/lib/iomgr/pollset.h | 2
-rw-r--r--  src/core/lib/iomgr/pollset_uv.cc | 5
-rw-r--r--  src/core/lib/iomgr/pollset_windows.cc | 5
-rw-r--r--  src/core/lib/iomgr/resource_quota.cc | 21
-rw-r--r--  src/core/lib/iomgr/resource_quota.h | 2
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.cc | 8
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.cc | 6
-rw-r--r--  src/core/lib/iomgr/tcp_posix.cc | 50
-rw-r--r--  src/core/lib/iomgr/tcp_posix.h | 2
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.cc | 2
-rw-r--r--  src/core/lib/iomgr/tcp_server_uv.cc | 8
-rw-r--r--  src/core/lib/iomgr/tcp_uv.cc | 18
-rw-r--r--  src/core/lib/iomgr/tcp_uv.h | 2
-rw-r--r--  src/core/lib/iomgr/tcp_windows.cc | 6
-rw-r--r--  src/core/lib/iomgr/timer_generic.cc | 37
-rw-r--r--  src/core/lib/iomgr/timer_manager.cc | 25
-rw-r--r--  src/core/lib/iomgr/timer_uv.cc | 5
37 files changed, 222 insertions, 252 deletions
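
The change below migrates every tracer in this directory from the C-style grpc_tracer_flag struct, the GRPC_TRACER_INITIALIZER()/GRPC_TRACER_ON() macros, and explicit grpc_register_tracer() calls to C++ grpc_core::TraceFlag and grpc_core::DebugOnlyTraceFlag objects: each flag is constructed at file scope with its default value and name, call sites test flag.enabled(), and registration happens in the constructor. For orientation, here is a minimal, self-contained sketch of that shape. It is not the real grpc_core::TraceFlag (presumably declared in src/core/lib/debug/trace.h, which these files include); the registry and EnableByName helper are invented for illustration.

#include <cstdio>
#include <cstring>
#include <vector>

namespace sketch {

class TraceFlag {
 public:
  TraceFlag(bool default_enabled, const char* name)
      : name_(name), enabled_(default_enabled) {
    registry().push_back(this);  // self-registration replaces grpc_register_tracer()
  }
  const char* name() const { return name_; }
  bool enabled() const { return enabled_; }
  void set_enabled(bool enabled) { enabled_ = enabled; }

  // Turn a flag on by name, roughly what a GRPC_TRACE-style setting would do.
  static bool EnableByName(const char* name) {
    for (TraceFlag* flag : registry()) {
      if (std::strcmp(flag->name(), name) == 0) {
        flag->set_enabled(true);
        return true;
      }
    }
    return false;
  }

 private:
  static std::vector<TraceFlag*>& registry() {
    static std::vector<TraceFlag*> flags;  // built lazily, safe during static init
    return flags;
  }
  const char* const name_;
  bool enabled_;
};

}  // namespace sketch

// File-scope construction, matching the declarations added in this diff.
static sketch::TraceFlag example_trace(false, "example");

int main() {
  sketch::TraceFlag::EnableByName("example");
  if (example_trace.enabled()) {  // replaces if (GRPC_TRACER_ON(example_trace))
    std::printf("tracing enabled for '%s'\n", example_trace.name());
  }
  return 0;
}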
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
index d45719608b..752303e260 100644
--- a/src/core/lib/iomgr/call_combiner.cc
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -24,8 +24,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/profiling/timers.h"
-grpc_tracer_flag grpc_call_combiner_trace =
- GRPC_TRACER_INITIALIZER(false, "call_combiner");
+grpc_core::TraceFlag grpc_call_combiner_trace(false, "call_combiner");
static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) {
if (cancel_state & 1) {
@@ -63,7 +62,7 @@ void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
grpc_error* error DEBUG_ARGS,
const char* reason) {
GPR_TIMER_BEGIN("call_combiner_start", 0);
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
"==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
"%s] error=%s",
@@ -72,7 +71,7 @@ void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
}
size_t prev_size =
(size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1);
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1);
}
@@ -80,13 +79,13 @@ void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
if (prev_size == 0) {
GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(exec_ctx);
GPR_TIMER_MARK("call_combiner_initiate", 0);
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY");
}
// Queue was empty, so execute this closure immediately.
GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
} else {
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " QUEUING");
}
// Queue was not empty, so add closure to queue.
@@ -100,21 +99,21 @@ void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
grpc_call_combiner* call_combiner DEBUG_ARGS,
const char* reason) {
GPR_TIMER_BEGIN("call_combiner_stop", 0);
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
"==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
call_combiner DEBUG_FMT_ARGS, reason);
}
size_t prev_size =
(size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1);
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size - 1);
}
GPR_ASSERT(prev_size >= 1);
if (prev_size > 1) {
while (true) {
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " checking queue");
}
bool empty;
@@ -123,19 +122,19 @@ void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
if (closure == NULL) {
// This can happen either due to a race condition within the mpscq
// code or because of a race with grpc_call_combiner_start().
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " queue returned no result; checking again");
}
continue;
}
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure, grpc_error_string(closure->error_data.error));
}
GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error);
break;
}
- } else if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ } else if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " queue empty");
}
GPR_TIMER_END("call_combiner_stop", 0);
@@ -152,7 +151,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
// If error is set, invoke the cancellation closure immediately.
// Otherwise, store the new closure.
if (original_error != GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
"call_combiner=%p: scheduling notify_on_cancel callback=%p "
"for pre-existing cancellation",
@@ -163,7 +162,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
} else {
if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
(gpr_atm)closure)) {
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p",
call_combiner, closure);
}
@@ -172,7 +171,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
// up any resources they may be holding for the callback.
if (original_state != 0) {
closure = (grpc_closure*)original_state;
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
"call_combiner=%p: scheduling old cancel callback=%p",
call_combiner, closure);
@@ -201,7 +200,7 @@ void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
encode_cancel_state_error(error))) {
if (original_state != 0) {
grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
- if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG,
"call_combiner=%p: scheduling notify_on_cancel callback=%p",
call_combiner, notify_on_cancel);
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index 527f84fce0..77420fa3e0 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -40,7 +40,7 @@ extern "C" {
// when it is done with the action that was kicked off by the original
// callback.
-extern grpc_tracer_flag grpc_call_combiner_trace;
+extern grpc_core::TraceFlag grpc_call_combiner_trace;
typedef struct {
gpr_atm size; // size_t, num closures in queue or currently executing
diff --git a/src/core/lib/iomgr/closure.cc b/src/core/lib/iomgr/closure.cc
index 00edefc6ae..47429b4587 100644
--- a/src/core/lib/iomgr/closure.cc
+++ b/src/core/lib/iomgr/closure.cc
@@ -24,9 +24,7 @@
#include "src/core/lib/profiling/timers.h"
-#ifndef NDEBUG
-grpc_tracer_flag grpc_trace_closure = GRPC_TRACER_INITIALIZER(false, "closure");
-#endif
+grpc_core::DebugOnlyTraceFlag grpc_trace_closure(false, "closure");
#ifndef NDEBUG
grpc_closure *grpc_closure_init(const char *file, int line,
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index cd32a4ba38..66484a210f 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -34,7 +34,7 @@ struct grpc_closure;
typedef struct grpc_closure grpc_closure;
#ifndef NDEBUG
-extern grpc_tracer_flag grpc_trace_closure;
+extern grpc_core::TraceFlag grpc_trace_closure;
#endif
typedef struct grpc_closure_list {
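
In closure.cc and closure.h above, the #ifndef NDEBUG guard around the flag definition disappears because grpc_core::DebugOnlyTraceFlag carries the build-mode distinction itself. A hedged sketch of how such a type can work, under the assumption that release builds reduce enabled() to a compile-time false; the class and variable names here are invented, not the real implementation.

#include <cstdio>

#ifdef NDEBUG
// Release build: the flag collapses to a constant, enabled() is always false,
// and guarded gpr_log()-style calls can be optimized away entirely.
class DebugOnlyTraceFlagSketch {
 public:
  constexpr DebugOnlyTraceFlagSketch(bool /*default_enabled*/, const char* /*name*/) {}
  static constexpr bool enabled() { return false; }
};
#else
// Debug build: it behaves like an ordinary runtime-checkable trace flag.
class DebugOnlyTraceFlagSketch {
 public:
  DebugOnlyTraceFlagSketch(bool default_enabled, const char* /*name*/)
      : enabled_(default_enabled) {}
  bool enabled() const { return enabled_; }

 private:
  bool enabled_;
};
#endif

static DebugOnlyTraceFlagSketch trace_closure_sketch(false, "closure");

int main() {
  if (trace_closure_sketch.enabled()) {  // always false when NDEBUG is defined
    std::printf("closure tracing is on (debug build)\n");
  }
  return 0;
}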
diff --git a/src/core/lib/iomgr/combiner.cc b/src/core/lib/iomgr/combiner.cc
index 53f4b7eaa7..30e3b9111f 100644
--- a/src/core/lib/iomgr/combiner.cc
+++ b/src/core/lib/iomgr/combiner.cc
@@ -29,14 +29,13 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
-grpc_tracer_flag grpc_combiner_trace =
- GRPC_TRACER_INITIALIZER(false, "combiner");
-
-#define GRPC_COMBINER_TRACE(fn) \
- do { \
- if (GRPC_TRACER_ON(grpc_combiner_trace)) { \
- fn; \
- } \
+grpc_core::TraceFlag grpc_combiner_trace(false, "combiner");
+
+#define GRPC_COMBINER_TRACE(fn) \
+ do { \
+ if (grpc_combiner_trace.enabled()) { \
+ fn; \
+ } \
} while (0)
#define STATE_UNORPHANED 1
@@ -106,7 +105,7 @@ static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
#ifndef NDEBUG
#define GRPC_COMBINER_DEBUG_SPAM(op, delta) \
- if (GRPC_TRACER_ON(grpc_combiner_trace)) { \
+ if (grpc_combiner_trace.enabled()) { \
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, \
"C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op), \
gpr_atm_no_barrier_load(&lock->refs.count), \
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index 10e5fb480d..8eeb0df06a 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -65,7 +65,7 @@ grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock);
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
-extern grpc_tracer_flag grpc_combiner_trace;
+extern grpc_core::TraceFlag grpc_combiner_trace;
#ifdef __cplusplus
}
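
The combiner files above, like ev_epollsig_linux.cc further down, keep their convenience macros but redefine them in terms of the new API: the macro body now tests flag.enabled() instead of GRPC_TRACER_ON(flag), and call sites are untouched. A small illustrative sketch of that wrapping pattern, with invented flag and macro names:

#include <cstdio>

class ExampleTraceFlag {
 public:
  explicit ExampleTraceFlag(bool enabled) : enabled_(enabled) {}
  bool enabled() const { return enabled_; }

 private:
  bool enabled_;
};

static ExampleTraceFlag example_combiner_trace(true);

// Same shape as GRPC_COMBINER_TRACE / GRPC_POLLING_TRACE after this diff:
// the guard moves into the macro, so call sites stay one-liners.
#define EXAMPLE_COMBINER_TRACE(fn) \
  do { \
    if (example_combiner_trace.enabled()) { \
      fn; \
    } \
  } while (0)

int main() {
  int queued = 3;
  EXAMPLE_COMBINER_TRACE(std::printf("combiner: %d queued closures\n", queued));
  return 0;
}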
diff --git a/src/core/lib/iomgr/error.cc b/src/core/lib/iomgr/error.cc
index 2ea6cf1301..e1e2c653be 100644
--- a/src/core/lib/iomgr/error.cc
+++ b/src/core/lib/iomgr/error.cc
@@ -37,10 +37,8 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
-#ifndef NDEBUG
-grpc_tracer_flag grpc_trace_error_refcount =
- GRPC_TRACER_INITIALIZER(false, "error_refcount");
-#endif
+grpc_core::DebugOnlyTraceFlag grpc_trace_error_refcount(false,
+ "error_refcount");
static const char *error_int_name(grpc_error_ints key) {
switch (key) {
@@ -130,7 +128,7 @@ bool grpc_error_is_special(grpc_error *err) {
#ifndef NDEBUG
grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line) {
if (grpc_error_is_special(err)) return err;
- if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ if (grpc_trace_error_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
gpr_atm_no_barrier_load(&err->atomics.refs.count),
gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line);
@@ -183,7 +181,7 @@ static void error_destroy(grpc_error *err) {
#ifndef NDEBUG
void grpc_error_unref(grpc_error *err, const char *file, int line) {
if (grpc_error_is_special(err)) return;
- if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ if (grpc_trace_error_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
gpr_atm_no_barrier_load(&err->atomics.refs.count),
gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line);
@@ -216,7 +214,7 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
*err = (grpc_error *)gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ if (grpc_trace_error_refcount.enabled()) {
if (*err != orig) {
gpr_log(GPR_DEBUG, "realloc %p -> %p", orig, *err);
}
@@ -329,7 +327,7 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
return GRPC_ERROR_OOM;
}
#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ if (grpc_trace_error_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%p create [%s:%d]", err, file, line);
}
#endif
@@ -411,7 +409,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
out = (grpc_error *)gpr_malloc(sizeof(*in) +
new_arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
+ if (grpc_trace_error_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
}
#endif
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index b36330a7ab..d4050c1138 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -39,7 +39,7 @@ extern "C" {
typedef struct grpc_error grpc_error;
#ifndef NDEBUG
-extern grpc_tracer_flag grpc_trace_error_refcount;
+extern grpc_core::TraceFlag grpc_trace_error_refcount;
#endif
typedef enum {
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 6126e2771c..4bda75ebb7 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -274,7 +274,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ if (grpc_trace_fd_refcount.enabled()) {
gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
}
#endif
@@ -650,7 +650,7 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
}
@@ -672,7 +672,7 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
pollset->begin_refs++;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_STARTS:%p", pollset, worker);
}
@@ -691,7 +691,7 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
retry_lock_neighborhood:
gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
pollset, worker, kick_state_string(worker->state),
is_reassigning);
@@ -743,7 +743,7 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
while (worker->state == UNKICKED && !pollset->shutting_down) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
pollset, worker, kick_state_string(worker->state),
pollset->shutting_down);
@@ -760,7 +760,7 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_exec_ctx_invalidate_now(exec_ctx);
}
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
@@ -805,7 +805,7 @@ static bool check_neighborhood_for_available_poller(
case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
inspect_worker);
}
@@ -816,7 +816,7 @@ static bool check_neighborhood_for_available_poller(
gpr_cv_signal(&inspect_worker->cv);
}
} else {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
}
}
@@ -834,7 +834,7 @@ static bool check_neighborhood_for_available_poller(
} while (!found_worker && inspect_worker != inspect->root_worker);
}
if (!found_worker) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
}
inspect->seen_inactive = true;
@@ -856,7 +856,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl) {
GPR_TIMER_BEGIN("end_worker", 0);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
}
if (worker_hdl != NULL) *worker_hdl = NULL;
@@ -866,7 +866,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
&exec_ctx->closure_list);
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (worker->next != worker && worker->next->state == UNKICKED) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
}
GPR_ASSERT(worker->next->initialized_cv);
@@ -920,7 +920,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. remove worker");
}
if (EMPTIED == worker_remove(pollset, worker)) {
@@ -992,7 +992,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_TIMER_BEGIN("pollset_kick", 0);
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
grpc_error *ret_err = GRPC_ERROR_NONE;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_strvec log;
gpr_strvec_init(&log);
char *tmp;
@@ -1025,7 +1025,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (root_worker == NULL) {
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
pollset->kicked_without_poller = true;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kicked_without_poller");
}
goto done;
@@ -1033,14 +1033,14 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *next_worker = root_worker->next;
if (root_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
goto done;
} else if (next_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
}
SET_KICK_STATE(next_worker, KICKED);
@@ -1051,7 +1051,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
@@ -1059,7 +1059,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
goto done;
} else if (next_worker->state == UNKICKED) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
}
GPR_ASSERT(next_worker->initialized_cv);
@@ -1068,7 +1068,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
goto done;
} else if (next_worker->state == DESIGNATED_POLLER) {
if (root_worker->state != DESIGNATED_POLLER) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(
GPR_ERROR,
" .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
@@ -1082,7 +1082,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
goto done;
} else {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
}
@@ -1098,7 +1098,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kicked while waking up");
}
goto done;
@@ -1108,14 +1108,14 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
if (specific_worker->state == KICKED) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. specific worker already kicked");
}
goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
}
SET_KICK_STATE(specific_worker, KICKED);
@@ -1123,7 +1123,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
} else if (specific_worker ==
(grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kick active poller");
}
SET_KICK_STATE(specific_worker, KICKED);
@@ -1131,7 +1131,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
goto done;
} else if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kick waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
@@ -1139,7 +1139,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
goto done;
} else {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, " .. kick non-waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index fa6d79cbfc..89a5a22957 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -57,10 +57,8 @@
#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5
-#ifndef NDEBUG
-grpc_tracer_flag grpc_trace_pollable_refcount =
- GRPC_TRACER_INITIALIZER(false, "pollable_refcount");
-#endif
+grpc_core::DebugOnlyTraceFlag grpc_trace_pollable_refcount(false,
+ "pollable_refcount");
/*******************************************************************************
* pollable Declarations
@@ -261,7 +259,7 @@ static gpr_mu fd_freelist_mu;
unref_by(ec, fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ if (grpc_trace_fd_refcount.enabled()) {
gpr_log(GPR_DEBUG,
"FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
@@ -295,7 +293,7 @@ static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
#ifndef NDEBUG
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ if (grpc_trace_fd_refcount.enabled()) {
gpr_log(GPR_DEBUG,
"FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
@@ -357,7 +355,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ if (grpc_trace_fd_refcount.enabled()) {
gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
}
#endif
@@ -481,7 +479,7 @@ static grpc_error *pollable_create(pollable_type type, pollable **p) {
static pollable *pollable_ref(pollable *p) {
#else
static pollable *pollable_ref(pollable *p, int line, const char *reason) {
- if (GRPC_TRACER_ON(grpc_trace_pollable_refcount)) {
+ if (grpc_trace_pollable_refcount.enabled()) {
int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
"POLLABLE:%p ref %d->%d %s", p, r, r + 1, reason);
@@ -496,7 +494,7 @@ static void pollable_unref(pollable *p) {
#else
static void pollable_unref(pollable *p, int line, const char *reason) {
if (p == NULL) return;
- if (GRPC_TRACER_ON(grpc_trace_pollable_refcount)) {
+ if (grpc_trace_pollable_refcount.enabled()) {
int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
"POLLABLE:%p unref %d->%d %s", p, r, r - 1, reason);
@@ -514,7 +512,7 @@ static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
static const char *err_desc = "pollable_add_fd";
const int epfd = p->epfd;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
}
@@ -556,7 +554,7 @@ static void pollset_global_shutdown(void) {
/* pollset->mu must be held while calling this function */
static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
"PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
"rw=%p (target:NULL) cpsc=%d (target:0)",
@@ -580,14 +578,14 @@ static grpc_error *kick_one_worker(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
GPR_ASSERT(specific_worker != NULL);
if (specific_worker->kicked) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p);
}
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
return GRPC_ERROR_NONE;
}
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p);
}
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
@@ -596,7 +594,7 @@ static grpc_error *kick_one_worker(grpc_exec_ctx *exec_ctx,
}
if (specific_worker == p->root_worker) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
}
specific_worker->kicked = true;
@@ -605,7 +603,7 @@ static grpc_error *kick_one_worker(grpc_exec_ctx *exec_ctx,
}
if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p);
}
specific_worker->kicked = true;
@@ -619,7 +617,7 @@ static grpc_error *kick_one_worker(grpc_exec_ctx *exec_ctx,
static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker,
@@ -630,7 +628,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
if (pollset->root_worker == NULL) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", pollset);
}
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
@@ -656,7 +654,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
exec_ctx, pollset->root_worker->links[PWLINK_POLLSET].next);
}
} else {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", pollset);
}
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
@@ -763,7 +761,7 @@ static grpc_error *pollable_process_events(grpc_exec_ctx *exec_ctx,
struct epoll_event *ev = &pollable_obj->events[n];
void *data_ptr = ev->data.ptr;
if (1 & (intptr_t)data_ptr) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
append_error(&error,
@@ -775,7 +773,7 @@ static grpc_error *pollable_process_events(grpc_exec_ctx *exec_ctx,
bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (ev->events & EPOLLOUT) != 0;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
"PS:%p got fd %p: cancel=%d read=%d "
"write=%d",
@@ -803,7 +801,7 @@ static grpc_error *pollable_epoll(grpc_exec_ctx *exec_ctx, pollable *p,
grpc_millis deadline) {
int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
char *desc = pollable_desc(p);
gpr_log(GPR_DEBUG, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);
gpr_free(desc);
@@ -823,7 +821,7 @@ static grpc_error *pollable_epoll(grpc_exec_ctx *exec_ctx, pollable *p,
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "POLLABLE:%p got %d events", p, r);
}
@@ -891,7 +889,7 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
gpr_mu_unlock(&pollset->mu);
- if (GRPC_TRACER_ON(grpc_polling_trace) &&
+ if (grpc_polling_trace.enabled() &&
worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
worker->pollable_obj, worker,
@@ -900,18 +898,18 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
while (do_poll && worker->pollable_obj->root_worker != worker) {
if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
worker->pollable_obj, worker);
}
do_poll = false;
} else if (worker->kicked) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
}
do_poll = false;
- } else if (GRPC_TRACER_ON(grpc_polling_trace) &&
+ } else if (grpc_polling_trace.enabled() &&
worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
@@ -982,7 +980,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
#ifndef NDEBUG
WORKER_PTR->originator = gettid();
#endif
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRIdPTR
" deadline=%" PRIdPTR " kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_exec_ctx_now(exec_ctx),
@@ -1023,7 +1021,7 @@ static grpc_error *pollset_transition_pollable_from_empty_to_fd_locked(
grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd *fd) {
static const char *err_desc = "pollset_transition_pollable_from_empty_to_fd";
grpc_error *error = GRPC_ERROR_NONE;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p (%d); transition pollable from empty to fd",
pollset, fd, fd->fd);
@@ -1039,7 +1037,7 @@ static grpc_error *pollset_transition_pollable_from_fd_to_multi_locked(
grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd *and_add_fd) {
static const char *err_desc = "pollset_transition_pollable_from_fd_to_multi";
grpc_error *error = GRPC_ERROR_NONE;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
@@ -1189,7 +1187,7 @@ static void pollset_set_unref(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss) {
static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
grpc_fd *fd) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
}
grpc_error *error = GRPC_ERROR_NONE;
@@ -1213,7 +1211,7 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
grpc_fd *fd) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: del fd %p", pss, fd);
}
pss = pss_lock_adam(pss);
@@ -1234,7 +1232,7 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pss, grpc_pollset *ps) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: del pollset %p", pss, ps);
}
pss = pss_lock_adam(pss);
@@ -1285,7 +1283,7 @@ static grpc_error *add_fds_to_pollsets(grpc_exec_ctx *exec_ctx, grpc_fd **fds,
static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pss, grpc_pollset *ps) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: add pollset %p", pss, ps);
}
grpc_error *error = GRPC_ERROR_NONE;
@@ -1322,7 +1320,7 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *a,
grpc_pollset_set *b) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS: merge (%p, %p)", a, b);
}
grpc_error *error = GRPC_ERROR_NONE;
@@ -1356,7 +1354,7 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
if (b_size > a_size) {
GPR_SWAP(grpc_pollset_set *, a, b);
}
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS: parent %p to %p", b, a);
}
gpr_ref(&a->refs);
@@ -1453,10 +1451,6 @@ const grpc_event_engine_vtable *grpc_init_epollex_linux(
return NULL;
}
-#ifndef NDEBUG
- grpc_register_tracer(&grpc_trace_pollable_refcount);
-#endif
-
fd_global_init();
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index 035bdc4cb5..ad7b27b279 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -53,9 +53,9 @@
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
-#define GRPC_POLLING_TRACE(...) \
- if (GRPC_TRACER_ON(grpc_polling_trace)) { \
- gpr_log(GPR_INFO, __VA_ARGS__); \
+#define GRPC_POLLING_TRACE(...) \
+ if (grpc_polling_trace.enabled()) { \
+ gpr_log(GPR_INFO, __VA_ARGS__); \
}
static int grpc_wakeup_signal = -1;
@@ -288,7 +288,7 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
#ifndef NDEBUG
static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
@@ -299,7 +299,7 @@ static void pi_add_ref_dbg(polling_island *pi, const char *reason,
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
@@ -730,7 +730,7 @@ static gpr_mu fd_freelist_mu;
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ if (grpc_trace_fd_refcount.enabled()) {
gpr_log(GPR_DEBUG,
"FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
@@ -747,7 +747,7 @@ static void ref_by(grpc_fd *fd, int n) {
#ifndef NDEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ if (grpc_trace_fd_refcount.enabled()) {
gpr_log(GPR_DEBUG,
"FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 036a35690c..ad4d0164bd 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -288,7 +288,7 @@ cv_fd_table g_cvfds;
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ if (grpc_trace_fd_refcount.enabled()) {
gpr_log(GPR_DEBUG,
"FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
@@ -305,7 +305,7 @@ static void ref_by(grpc_fd *fd, int n) {
#ifndef NDEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ if (grpc_trace_fd_refcount.enabled()) {
gpr_log(GPR_DEBUG,
"FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
@@ -992,7 +992,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
}
@@ -1016,7 +1016,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset);
}
work_combine_error(
@@ -1026,7 +1026,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 677ee675a6..87fcfd2e04 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -36,12 +36,11 @@
#include "src/core/lib/iomgr/ev_poll_posix.h"
#include "src/core/lib/support/env.h"
-grpc_tracer_flag grpc_polling_trace =
- GRPC_TRACER_INITIALIZER(false, "polling"); /* Disabled by default */
+grpc_core::TraceFlag grpc_polling_trace(false,
+ "polling"); /* Disabled by default */
#ifndef NDEBUG
-grpc_tracer_flag grpc_trace_fd_refcount =
- GRPC_TRACER_INITIALIZER(false, "fd_refcount");
+grpc_core::TraceFlag grpc_trace_fd_refcount(false, "fd_refcount");
#endif
/** Default poll() function - a pointer so that it can be overridden by some
@@ -153,8 +152,6 @@ const grpc_event_engine_vtable *grpc_get_event_engine_test_only() {
const char *grpc_get_poll_strategy_name() { return g_poll_strategy_name; }
void grpc_event_engine_init(void) {
- grpc_register_tracer(&grpc_polling_trace);
-
char *s = gpr_getenv("GRPC_POLL_STRATEGY");
if (s == NULL) {
s = gpr_strdup("all");
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index bc4456c2a2..d4d6d98cae 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -31,7 +31,7 @@
extern "C" {
#endif
-extern grpc_tracer_flag grpc_polling_trace; /* Disabled by default */
+extern grpc_core::TraceFlag grpc_polling_trace; /* Disabled by default */
typedef struct grpc_fd grpc_fd;
diff --git a/src/core/lib/iomgr/ev_windows.cc b/src/core/lib/iomgr/ev_windows.cc
index c24dfaeaf7..cb66de0a06 100644
--- a/src/core/lib/iomgr/ev_windows.cc
+++ b/src/core/lib/iomgr/ev_windows.cc
@@ -22,7 +22,7 @@
#include "src/core/lib/debug/trace.h"
-grpc_tracer_flag grpc_polling_trace =
- GRPC_TRACER_INITIALIZER(false, "polling"); /* Disabled by default */
+grpc_core::TraceFlag grpc_polling_trace(false,
+ "polling"); /* Disabled by default */
#endif // GRPC_WINSOCK_SOCKET
diff --git a/src/core/lib/iomgr/exec_ctx.cc b/src/core/lib/iomgr/exec_ctx.cc
index 0394a00f3e..e4d956dbef 100644
--- a/src/core/lib/iomgr/exec_ctx.cc
+++ b/src/core/lib/iomgr/exec_ctx.cc
@@ -60,7 +60,7 @@ static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
#ifndef NDEBUG
closure->scheduled = false;
- if (GRPC_TRACER_ON(grpc_trace_closure)) {
+ if (grpc_trace_closure.enabled()) {
gpr_log(GPR_DEBUG, "running closure %p: created [%s:%d]: %s [%s:%d]",
closure, closure->file_created, closure->line_created,
closure->run ? "run" : "scheduled", closure->file_initiated,
@@ -69,7 +69,7 @@ static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
#endif
closure->cb(exec_ctx, closure->cb_arg, error);
#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_closure)) {
+ if (grpc_trace_closure.enabled()) {
gpr_log(GPR_DEBUG, "closure %p finished", closure);
}
#endif
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
index 92c3e70301..59c7818347 100644
--- a/src/core/lib/iomgr/executor.cc
+++ b/src/core/lib/iomgr/executor.cc
@@ -51,8 +51,7 @@ static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
GPR_TLS_DECL(g_this_thread_state);
-static grpc_tracer_flag executor_trace =
- GRPC_TRACER_INITIALIZER(false, "executor");
+static grpc_core::TraceFlag executor_trace(false, "executor");
static void executor_thread(void *arg);
@@ -63,7 +62,7 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
- if (GRPC_TRACER_ON(executor_trace)) {
+ if (executor_trace.enabled()) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
@@ -134,7 +133,6 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
}
void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
- grpc_register_tracer(&executor_trace);
gpr_atm_no_barrier_store(&g_cur_threads, 0);
grpc_executor_set_threading(exec_ctx, true);
}
@@ -152,7 +150,7 @@ static void executor_thread(void *arg) {
size_t subtract_depth = 0;
for (;;) {
- if (GRPC_TRACER_ON(executor_trace)) {
+ if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
(int)(ts - g_thread_state), subtract_depth);
}
@@ -163,7 +161,7 @@ static void executor_thread(void *arg) {
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
if (ts->shutdown) {
- if (GRPC_TRACER_ON(executor_trace)) {
+ if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
(int)(ts - g_thread_state));
}
@@ -174,7 +172,7 @@ static void executor_thread(void *arg) {
grpc_closure_list exec = ts->elems;
ts->elems = GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
- if (GRPC_TRACER_ON(executor_trace)) {
+ if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
}
@@ -196,7 +194,7 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
retry_push = false;
size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
if (cur_thread_count == 0) {
- if (GRPC_TRACER_ON(executor_trace)) {
+ if (executor_trace.enabled()) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
closure, closure->file_created, closure->line_created);
@@ -217,7 +215,7 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
bool try_new_thread;
for (;;) {
- if (GRPC_TRACER_ON(executor_trace)) {
+ if (executor_trace.enabled()) {
#ifndef NDEBUG
gpr_log(
GPR_DEBUG,
diff --git a/src/core/lib/iomgr/iomgr_posix.cc b/src/core/lib/iomgr/iomgr_posix.cc
index f5875a247e..f8f6fe2353 100644
--- a/src/core/lib/iomgr/iomgr_posix.cc
+++ b/src/core/lib/iomgr/iomgr_posix.cc
@@ -28,7 +28,6 @@
void grpc_iomgr_platform_init(void) {
grpc_wakeup_fd_global_init();
grpc_event_engine_init();
- grpc_register_tracer(&grpc_tcp_trace);
}
void grpc_iomgr_platform_flush(void) {}
diff --git a/src/core/lib/iomgr/iomgr_uv.cc b/src/core/lib/iomgr/iomgr_uv.cc
index df5d23af3b..b8a10f2ae8 100644
--- a/src/core/lib/iomgr/iomgr_uv.cc
+++ b/src/core/lib/iomgr/iomgr_uv.cc
@@ -31,7 +31,7 @@ gpr_thd_id g_init_thread;
void grpc_iomgr_platform_init(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_global_init();
- grpc_register_tracer(&grpc_tcp_trace);
+
grpc_executor_set_threading(&exec_ctx, false);
g_init_thread = gpr_thd_currentid();
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc
index f967b22ba9..a4aeacccf9 100644
--- a/src/core/lib/iomgr/lockfree_event.cc
+++ b/src/core/lib/iomgr/lockfree_event.cc
@@ -22,7 +22,7 @@
#include "src/core/lib/debug/trace.h"
-extern grpc_tracer_flag grpc_polling_trace;
+extern grpc_core::TraceFlag grpc_polling_trace;
/* 'state' holds the to call when the fd is readable or writable respectively.
It can contain one of the following values:
@@ -82,7 +82,7 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
grpc_closure *closure, const char *variable) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, "lfev_notify_on[%s]: %p curr=%p closure=%p", variable,
state, (void *)curr, closure);
}
@@ -148,7 +148,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, "lfev_set_shutdown: %p curr=%p err=%s", state,
(void *)curr, grpc_error_string(shutdown_err));
}
@@ -198,7 +198,7 @@ void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state,
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ if (grpc_polling_trace.enabled()) {
gpr_log(GPR_ERROR, "lfev_set_ready[%s]: %p curr=%p", variable, state,
(void *)curr);
}
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index 799fae154c..d48f7f8574 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -30,7 +30,7 @@ extern "C" {
#endif
#ifndef NDEBUG
-extern grpc_tracer_flag grpc_trace_fd_refcount;
+extern grpc_core::TraceFlag grpc_trace_fd_refcount;
#endif
/* A grpc_pollset is a set of file descriptors that a higher level item is
diff --git a/src/core/lib/iomgr/pollset_uv.cc b/src/core/lib/iomgr/pollset_uv.cc
index b9901bf8ef..64e47e677e 100644
--- a/src/core/lib/iomgr/pollset_uv.cc
+++ b/src/core/lib/iomgr/pollset_uv.cc
@@ -34,10 +34,7 @@
#include "src/core/lib/debug/trace.h"
-#ifndef NDEBUG
-grpc_tracer_flag grpc_trace_fd_refcount =
- GRPC_TRACER_INITIALIZER(false, "fd_refcount");
-#endif
+grpc_core::DebugOnlyTraceFlag grpc_trace_fd_refcount(false, "fd_refcount");
struct grpc_pollset {
uv_timer_t timer;
diff --git a/src/core/lib/iomgr/pollset_windows.cc b/src/core/lib/iomgr/pollset_windows.cc
index bb4df83fc1..07bd586f29 100644
--- a/src/core/lib/iomgr/pollset_windows.cc
+++ b/src/core/lib/iomgr/pollset_windows.cc
@@ -30,10 +30,7 @@
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
-#ifndef NDEBUG
-grpc_tracer_flag grpc_trace_fd_refcount =
- GRPC_TRACER_INITIALIZER(false, "fd_refcount");
-#endif
+grpc_core::DebugOnlyTraceFlag grpc_trace_fd_refcount(false, "fd_refcount");
gpr_mu grpc_polling_mu;
static grpc_pollset_worker *g_active_poller;
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index ecb5747da8..28098ecd08 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -31,8 +31,7 @@
#include "src/core/lib/iomgr/combiner.h"
-grpc_tracer_flag grpc_resource_quota_trace =
- GRPC_TRACER_INITIALIZER(false, "resource_quota");
+grpc_core::TraceFlag grpc_resource_quota_trace(false, "resource_quota");
#define MEMORY_USAGE_ESTIMATION_MAX 65536
@@ -294,7 +293,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
- if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ: check allocation for user %p shutdown=%" PRIdPTR
" free_pool=%" PRId64,
resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
@@ -319,13 +318,13 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
resource_user->free_pool = 0;
resource_quota->free_pool -= amt;
rq_update_estimate(resource_quota);
- if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);
}
- } else if (GRPC_TRACER_ON(grpc_resource_quota_trace) &&
+ } else if (grpc_resource_quota_trace.enabled() &&
resource_user->free_pool >= 0) {
gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request",
resource_quota->name, resource_user->name);
@@ -356,7 +355,7 @@ static bool rq_reclaim_from_per_user_free_pool(
resource_user->free_pool = 0;
resource_quota->free_pool += amt;
rq_update_estimate(resource_quota);
- if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
@@ -379,7 +378,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
: GRPC_RULIST_RECLAIMER_BENIGN;
grpc_resource_user *resource_user = rulist_pop_head(resource_quota, list);
if (resource_user == NULL) return false;
- if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation",
resource_quota->name, resource_user->name,
destructive ? "destructive" : "benign");
@@ -513,7 +512,7 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
}
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
}
grpc_resource_user *resource_user = (grpc_resource_user *)ru;
@@ -811,7 +810,7 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
ru_ref_by(resource_user, (gpr_atm)size);
resource_user->free_pool -= (int64_t)size;
resource_user->outstanding_allocations += (int64_t)size;
- if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
@@ -836,7 +835,7 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&resource_user->mu);
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += (int64_t)size;
- if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
@@ -865,7 +864,7 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
- if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name, resource_user->name);
}
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 1d4249b7e2..fdebe89b86 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -65,7 +65,7 @@ extern "C" {
maintain lists of users (which users arrange to leave before they are
destroyed) */
-extern grpc_tracer_flag grpc_resource_quota_trace;
+extern grpc_core::TraceFlag grpc_resource_quota_trace;
grpc_resource_quota *grpc_resource_quota_ref_internal(
grpc_resource_quota *resource_quota);
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 5611dd9062..05c2adae20 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -43,7 +43,7 @@
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
-extern grpc_tracer_flag grpc_tcp_trace;
+extern grpc_core::TraceFlag grpc_tcp_trace;
typedef struct {
gpr_mu mu;
@@ -99,7 +99,7 @@ done:
static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
int done;
async_connect *ac = (async_connect *)acp;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
str);
@@ -137,7 +137,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
GRPC_ERROR_REF(error);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s",
ac->addr_str, str);
@@ -317,7 +317,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_schedule_on_exec_ctx);
ac->channel_args = grpc_channel_args_copy(channel_args);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting fd %p",
ac->addr_str, fdobj);
}
diff --git a/src/core/lib/iomgr/tcp_client_uv.cc b/src/core/lib/iomgr/tcp_client_uv.cc
index f3e9366299..b2aefc282e 100644
--- a/src/core/lib/iomgr/tcp_client_uv.cc
+++ b/src/core/lib/iomgr/tcp_client_uv.cc
@@ -32,7 +32,7 @@
#include "src/core/lib/iomgr/tcp_uv.h"
#include "src/core/lib/iomgr/timer.h"
-extern grpc_tracer_flag grpc_tcp_trace;
+extern grpc_core::TraceFlag grpc_tcp_trace;
typedef struct grpc_uv_tcp_connect {
uv_connect_t connect_req;
@@ -59,7 +59,7 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
grpc_error *error) {
int done;
grpc_uv_tcp_connect *connect = (grpc_uv_tcp_connect *)acp;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name, str);
@@ -147,7 +147,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
connect->connect_req.data = connect;
connect->refs = 2; // One for the connect operation, one for the timer.
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
connect->addr_name);
}
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index dbcc976ae9..3fa175df95 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -61,7 +61,7 @@ typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
typedef size_t msg_iovlen_type;
#endif
-grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false, "tcp");
+grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
typedef struct {
grpc_endpoint base;
@@ -121,7 +121,7 @@ static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
grpc_error *error_ignored) {
backup_poller *p = (backup_poller *)bp;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
}
grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p));
@@ -131,7 +131,7 @@ static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
grpc_error *error_ignored) {
backup_poller *p = (backup_poller *)bp;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
}
gpr_mu_lock(p->pollset_mu);
@@ -147,18 +147,18 @@ static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
gpr_mu_lock(p->pollset_mu);
bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
}
gpr_mu_unlock(p->pollset_mu);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
}
grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx));
} else {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
}
GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE);
@@ -169,7 +169,7 @@ static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller);
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, (int)old_count,
(int)old_count - 1);
}
@@ -180,14 +180,14 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
backup_poller *p;
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", (int)old_count,
2 + (int)old_count);
}
if (old_count == 0) {
GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
p = (backup_poller *)gpr_zalloc(sizeof(*p) + grpc_pollset_size());
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
}
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
@@ -202,7 +202,7 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
// spin waiting for backup poller
}
}
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
}
grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd);
@@ -212,7 +212,7 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
}
static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
}
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
@@ -221,7 +221,7 @@ static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
}
static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
}
cover_self(exec_ctx, tcp);
@@ -233,7 +233,7 @@ static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
}
drop_uncovered(exec_ctx, (grpc_tcp *)arg);
@@ -310,7 +310,7 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
@@ -323,7 +323,7 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
int line) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
@@ -354,7 +354,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
grpc_error *error) {
grpc_closure *cb = tcp->read_cb;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
const char *str = grpc_error_string(error);
@@ -450,7 +450,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)tcpp;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_string(error));
}
@@ -469,13 +469,13 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
}
grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
target_read_size, 1, tcp->incoming_buffer);
} else {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
}
tcp_do_read(exec_ctx, tcp);
@@ -486,7 +486,7 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
}
@@ -624,14 +624,14 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
}
if (!tcp_flush(exec_ctx, tcp, &error)) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "write: delayed");
}
notify_on_write(exec_ctx, tcp);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
}
@@ -646,7 +646,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_error *error = GRPC_ERROR_NONE;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
size_t i;
for (i = 0; i < buf->count; i++) {
@@ -677,12 +677,12 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (!tcp_flush(exec_ctx, tcp, &error)) {
TCP_REF(tcp, "write");
tcp->write_cb = cb;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "write: delayed");
}
notify_on_write(exec_ctx, tcp);
} else {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
}
diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h
index 47e78fa67e..d9963c2f2f 100644
--- a/src/core/lib/iomgr/tcp_posix.h
+++ b/src/core/lib/iomgr/tcp_posix.h
@@ -37,7 +37,7 @@
extern "C" {
#endif
-extern grpc_tracer_flag grpc_tcp_trace;
+extern grpc_core::TraceFlag grpc_tcp_trace;
/* Create a tcp endpoint given a file descriptor and a read slice size.
Takes ownership of fd. */
diff --git a/src/core/lib/iomgr/tcp_server_posix.cc b/src/core/lib/iomgr/tcp_server_posix.cc
index 06612d639c..777b96eaac 100644
--- a/src/core/lib/iomgr/tcp_server_posix.cc
+++ b/src/core/lib/iomgr/tcp_server_posix.cc
@@ -242,7 +242,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
addr_str = grpc_sockaddr_to_uri(&addr);
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
}
diff --git a/src/core/lib/iomgr/tcp_server_uv.cc b/src/core/lib/iomgr/tcp_server_uv.cc
index 348838c495..2880fe2250 100644
--- a/src/core/lib/iomgr/tcp_server_uv.cc
+++ b/src/core/lib/iomgr/tcp_server_uv.cc
@@ -213,7 +213,7 @@ static void finish_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *sp) {
} else {
gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(err));
}
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
if (peer_name_string) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s",
sp->server, peer_name_string);
@@ -247,7 +247,7 @@ static void on_connect(uv_stream_t *server, int status) {
GPR_ASSERT(!sp->has_pending_connection);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p incoming connection", sp->server);
}
@@ -403,7 +403,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
gpr_free(allocated_addr);
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
char *port_string;
grpc_sockaddr_to_string(&port_string, addr, 0);
const char *str = grpc_error_string(error);
@@ -435,7 +435,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
(void)pollsets;
(void)pollset_count;
GRPC_UV_ASSERT_SAME_THREAD();
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "SERVER_START %p", server);
}
GPR_ASSERT(on_accept_cb);
diff --git a/src/core/lib/iomgr/tcp_uv.cc b/src/core/lib/iomgr/tcp_uv.cc
index e311964dbc..b245ce4ecf 100644
--- a/src/core/lib/iomgr/tcp_uv.cc
+++ b/src/core/lib/iomgr/tcp_uv.cc
@@ -38,7 +38,7 @@
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
-grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false, "tcp");
+grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
typedef struct {
grpc_endpoint base;
@@ -79,7 +79,7 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
@@ -92,7 +92,7 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
int line) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
@@ -158,7 +158,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
grpc_slice_buffer_add(tcp->read_slices, sub);
tcp->read_slice = alloc_read_slice(&exec_ctx, tcp->resource_user);
error = GRPC_ERROR_NONE;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
@@ -200,7 +200,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_slice_from_static_string(uv_strerror(status)));
GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp, str);
}
@@ -218,7 +218,7 @@ static void write_callback(uv_write_t *req, int status) {
} else {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed");
}
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
}
@@ -240,7 +240,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
uv_write_t *write_req;
GRPC_UV_ASSERT_SAME_THREAD();
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
size_t j;
for (j = 0; j < write_slices->count; j++) {
@@ -310,7 +310,7 @@ static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_error *why) {
grpc_tcp *tcp = (grpc_tcp *)ep;
if (!tcp->shutting_down) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
const char *str = grpc_error_string(why);
gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->handle, str);
}
@@ -351,7 +351,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
}
diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h
index 3399535b42..3d4afe0fb8 100644
--- a/src/core/lib/iomgr/tcp_uv.h
+++ b/src/core/lib/iomgr/tcp_uv.h
@@ -34,7 +34,7 @@
#include <uv.h>
-extern grpc_tracer_flag grpc_tcp_trace;
+extern grpc_core::TraceFlag grpc_tcp_trace;
#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc
index dc84e564a9..99304bb764 100644
--- a/src/core/lib/iomgr/tcp_windows.cc
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -49,7 +49,7 @@
#define GRPC_FIONBIO FIONBIO
#endif
-grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false, "tcp");
+grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
static grpc_error *set_non_block(SOCKET sock) {
int status;
@@ -124,7 +124,7 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
@@ -137,7 +137,7 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
int line) {
- if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ if (grpc_tcp_trace.enabled()) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index b8e895de6f..1735aeb19f 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -44,9 +44,8 @@
#define MAX_QUEUE_WINDOW_DURATION 1
extern "C" {
-grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false, "timer");
-grpc_tracer_flag grpc_timer_check_trace =
- GRPC_TRACER_INITIALIZER(false, "timer_check");
+grpc_core::TraceFlag grpc_timer_trace(false, "timer");
+grpc_core::TraceFlag grpc_timer_check_trace(false, "timer_check");
}
/* A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with
@@ -247,8 +246,6 @@ void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {
g_shared_mutables.min_timer = grpc_exec_ctx_now(exec_ctx);
gpr_tls_init(&g_last_seen_min_timer);
gpr_tls_set(&g_last_seen_min_timer, 0);
- grpc_register_tracer(&grpc_timer_trace);
- grpc_register_tracer(&grpc_timer_check_trace);
for (i = 0; i < NUM_SHARDS; i++) {
timer_shard *shard = &g_shards[i];
@@ -331,7 +328,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
timer->hash_table_next = NULL;
#endif
- if (GRPC_TRACER_ON(grpc_timer_trace)) {
+ if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG,
"TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
deadline, grpc_exec_ctx_now(exec_ctx), closure, closure->cb);
@@ -367,7 +364,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
timer->heap_index = INVALID_HEAP_INDEX;
list_join(&shard->list, timer);
}
- if (GRPC_TRACER_ON(grpc_timer_trace)) {
+ if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
" => is_first_timer=%s",
(int)(shard - g_shards), shard->queue_deadline_cap,
@@ -388,7 +385,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
grpc_timer_check. */
if (is_first_timer) {
gpr_mu_lock(&g_shared_mutables.mu);
- if (GRPC_TRACER_ON(grpc_timer_trace)) {
+ if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
shard->min_deadline);
}
@@ -418,7 +415,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
gpr_mu_lock(&shard->mu);
- if (GRPC_TRACER_ON(grpc_timer_trace)) {
+ if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
timer->pending ? "true" : "false");
}
@@ -459,7 +456,7 @@ static int refill_heap(timer_shard *shard, gpr_atm now) {
saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
(gpr_atm)(deadline_delta * 1000.0));
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
(int)(shard - g_shards), shard->queue_deadline_cap);
}
@@ -467,7 +464,7 @@ static int refill_heap(timer_shard *shard, gpr_atm now) {
next = timer->next;
if (timer->deadline < shard->queue_deadline_cap) {
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap",
timer->deadline);
}
@@ -484,7 +481,7 @@ static int refill_heap(timer_shard *shard, gpr_atm now) {
static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
grpc_timer *timer;
for (;;) {
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
(int)(shard - g_shards),
grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
@@ -494,13 +491,13 @@ static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
if (!refill_heap(shard, now)) return NULL;
}
timer = grpc_timer_heap_top(&shard->heap);
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG,
" .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
timer->deadline, now);
}
if (timer->deadline > now) return NULL;
- if (GRPC_TRACER_ON(grpc_timer_trace)) {
+ if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
timer, now - timer->deadline,
timer->closure->scheduler->vtable->name);
@@ -525,7 +522,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
}
*new_min_deadline = compute_min_deadline(shard);
gpr_mu_unlock(&shard->mu);
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d] popped %" PRIdPTR,
(int)(shard - g_shards), n);
}
@@ -549,7 +546,7 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&g_shared_mutables.mu);
result = GRPC_TIMERS_CHECKED_AND_EMPTY;
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR,
(int)(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline);
@@ -567,7 +564,7 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
result = GRPC_TIMERS_FIRED;
}
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG,
" .. result --> %d"
", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR
@@ -612,7 +609,7 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
if (next != NULL) {
*next = GPR_MIN(*next, min_timer);
}
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG,
"TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now,
min_timer);
@@ -626,7 +623,7 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
: GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system");
// tracing
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
char *next_str;
if (next == NULL) {
next_str = gpr_strdup("NULL");
@@ -643,7 +640,7 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
grpc_timer_check_result r =
run_some_expired_timers(exec_ctx, now, next, shutdown_error);
// tracing
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
char *next_str;
if (next == NULL) {
next_str = gpr_strdup("NULL");
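Note on the grpc_timer_list_init hunk above: alongside the type change, the explicit grpc_register_tracer(&grpc_timer_trace) and grpc_register_tracer(&grpc_timer_check_trace) calls are deleted, which suggests (an assumption, not shown in this diff) that the grpc_core::TraceFlag constructor now registers the flag itself. Below is a compilable sketch of that kind of constructor-time self-registration; the class and the global registry list are hypothetical stand-ins, not grpc's actual implementation.

  #include <cstdio>

  class TraceFlag {
   public:
    TraceFlag(bool default_enabled, const char* name);
    bool enabled() const { return enabled_; }
    const char* name() const { return name_; }
    TraceFlag* next() const { return next_; }

   private:
    TraceFlag* next_;
    const char* name_;
    bool enabled_;
  };

  namespace {
  TraceFlag* g_registered_flags = nullptr;  // head of a global registry list
  }

  TraceFlag::TraceFlag(bool default_enabled, const char* name)
      : next_(g_registered_flags), name_(name), enabled_(default_enabled) {
    // Registration happens in the constructor, so call sites no longer need
    // a separate grpc_register_tracer() step.
    g_registered_flags = this;
  }

  // Mirrors the converted definitions in timer_generic.cc / timer_uv.cc:
  TraceFlag grpc_timer_trace_sketch(false, "timer");
  TraceFlag grpc_timer_check_trace_sketch(false, "timer_check");

  int main() {
    for (TraceFlag* f = g_registered_flags; f != nullptr; f = f->next()) {
      std::printf("registered tracer: %s (enabled=%d)\n", f->name(),
                  (int)f->enabled());
    }
    return 0;
  }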
diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc
index 1248f82189..9f2dcb52d3 100644
--- a/src/core/lib/iomgr/timer_manager.cc
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -33,7 +33,7 @@ typedef struct completed_thread {
struct completed_thread *next;
} completed_thread;
-extern "C" grpc_tracer_flag grpc_timer_check_trace;
+extern grpc_core::TraceFlag grpc_timer_check_trace;
// global mutex
static gpr_mu g_mu;
@@ -81,7 +81,7 @@ static void start_timer_thread_and_unlock(void) {
++g_waiter_count;
++g_thread_count;
gpr_mu_unlock(&g_mu);
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "Spawn timer thread");
}
gpr_thd_options opt = gpr_thd_options_default();
@@ -115,7 +115,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) {
// if there's no thread waiting with a timeout, kick an existing
// waiter so that the next deadline is not missed
if (!g_has_timed_waiter) {
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "kick untimed waiter");
}
gpr_cv_signal(&g_cv_wait);
@@ -123,7 +123,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) {
gpr_mu_unlock(&g_mu);
}
// without our lock, flush the exec_ctx
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "flush exec_ctx");
}
grpc_exec_ctx_flush(exec_ctx);
@@ -178,7 +178,7 @@ static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) {
g_has_timed_waiter = true;
g_timed_waiter_deadline = next;
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
grpc_millis wait_time = next - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds",
wait_time);
@@ -188,15 +188,14 @@ static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) {
}
}
- if (GRPC_TRACER_ON(grpc_timer_check_trace) &&
- next == GRPC_MILLIS_INF_FUTURE) {
+ if (grpc_timer_check_trace.enabled() && next == GRPC_MILLIS_INF_FUTURE) {
gpr_log(GPR_DEBUG, "sleep until kicked");
}
gpr_cv_wait(&g_cv_wait, &g_mu,
grpc_millis_to_timespec(next, GPR_CLOCK_REALTIME));
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
my_timed_waiter_generation == g_timed_waiter_generation,
g_kicked);
@@ -240,7 +239,7 @@ static void timer_main_loop(grpc_exec_ctx *exec_ctx) {
Consequently, we can just sleep forever here and be happy at some
saved wakeup cycles. */
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "timers not checked: expect another thread to");
}
next = GRPC_MILLIS_INF_FUTURE;
@@ -266,7 +265,7 @@ static void timer_thread_cleanup(completed_thread *ct) {
ct->next = g_completed_threads;
g_completed_threads = ct;
gpr_mu_unlock(&g_mu);
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "End timer thread");
}
}
@@ -309,18 +308,18 @@ void grpc_timer_manager_init(void) {
static void stop_threads(void) {
gpr_mu_lock(&g_mu);
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "stop timer threads: threaded=%d", g_threaded);
}
if (g_threaded) {
g_threaded = false;
gpr_cv_broadcast(&g_cv_wait);
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
}
while (g_thread_count > 0) {
gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
- if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
}
gc_completed_threads();
diff --git a/src/core/lib/iomgr/timer_uv.cc b/src/core/lib/iomgr/timer_uv.cc
index ccbbe357ae..c4d46ff48f 100644
--- a/src/core/lib/iomgr/timer_uv.cc
+++ b/src/core/lib/iomgr/timer_uv.cc
@@ -30,9 +30,8 @@
#include <uv.h>
extern "C" {
-grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false, "timer");
-grpc_tracer_flag grpc_timer_check_trace =
- GRPC_TRACER_INITIALIZER(false, "timer_check");
+grpc_core::TraceFlag grpc_timer_trace(false, "timer");
+grpc_core::TraceFlag grpc_timer_check_trace(false, "timer_check");
}
static void timer_close_callback(uv_handle_t *handle) { gpr_free(handle); }