author    Craig Tiller <ctiller@google.com>  2017-05-03 13:06:35 -0700
committer Craig Tiller <ctiller@google.com>  2017-05-03 13:06:35 -0700
commit    84f75d448e2eb9c76301598cfaf8630603ef4a98 (patch)
tree      150ff5087af2e10cab8565551084a4ce6e433d7e /src/core/ext/filters/client_channel
parent    4d03ffbdc63ada0ff7ef05f036d5b53fc18c0a44 (diff)
Fix ASAN/TSAN failures
- trace system is now thread safe when run with TSAN
- fix a race in client_auth_filter.c
- allow timer manager to run in single threaded mode for fuzzers
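The client_channel portion of this change is mechanical: the trace globals grpc_lb_glb_trace and grpc_lb_round_robin_trace change from a plain int to a grpc_tracer_flag, and every read is wrapped in GRPC_TRACER_ON(...) so a TSAN build can load the flag without a data race. The following is a minimal sketch of that pattern using C11 atomics purely for illustration; tracer_flag, TRACER_ON, and maybe_log_pick are hypothetical stand-ins, not the actual definitions from gRPC's trace headers.

/* Minimal sketch (not gRPC's actual implementation) of a thread-safe tracer
 * flag: the flag becomes a small struct read through a macro, so a TSAN
 * build can use an atomic load instead of racing on a bare int. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  atomic_bool value; /* toggled by the trace system, read on hot paths */
} tracer_flag;       /* hypothetical stand-in for grpc_tracer_flag */

/* Hypothetical stand-in for GRPC_TRACER_ON(flag): a race-free read. */
#define TRACER_ON(flag) \
  atomic_load_explicit(&(flag).value, memory_order_relaxed)

static tracer_flag lb_trace; /* stand-in for grpc_lb_glb_trace */

static void maybe_log_pick(void *rr_policy) {
  /* Call sites guard logging exactly the way the diff below does:
   *   if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { gpr_log(...); }      */
  if (TRACER_ON(lb_trace)) {
    printf("Unreffing RR %p\n", rr_policy);
  }
}

int main(void) {
  /* In gRPC the flag is driven by the GRPC_TRACE environment variable;
   * here we just flip it directly for the sketch. */
  atomic_store(&lb_trace.value, true);
  maybe_log_pick((void *)0);
  return 0;
}

The same guard appears at every call site in the diff below, so logging behavior is unchanged when the flag is off; only the read itself becomes race-free.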
Diffstat (limited to 'src/core/ext/filters/client_channel')
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.c                2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c             40
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c   22
3 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.c b/src/core/ext/filters/client_channel/channel_connectivity.c
index 62f58fb278..f83670db82 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.c
+++ b/src/core/ext/filters/client_channel/channel_connectivity.c
@@ -132,7 +132,7 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
gpr_mu_lock(&w->mu);
if (due_to_completion) {
- if (grpc_trace_operation_failures) {
+ if (GRPC_TRACER_ON(grpc_trace_operation_failures)) {
GRPC_LOG_IF_ERROR("watch_completion_error", GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
index 37468101f0..a15476dc23 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -138,7 +138,7 @@
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
-int grpc_lb_glb_trace = 0;
+grpc_tracer_flag grpc_lb_glb_trace;
/* add lb_token of selected subchannel (address) to the call's initial
* metadata */
@@ -224,7 +224,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
} else {
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
}
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
@@ -575,7 +575,7 @@ static bool update_lb_connectivity_status_locked(
GPR_ASSERT(new_rr_state_error == GRPC_ERROR_NONE);
}
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Setting grpclb's state to %s from new RR policy %p state.",
grpc_connectivity_state_name(new_rr_state),
@@ -600,7 +600,7 @@ static bool pick_from_internal_rr_locked(
(void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
@@ -686,7 +686,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
if (!replace_old_rr) {
/* dispose of the new RR policy that won't be used after all */
GRPC_LB_POLICY_UNREF(exec_ctx, new_rr_policy, "rr_handover_no_replace");
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Keeping old RR policy (%p) despite new serverlist: new RR "
"policy was in %s connectivity state.",
@@ -696,7 +696,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
return;
}
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Created RR policy (%p) to replace old RR (%p)",
(void *)new_rr_policy, (void *)glb_policy->rr_policy);
}
@@ -741,7 +741,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
pp->wrapped_on_complete_arg.client_stats =
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
@@ -755,7 +755,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_policy->pending_pings = pping->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
@@ -908,7 +908,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(uri->path[0] != '\0');
glb_policy->server_name =
gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
glb_policy->server_name);
}
@@ -1093,7 +1093,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
bool pick_done;
if (glb_policy->rr_policy != NULL) {
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
(void *)glb_policy, (void *)glb_policy->rr_policy);
}
@@ -1116,7 +1116,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
pick_args, target, wc_arg);
} else {
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG,
"No RR policy in grpclb instance %p. Adding to grpclb's pending "
"picks",
@@ -1347,7 +1347,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
lb_call_init_locked(exec_ctx, glb_policy);
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
(void *)glb_policy, (void *)glb_policy->lb_call);
}
@@ -1453,7 +1453,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
grpc_grpclb_duration_to_timespec(
&response->client_stats_report_interval));
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"received initial LB response message; "
"client load reporting interval = %" PRId64 ".%09d sec",
@@ -1466,7 +1466,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
glb_policy->client_load_report_timer_pending = true;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
schedule_next_client_load_report(exec_ctx, glb_policy);
- } else if (grpc_lb_glb_trace) {
+ } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"received initial LB response message; "
"client load reporting NOT enabled");
@@ -1478,7 +1478,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_grpclb_response_parse_serverlist(response_slice);
if (serverlist != NULL) {
GPR_ASSERT(glb_policy->lb_call != NULL);
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Serverlist with %lu servers received",
(unsigned long)serverlist->num_servers);
for (size_t i = 0; i < serverlist->num_servers; ++i) {
@@ -1495,7 +1495,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (serverlist->num_servers > 0) {
if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
serverlist)) {
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Incoming server list identical to current, ignoring.");
}
@@ -1513,7 +1513,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
rr_handover_locked(exec_ctx, glb_policy);
}
} else {
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Received empty server list. Picks will stay pending until "
"a response with > 0 servers is received");
@@ -1555,7 +1555,7 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
glb_lb_policy *glb_policy = arg;
if (!glb_policy->shutting_down) {
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
(void *)glb_policy);
}
@@ -1572,7 +1572,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(glb_policy->lb_call != NULL);
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
char *status_details =
grpc_slice_to_c_string(glb_policy->lb_call_status_details);
gpr_log(GPR_DEBUG,
@@ -1591,7 +1591,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try =
gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
- if (grpc_lb_glb_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void *)glb_policy);
gpr_timespec timeout = gpr_time_sub(next_try, now);
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
index 4c17f9c082..075b7caf5c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
@@ -74,7 +74,7 @@
typedef struct round_robin_lb_policy round_robin_lb_policy;
-int grpc_lb_round_robin_trace = 0;
+grpc_tracer_flag grpc_lb_round_robin_trace;
/** List of entities waiting for a pick.
*
@@ -198,7 +198,7 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
GPR_ASSERT(p->ready_list_last_pick == &p->ready_list);
}
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[READYLIST, RR: %p] ADVANCED LAST PICK. NOW AT NODE %p (SC %p, "
"CSC %p)",
@@ -228,7 +228,7 @@ static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
p->ready_list.prev->next = new_elem;
p->ready_list.prev = new_elem;
}
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (Conn. SC %p)",
(void *)new_elem, (void *)sd->subchannel);
}
@@ -256,7 +256,7 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
node->next->prev = node->prev;
}
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[READYLIST] REMOVED NODE %p (SC %p)", (void *)node,
(void *)node->subchannel);
}
@@ -276,7 +276,7 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *elem;
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Destroying Round Robin policy at %p", (void *)pol);
}
@@ -312,7 +312,7 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pending_pick *pp;
size_t i;
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
}
@@ -421,7 +421,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *pp;
ready_list *selected;
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
}
@@ -434,7 +434,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (user_data != NULL) {
*user_data = selected->user_data;
}
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
(void *)*target, (void *)selected);
@@ -566,7 +566,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
(void *)selected->subchannel, (void *)selected);
@@ -724,7 +724,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
char *address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG, "Created subchannel %p for address uri %s",
@@ -768,7 +768,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
- if (grpc_lb_round_robin_trace) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "Created RR policy at %p with %lu subchannels",
(void *)p, (unsigned long)p->num_subchannels);
}