Diffstat (limited to 'src/core/ext/filters')
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.cc               |   2
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc                     | 291
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.h                      |   2
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_plugin.cc              |   4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.cc                          |   8
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.h                           |   4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc            | 270
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc    |  93
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc  |  94
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc          |  69
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.h           |   4
-rw-r--r--  src/core/ext/filters/client_channel/resolver.cc                           |  10
-rw-r--r--  src/core/ext/filters/client_channel/resolver.h                            |   4
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc                         |   2
-rw-r--r--  src/core/ext/filters/http/http_filters_plugin.cc                          |   1
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.cc     |   4
16 files changed, 418 insertions, 444 deletions
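The change itself is a mechanical API migration: each C-style grpc_tracer_flag
defined through GRPC_TRACER_INITIALIZER becomes a grpc_core::TraceFlag object,
and every GRPC_TRACER_ON(flag) check becomes a flag.enabled() call. A minimal
sketch of the class shape those call sites imply follows; the member layout is
a hypothetical reconstruction for illustration, not the actual gRPC header.

// Old C-style tracer, as removed by this diff:
//   grpc_tracer_flag grpc_client_channel_trace =
//       GRPC_TRACER_INITIALIZER(false, "client_channel");
//   if (GRPC_TRACER_ON(grpc_client_channel_trace)) { ... }
//
// New C++ style, reconstructed from the call sites in this diff:
namespace grpc_core {
class TraceFlag {
 public:
  TraceFlag(bool default_enabled, const char* name)
      : name_(name), enabled_(default_enabled) {}
  const char* name() const { return name_; }
  bool enabled() const { return enabled_; }
  void set_enabled(bool enabled) { enabled_ = enabled; }

 private:
  const char* name_;
  bool enabled_;  // The real flag may use an atomic; a plain bool keeps the
                  // sketch minimal.
};
}  // namespace grpc_core

// Definition and use, matching how the migrated files now read:
grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");

void log_if_tracing_enabled() {
  if (grpc_client_channel_trace.enabled()) {
    /* gpr_log(GPR_DEBUG, "..."); */
  }
}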
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index d3627a237f..7eaf5d98cd 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -122,7 +122,7 @@ static void partly_done(grpc_exec_ctx* exec_ctx, state_watcher* w,
gpr_mu_lock(&w->mu);
if (due_to_completion) {
- if (GRPC_TRACER_ON(grpc_trace_operation_failures)) {
+ if (grpc_trace_operation_failures.enabled()) {
GRPC_LOG_IF_ERROR("watch_completion_error", GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 8b8d512432..a4936120de 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -56,8 +56,7 @@
/* Client channel implementation */
-grpc_tracer_flag grpc_client_channel_trace =
- GRPC_TRACER_INITIALIZER(false, "client_channel");
+grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
/*************************************************************************
* METHOD-CONFIG TABLE
@@ -115,7 +114,7 @@ static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
buf[len - 1] = '\0'; // Remove trailing 's'.
char* decimal_point = strchr(buf, '.');
int nanos = 0;
- if (decimal_point != nullptr) {
+ if (decimal_point != NULL) {
*decimal_point = '\0';
nanos = gpr_parse_nonnegative_int(decimal_point + 1);
if (nanos == -1) {
@@ -141,14 +140,14 @@ static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
static void* method_parameters_create_from_json(const grpc_json* json) {
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
grpc_millis timeout = 0;
- for (grpc_json* field = json->child; field != nullptr; field = field->next) {
- if (field->key == nullptr) continue;
+ for (grpc_json* field = json->child; field != NULL; field = field->next) {
+ if (field->key == NULL) continue;
if (strcmp(field->key, "waitForReady") == 0) {
- if (wait_for_ready != WAIT_FOR_READY_UNSET) return nullptr; // Duplicate.
- if (!parse_wait_for_ready(field, &wait_for_ready)) return nullptr;
+ if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
+ if (!parse_wait_for_ready(field, &wait_for_ready)) return NULL;
} else if (strcmp(field->key, "timeout") == 0) {
- if (timeout > 0) return nullptr; // Duplicate.
- if (!parse_timeout(field, &timeout)) return nullptr;
+ if (timeout > 0) return NULL; // Duplicate.
+ if (!parse_timeout(field, &timeout)) return NULL;
}
}
method_parameters* value =
@@ -234,7 +233,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx* exec_ctx,
* - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
* - Hand over pending picks from old policies during the switch that happens
* when resolver provides an update. */
- if (chand->lb_policy != nullptr) {
+ if (chand->lb_policy != NULL) {
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* cancel picks with wait_for_ready=false */
grpc_lb_policy_cancel_picks_locked(
@@ -248,7 +247,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx* exec_ctx,
GRPC_ERROR_REF(error));
}
}
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
grpc_connectivity_state_name(state));
}
@@ -262,16 +261,15 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx* exec_ctx,
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
w->lb_policy, grpc_connectivity_state_name(w->state));
}
- if (publish_state == GRPC_CHANNEL_SHUTDOWN &&
- w->chand->resolver != nullptr) {
+ if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver);
GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
- w->chand->lb_policy = nullptr;
+ w->chand->lb_policy = NULL;
}
set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
GRPC_ERROR_REF(error), "lb_changed");
@@ -300,7 +298,7 @@ static void watch_lb_policy_locked(grpc_exec_ctx* exec_ctx, channel_data* chand,
static void start_resolving_locked(grpc_exec_ctx* exec_ctx,
channel_data* chand) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
}
GPR_ASSERT(!chand->started_resolving);
@@ -319,13 +317,13 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
service_config_parsing_state* parsing_state =
(service_config_parsing_state*)arg;
if (strcmp(field->key, "retryThrottling") == 0) {
- if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
+ if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
int max_milli_tokens = 0;
int milli_token_ratio = 0;
- for (grpc_json* sub_field = field->child; sub_field != nullptr;
+ for (grpc_json* sub_field = field->child; sub_field != NULL;
sub_field = sub_field->next) {
- if (sub_field->key == nullptr) return;
+ if (sub_field->key == NULL) return;
if (strcmp(sub_field->key, "maxTokens") == 0) {
if (max_milli_tokens != 0) return; // Duplicate.
if (sub_field->type != GRPC_JSON_NUMBER) return;
@@ -340,7 +338,7 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
uint32_t multiplier = 1;
uint32_t decimal_value = 0;
const char* decimal_point = strchr(sub_field->value, '.');
- if (decimal_point != nullptr) {
+ if (decimal_point != NULL) {
whole_len = (size_t)(decimal_point - sub_field->value);
multiplier = 1000;
size_t decimal_len = strlen(decimal_point + 1);
@@ -373,24 +371,24 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* error) {
channel_data* chand = (channel_data*)arg;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
}
// Extract the following fields from the resolver result, if non-NULL.
bool lb_policy_updated = false;
- char* lb_policy_name_dup = nullptr;
+ char* lb_policy_name_dup = NULL;
bool lb_policy_name_changed = false;
- grpc_lb_policy* new_lb_policy = nullptr;
- char* service_config_json = nullptr;
- grpc_server_retry_throttle_data* retry_throttle_data = nullptr;
- grpc_slice_hash_table* method_params_table = nullptr;
- if (chand->resolver_result != nullptr) {
+ grpc_lb_policy* new_lb_policy = NULL;
+ char* service_config_json = NULL;
+ grpc_server_retry_throttle_data* retry_throttle_data = NULL;
+ grpc_slice_hash_table* method_params_table = NULL;
+ if (chand->resolver_result != NULL) {
// Find LB policy name.
- const char* lb_policy_name = nullptr;
+ const char* lb_policy_name = NULL;
const grpc_arg* channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
- if (channel_arg != nullptr) {
+ if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
lb_policy_name = channel_arg->value.string;
}
@@ -398,7 +396,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
// the grpclb policy, regardless of what the resolver actually specified.
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
- if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
+ if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
grpc_lb_addresses* addresses =
(grpc_lb_addresses*)channel_arg->value.pointer.p;
bool found_balancer_address = false;
@@ -409,8 +407,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
}
}
if (found_balancer_address) {
- if (lb_policy_name != nullptr &&
- strcmp(lb_policy_name, "grpclb") != 0) {
+ if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
gpr_log(GPR_INFO,
"resolver requested LB policy %s but provided at least one "
"balancer address -- forcing use of grpclb LB policy",
@@ -421,7 +418,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
}
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
- if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
+ if (lb_policy_name == NULL) lb_policy_name = "pick_first";
grpc_lb_policy_args lb_policy_args;
lb_policy_args.args = chand->resolver_result;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
@@ -432,9 +429,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
// only thing that modifies its value, and it can only be invoked
// once at any given time.
lb_policy_name_changed =
- chand->info_lb_policy_name == nullptr ||
+ chand->info_lb_policy_name == NULL ||
strcmp(chand->info_lb_policy_name, lb_policy_name) != 0;
- if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
+ if (chand->lb_policy != NULL && !lb_policy_name_changed) {
// Continue using the same LB policy. Update with new addresses.
lb_policy_updated = true;
grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy, &lb_policy_args);
@@ -442,22 +439,22 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
// Instantiate new LB policy.
new_lb_policy =
grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
- if (new_lb_policy == nullptr) {
+ if (new_lb_policy == NULL) {
gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
}
}
// Find service config.
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
- if (channel_arg != nullptr) {
+ if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
service_config_json = gpr_strdup(channel_arg->value.string);
grpc_service_config* service_config =
grpc_service_config_create(service_config_json);
- if (service_config != nullptr) {
+ if (service_config != NULL) {
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
- GPR_ASSERT(channel_arg != nullptr);
+ GPR_ASSERT(channel_arg != NULL);
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
grpc_uri* uri =
grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
@@ -481,9 +478,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
// The copy will be saved in chand->lb_policy_name below.
lb_policy_name_dup = gpr_strdup(lb_policy_name);
grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
- chand->resolver_result = nullptr;
+ chand->resolver_result = NULL;
}
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
"service_config=\"%s\"",
@@ -496,22 +493,22 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
//
// First, swap out the data used by cc_get_channel_info().
gpr_mu_lock(&chand->info_mu);
- if (lb_policy_name_dup != nullptr) {
+ if (lb_policy_name_dup != NULL) {
gpr_free(chand->info_lb_policy_name);
chand->info_lb_policy_name = lb_policy_name_dup;
}
- if (service_config_json != nullptr) {
+ if (service_config_json != NULL) {
gpr_free(chand->info_service_config_json);
chand->info_service_config_json = service_config_json;
}
gpr_mu_unlock(&chand->info_mu);
// Swap out the retry throttle data.
- if (chand->retry_throttle_data != nullptr) {
+ if (chand->retry_throttle_data != NULL) {
grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
}
chand->retry_throttle_data = retry_throttle_data;
// Swap out the method params table.
- if (chand->method_params_table != nullptr) {
+ if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
chand->method_params_table = method_params_table;
@@ -521,10 +518,10 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
// Note that we do NOT do this if either (a) we updated the existing
// LB policy above or (b) we failed to create the new LB policy (in
// which case we want to continue using the most recent one we had).
- if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
- chand->resolver == nullptr) {
- if (chand->lb_policy != nullptr) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (new_lb_policy != NULL || error != GRPC_ERROR_NONE ||
+ chand->resolver == NULL) {
+ if (chand->lb_policy != NULL) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
chand->lb_policy);
}
@@ -537,17 +534,17 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
}
// Now that we've swapped out the relevant fields of chand, check for
// error or shutdown.
- if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (error != GRPC_ERROR_NONE || chand->resolver == NULL) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
}
- if (chand->resolver != nullptr) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (chand->resolver != NULL) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
}
grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
- chand->resolver = nullptr;
+ chand->resolver = NULL;
}
set_channel_connectivity_state_locked(
exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
@@ -564,8 +561,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_error* state_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
- if (new_lb_policy != nullptr) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (new_lb_policy != NULL) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
}
GRPC_ERROR_UNREF(state_error);
@@ -601,46 +598,46 @@ static void start_transport_op_locked(grpc_exec_ctx* exec_ctx, void* arg,
(grpc_channel_element*)op->handler_private.extra_arg;
channel_data* chand = (channel_data*)elem->channel_data;
- if (op->on_connectivity_state_change != nullptr) {
+ if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &chand->state_tracker, op->connectivity_state,
op->on_connectivity_state_change);
- op->on_connectivity_state_change = nullptr;
- op->connectivity_state = nullptr;
+ op->on_connectivity_state_change = NULL;
+ op->connectivity_state = NULL;
}
- if (op->send_ping != nullptr) {
- if (chand->lb_policy == nullptr) {
+ if (op->send_ping != NULL) {
+ if (chand->lb_policy == NULL) {
GRPC_CLOSURE_SCHED(
exec_ctx, op->send_ping,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
} else {
grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
- op->bind_pollset = nullptr;
+ op->bind_pollset = NULL;
}
- op->send_ping = nullptr;
+ op->send_ping = NULL;
}
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
- if (chand->resolver != nullptr) {
+ if (chand->resolver != NULL) {
set_channel_connectivity_state_locked(
exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
- chand->resolver = nullptr;
+ chand->resolver = NULL;
if (!chand->started_resolving) {
grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
GRPC_ERROR_REF(op->disconnect_with_error));
GRPC_CLOSURE_LIST_SCHED(exec_ctx,
&chand->waiting_for_resolver_result_closures);
}
- if (chand->lb_policy != nullptr) {
+ if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
chand->lb_policy->interested_parties,
chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
- chand->lb_policy = nullptr;
+ chand->lb_policy = NULL;
}
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
@@ -656,7 +653,7 @@ static void cc_start_transport_op(grpc_exec_ctx* exec_ctx,
channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(op->set_accept_stream == false);
- if (op->bind_pollset != nullptr) {
+ if (op->bind_pollset != NULL) {
grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
op->bind_pollset);
}
@@ -675,15 +672,15 @@ static void cc_get_channel_info(grpc_exec_ctx* exec_ctx,
const grpc_channel_info* info) {
channel_data* chand = (channel_data*)elem->channel_data;
gpr_mu_lock(&chand->info_mu);
- if (info->lb_policy_name != nullptr) {
- *info->lb_policy_name = chand->info_lb_policy_name == nullptr
- ? nullptr
+ if (info->lb_policy_name != NULL) {
+ *info->lb_policy_name = chand->info_lb_policy_name == NULL
+ ? NULL
: gpr_strdup(chand->info_lb_policy_name);
}
- if (info->service_config_json != nullptr) {
+ if (info->service_config_json != NULL) {
*info->service_config_json =
- chand->info_service_config_json == nullptr
- ? nullptr
+ chand->info_service_config_json == NULL
+ ? NULL
: gpr_strdup(chand->info_service_config_json);
}
gpr_mu_unlock(&chand->info_mu);
@@ -702,7 +699,7 @@ static grpc_error* cc_init_channel_elem(grpc_exec_ctx* exec_ctx,
gpr_mu_init(&chand->external_connectivity_watcher_list_mu);
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
- chand->external_connectivity_watcher_list_head = nullptr;
+ chand->external_connectivity_watcher_list_head = NULL;
gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
chand->owning_stack = args->channel_stack;
@@ -716,7 +713,7 @@ static grpc_error* cc_init_channel_elem(grpc_exec_ctx* exec_ctx,
// Record client channel factory.
const grpc_arg* arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
- if (arg == nullptr) {
+ if (arg == NULL) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Missing client channel factory in args for client channel filter");
}
@@ -730,7 +727,7 @@ static grpc_error* cc_init_channel_elem(grpc_exec_ctx* exec_ctx,
(grpc_client_channel_factory*)arg->value.pointer.p;
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
- if (arg == nullptr) {
+ if (arg == NULL) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Missing server uri in args for client channel filter");
}
@@ -738,18 +735,18 @@ static grpc_error* cc_init_channel_elem(grpc_exec_ctx* exec_ctx,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"server uri arg must be a string");
}
- char* proxy_name = nullptr;
- grpc_channel_args* new_args = nullptr;
+ char* proxy_name = NULL;
+ grpc_channel_args* new_args = NULL;
grpc_proxy_mappers_map_name(exec_ctx, arg->value.string, args->channel_args,
&proxy_name, &new_args);
// Instantiate resolver.
chand->resolver = grpc_resolver_create(
- exec_ctx, proxy_name != nullptr ? proxy_name : arg->value.string,
- new_args != nullptr ? new_args : args->channel_args,
+ exec_ctx, proxy_name != NULL ? proxy_name : arg->value.string,
+ new_args != NULL ? new_args : args->channel_args,
chand->interested_parties, chand->combiner);
- if (proxy_name != nullptr) gpr_free(proxy_name);
- if (new_args != nullptr) grpc_channel_args_destroy(exec_ctx, new_args);
- if (chand->resolver == nullptr) {
+ if (proxy_name != NULL) gpr_free(proxy_name);
+ if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
+ if (chand->resolver == NULL) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
}
chand->deadline_checking_enabled =
@@ -768,17 +765,17 @@ static void shutdown_resolver_locked(grpc_exec_ctx* exec_ctx, void* arg,
static void cc_destroy_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
- if (chand->resolver != nullptr) {
+ if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
- if (chand->client_channel_factory != nullptr) {
+ if (chand->client_channel_factory != NULL) {
grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory);
}
- if (chand->lb_policy != nullptr) {
+ if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
chand->lb_policy->interested_parties,
chand->interested_parties);
@@ -786,10 +783,10 @@ static void cc_destroy_channel_elem(grpc_exec_ctx* exec_ctx,
}
gpr_free(chand->info_lb_policy_name);
gpr_free(chand->info_service_config_json);
- if (chand->retry_throttle_data != nullptr) {
+ if (chand->retry_throttle_data != NULL) {
grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
}
- if (chand->method_params_table != nullptr) {
+ if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
grpc_client_channel_stop_backup_polling(exec_ctx, chand->interested_parties);
@@ -872,7 +869,7 @@ grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
static void waiting_for_pick_batches_add(
call_data* calld, grpc_transport_stream_op_batch* batch) {
if (batch->send_initial_metadata) {
- GPR_ASSERT(calld->initial_metadata_batch == nullptr);
+ GPR_ASSERT(calld->initial_metadata_batch == NULL);
calld->initial_metadata_batch = batch;
} else {
GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
@@ -899,7 +896,7 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_error* error) {
call_data* calld = (call_data*)elem->call_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
elem->channel_data, calld, calld->waiting_for_pick_batches_count,
@@ -914,7 +911,7 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx* exec_ctx,
GRPC_ERROR_REF(error),
"waiting_for_pick_batches_fail");
}
- if (calld->initial_metadata_batch != nullptr) {
+ if (calld->initial_metadata_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
calld->call_combiner);
@@ -942,7 +939,7 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending %" PRIuPTR
" pending batches to subchannel_call=%p",
@@ -958,7 +955,7 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx* exec_ctx,
GRPC_ERROR_NONE,
"waiting_for_pick_batches_resume");
}
- GPR_ASSERT(calld->initial_metadata_batch != nullptr);
+ GPR_ASSERT(calld->initial_metadata_batch != NULL);
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
calld->initial_metadata_batch);
}
@@ -969,18 +966,18 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
}
- if (chand->retry_throttle_data != nullptr) {
+ if (chand->retry_throttle_data != NULL) {
calld->retry_throttle_data =
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
- if (chand->method_params_table != nullptr) {
+ if (chand->method_params_table != NULL) {
calld->method_params = (method_parameters*)grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
- if (calld->method_params != nullptr) {
+ if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
// If the deadline from the service config is shorter than the one
// from the client API, reset the deadline timer.
@@ -1015,7 +1012,7 @@ static void create_subchannel_call_locked(grpc_exec_ctx* exec_ctx,
grpc_error* new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args,
&calld->subchannel_call);
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call, grpc_error_string(new_error));
}
@@ -1033,7 +1030,7 @@ static void pick_done_locked(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_error* error) {
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
- if (calld->connected_subchannel == nullptr) {
+ if (calld->connected_subchannel == NULL) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
calld->error = error == GRPC_ERROR_NONE
@@ -1041,7 +1038,7 @@ static void pick_done_locked(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
"Call dropped by load balancing policy")
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1);
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failed to create subchannel: error=%s", chand,
calld, grpc_error_string(calld->error));
@@ -1074,8 +1071,8 @@ static void pick_callback_cancel_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_call_element* elem = (grpc_call_element*)arg;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
- if (calld->lb_policy != nullptr) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (calld->lb_policy != NULL) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, calld->lb_policy);
}
@@ -1093,13 +1090,13 @@ static void pick_callback_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_call_element* elem = (grpc_call_element*)arg;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
- GPR_ASSERT(calld->lb_policy != nullptr);
+ GPR_ASSERT(calld->lb_policy != NULL);
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
- calld->lb_policy = nullptr;
+ calld->lb_policy = NULL;
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
@@ -1110,7 +1107,7 @@ static bool pick_callback_start_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
}
@@ -1125,7 +1122,7 @@ static bool pick_callback_start_locked(grpc_exec_ctx* exec_ctx,
initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
const bool wait_for_ready_set_from_service_config =
- calld->method_params != nullptr &&
+ calld->method_params != NULL &&
calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
@@ -1145,15 +1142,15 @@ static bool pick_callback_start_locked(grpc_exec_ctx* exec_ctx,
grpc_combiner_scheduler(chand->combiner));
const bool pick_done = grpc_lb_policy_pick_locked(
exec_ctx, chand->lb_policy, &inputs, &calld->connected_subchannel,
- calld->subchannel_call_context, nullptr, &calld->lb_pick_closure);
+ calld->subchannel_call_context, NULL, &calld->lb_pick_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
chand, calld);
}
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
- calld->lb_policy = nullptr;
+ calld->lb_policy = NULL;
} else {
GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
grpc_call_combiner_set_notify_on_cancel(
@@ -1193,7 +1190,7 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem = args->elem;
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
chand, calld);
@@ -1217,7 +1214,7 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
/* cancelled, do nothing */
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "call cancelled before resolver result");
}
gpr_free(args);
@@ -1228,13 +1225,13 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (error != GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
chand, calld);
}
async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- } else if (chand->lb_policy != nullptr) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ } else if (chand->lb_policy != NULL) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
}
@@ -1254,9 +1251,9 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
// removed in https://github.com/grpc/grpc/pull/12297. Need to figure
// out what is actually causing this to occur and then figure out the
// right way to deal with it.
- else if (chand->resolver != nullptr) {
+ else if (chand->resolver != NULL) {
// No LB policy, so try again.
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: resolver returned but no LB policy, "
"trying again",
@@ -1264,7 +1261,7 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
}
pick_after_resolver_result_start_locked(exec_ctx, elem);
} else {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
calld);
}
@@ -1277,7 +1274,7 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
@@ -1301,8 +1298,8 @@ static void start_pick_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_call_element* elem = (grpc_call_element*)arg;
call_data* calld = (call_data*)elem->call_data;
channel_data* chand = (channel_data*)elem->channel_data;
- GPR_ASSERT(calld->connected_subchannel == nullptr);
- if (chand->lb_policy != nullptr) {
+ GPR_ASSERT(calld->connected_subchannel == NULL);
+ if (chand->lb_policy != NULL) {
// We already have an LB policy, so ask it for a pick.
if (pick_callback_start_locked(exec_ctx, elem)) {
// Pick completed synchronously.
@@ -1311,7 +1308,7 @@ static void start_pick_locked(grpc_exec_ctx* exec_ctx, void* arg,
}
} else {
// We do not yet have an LB policy, so wait for a resolver result.
- if (chand->resolver == nullptr) {
+ if (chand->resolver == NULL) {
pick_done_locked(exec_ctx, elem,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
return;
@@ -1333,7 +1330,7 @@ static void start_pick_locked(grpc_exec_ctx* exec_ctx, void* arg,
static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
call_data* calld = (call_data*)elem->call_data;
- if (calld->retry_throttle_data != nullptr) {
+ if (calld->retry_throttle_data != NULL) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
calld->retry_throttle_data);
@@ -1362,7 +1359,7 @@ static void cc_start_transport_stream_op_batch(
GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
// If we've previously been cancelled, immediately fail any new batches.
if (calld->error != GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(calld->error));
}
@@ -1378,13 +1375,13 @@ static void cc_start_transport_stream_op_batch(
// error to the caller when the first batch does get passed down.
GRPC_ERROR_UNREF(calld->error);
calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_string(calld->error));
}
// If we have a subchannel call, send the cancellation batch down.
// Otherwise, fail all pending batches.
- if (calld->subchannel_call != nullptr) {
+ if (calld->subchannel_call != NULL) {
grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
} else {
waiting_for_pick_batches_add(calld, batch);
@@ -1396,7 +1393,7 @@ static void cc_start_transport_stream_op_batch(
// Intercept on_complete for recv_trailing_metadata so that we can
// check retry throttle status.
if (batch->recv_trailing_metadata) {
- GPR_ASSERT(batch->on_complete != nullptr);
+ GPR_ASSERT(batch->on_complete != NULL);
calld->original_on_complete = batch->on_complete;
GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
grpc_schedule_on_exec_ctx);
@@ -1406,8 +1403,8 @@ static void cc_start_transport_stream_op_batch(
// Note that once we have completed the pick, we do not need to enter
// the channel combiner, which is more efficient (especially for
// streaming calls).
- if (calld->subchannel_call != nullptr) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (calld->subchannel_call != NULL) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
calld, calld->subchannel_call);
@@ -1421,7 +1418,7 @@ static void cc_start_transport_stream_op_batch(
// For batches containing a send_initial_metadata op, enter the channel
// combiner to start a pick.
if (batch->send_initial_metadata) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
chand, calld);
}
@@ -1432,7 +1429,7 @@ static void cc_start_transport_stream_op_batch(
GRPC_ERROR_NONE);
} else {
// For all other batches, release the call combiner.
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: saved batch, yeilding call combiner", chand,
calld);
@@ -1475,25 +1472,25 @@ static void cc_destroy_call_elem(grpc_exec_ctx* exec_ctx,
grpc_deadline_state_destroy(exec_ctx, elem);
}
grpc_slice_unref_internal(exec_ctx, calld->path);
- if (calld->method_params != nullptr) {
+ if (calld->method_params != NULL) {
method_parameters_unref(calld->method_params);
}
GRPC_ERROR_UNREF(calld->error);
- if (calld->subchannel_call != nullptr) {
+ if (calld->subchannel_call != NULL) {
grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
then_schedule_closure);
- then_schedule_closure = nullptr;
+ then_schedule_closure = NULL;
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
"client_channel_destroy_call");
}
- GPR_ASSERT(calld->lb_policy == nullptr);
+ GPR_ASSERT(calld->lb_policy == NULL);
GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
- if (calld->connected_subchannel != nullptr) {
+ if (calld->connected_subchannel != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
"picked");
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
- if (calld->subchannel_call_context[i].value != nullptr) {
+ if (calld->subchannel_call_context[i].value != NULL) {
calld->subchannel_call_context[i].destroy(
calld->subchannel_call_context[i].value);
}
@@ -1529,11 +1526,11 @@ const grpc_channel_filter grpc_client_channel_filter = {
static void try_to_connect_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error_ignored) {
channel_data* chand = (channel_data*)arg;
- if (chand->lb_policy != nullptr) {
+ if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
chand->exit_idle_when_lb_policy_arrives = true;
- if (!chand->started_resolving && chand->resolver != nullptr) {
+ if (!chand->started_resolving && chand->resolver != NULL) {
start_resolving_locked(exec_ctx, chand);
}
}
@@ -1571,7 +1568,7 @@ static external_connectivity_watcher* lookup_external_connectivity_watcher(
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
- while (w != nullptr && w->on_complete != on_complete) {
+ while (w != NULL && w->on_complete != on_complete) {
w = w->next;
}
gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
@@ -1601,7 +1598,7 @@ static void external_connectivity_watcher_list_remove(
}
external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
- while (w != nullptr) {
+ while (w != NULL) {
if (w->next == too_remove) {
w->next = w->next->next;
gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
@@ -1620,7 +1617,7 @@ int grpc_client_channel_num_external_connectivity_watchers(
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
- while (w != nullptr) {
+ while (w != NULL) {
count++;
w = w->next;
}
@@ -1645,8 +1642,8 @@ static void on_external_watch_complete_locked(grpc_exec_ctx* exec_ctx,
static void watch_connectivity_state_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error_ignored) {
external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
- external_connectivity_watcher* found = nullptr;
- if (w->state != nullptr) {
+ external_connectivity_watcher* found = NULL;
+ if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
@@ -1654,12 +1651,12 @@ static void watch_connectivity_state_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
} else {
- GPR_ASSERT(w->watcher_timer_init == nullptr);
+ GPR_ASSERT(w->watcher_timer_init == NULL);
found = lookup_external_connectivity_watcher(w->chand, w->on_complete);
if (found) {
GPR_ASSERT(found->on_complete == w->on_complete);
grpc_connectivity_state_notify_on_state_change(
- exec_ctx, &found->chand->state_tracker, nullptr, &found->my_closure);
+ exec_ctx, &found->chand->state_tracker, NULL, &found->my_closure);
}
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
w->chand->interested_parties);
diff --git a/src/core/ext/filters/client_channel/client_channel.h b/src/core/ext/filters/client_channel/client_channel.h
index 27862cf239..f58a8c1424 100644
--- a/src/core/ext/filters/client_channel/client_channel.h
+++ b/src/core/ext/filters/client_channel/client_channel.h
@@ -23,7 +23,7 @@
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/channel/channel_stack.h"
-extern grpc_tracer_flag grpc_client_channel_trace;
+extern grpc_core::TraceFlag grpc_client_channel_trace;
// Channel arg key for server URI string.
#define GRPC_ARG_SERVER_URI "grpc.server_uri"
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.cc b/src/core/ext/filters/client_channel/client_channel_plugin.cc
index eebef6827c..c1b57d0ada 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.cc
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.cc
@@ -78,10 +78,6 @@ extern "C" void grpc_client_channel_init(void) {
GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
(void*)&grpc_client_channel_filter);
grpc_http_connect_register_handshaker_factory();
- grpc_register_tracer(&grpc_client_channel_trace);
-#ifndef NDEBUG
- grpc_register_tracer(&grpc_trace_resolver_refcount);
-#endif
}
extern "C" void grpc_client_channel_shutdown(void) {
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index 387c26ed5c..6276c3e952 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -21,10 +21,8 @@
#define WEAK_REF_BITS 16
-#ifndef NDEBUG
-grpc_tracer_flag grpc_trace_lb_policy_refcount =
- GRPC_TRACER_INITIALIZER(false, "lb_policy_refcount");
-#endif
+grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
+ false, "lb_policy_refcount");
void grpc_lb_policy_init(grpc_lb_policy* policy,
const grpc_lb_policy_vtable* vtable,
@@ -52,7 +50,7 @@ static gpr_atm ref_mutate(grpc_lb_policy* c, gpr_atm delta,
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_lb_policy_refcount)) {
+ if (grpc_trace_lb_policy_refcount.enabled()) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"LB_POLICY: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
purpose, old_val, old_val + delta, reason);
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 590094e67e..cd40b4dcf7 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -33,9 +33,7 @@ typedef struct grpc_lb_policy grpc_lb_policy;
typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable;
typedef struct grpc_lb_policy_args grpc_lb_policy_args;
-#ifndef NDEBUG
-extern grpc_tracer_flag grpc_trace_lb_policy_refcount;
-#endif
+extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
struct grpc_lb_policy {
const grpc_lb_policy_vtable* vtable;
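lb_policy.h previously hid the refcount tracer behind #ifndef NDEBUG; the new
code declares a grpc_core::DebugOnlyTraceFlag unconditionally, which lets
headers drop their own preprocessor guards. One way such a type can stay
zero-cost in release builds is to degenerate into a constant-false stub; the
split below is a sketch under that assumption, not the definition gRPC
actually ships.

namespace grpc_core {
#ifndef NDEBUG
// Debug builds: a real, mutable flag (same shape as the TraceFlag sketch
// earlier on this page).
class DebugOnlyTraceFlag {
 public:
  DebugOnlyTraceFlag(bool default_enabled, const char* name)
      : name_(name), enabled_(default_enabled) {}
  bool enabled() const { return enabled_; }

 private:
  const char* name_;
  bool enabled_;
};
#else
// Release builds: enabled() is a compile-time false, so branches such as
// `if (grpc_trace_lb_policy_refcount.enabled())` are dead code the compiler
// can strip entirely.
class DebugOnlyTraceFlag {
 public:
  constexpr DebugOnlyTraceFlag(bool /*default_enabled*/, const char* /*name*/) {}
  constexpr bool enabled() const { return false; }
};
#endif
}  // namespace grpc_core

grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
    false, "lb_policy_refcount");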
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index e9bef8a921..f863bb9479 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -126,14 +126,14 @@
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
-grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
+grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
/* add lb_token of selected subchannel (address) to the call's initial
* metadata */
static grpc_error* initial_metadata_add_lb_token(
grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
- GPR_ASSERT(lb_token_mdelem_storage != nullptr);
+ GPR_ASSERT(lb_token_mdelem_storage != NULL);
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
lb_token_mdelem_storage, lb_token);
@@ -190,14 +190,14 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
- if (wc_arg->rr_policy != nullptr) {
+ if (wc_arg->rr_policy != NULL) {
/* if *target is NULL, no pick has been made by the RR policy (eg, all
* addresses failed to connect). There won't be any user_data/token
* available */
- if (*wc_arg->target != nullptr) {
+ if (*wc_arg->target != NULL) {
if (!GRPC_MDISNULL(wc_arg->lb_token)) {
initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
wc_arg->lb_token_mdelem_storage,
@@ -211,19 +211,19 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
abort();
}
// Pass on client stats via context. Passes ownership of the reference.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
+ GPR_ASSERT(wc_arg->client_stats != NULL);
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
} else {
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
}
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
}
- GPR_ASSERT(wc_arg->free_when_done != nullptr);
+ GPR_ASSERT(wc_arg->free_when_done != NULL);
gpr_free(wc_arg->free_when_done);
}
@@ -455,12 +455,12 @@ static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
/* vtable for LB tokens in grpc_lb_addresses. */
static void* lb_token_copy(void* token) {
- return token == nullptr
- ? nullptr
+ return token == NULL
+ ? NULL
: (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
- if (token != nullptr) {
+ if (token != NULL) {
GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
}
}
@@ -543,7 +543,7 @@ static grpc_lb_addresses* process_serverlist_locked(
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
false /* is_balancer */,
- nullptr /* balancer_name */, user_data);
+ NULL /* balancer_name */, user_data);
++addr_idx;
}
GPR_ASSERT(addr_idx == num_valid);
@@ -569,7 +569,7 @@ static grpc_lb_addresses* extract_backend_addresses_locked(
const grpc_resolved_address* addr = &addresses->addresses[i].address;
grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
addr->len, false /* is_balancer */,
- nullptr /* balancer_name */,
+ NULL /* balancer_name */,
(void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
++num_copied;
}
@@ -623,7 +623,7 @@ static void update_lb_connectivity_status_locked(
GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
}
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(
GPR_INFO,
"[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
@@ -645,7 +645,7 @@ static bool pick_from_internal_rr_locked(
const grpc_lb_policy_pick_args* pick_args, bool force_async,
grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
// Check for drops if we are not using fallback backend addresses.
- if (glb_policy->serverlist != nullptr) {
+ if (glb_policy->serverlist != NULL) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server* server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
@@ -654,7 +654,7 @@ static bool pick_from_internal_rr_locked(
}
if (server->drop) {
// Not using the RR policy, so unref it.
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
wc_arg->rr_policy);
}
@@ -664,12 +664,12 @@ static bool pick_from_internal_rr_locked(
// the client_load_reporting filter, because we do not create a
// subchannel call (and therefore no client_load_reporting filter)
// for dropped calls.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
+ GPR_ASSERT(wc_arg->client_stats != NULL);
grpc_grpclb_client_stats_add_call_dropped_locked(
server->load_balance_token, wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
@@ -684,7 +684,7 @@ static bool pick_from_internal_rr_locked(
(void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
wc_arg->rr_policy);
}
@@ -694,11 +694,11 @@ static bool pick_from_internal_rr_locked(
pick_args->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
// Pass on client stats via context. Passes ownership of the reference.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
+ GPR_ASSERT(wc_arg->client_stats != NULL);
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
@@ -715,7 +715,7 @@ static bool pick_from_internal_rr_locked(
static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
grpc_lb_addresses* addresses;
- if (glb_policy->serverlist != nullptr) {
+ if (glb_policy->serverlist != NULL) {
GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
} else {
@@ -723,10 +723,10 @@ static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
// serverlist from the balancer, we use the fallback backends returned by
// the resolver. Note that the fallback backend list may be empty, in which
// case the new round_robin policy will keep the requested picks pending.
- GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
- GPR_ASSERT(addresses != nullptr);
+ GPR_ASSERT(addresses != NULL);
grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
@@ -751,11 +751,11 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* error);
static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
grpc_lb_policy_args* args) {
- GPR_ASSERT(glb_policy->rr_policy == nullptr);
+ GPR_ASSERT(glb_policy->rr_policy == NULL);
grpc_lb_policy* new_rr_policy =
grpc_lb_policy_create(exec_ctx, "round_robin", args);
- if (new_rr_policy == nullptr) {
+ if (new_rr_policy == NULL) {
gpr_log(GPR_ERROR,
"[grpclb %p] Failure creating a RoundRobin policy for serverlist "
"update with %" PRIuPTR
@@ -767,7 +767,7 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
return;
}
glb_policy->rr_policy = new_rr_policy;
- grpc_error* rr_state_error = nullptr;
+ grpc_error* rr_state_error = NULL;
const grpc_connectivity_state rr_state =
grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
&rr_state_error);
@@ -806,7 +806,7 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
pp->wrapped_on_complete_arg.client_stats =
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Pending pick about to (async) PICK from RR %p",
glb_policy, glb_policy->rr_policy);
@@ -821,7 +821,7 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
glb_policy->pending_pings = pping->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
glb_policy, glb_policy->rr_policy);
}
@@ -835,16 +835,16 @@ static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
if (glb_policy->shutting_down) return;
grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
- GPR_ASSERT(args != nullptr);
- if (glb_policy->rr_policy != nullptr) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ GPR_ASSERT(args != NULL);
+ if (glb_policy->rr_policy != NULL) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
glb_policy->rr_policy);
}
grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
} else {
create_rr_locked(exec_ctx, glb_policy, args);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
glb_policy->rr_policy);
}
@@ -868,7 +868,7 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
* sink, policies can't transition back from it. .*/
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
"rr_connectivity_shutdown");
- glb_policy->rr_policy = nullptr;
+ glb_policy->rr_policy = NULL;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"glb_rr_connectivity_cb");
gpr_free(rr_connectivity);
@@ -923,7 +923,7 @@ static grpc_channel_args* build_lb_channel_args(
* instantiated and used in that case. Otherwise, something has gone wrong. */
GPR_ASSERT(num_grpclb_addrs > 0);
grpc_lb_addresses* lb_addresses =
- grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
+ grpc_lb_addresses_create(num_grpclb_addrs, NULL);
grpc_slice_hash_table_entry* targets_info_entries =
(grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
num_grpclb_addrs);
@@ -931,7 +931,7 @@ static grpc_channel_args* build_lb_channel_args(
size_t lb_addresses_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (!addresses->addresses[i].is_balancer) continue;
- if (addresses->addresses[i].user_data != nullptr) {
+ if (addresses->addresses[i].user_data != NULL) {
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
}
@@ -945,7 +945,7 @@ static grpc_channel_args* build_lb_channel_args(
grpc_lb_addresses_set_address(
lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
addresses->addresses[i].address.len, false /* is balancer */,
- addresses->addresses[i].balancer_name, nullptr /* user data */);
+ addresses->addresses[i].balancer_name, NULL /* user data */);
}
GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
grpc_slice_hash_table* targets_info =
@@ -970,18 +970,18 @@ static grpc_channel_args* build_lb_channel_args(
static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
- GPR_ASSERT(glb_policy->pending_picks == nullptr);
- GPR_ASSERT(glb_policy->pending_pings == nullptr);
+ GPR_ASSERT(glb_policy->pending_picks == NULL);
+ GPR_ASSERT(glb_policy->pending_pings == NULL);
gpr_free((void*)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
- if (glb_policy->client_stats != nullptr) {
+ if (glb_policy->client_stats != NULL) {
grpc_grpclb_client_stats_unref(glb_policy->client_stats);
}
grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
- if (glb_policy->serverlist != nullptr) {
+ if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
- if (glb_policy->fallback_backend_addresses != nullptr) {
+ if (glb_policy->fallback_backend_addresses != NULL) {
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
}
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
@@ -1002,8 +1002,8 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
* because glb_policy->lb_call is only assigned in lb_call_init_locked as part
* of query_for_backends_locked, which can only be invoked while
* glb_policy->shutting_down is false. */
- if (lb_call != nullptr) {
- grpc_call_cancel(lb_call, nullptr);
+ if (lb_call != NULL) {
+ grpc_call_cancel(lb_call, NULL);
/* lb_on_server_status_received will pick up the cancel and clean up */
}
if (glb_policy->retry_timer_active) {
@@ -1016,27 +1016,27 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
}
pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = nullptr;
+ glb_policy->pending_picks = NULL;
pending_ping* pping = glb_policy->pending_pings;
- glb_policy->pending_pings = nullptr;
- if (glb_policy->rr_policy != nullptr) {
+ glb_policy->pending_pings = NULL;
+ if (glb_policy->rr_policy != NULL) {
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
}
// We destroy the LB channel here because
// glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
// instance. Destroying the lb channel in glb_destroy would likely result in
// a callback invocation without a valid glb_policy arg.
- if (glb_policy->lb_channel != nullptr) {
+ if (glb_policy->lb_channel != NULL) {
grpc_channel_destroy(glb_policy->lb_channel);
- glb_policy->lb_channel = nullptr;
+ glb_policy->lb_channel = NULL;
}
grpc_connectivity_state_set(
exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
- while (pp != nullptr) {
+ while (pp != NULL) {
pending_pick* next = pp->next;
- *pp->target = nullptr;
+ *pp->target = NULL;
GRPC_CLOSURE_SCHED(
exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
@@ -1044,7 +1044,7 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
pp = next;
}
- while (pping != nullptr) {
+ while (pping != NULL) {
pending_ping* next = pping->next;
GRPC_CLOSURE_SCHED(
exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
@@ -1069,11 +1069,11 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = nullptr;
- while (pp != nullptr) {
+ glb_policy->pending_picks = NULL;
+ while (pp != NULL) {
pending_pick* next = pp->next;
if (pp->target == target) {
- *target = nullptr;
+ *target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
@@ -1083,7 +1083,7 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
}
pp = next;
}
- if (glb_policy->rr_policy != nullptr) {
+ if (glb_policy->rr_policy != NULL) {
grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
GRPC_ERROR_REF(error));
}
@@ -1107,8 +1107,8 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = nullptr;
- while (pp != nullptr) {
+ glb_policy->pending_picks = NULL;
+ while (pp != NULL) {
pending_pick* next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
@@ -1121,7 +1121,7 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
}
pp = next;
}
- if (glb_policy->rr_policy != nullptr) {
+ if (glb_policy->rr_policy != NULL) {
grpc_lb_policy_cancel_picks_locked(
exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
initial_metadata_flags_eq, GRPC_ERROR_REF(error));
@@ -1137,7 +1137,7 @@ static void start_picking_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
- glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
+ glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
grpc_millis deadline =
grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
@@ -1166,8 +1166,8 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
- if (pick_args->lb_token_mdelem_storage == nullptr) {
- *target = nullptr;
+ if (pick_args->lb_token_mdelem_storage == NULL) {
+ *target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No mdelem storage for the LB token. Load reporting "
@@ -1176,17 +1176,17 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
}
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
bool pick_done = false;
- if (glb_policy->rr_policy != nullptr) {
+ if (glb_policy->rr_policy != NULL) {
const grpc_connectivity_state rr_connectivity_state =
- grpc_lb_policy_check_connectivity_locked(
- exec_ctx, glb_policy->rr_policy, nullptr);
+ grpc_lb_policy_check_connectivity_locked(exec_ctx,
+ glb_policy->rr_policy, NULL);
// The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
// callback registered to capture this event
// (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
// need to make sure we aren't trying to pick from a RR policy instance
// that's in shutdown.
if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
glb_policy, glb_policy->rr_policy,
@@ -1196,7 +1196,7 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
on_complete);
pick_done = false;
} else { // RR not in shutdown
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
glb_policy->rr_policy);
}
@@ -1208,7 +1208,7 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
wc_arg->rr_policy = glb_policy->rr_policy;
wc_arg->target = target;
wc_arg->context = context;
- GPR_ASSERT(glb_policy->client_stats != nullptr);
+ GPR_ASSERT(glb_policy->client_stats != NULL);
wc_arg->client_stats =
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
wc_arg->wrapped_closure = on_complete;
@@ -1221,7 +1221,7 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
false /* force_async */, target, wc_arg);
}
} else { // glb_policy->rr_policy == NULL
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[grpclb %p] No RR policy. Adding to grpclb's pending picks",
glb_policy);
@@ -1270,9 +1270,9 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
glb_policy->retry_timer_active = false;
- if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
+ if (!glb_policy->shutting_down && glb_policy->lb_call == NULL &&
error == GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
}
query_for_backends_locked(exec_ctx, glb_policy);
@@ -1293,7 +1293,7 @@ static void maybe_restart_lb_call(grpc_exec_ctx* exec_ctx,
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
.next_attempt_start_time;
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
glb_policy);
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
@@ -1337,8 +1337,8 @@ static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
- glb_policy->client_load_report_payload = nullptr;
- if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
+ glb_policy->client_load_report_payload = NULL;
+ if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
@@ -1356,23 +1356,23 @@ static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
request->client_stats.num_calls_finished_with_client_failed_to_send ==
0 &&
request->client_stats.num_calls_finished_known_received == 0 &&
- (drop_entries == nullptr || drop_entries->num_entries == 0);
+ (drop_entries == NULL || drop_entries->num_entries == 0);
}
static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
+ if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
- if (glb_policy->lb_call == nullptr) {
+ if (glb_policy->lb_call == NULL) {
maybe_restart_lb_call(exec_ctx, glb_policy);
}
return;
}
// Construct message payload.
- GPR_ASSERT(glb_policy->client_load_report_payload == nullptr);
+ GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
grpc_grpclb_request* request =
grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
// Skip client load report if the counters were all zero in the last
@@ -1415,9 +1415,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error);
static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
- GPR_ASSERT(glb_policy->server_name != nullptr);
+ GPR_ASSERT(glb_policy->server_name != NULL);
GPR_ASSERT(glb_policy->server_name[0] != '\0');
- GPR_ASSERT(glb_policy->lb_call == nullptr);
+ GPR_ASSERT(glb_policy->lb_call == NULL);
GPR_ASSERT(!glb_policy->shutting_down);
/* Note the following LB call progresses every time there's activity in \a
@@ -1429,13 +1429,13 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
? GRPC_MILLIS_INF_FUTURE
: grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
glb_policy->lb_call = grpc_channel_create_pollset_set_call(
- exec_ctx, glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
+ exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
- &host, deadline, nullptr);
+ &host, deadline, NULL);
grpc_slice_unref_internal(exec_ctx, host);
- if (glb_policy->client_stats != nullptr) {
+ if (glb_policy->client_stats != NULL) {
grpc_grpclb_client_stats_unref(glb_policy->client_stats);
}
glb_policy->client_stats = grpc_grpclb_client_stats_create();
@@ -1471,9 +1471,9 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
- GPR_ASSERT(glb_policy->lb_call != nullptr);
+ GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_unref(glb_policy->lb_call);
- glb_policy->lb_call = nullptr;
+ glb_policy->lb_call = NULL;
grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
@@ -1491,17 +1491,17 @@ static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
*/
static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
- GPR_ASSERT(glb_policy->lb_channel != nullptr);
+ GPR_ASSERT(glb_policy->lb_channel != NULL);
if (glb_policy->shutting_down) return;
lb_call_init_locked(exec_ctx, glb_policy);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Query for backends (lb_channel: %p, lb_call: %p)",
glb_policy, glb_policy->lb_channel, glb_policy->lb_call);
}
- GPR_ASSERT(glb_policy->lb_call != nullptr);
+ GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_error call_error;
grpc_op ops[3];
@@ -1511,22 +1511,22 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
- op->reserved = nullptr;
+ op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata =
&glb_policy->lb_initial_metadata_recv;
op->flags = 0;
- op->reserved = nullptr;
+ op->reserved = NULL;
op++;
- GPR_ASSERT(glb_policy->lb_request_payload != nullptr);
+ GPR_ASSERT(glb_policy->lb_request_payload != NULL);
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = glb_policy->lb_request_payload;
op->flags = 0;
- op->reserved = nullptr;
+ op->reserved = NULL;
op++;
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), nullptr);
+ call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
+ ops, (size_t)(op - ops), NULL);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
@@ -1537,7 +1537,7 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
op->data.recv_status_on_client.status_details =
&glb_policy->lb_call_status_details;
op->flags = 0;
- op->reserved = nullptr;
+ op->reserved = NULL;
op++;
/* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
* count goes to zero) to be unref'd in lb_on_server_status_received_locked */
@@ -1552,7 +1552,7 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
- op->reserved = nullptr;
+ op->reserved = NULL;
op++;
/* take another weak ref to be unref'd/reused in
* lb_on_response_received_locked */
@@ -1569,7 +1569,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op* op = ops;
- if (glb_policy->lb_response_payload != nullptr) {
+ if (glb_policy->lb_response_payload != NULL) {
grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
* glb_policy->lb_response_payload, for a serverlist. */
@@ -1579,15 +1579,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_byte_buffer_reader_destroy(&bbr);
grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
- grpc_grpclb_initial_response* response = nullptr;
+ grpc_grpclb_initial_response* response = NULL;
if (!glb_policy->seen_initial_response &&
(response = grpc_grpclb_initial_response_parse(response_slice)) !=
- nullptr) {
+ NULL) {
if (response->has_client_stats_report_interval) {
glb_policy->client_stats_report_interval = GPR_MAX(
GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
&response->client_stats_report_interval));
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Received initial LB response message; "
"client load reporting interval = %" PRIdPTR " milliseconds",
@@ -1599,7 +1599,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
glb_policy->client_load_report_timer_pending = true;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
schedule_next_client_load_report(exec_ctx, glb_policy);
- } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ } else if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Received initial LB response message; client load "
"reporting NOT enabled",
@@ -1610,9 +1610,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
} else {
grpc_grpclb_serverlist* serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
- if (serverlist != nullptr) {
- GPR_ASSERT(glb_policy->lb_call != nullptr);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (serverlist != NULL) {
+ GPR_ASSERT(glb_policy->lb_call != NULL);
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Serverlist with %" PRIuPTR " servers received",
glb_policy, serverlist->num_servers);
@@ -1630,7 +1630,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
if (serverlist->num_servers > 0) {
if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
serverlist)) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Incoming server list identical to current, "
"ignoring.",
@@ -1638,14 +1638,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
}
grpc_grpclb_destroy_serverlist(serverlist);
} else { /* new serverlist */
- if (glb_policy->serverlist != nullptr) {
+ if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
} else {
/* or dispose of the fallback */
grpc_lb_addresses_destroy(exec_ctx,
glb_policy->fallback_backend_addresses);
- glb_policy->fallback_backend_addresses = nullptr;
+ glb_policy->fallback_backend_addresses = NULL;
if (glb_policy->fallback_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
glb_policy->fallback_timer_active = false;
@@ -1659,7 +1659,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
rr_handover_locked(exec_ctx, glb_policy);
}
} else {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Received empty server list, ignoring.",
glb_policy);
@@ -1679,7 +1679,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
- op->reserved = nullptr;
+ op->reserved = NULL;
op++;
/* reuse the "lb_on_response_received_locked" weak ref taken in
* query_for_backends_locked() */
@@ -1705,14 +1705,14 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
glb_policy->fallback_timer_active = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
- if (glb_policy->serverlist == nullptr) {
+ if (glb_policy->serverlist == NULL) {
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Falling back to use backends from resolver",
glb_policy);
}
- GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
rr_handover_locked(exec_ctx, glb_policy);
}
}
@@ -1723,8 +1723,8 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- GPR_ASSERT(glb_policy->lb_call != nullptr);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ GPR_ASSERT(glb_policy->lb_call != NULL);
+ if (grpc_lb_glb_trace.enabled()) {
char* status_details =
grpc_slice_to_c_string(glb_policy->lb_call_status_details);
gpr_log(GPR_INFO,
@@ -1747,7 +1747,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy,
const grpc_lb_addresses* addresses) {
- GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
@@ -1762,8 +1762,8 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
- if (glb_policy->lb_channel == nullptr) {
+ if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+ if (glb_policy->lb_channel == NULL) {
// If we don't have a current channel to the LB, go into TRANSIENT
// FAILURE.
grpc_connectivity_state_set(
@@ -1783,10 +1783,10 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
(const grpc_lb_addresses*)arg->value.pointer.p;
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
- if (glb_policy->serverlist == nullptr) {
+ if (glb_policy->serverlist == NULL) {
fallback_update_locked(exec_ctx, glb_policy, addresses);
}
- GPR_ASSERT(glb_policy->lb_channel != nullptr);
+ GPR_ASSERT(glb_policy->lb_channel != NULL);
// Propagate updates to the LB channel (pick_first) through the fake
// resolver.
grpc_channel_args* lb_channel_args = build_lb_channel_args(
@@ -1809,7 +1809,7 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_polling_entity_create_from_pollset_set(
glb_policy->base.interested_parties),
&glb_policy->lb_channel_connectivity,
- &glb_policy->lb_channel_on_connectivity_changed, nullptr);
+ &glb_policy->lb_channel_on_connectivity_changed, NULL);
}
}
@@ -1837,7 +1837,7 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
grpc_polling_entity_create_from_pollset_set(
glb_policy->base.interested_parties),
&glb_policy->lb_channel_connectivity,
- &glb_policy->lb_channel_on_connectivity_changed, nullptr);
+ &glb_policy->lb_channel_on_connectivity_changed, NULL);
break;
}
case GRPC_CHANNEL_IDLE:
@@ -1845,9 +1845,9 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
// call to kick the lb channel into gear.
/* fallthrough */
case GRPC_CHANNEL_READY:
- if (glb_policy->lb_call != nullptr) {
+ if (glb_policy->lb_call != NULL) {
glb_policy->updating_lb_call = true;
- grpc_call_cancel(glb_policy->lb_call, nullptr);
+ grpc_call_cancel(glb_policy->lb_call, NULL);
// lb_on_server_status_received() will pick up the cancel and reinit
// lb_call.
} else if (glb_policy->started_picking && !glb_policy->shutting_down) {
@@ -1886,27 +1886,27 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
/* Count the number of gRPC-LB addresses. There must be at least one. */
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
- return nullptr;
+ if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+ return NULL;
}
grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
- if (num_grpclb_addrs == 0) return nullptr;
+ if (num_grpclb_addrs == 0) return NULL;
glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
- GPR_ASSERT(arg != nullptr);
+ GPR_ASSERT(arg != NULL);
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
glb_policy->server_name =
gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Will use '%s' as the server name for LB request.",
glb_policy, glb_policy->server_name);
@@ -1914,7 +1914,7 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
grpc_uri_destroy(uri);
glb_policy->cc_factory = args->client_channel_factory;
- GPR_ASSERT(glb_policy->cc_factory != nullptr);
+ GPR_ASSERT(glb_policy->cc_factory != NULL);
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
glb_policy->lb_call_timeout_ms =
@@ -1952,11 +1952,11 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
exec_ctx, glb_policy->response_generator, lb_channel_args);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
gpr_free(uri_str);
- if (glb_policy->lb_channel == nullptr) {
+ if (glb_policy->lb_channel == NULL) {
gpr_free((void*)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
gpr_free(glb_policy);
- return nullptr;
+ return NULL;
}
grpc_subchannel_index_ref();
GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
@@ -1990,20 +1990,16 @@ static bool maybe_add_client_load_reporting_filter(
grpc_channel_stack_builder_get_channel_arguments(builder);
const grpc_arg* channel_arg =
grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
- if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
+ if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
strcmp(channel_arg->value.string, "grpclb") == 0) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
+ builder, (const grpc_channel_filter*)arg, NULL, NULL);
}
return true;
}
extern "C" void grpc_lb_policy_grpclb_init() {
grpc_register_lb_policy(grpc_glb_lb_factory_create());
- grpc_register_tracer(&grpc_lb_glb_trace);
-#ifndef NDEBUG
- grpc_register_tracer(&grpc_trace_lb_policy_refcount);
-#endif
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_client_load_reporting_filter,
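
The change repeated throughout this file is mechanical: each grpc_tracer_flag global initialized with GRPC_TRACER_INITIALIZER, queried through GRPC_TRACER_ON() and registered by hand via grpc_register_tracer(), becomes a grpc_core::TraceFlag object queried through its enabled() method. The grpc_register_tracer() calls dropped from grpc_lb_policy_grpclb_init() above suggest the TraceFlag constructor now registers the flag itself, and the dropped #ifndef NDEBUG block suggests a debug-only variant (presumably grpc_core::DebugOnlyTraceFlag) covers the refcount tracer. A minimal sketch of the new-style usage, with illustrative names not taken from this diff:

#include <grpc/support/log.h>

#include "src/core/lib/debug/trace.h"

// Constructing the flag is assumed to register it under "example", so it
// can still be toggled at runtime through the GRPC_TRACE environment
// variable.
grpc_core::TraceFlag grpc_example_trace(false, "example");

static void traced_operation(void* self) {
  // Replaces the old GRPC_TRACER_ON(grpc_example_trace) macro check.
  if (grpc_example_trace.enabled()) {
    gpr_log(GPR_DEBUG, "[example %p] performing traced operation", self);
  }
}
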
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index caa6aee9a6..4388d6a83e 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -29,8 +29,7 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
-grpc_tracer_flag grpc_lb_pick_first_trace =
- GRPC_TRACER_INITIALIZER(false, "pick_first");
+grpc_core::TraceFlag grpc_lb_pick_first_trace(false, "pick_first");
typedef struct pending_pick {
struct pending_pick* next;
@@ -60,42 +59,42 @@ typedef struct {
static void pf_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
- GPR_ASSERT(p->subchannel_list == nullptr);
- GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
- GPR_ASSERT(p->pending_picks == nullptr);
+ GPR_ASSERT(p->subchannel_list == NULL);
+ GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
+ GPR_ASSERT(p->pending_picks == NULL);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p);
grpc_subchannel_index_unref();
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void*)p);
}
}
static void shutdown_locked(grpc_exec_ctx* exec_ctx, pick_first_lb_policy* p,
grpc_error* error) {
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
p->shutdown = true;
pending_pick* pp;
- while ((pp = p->pending_picks) != nullptr) {
+ while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
- *pp->target = nullptr;
+ *pp->target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"shutdown");
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"pf_shutdown");
- p->subchannel_list = nullptr;
+ p->subchannel_list = NULL;
}
- if (p->latest_pending_subchannel_list != nullptr) {
+ if (p->latest_pending_subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "pf_shutdown");
- p->latest_pending_subchannel_list = nullptr;
+ p->latest_pending_subchannel_list = NULL;
}
GRPC_ERROR_UNREF(error);
}
@@ -110,11 +109,11 @@ static void pf_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
- p->pending_picks = nullptr;
- while (pp != nullptr) {
+ p->pending_picks = NULL;
+ while (pp != NULL) {
pending_pick* next = pp->next;
if (pp->target == target) {
- *target = nullptr;
+ *target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
@@ -134,8 +133,8 @@ static void pf_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
- p->pending_picks = nullptr;
- while (pp != nullptr) {
+ p->pending_picks = NULL;
+ while (pp != NULL) {
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
@@ -155,8 +154,7 @@ static void pf_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
static void start_picking_locked(grpc_exec_ctx* exec_ctx,
pick_first_lb_policy* p) {
p->started_picking = true;
- if (p->subchannel_list != nullptr &&
- p->subchannel_list->num_subchannels > 0) {
+ if (p->subchannel_list != NULL && p->subchannel_list->num_subchannels > 0) {
p->subchannel_list->checking_subchannel = 0;
grpc_lb_subchannel_list_ref_for_connectivity_watch(
p->subchannel_list, "connectivity_watch+start_picking");
@@ -179,7 +177,7 @@ static int pf_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_closure* on_complete) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously.
- if (p->selected != nullptr) {
+ if (p->selected != NULL) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
"picked");
return 1;
@@ -243,8 +241,8 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
- if (p->subchannel_list == nullptr) {
+ if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+ if (p->subchannel_list == NULL) {
// If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
@@ -261,7 +259,7 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
}
const grpc_lb_addresses* addresses =
(const grpc_lb_addresses*)arg->value.pointer.p;
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
(void*)p, (unsigned long)addresses->num_addresses);
}
@@ -275,18 +273,18 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"pf_update_empty");
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_empty_update");
}
p->subchannel_list = subchannel_list; // Empty list.
- p->selected = nullptr;
+ p->selected = NULL;
return;
}
- if (p->selected == nullptr) {
+ if (p->selected == NULL) {
// We don't yet have a selected subchannel, so replace the current
// subchannel list immediately.
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"pf_update_before_selected");
}
@@ -298,7 +296,7 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
if (sd->subchannel == p->selected->subchannel) {
// The currently selected subchannel is in the update: we are done.
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Pick First %p found already selected subchannel %p "
"at update index %" PRIuPTR " of %" PRIuPTR "; update done",
@@ -308,12 +306,12 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_lb_subchannel_list_ref_for_connectivity_watch(
subchannel_list, "connectivity_watch+replace_selected");
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "pf_update_includes_selected");
}
p->subchannel_list = subchannel_list;
- if (p->selected->connected_subchannel != nullptr) {
+ if (p->selected->connected_subchannel != NULL) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "pf_update_includes_selected");
}
@@ -322,11 +320,11 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
// If there was a previously pending update (which may or may
// not have contained the currently selected subchannel), drop
// it, so that it doesn't override what we've done here.
- if (p->latest_pending_subchannel_list != nullptr) {
+ if (p->latest_pending_subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
"pf_update_includes_selected+outdated");
- p->latest_pending_subchannel_list = nullptr;
+ p->latest_pending_subchannel_list = NULL;
}
return;
}
@@ -335,8 +333,8 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
// pending subchannel list to the new subchannel list. We will wait
// for it to report READY before swapping it into the current
// subchannel list.
- if (p->latest_pending_subchannel_list != nullptr) {
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ if (p->latest_pending_subchannel_list != NULL) {
+ if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
@@ -363,7 +361,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
pick_first_lb_policy* p = (pick_first_lb_policy*)sd->subchannel_list->policy;
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
" of %" PRIuPTR
@@ -403,12 +401,12 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
// If the new state is anything other than READY and there is a
// pending update, switch to the pending update.
if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
- p->latest_pending_subchannel_list != nullptr) {
- p->selected = nullptr;
+ p->latest_pending_subchannel_list != NULL) {
+ p->selected = NULL;
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "selected_not_ready+switch_to_update");
p->subchannel_list = p->latest_pending_subchannel_list;
- p->latest_pending_subchannel_list = nullptr;
+ p->latest_pending_subchannel_list = NULL;
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
@@ -446,11 +444,11 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
// Case 2. Promote p->latest_pending_subchannel_list to
// p->subchannel_list.
if (sd->subchannel_list == p->latest_pending_subchannel_list) {
- GPR_ASSERT(p->subchannel_list != nullptr);
+ GPR_ASSERT(p->subchannel_list != NULL);
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"finish_update");
p->subchannel_list = p->latest_pending_subchannel_list;
- p->latest_pending_subchannel_list = nullptr;
+ p->latest_pending_subchannel_list = NULL;
}
// Cases 1 and 2.
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
@@ -460,7 +458,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_subchannel_get_connected_subchannel(sd->subchannel),
"connected");
p->selected = sd;
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void*)p,
(void*)sd->subchannel);
}
@@ -472,7 +470,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "picked");
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
(void*)p->selected);
@@ -492,7 +490,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
sd->subchannel_list->num_subchannels;
sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
- } while (sd->subchannel == nullptr);
+ } while (sd->subchannel == NULL);
// Case 1: Only set state to TRANSIENT_FAILURE if we've tried
// all subchannels.
if (sd->subchannel_list->checking_subchannel == 0 &&
@@ -529,7 +527,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
sd->subchannel_list->num_subchannels;
sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
- } while (sd->subchannel == nullptr && sd != original_sd);
+ } while (sd->subchannel == NULL && sd != original_sd);
if (sd == original_sd) {
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
@@ -569,9 +567,9 @@ static void pick_first_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy* create_pick_first(grpc_exec_ctx* exec_ctx,
grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
- GPR_ASSERT(args->client_channel_factory != nullptr);
+ GPR_ASSERT(args->client_channel_factory != NULL);
pick_first_lb_policy* p = (pick_first_lb_policy*)gpr_zalloc(sizeof(*p));
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p created.", (void*)p);
}
pf_update_locked(exec_ctx, &p->base, args);
@@ -595,7 +593,6 @@ static grpc_lb_policy_factory* pick_first_lb_factory_create() {
extern "C" void grpc_lb_policy_pick_first_init() {
grpc_register_lb_policy(pick_first_lb_factory_create());
- grpc_register_tracer(&grpc_lb_pick_first_trace);
}
extern "C" void grpc_lb_policy_pick_first_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 6ea1f025df..f4ad746157 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -39,8 +39,7 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/static_metadata.h"
-grpc_tracer_flag grpc_lb_round_robin_trace =
- GRPC_TRACER_INITIALIZER(false, "round_robin");
+grpc_core::TraceFlag grpc_lb_round_robin_trace(false, "round_robin");
/** List of entities waiting for a pick.
*
@@ -100,8 +99,8 @@ typedef struct round_robin_lb_policy {
* The caller must do that if it returns a pick. */
static size_t get_next_ready_subchannel_index_locked(
const round_robin_lb_policy* p) {
- GPR_ASSERT(p->subchannel_list != nullptr);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ GPR_ASSERT(p->subchannel_list != NULL);
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p] getting next ready subchannel (out of %lu), "
"last_ready_subchannel_index=%lu",
@@ -111,7 +110,7 @@ static size_t get_next_ready_subchannel_index_locked(
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
const size_t index = (i + p->last_ready_subchannel_index + 1) %
p->subchannel_list->num_subchannels;
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"[RR %p] checking subchannel %p, subchannel_list %p, index %lu: "
@@ -123,7 +122,7 @@ static size_t get_next_ready_subchannel_index_locked(
}
if (p->subchannel_list->subchannels[index].curr_connectivity_state ==
GRPC_CHANNEL_READY) {
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] found next ready subchannel (%p) at index %lu of "
"subchannel_list %p",
@@ -134,7 +133,7 @@ static size_t get_next_ready_subchannel_index_locked(
return index;
}
}
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", (void*)p);
}
return p->subchannel_list->num_subchannels;
@@ -145,7 +144,7 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
size_t last_ready_index) {
GPR_ASSERT(last_ready_index < p->subchannel_list->num_subchannels);
p->last_ready_subchannel_index = last_ready_index;
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
(void*)p, (unsigned long)last_ready_index,
@@ -157,12 +156,12 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
static void rr_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
(void*)pol, (void*)pol);
}
- GPR_ASSERT(p->subchannel_list == nullptr);
- GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
+ GPR_ASSERT(p->subchannel_list == NULL);
+ GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
grpc_subchannel_index_unref();
gpr_free(p);
@@ -170,30 +169,30 @@ static void rr_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
static void shutdown_locked(grpc_exec_ctx* exec_ctx, round_robin_lb_policy* p,
grpc_error* error) {
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
pending_pick* pp;
- while ((pp = p->pending_picks) != nullptr) {
+ while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
- *pp->target = nullptr;
+ *pp->target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"rr_shutdown");
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_rr_shutdown");
- p->subchannel_list = nullptr;
+ p->subchannel_list = NULL;
}
- if (p->latest_pending_subchannel_list != nullptr) {
+ if (p->latest_pending_subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
"sl_shutdown_pending_rr_shutdown");
- p->latest_pending_subchannel_list = nullptr;
+ p->latest_pending_subchannel_list = NULL;
}
GRPC_ERROR_UNREF(error);
}
@@ -209,11 +208,11 @@ static void rr_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
- p->pending_picks = nullptr;
- while (pp != nullptr) {
+ p->pending_picks = NULL;
+ while (pp != NULL) {
pending_pick* next = pp->next;
if (pp->target == target) {
- *target = nullptr;
+ *target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
@@ -233,12 +232,12 @@ static void rr_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
- p->pending_picks = nullptr;
- while (pp != nullptr) {
+ p->pending_picks = NULL;
+ while (pp != NULL) {
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- *pp->target = nullptr;
+ *pp->target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
@@ -276,12 +275,12 @@ static int rr_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", (void*)pol,
p->shutdown);
}
GPR_ASSERT(!p->shutdown);
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list != NULL) {
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
/* readily available, report right away */
@@ -289,10 +288,10 @@ static int rr_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
&p->subchannel_list->subchannels[next_ready_index];
*target =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
- if (user_data != nullptr) {
+ if (user_data != NULL) {
*user_data = sd->user_data;
}
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
@@ -393,7 +392,7 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
"rr_shutdown");
p->shutdown = true;
new_state = GRPC_CHANNEL_SHUTDOWN;
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p] Shutting down: all subchannels have gone into shutdown",
(void*)p);
@@ -419,7 +418,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
round_robin_lb_policy* p =
(round_robin_lb_policy*)sd->subchannel_list->policy;
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p: "
@@ -472,7 +471,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
}
} else { // sd not in SHUTDOWN
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
- if (sd->connected_subchannel == nullptr) {
+ if (sd->connected_subchannel == NULL) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(sd->subchannel),
"connected");
@@ -484,9 +483,9 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
// for sds belonging to outdated subchannel lists.
GPR_ASSERT(sd->subchannel_list == p->latest_pending_subchannel_list);
GPR_ASSERT(!sd->subchannel_list->shutting_down);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
const unsigned long num_subchannels =
- p->subchannel_list != nullptr
+ p->subchannel_list != NULL
? (unsigned long)p->subchannel_list->num_subchannels
: 0;
gpr_log(GPR_DEBUG,
@@ -495,13 +494,13 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
(void*)p, (void*)p->subchannel_list, num_subchannels,
(void*)sd->subchannel_list, num_subchannels);
}
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "sl_phase_out_shutdown");
}
p->subchannel_list = p->latest_pending_subchannel_list;
- p->latest_pending_subchannel_list = nullptr;
+ p->latest_pending_subchannel_list = NULL;
}
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
@@ -510,7 +509,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
grpc_lb_subchannel_data* selected =
&p->subchannel_list->subchannels[next_ready_index];
- if (p->pending_picks != nullptr) {
+ if (p->pending_picks != NULL) {
// if the selected subchannel is going to be used for the pending
// picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index);
@@ -520,10 +519,10 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_picked");
- if (pp->user_data != nullptr) {
+ if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] Fulfilling pending pick. Target <-- subchannel %p "
"(subchannel_list %p, index %lu)",
@@ -577,11 +576,11 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+ if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
// If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
// Otherwise, keep using the current subchannel list (ignore this update).
- if (p->subchannel_list == nullptr) {
+ if (p->subchannel_list == NULL) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
@@ -590,7 +589,7 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
return;
}
grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
addresses->num_addresses);
}
@@ -602,7 +601,7 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"rr_update_empty");
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_empty_update");
}
@@ -610,8 +609,8 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
return;
}
if (p->started_picking) {
- if (p->latest_pending_subchannel_list != nullptr) {
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (p->latest_pending_subchannel_list != NULL) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] Shutting down latest pending subchannel list %p, "
"about to be replaced by newer latest %p",
@@ -635,7 +634,7 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
} else {
// The policy isn't picking yet. Save the update for later, disposing of
// previous version if any.
- if (p->subchannel_list != nullptr) {
+ if (p->subchannel_list != NULL) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "rr_update_before_started_picking");
}
@@ -662,14 +661,14 @@ static void round_robin_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy* round_robin_create(grpc_exec_ctx* exec_ctx,
grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
- GPR_ASSERT(args->client_channel_factory != nullptr);
+ GPR_ASSERT(args->client_channel_factory != NULL);
round_robin_lb_policy* p = (round_robin_lb_policy*)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
rr_update_locked(exec_ctx, &p->base, args);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void*)p,
(unsigned long)p->subchannel_list->num_subchannels);
}
@@ -691,7 +690,6 @@ static grpc_lb_policy_factory* round_robin_lb_factory_create() {
extern "C" void grpc_lb_policy_round_robin_init() {
grpc_register_lb_policy(round_robin_lb_factory_create());
- grpc_register_tracer(&grpc_lb_round_robin_trace);
}
extern "C" void grpc_lb_policy_round_robin_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index 27d9598ac4..ec3fb4e7a2 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -31,38 +31,38 @@
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
grpc_lb_subchannel_data* sd,
const char* reason) {
- if (sd->subchannel != nullptr) {
- if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ if (sd->subchannel != NULL) {
+ if (sd->subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
- sd->subchannel = nullptr;
- if (sd->connected_subchannel != nullptr) {
+ sd->subchannel = NULL;
+ if (sd->connected_subchannel != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, sd->connected_subchannel,
reason);
- sd->connected_subchannel = nullptr;
+ sd->connected_subchannel = NULL;
}
- if (sd->user_data != nullptr) {
- GPR_ASSERT(sd->user_data_vtable != nullptr);
+ if (sd->user_data != NULL) {
+ GPR_ASSERT(sd->user_data_vtable != NULL);
sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
- sd->user_data = nullptr;
+ sd->user_data = NULL;
}
}
}
void grpc_lb_subchannel_data_start_connectivity_watch(
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
- if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ if (sd->subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): requesting connectivity change notification",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
@@ -76,11 +76,11 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
- if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ if (sd->subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
@@ -90,15 +90,15 @@ void grpc_lb_subchannel_data_stop_connectivity_watch(
}

grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
- grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_tracer_flag* tracer,
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_core::TraceFlag* tracer,
const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb) {
grpc_lb_subchannel_list* subchannel_list =
(grpc_lb_subchannel_list*)gpr_zalloc(sizeof(*subchannel_list));
- if (GRPC_TRACER_ON(*tracer)) {
+ if (tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
- tracer->name, p, subchannel_list, addresses->num_addresses);
+ tracer->name(), p, subchannel_list, addresses->num_addresses);
}
subchannel_list->policy = p;
subchannel_list->tracer = tracer;
@@ -126,26 +126,26 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
grpc_channel_args_destroy(exec_ctx, new_args);
- if (subchannel == nullptr) {
+ if (subchannel == NULL) {
// Subchannel could not be created.
- if (GRPC_TRACER_ON(*tracer)) {
+ if (tracer->enabled()) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG,
"[%s %p] could not create subchannel for address uri %s, "
"ignoring",
- tracer->name, subchannel_list->policy, address_uri);
+ tracer->name(), subchannel_list->policy, address_uri);
gpr_free(address_uri);
}
continue;
}
- if (GRPC_TRACER_ON(*tracer)) {
+ if (tracer->enabled()) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s",
- tracer->name, p, subchannel_list, subchannel_index, subchannel,
+ tracer->name(), p, subchannel_list, subchannel_index, subchannel,
address_uri);
gpr_free(address_uri);
}
@@ -162,7 +162,7 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
sd->pending_connectivity_state_unsafe = GRPC_CHANNEL_IDLE;
sd->user_data_vtable = addresses->user_data_vtable;
- if (sd->user_data_vtable != nullptr) {
+ if (sd->user_data_vtable != NULL) {
sd->user_data =
sd->user_data_vtable->copy(addresses->addresses[i].user_data);
}
@@ -174,9 +174,9 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(

static void subchannel_list_destroy(grpc_exec_ctx* exec_ctx,
grpc_lb_subchannel_list* subchannel_list) {
- if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ if (subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
- subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list);
}
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
@@ -191,10 +191,10 @@ static void subchannel_list_destroy(grpc_exec_ctx* exec_ctx,
void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
const char* reason) {
gpr_ref_non_zero(&subchannel_list->refcount);
- if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ if (subchannel_list->tracer->enabled()) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
- subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list, (unsigned long)(count - 1), (unsigned long)count,
reason);
}
@@ -204,10 +204,10 @@ void grpc_lb_subchannel_list_unref(grpc_exec_ctx* exec_ctx,
grpc_lb_subchannel_list* subchannel_list,
const char* reason) {
const bool done = gpr_unref(&subchannel_list->refcount);
- if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ if (subchannel_list->tracer->enabled()) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
- subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list, (unsigned long)(count + 1), (unsigned long)count,
reason);
}
@@ -231,26 +231,25 @@ void grpc_lb_subchannel_list_unref_for_connectivity_watch(
static void subchannel_data_cancel_connectivity_watch(
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, const char* reason) {
- if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ if (sd->subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
- grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, nullptr,
- nullptr,
+ grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}

void grpc_lb_subchannel_list_shutdown_and_unref(
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
const char* reason) {
- if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ if (subchannel_list->tracer->enabled()) {
gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
- subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list->tracer->name(), subchannel_list->policy,
subchannel_list, reason);
}
GPR_ASSERT(!subchannel_list->shutting_down);
@@ -262,7 +261,7 @@ void grpc_lb_subchannel_list_shutdown_and_unref(
// Otherwise, unref the subchannel directly.
if (sd->connectivity_notification_pending) {
subchannel_data_cancel_connectivity_watch(exec_ctx, sd, reason);
- } else if (sd->subchannel != nullptr) {
+ } else if (sd->subchannel != NULL) {
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, reason);
}
}
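Every trace call converted in this file rebuilds the same "[%s %p]" prefix from tracer->name() and the owning policy before its message. A hypothetical helper, shown only to make the repeated idiom explicit; nothing like it exists in this commit:

// Hypothetical convenience wrapper; the diff inlines this at each call site.
static void lb_trace_log(grpc_core::TraceFlag* tracer, grpc_lb_policy* policy,
                         const char* msg) {
  // Guard first so the formatting work is skipped when tracing is off.
  if (tracer->enabled()) {
    gpr_log(GPR_DEBUG, "[%s %p] %s", tracer->name(), (void*)policy, msg);
  }
}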
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
index e18ad490e8..6538bd0673 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -88,7 +88,7 @@ struct grpc_lb_subchannel_list {
/** backpointer to owning policy */
grpc_lb_policy* policy;

- grpc_tracer_flag* tracer;
+ grpc_core::TraceFlag* tracer;

/** all our subchannels */
size_t num_subchannels;
@@ -121,7 +121,7 @@ struct grpc_lb_subchannel_list {
};

grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
- grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_tracer_flag* tracer,
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_core::TraceFlag* tracer,
const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb);
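Given the prototype above, a policy now hands in the address of its own TraceFlag. A hedged sketch of a call site; the flag, the callback, and the wrapper name are placeholders, with only the grpc_lb_subchannel_list_create() signature taken from the header:

// Hypothetical caller -- names below are not code from this commit.
extern grpc_core::TraceFlag grpc_lb_example_trace;

static void example_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg,
                                         grpc_error* error);

static grpc_lb_subchannel_list* example_build_list(
    grpc_exec_ctx* exec_ctx, grpc_lb_policy* p,
    const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args) {
  return grpc_lb_subchannel_list_create(
      exec_ctx, p, &grpc_lb_example_trace,  // was: a grpc_tracer_flag*
      addresses, args, example_connectivity_changed);
}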
diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc
index 7e84b98cb9..c16b1515c7 100644
--- a/src/core/ext/filters/client_channel/resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver.cc
@@ -19,10 +19,8 @@
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/iomgr/combiner.h"
-#ifndef NDEBUG
-grpc_tracer_flag grpc_trace_resolver_refcount =
- GRPC_TRACER_INITIALIZER(false, "resolver_refcount");
-#endif
+grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount(false,
+ "resolver_refcount");

void grpc_resolver_init(grpc_resolver* resolver,
const grpc_resolver_vtable* vtable,
@@ -35,7 +33,7 @@ void grpc_resolver_init(grpc_resolver* resolver,
#ifndef NDEBUG
void grpc_resolver_ref(grpc_resolver* resolver, const char* file, int line,
const char* reason) {
- if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
+ if (grpc_trace_resolver_refcount.enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"RESOLVER:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", resolver,
@@ -50,7 +48,7 @@ void grpc_resolver_ref(grpc_resolver* resolver) {
#ifndef NDEBUG
void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
const char* file, int line, const char* reason) {
- if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
+ if (grpc_trace_resolver_refcount.enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"RESOLVER:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", resolver,
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index a0eb0bcfdf..b5806ad8d7 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -29,9 +29,7 @@ extern "C" {
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;

-#ifndef NDEBUG
-extern grpc_tracer_flag grpc_trace_resolver_refcount;
-#endif
+extern grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount;

/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */
struct grpc_resolver {
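DebugOnlyTraceFlag is what lets both the definition in resolver.cc and the extern above shed their #ifndef NDEBUG guards: the symbol now exists in every build. Presumably the release-build variant is a stub whose enabled() is constantly false, so guarded log statements still compile away. A sketch of that idea only, not the actual gRPC implementation:

// Illustration of the assumed release/debug split; names are placeholders.
#ifdef NDEBUG
class DebugOnlyTraceFlagSketch {
 public:
  DebugOnlyTraceFlagSketch(bool /*default_enabled*/, const char* /*name*/) {}
  bool enabled() const { return false; }  // guarded logs become dead code
};
#else
typedef grpc_core::TraceFlag DebugOnlyTraceFlagSketch;  // full tracer in debug
#endif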
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index 2720e68040..58e294d597 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -199,7 +199,7 @@ static gpr_atm ref_mutate(grpc_subchannel* c, gpr_atm delta,
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
+ if (grpc_trace_stream_refcount.enabled()) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"SUBCHANNEL: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
purpose, old_val, old_val + delta, reason);
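Past the flag rename, this hunk shows the refcount-tracing idiom used here: fetch-and-add returns the pre-mutation value, so a single atomic op supplies both sides of the old -> new log line. A stripped-down restatement of the same shape, sketch only; assumes <inttypes.h> for PRIxPTR:

static gpr_atm sketch_ref_mutate(gpr_atm* ref_pair, gpr_atm delta,
                                 const char* reason) {
  // One atomic op; the old value comes back, the new value is old + delta.
  gpr_atm old_val = gpr_atm_full_fetch_add(ref_pair, delta);
#ifndef NDEBUG
  if (grpc_trace_stream_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "SKETCH: 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]",
            old_val, old_val + delta, reason);
  }
#endif
  return old_val;
}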
diff --git a/src/core/ext/filters/http/http_filters_plugin.cc b/src/core/ext/filters/http/http_filters_plugin.cc
index 69dbde409c..ac31ace35d 100644
--- a/src/core/ext/filters/http/http_filters_plugin.cc
+++ b/src/core/ext/filters/http/http_filters_plugin.cc
@@ -65,7 +65,6 @@ static bool maybe_add_required_filter(grpc_exec_ctx* exec_ctx,
}

extern "C" void grpc_http_filters_init(void) {
- grpc_register_tracer(&grpc_compression_trace);
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_optional_filter, &compress_filter);
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index 8d95133d8f..d070b56b6a 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -243,7 +243,7 @@ static void finish_send_message(grpc_exec_ctx* exec_ctx,
bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm,
&calld->slices, &tmp);
if (did_compress) {
- if (GRPC_TRACER_ON(grpc_compression_trace)) {
+ if (grpc_compression_trace.enabled()) {
const char* algo_name;
const size_t before_size = calld->slices.length;
const size_t after_size = tmp.length;
@@ -258,7 +258,7 @@ static void finish_send_message(grpc_exec_ctx* exec_ctx,
grpc_slice_buffer_swap(&calld->slices, &tmp);
send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
} else {
- if (GRPC_TRACER_ON(grpc_compression_trace)) {
+ if (grpc_compression_trace.enabled()) {
const char* algo_name;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));