Diffstat (limited to 'src/core/ext/filters/client_channel/lb_policy')
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc |   4
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc                       | 208
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc        |   6
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc          |   6
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc            |  14
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc               |  70
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc             |  58
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc                     |  22
8 files changed, 194 insertions(+), 194 deletions(-)
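The change is purely mechanical: every NULL in these C++ translation units becomes nullptr, one deletion per insertion (hence the matching 194/194 counts above), with no behavioral difference intended. As a standalone illustration of why nullptr is preferred in C++ (a minimal sketch, not code from this tree): NULL is an integer constant, so it participates in integer overload resolution, while nullptr has its own type, std::nullptr_t, which only converts to pointer types.

// Minimal sketch (not gRPC source) of the NULL vs nullptr distinction.
#include <cstddef>
#include <cstdio>

static void take(int) { std::puts("take(int)"); }
static void take(const char*) { std::puts("take(const char*)"); }

int main() {
  take(0);        // integer literal: unambiguously the int overload
  take(nullptr);  // std::nullptr_t: unambiguously the pointer overload
  // take(NULL);  // implementation-defined: NULL may be plain 0 (silently
  //              // picks the int overload) or 0L/__null (ambiguous or a
  //              // warning, depending on the compiler)
  return 0;
}

A rewrite of this shape is typically automated rather than hand-edited; clang-tidy's modernize-use-nullptr check performs exactly this kind of token-for-token replacement.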
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index d93a9c3710..6d9fadaf30 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -72,8 +72,8 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
const grpc_call_element_args* args) {
call_data* calld = (call_data*)elem->call_data;
// Get stats object from context and take a ref.
- GPR_ASSERT(args->context != NULL);
- GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
+ GPR_ASSERT(args->context != nullptr);
+ GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr);
calld->client_stats = grpc_grpclb_client_stats_ref(
(grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS].value);
// Record call started.
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 01b243bc3e..a3282b5d93 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -133,7 +133,7 @@ grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
static grpc_error* initial_metadata_add_lb_token(
grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
- GPR_ASSERT(lb_token_mdelem_storage != NULL);
+ GPR_ASSERT(lb_token_mdelem_storage != nullptr);
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
lb_token_mdelem_storage, lb_token);
@@ -186,14 +186,14 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
- if (wc_arg->rr_policy != NULL) {
+ if (wc_arg->rr_policy != nullptr) {
/* if *target is NULL, no pick has been made by the RR policy (eg, all
* addresses failed to connect). There won't be any user_data/token
* available */
- if (*wc_arg->target != NULL) {
+ if (*wc_arg->target != nullptr) {
if (!GRPC_MDISNULL(wc_arg->lb_token)) {
initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
wc_arg->lb_token_mdelem_storage,
@@ -206,7 +206,7 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
abort();
}
// Pass on client stats via context. Passes ownership of the reference.
- GPR_ASSERT(wc_arg->client_stats != NULL);
+ GPR_ASSERT(wc_arg->client_stats != nullptr);
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
} else {
@@ -217,7 +217,7 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
}
- GPR_ASSERT(wc_arg->free_when_done != NULL);
+ GPR_ASSERT(wc_arg->free_when_done != nullptr);
gpr_free(wc_arg->free_when_done);
}
@@ -454,7 +454,7 @@ static void* lb_token_copy(void* token) {
: (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
- if (token != NULL) {
+ if (token != nullptr) {
GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
}
}
@@ -537,7 +537,7 @@ static grpc_lb_addresses* process_serverlist_locked(
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
false /* is_balancer */,
- NULL /* balancer_name */, user_data);
+ nullptr /* balancer_name */, user_data);
++addr_idx;
}
GPR_ASSERT(addr_idx == num_valid);
@@ -637,7 +637,7 @@ static bool pick_from_internal_rr_locked(
const grpc_lb_policy_pick_args* pick_args, bool force_async,
grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
// Check for drops if we are not using fallback backend addresses.
- if (glb_policy->serverlist != NULL) {
+ if (glb_policy->serverlist != nullptr) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server* server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
@@ -660,7 +660,7 @@ static bool pick_from_internal_rr_locked(
server->load_balance_token, wc_arg->client_stats);
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
@@ -685,11 +685,11 @@ static bool pick_from_internal_rr_locked(
pick_args->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
// Pass on client stats via context. Passes ownership of the reference.
- GPR_ASSERT(wc_arg->client_stats != NULL);
+ GPR_ASSERT(wc_arg->client_stats != nullptr);
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
gpr_free(wc_arg->free_when_done);
return false;
@@ -706,7 +706,7 @@ static bool pick_from_internal_rr_locked(
static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
grpc_lb_addresses* addresses;
- if (glb_policy->serverlist != NULL) {
+ if (glb_policy->serverlist != nullptr) {
GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
} else {
@@ -714,10 +714,10 @@ static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
// serverlist from the balancer, we use the fallback backends returned by
// the resolver. Note that the fallback backend list may be empty, in which
// case the new round_robin policy will keep the requested picks pending.
- GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
- GPR_ASSERT(addresses != NULL);
+ GPR_ASSERT(addresses != nullptr);
grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
@@ -742,11 +742,11 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* error);
static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
grpc_lb_policy_args* args) {
- GPR_ASSERT(glb_policy->rr_policy == NULL);
+ GPR_ASSERT(glb_policy->rr_policy == nullptr);
grpc_lb_policy* new_rr_policy =
grpc_lb_policy_create(exec_ctx, "round_robin", args);
- if (new_rr_policy == NULL) {
+ if (new_rr_policy == nullptr) {
gpr_log(GPR_ERROR,
"Failure creating a RoundRobin policy for serverlist update with "
"%lu entries. The previous RR instance (%p), if any, will continue "
@@ -757,7 +757,7 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
return;
}
glb_policy->rr_policy = new_rr_policy;
- grpc_error* rr_state_error = NULL;
+ grpc_error* rr_state_error = nullptr;
const grpc_connectivity_state rr_state =
grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
&rr_state_error);
@@ -824,8 +824,8 @@ static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
if (glb_policy->shutting_down) return;
grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
- GPR_ASSERT(args != NULL);
- if (glb_policy->rr_policy != NULL) {
+ GPR_ASSERT(args != nullptr);
+ if (glb_policy->rr_policy != nullptr) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
(void*)glb_policy->rr_policy);
@@ -857,7 +857,7 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
* sink, policies can't transition back from it. */
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
"rr_connectivity_shutdown");
- glb_policy->rr_policy = NULL;
+ glb_policy->rr_policy = nullptr;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"glb_rr_connectivity_cb");
gpr_free(rr_connectivity);
@@ -912,7 +912,7 @@ static grpc_channel_args* build_lb_channel_args(
* instantiated and used in that case. Otherwise, something has gone wrong. */
GPR_ASSERT(num_grpclb_addrs > 0);
grpc_lb_addresses* lb_addresses =
- grpc_lb_addresses_create(num_grpclb_addrs, NULL);
+ grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
grpc_slice_hash_table_entry* targets_info_entries =
(grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
num_grpclb_addrs);
@@ -920,7 +920,7 @@ static grpc_channel_args* build_lb_channel_args(
size_t lb_addresses_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (!addresses->addresses[i].is_balancer) continue;
- if (addresses->addresses[i].user_data != NULL) {
+ if (addresses->addresses[i].user_data != nullptr) {
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
}
@@ -934,7 +934,7 @@ static grpc_channel_args* build_lb_channel_args(
grpc_lb_addresses_set_address(
lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
addresses->addresses[i].address.len, false /* is balancer */,
- addresses->addresses[i].balancer_name, NULL /* user data */);
+ addresses->addresses[i].balancer_name, nullptr /* user data */);
}
GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
grpc_slice_hash_table* targets_info =
@@ -959,18 +959,18 @@ static grpc_channel_args* build_lb_channel_args(
static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
- GPR_ASSERT(glb_policy->pending_picks == NULL);
- GPR_ASSERT(glb_policy->pending_pings == NULL);
+ GPR_ASSERT(glb_policy->pending_picks == nullptr);
+ GPR_ASSERT(glb_policy->pending_pings == nullptr);
gpr_free((void*)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
- if (glb_policy->client_stats != NULL) {
+ if (glb_policy->client_stats != nullptr) {
grpc_grpclb_client_stats_unref(glb_policy->client_stats);
}
grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
- if (glb_policy->serverlist != NULL) {
+ if (glb_policy->serverlist != nullptr) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
- if (glb_policy->fallback_backend_addresses != NULL) {
+ if (glb_policy->fallback_backend_addresses != nullptr) {
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
}
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
@@ -991,8 +991,8 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
* because glb_policy->lb_call is only assigned in lb_call_init_locked as part
* of query_for_backends_locked, which can only be invoked while
* glb_policy->shutting_down is false. */
- if (lb_call != NULL) {
- grpc_call_cancel(lb_call, NULL);
+ if (lb_call != nullptr) {
+ grpc_call_cancel(lb_call, nullptr);
/* lb_on_server_status_received will pick up the cancel and clean up */
}
if (glb_policy->retry_timer_active) {
@@ -1005,27 +1005,27 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
}
pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = NULL;
+ glb_policy->pending_picks = nullptr;
pending_ping* pping = glb_policy->pending_pings;
- glb_policy->pending_pings = NULL;
- if (glb_policy->rr_policy != NULL) {
+ glb_policy->pending_pings = nullptr;
+ if (glb_policy->rr_policy != nullptr) {
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
}
// We destroy the LB channel here because
// glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
// instance. Destroying the lb channel in glb_destroy would likely result in
// a callback invocation without a valid glb_policy arg.
- if (glb_policy->lb_channel != NULL) {
+ if (glb_policy->lb_channel != nullptr) {
grpc_channel_destroy(glb_policy->lb_channel);
- glb_policy->lb_channel = NULL;
+ glb_policy->lb_channel = nullptr;
}
grpc_connectivity_state_set(
exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
- while (pp != NULL) {
+ while (pp != nullptr) {
pending_pick* next = pp->next;
- *pp->target = NULL;
+ *pp->target = nullptr;
GRPC_CLOSURE_SCHED(
exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
@@ -1033,7 +1033,7 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
pp = next;
}
- while (pping != NULL) {
+ while (pping != nullptr) {
pending_ping* next = pping->next;
GRPC_CLOSURE_SCHED(
exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
@@ -1058,11 +1058,11 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = NULL;
- while (pp != NULL) {
+ glb_policy->pending_picks = nullptr;
+ while (pp != nullptr) {
pending_pick* next = pp->next;
if (pp->target == target) {
- *target = NULL;
+ *target = nullptr;
GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
@@ -1072,7 +1072,7 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
}
pp = next;
}
- if (glb_policy->rr_policy != NULL) {
+ if (glb_policy->rr_policy != nullptr) {
grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
GRPC_ERROR_REF(error));
}
@@ -1096,8 +1096,8 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
pending_pick* pp = glb_policy->pending_picks;
- glb_policy->pending_picks = NULL;
- while (pp != NULL) {
+ glb_policy->pending_picks = nullptr;
+ while (pp != nullptr) {
pending_pick* next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
@@ -1110,7 +1110,7 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
}
pp = next;
}
- if (glb_policy->rr_policy != NULL) {
+ if (glb_policy->rr_policy != nullptr) {
grpc_lb_policy_cancel_picks_locked(
exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
initial_metadata_flags_eq, GRPC_ERROR_REF(error));
@@ -1126,7 +1126,7 @@ static void start_picking_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
- glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
+ glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
grpc_millis deadline =
grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
@@ -1155,8 +1155,8 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_connected_subchannel** target,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
- if (pick_args->lb_token_mdelem_storage == NULL) {
- *target = NULL;
+ if (pick_args->lb_token_mdelem_storage == nullptr) {
+ *target = nullptr;
GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No mdelem storage for the LB token. Load reporting "
@@ -1165,10 +1165,10 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
}
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
bool pick_done = false;
- if (glb_policy->rr_policy != NULL) {
+ if (glb_policy->rr_policy != nullptr) {
const grpc_connectivity_state rr_connectivity_state =
grpc_lb_policy_check_connectivity_locked(exec_ctx,
- glb_policy->rr_policy, NULL);
+ glb_policy->rr_policy, nullptr);
// The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
// callback registered to capture this event
// (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
@@ -1197,7 +1197,7 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
wc_arg->rr_policy = glb_policy->rr_policy;
wc_arg->target = target;
wc_arg->context = context;
- GPR_ASSERT(glb_policy->client_stats != NULL);
+ GPR_ASSERT(glb_policy->client_stats != nullptr);
wc_arg->client_stats =
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
wc_arg->wrapped_closure = on_complete;
@@ -1264,7 +1264,7 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
(void*)glb_policy);
}
- GPR_ASSERT(glb_policy->lb_call == NULL);
+ GPR_ASSERT(glb_policy->lb_call == nullptr);
query_for_backends_locked(exec_ctx, glb_policy);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
@@ -1325,8 +1325,8 @@ static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
- glb_policy->client_load_report_payload = NULL;
- if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
+ glb_policy->client_load_report_payload = nullptr;
+ if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
@@ -1344,23 +1344,23 @@ static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
request->client_stats.num_calls_finished_with_client_failed_to_send ==
0 &&
request->client_stats.num_calls_finished_known_received == 0 &&
- (drop_entries == NULL || drop_entries->num_entries == 0);
+ (drop_entries == nullptr || drop_entries->num_entries == 0);
}
static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
+ if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
- if (glb_policy->lb_call == NULL) {
+ if (glb_policy->lb_call == nullptr) {
maybe_restart_lb_call(exec_ctx, glb_policy);
}
return;
}
// Construct message payload.
- GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
+ GPR_ASSERT(glb_policy->client_load_report_payload == nullptr);
grpc_grpclb_request* request =
grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
// Skip client load report if the counters were all zero in the last
@@ -1403,9 +1403,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error);
static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
- GPR_ASSERT(glb_policy->server_name != NULL);
+ GPR_ASSERT(glb_policy->server_name != nullptr);
GPR_ASSERT(glb_policy->server_name[0] != '\0');
- GPR_ASSERT(glb_policy->lb_call == NULL);
+ GPR_ASSERT(glb_policy->lb_call == nullptr);
GPR_ASSERT(!glb_policy->shutting_down);
/* Note the following LB call progresses every time there's activity in \a
@@ -1417,13 +1417,13 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
? GRPC_MILLIS_INF_FUTURE
: grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
glb_policy->lb_call = grpc_channel_create_pollset_set_call(
- exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
+ exec_ctx, glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
- &host, deadline, NULL);
+ &host, deadline, nullptr);
grpc_slice_unref_internal(exec_ctx, host);
- if (glb_policy->client_stats != NULL) {
+ if (glb_policy->client_stats != nullptr) {
grpc_grpclb_client_stats_unref(glb_policy->client_stats);
}
glb_policy->client_stats = grpc_grpclb_client_stats_create();
@@ -1459,9 +1459,9 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
- GPR_ASSERT(glb_policy->lb_call != NULL);
+ GPR_ASSERT(glb_policy->lb_call != nullptr);
grpc_call_unref(glb_policy->lb_call);
- glb_policy->lb_call = NULL;
+ glb_policy->lb_call = nullptr;
grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
@@ -1479,7 +1479,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
*/
static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy) {
- GPR_ASSERT(glb_policy->lb_channel != NULL);
+ GPR_ASSERT(glb_policy->lb_channel != nullptr);
if (glb_policy->shutting_down) return;
lb_call_init_locked(exec_ctx, glb_policy);
@@ -1490,7 +1490,7 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
(void*)glb_policy, (void*)glb_policy->lb_channel,
(void*)glb_policy->lb_call);
}
- GPR_ASSERT(glb_policy->lb_call != NULL);
+ GPR_ASSERT(glb_policy->lb_call != nullptr);
grpc_call_error call_error;
grpc_op ops[3];
@@ -1500,22 +1500,22 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata =
&glb_policy->lb_initial_metadata_recv;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
- GPR_ASSERT(glb_policy->lb_request_payload != NULL);
+ GPR_ASSERT(glb_policy->lb_request_payload != nullptr);
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = glb_policy->lb_request_payload;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
- ops, (size_t)(op - ops), NULL);
+ ops, (size_t)(op - ops), nullptr);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
@@ -1526,7 +1526,7 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
op->data.recv_status_on_client.status_details =
&glb_policy->lb_call_status_details;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
/* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
* count goes to zero) to be unref'd in lb_on_server_status_received_locked */
@@ -1541,7 +1541,7 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
/* take another weak ref to be unref'd/reused in
* lb_on_response_received_locked */
@@ -1558,7 +1558,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op* op = ops;
- if (glb_policy->lb_response_payload != NULL) {
+ if (glb_policy->lb_response_payload != nullptr) {
grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
* glb_policy->lb_response_payload, for a serverlist. */
@@ -1568,10 +1568,10 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
grpc_byte_buffer_reader_destroy(&bbr);
grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
- grpc_grpclb_initial_response* response = NULL;
+ grpc_grpclb_initial_response* response = nullptr;
if (!glb_policy->seen_initial_response &&
(response = grpc_grpclb_initial_response_parse(response_slice)) !=
- NULL) {
+ nullptr) {
if (response->has_client_stats_report_interval) {
glb_policy->client_stats_report_interval = GPR_MAX(
GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
@@ -1598,8 +1598,8 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
} else {
grpc_grpclb_serverlist* serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
- if (serverlist != NULL) {
- GPR_ASSERT(glb_policy->lb_call != NULL);
+ if (serverlist != nullptr) {
+ GPR_ASSERT(glb_policy->lb_call != nullptr);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Serverlist with %lu servers received",
(unsigned long)serverlist->num_servers);
@@ -1622,14 +1622,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
}
grpc_grpclb_destroy_serverlist(serverlist);
} else { /* new serverlist */
- if (glb_policy->serverlist != NULL) {
+ if (glb_policy->serverlist != nullptr) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
} else {
/* or dispose of the fallback */
grpc_lb_addresses_destroy(exec_ctx,
glb_policy->fallback_backend_addresses);
- glb_policy->fallback_backend_addresses = NULL;
+ glb_policy->fallback_backend_addresses = nullptr;
if (glb_policy->fallback_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
glb_policy->fallback_timer_active = false;
@@ -1659,7 +1659,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
- op->reserved = NULL;
+ op->reserved = nullptr;
op++;
/* reuse the "lb_on_response_received_locked" weak ref taken in
* query_for_backends_locked() */
@@ -1685,14 +1685,14 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
glb_policy->fallback_timer_active = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
- if (glb_policy->serverlist == NULL) {
+ if (glb_policy->serverlist == nullptr) {
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Falling back to use backends from resolver (grpclb %p)",
(void*)glb_policy);
}
- GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
rr_handover_locked(exec_ctx, glb_policy);
}
}
@@ -1703,7 +1703,7 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
void* arg, grpc_error* error) {
glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
- GPR_ASSERT(glb_policy->lb_call != NULL);
+ GPR_ASSERT(glb_policy->lb_call != nullptr);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
char* status_details =
grpc_slice_to_c_string(glb_policy->lb_call_status_details);
@@ -1727,7 +1727,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
glb_lb_policy* glb_policy,
const grpc_lb_addresses* addresses) {
- GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses =
extract_backend_addresses_locked(exec_ctx, addresses);
@@ -1742,8 +1742,8 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
- if (glb_policy->lb_channel == NULL) {
+ if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+ if (glb_policy->lb_channel == nullptr) {
// If we don't have a current channel to the LB, go into TRANSIENT
// FAILURE.
grpc_connectivity_state_set(
@@ -1763,10 +1763,10 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
(const grpc_lb_addresses*)arg->value.pointer.p;
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
- if (glb_policy->serverlist == NULL) {
+ if (glb_policy->serverlist == nullptr) {
fallback_update_locked(exec_ctx, glb_policy, addresses);
}
- GPR_ASSERT(glb_policy->lb_channel != NULL);
+ GPR_ASSERT(glb_policy->lb_channel != nullptr);
// Propagate updates to the LB channel (pick_first) through the fake
// resolver.
grpc_channel_args* lb_channel_args = build_lb_channel_args(
@@ -1789,7 +1789,7 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_polling_entity_create_from_pollset_set(
glb_policy->base.interested_parties),
&glb_policy->lb_channel_connectivity,
- &glb_policy->lb_channel_on_connectivity_changed, NULL);
+ &glb_policy->lb_channel_on_connectivity_changed, nullptr);
}
}
@@ -1817,7 +1817,7 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
grpc_polling_entity_create_from_pollset_set(
glb_policy->base.interested_parties),
&glb_policy->lb_channel_connectivity,
- &glb_policy->lb_channel_on_connectivity_changed, NULL);
+ &glb_policy->lb_channel_on_connectivity_changed, nullptr);
break;
}
case GRPC_CHANNEL_IDLE:
@@ -1825,9 +1825,9 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
// call to kick the lb channel into gear.
/* fallthrough */
case GRPC_CHANNEL_READY:
- if (glb_policy->lb_call != NULL) {
+ if (glb_policy->lb_call != nullptr) {
glb_policy->updating_lb_call = true;
- grpc_call_cancel(glb_policy->lb_call, NULL);
+ grpc_call_cancel(glb_policy->lb_call, nullptr);
// lb_on_server_status_received() will pick up the cancel and reinit
// lb_call.
} else if (glb_policy->started_picking && !glb_policy->shutting_down) {
@@ -1866,21 +1866,21 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
/* Count the number of gRPC-LB addresses. There must be at least one. */
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
- return NULL;
+ if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+ return nullptr;
}
grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
- if (num_grpclb_addrs == 0) return NULL;
+ if (num_grpclb_addrs == 0) return nullptr;
glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
- GPR_ASSERT(arg != NULL);
+ GPR_ASSERT(arg != nullptr);
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
@@ -1893,7 +1893,7 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
grpc_uri_destroy(uri);
glb_policy->cc_factory = args->client_channel_factory;
- GPR_ASSERT(glb_policy->cc_factory != NULL);
+ GPR_ASSERT(glb_policy->cc_factory != nullptr);
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
glb_policy->lb_call_timeout_ms =
@@ -1931,11 +1931,11 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
exec_ctx, glb_policy->response_generator, lb_channel_args);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
gpr_free(uri_str);
- if (glb_policy->lb_channel == NULL) {
+ if (glb_policy->lb_channel == nullptr) {
gpr_free((void*)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
gpr_free(glb_policy);
- return NULL;
+ return nullptr;
}
grpc_subchannel_index_ref();
GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
@@ -1969,10 +1969,10 @@ static bool maybe_add_client_load_reporting_filter(
grpc_channel_stack_builder_get_channel_arguments(builder);
const grpc_arg* channel_arg =
grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
- if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
+ if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
strcmp(channel_arg->value.string, "grpclb") == 0) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter*)arg, NULL, NULL);
+ builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
}
return true;
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
index 2dcf29fe0e..8eaa90e97b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
@@ -35,7 +35,7 @@ grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
grpc_channel_args* new_args = args;
grpc_channel_credentials* channel_credentials =
grpc_channel_credentials_find_in_args(args);
- if (channel_credentials != NULL) {
+ if (channel_credentials != nullptr) {
/* Substitute the channel credentials with a version without call
* credentials: the load balancer is not necessarily trusted to handle
* bearer token credentials */
@@ -43,7 +43,7 @@ grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
grpc_channel_credentials* creds_sans_call_creds =
grpc_channel_credentials_duplicate_without_call_credentials(
channel_credentials);
- GPR_ASSERT(creds_sans_call_creds != NULL);
+ GPR_ASSERT(creds_sans_call_creds != nullptr);
grpc_arg args_to_add[] = {
grpc_channel_credentials_to_arg(creds_sans_call_creds)};
/* Create the new set of channel args */
@@ -55,7 +55,7 @@ grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
exec_ctx, client_channel_factory, lb_service_target_addresses,
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, new_args);
- if (channel_credentials != NULL) {
+ if (channel_credentials != nullptr) {
grpc_channel_args_destroy(exec_ctx, new_args);
}
return lb_channel;
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
index 903120ca7d..e19a6a71aa 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
@@ -87,7 +87,7 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
gpr_atm_full_fetch_add(&client_stats->num_calls_started, (gpr_atm)1);
gpr_atm_full_fetch_add(&client_stats->num_calls_finished, (gpr_atm)1);
// Record the drop.
- if (client_stats->drop_token_counts == NULL) {
+ if (client_stats->drop_token_counts == nullptr) {
client_stats->drop_token_counts =
(grpc_grpclb_dropped_call_counts*)gpr_zalloc(
sizeof(grpc_grpclb_dropped_call_counts));
@@ -136,12 +136,12 @@ void grpc_grpclb_client_stats_get_locked(
num_calls_finished_known_received,
&client_stats->num_calls_finished_known_received);
*drop_token_counts = client_stats->drop_token_counts;
- client_stats->drop_token_counts = NULL;
+ client_stats->drop_token_counts = nullptr;
}
void grpc_grpclb_dropped_call_counts_destroy(
grpc_grpclb_dropped_call_counts* drop_entries) {
- if (drop_entries != NULL) {
+ if (drop_entries != nullptr) {
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
gpr_free(drop_entries->token_counts[i].token);
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index 87d7336b0c..2c8d7f4291 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -89,7 +89,7 @@ static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field,
void* const* arg) {
grpc_grpclb_dropped_call_counts* drop_entries =
(grpc_grpclb_dropped_call_counts*)*arg;
- if (drop_entries == NULL) return true;
+ if (drop_entries == nullptr) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
grpc_lb_v1_ClientStatsPerToken drop_message;
@@ -165,10 +165,10 @@ grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
memset(&res, 0, sizeof(grpc_grpclb_response));
if (!pb_decode(&stream, grpc_lb_v1_LoadBalanceResponse_fields, &res)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream));
- return NULL;
+ return nullptr;
}
- if (!res.has_initial_response) return NULL;
+ if (!res.has_initial_response) return nullptr;
grpc_grpclb_initial_response* initial_res =
(grpc_grpclb_initial_response*)gpr_malloc(
@@ -196,7 +196,7 @@ grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
if (!status) {
gpr_free(sl);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream));
- return NULL;
+ return nullptr;
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
@@ -212,7 +212,7 @@ grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
if (!status) {
grpc_grpclb_destroy_serverlist(sl);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream));
- return NULL;
+ return nullptr;
}
}
if (res.server_list.has_expiration_interval) {
@@ -222,7 +222,7 @@ grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
}
void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist) {
- if (serverlist == NULL) {
+ if (serverlist == nullptr) {
return;
}
for (size_t i = 0; i < serverlist->num_servers; i++) {
@@ -251,7 +251,7 @@ grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs,
const grpc_grpclb_serverlist* rhs) {
- if (lhs == NULL || rhs == NULL) {
+ if (lhs == nullptr || rhs == nullptr) {
return false;
}
if (lhs->num_servers != rhs->num_servers) {
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 125a4186aa..a6a9a2645f 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -60,9 +60,9 @@ typedef struct {
static void pf_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
- GPR_ASSERT(p->subchannel_list == NULL);
- GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
- GPR_ASSERT(p->pending_picks == NULL);
+ GPR_ASSERT(p->subchannel_list == nullptr);
+ GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
+ GPR_ASSERT(p->pending_picks == nullptr);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p);
grpc_subchannel_index_unref();
@@ -78,24 +78,24 @@ static void shutdown_locked(grpc_exec_ctx* exec_ctx, pick_first_lb_policy* p,
}
p->shutdown = true;
pending_pick* pp;
- while ((pp = p->pending_picks) != NULL) {
+ while ((pp = p->pending_picks) != nullptr) {
p->pending_picks = pp->next;
- *pp->target = NULL;
+ *pp->target = nullptr;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"shutdown");
- if (p->subchannel_list != NULL) {
+ if (p->subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"pf_shutdown");
- p->subchannel_list = NULL;
+ p->subchannel_list = nullptr;
}
- if (p->latest_pending_subchannel_list != NULL) {
+ if (p->latest_pending_subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "pf_shutdown");
- p->latest_pending_subchannel_list = NULL;
+ p->latest_pending_subchannel_list = nullptr;
}
GRPC_ERROR_UNREF(error);
}
@@ -110,11 +110,11 @@ static void pf_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
- p->pending_picks = NULL;
- while (pp != NULL) {
+ p->pending_picks = nullptr;
+ while (pp != nullptr) {
pending_pick* next = pp->next;
if (pp->target == target) {
- *target = NULL;
+ *target = nullptr;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
@@ -134,8 +134,8 @@ static void pf_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
- p->pending_picks = NULL;
- while (pp != NULL) {
+ p->pending_picks = nullptr;
+ while (pp != nullptr) {
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
@@ -155,7 +155,7 @@ static void pf_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
static void start_picking_locked(grpc_exec_ctx* exec_ctx,
pick_first_lb_policy* p) {
p->started_picking = true;
- if (p->subchannel_list != NULL && p->subchannel_list->num_subchannels > 0) {
+ if (p->subchannel_list != nullptr && p->subchannel_list->num_subchannels > 0) {
p->subchannel_list->checking_subchannel = 0;
grpc_lb_subchannel_list_ref_for_connectivity_watch(
p->subchannel_list, "connectivity_watch+start_picking");
@@ -178,7 +178,7 @@ static int pf_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_closure* on_complete) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously.
- if (p->selected != NULL) {
+ if (p->selected != nullptr) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
"picked");
return 1;
@@ -242,8 +242,8 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
- if (p->subchannel_list == NULL) {
+ if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+ if (p->subchannel_list == nullptr) {
// If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
@@ -274,18 +274,18 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"pf_update_empty");
- if (p->subchannel_list != NULL) {
+ if (p->subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_empty_update");
}
p->subchannel_list = subchannel_list; // Empty list.
- p->selected = NULL;
+ p->selected = nullptr;
return;
}
- if (p->selected == NULL) {
+ if (p->selected == nullptr) {
// We don't yet have a selected subchannel, so replace the current
// subchannel list immediately.
- if (p->subchannel_list != NULL) {
+ if (p->subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"pf_update_before_selected");
}
@@ -307,12 +307,12 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
grpc_lb_subchannel_list_ref_for_connectivity_watch(
subchannel_list, "connectivity_watch+replace_selected");
grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
- if (p->subchannel_list != NULL) {
+ if (p->subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "pf_update_includes_selected");
}
p->subchannel_list = subchannel_list;
- if (p->selected->connected_subchannel != NULL) {
+ if (p->selected->connected_subchannel != nullptr) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
p->selected->connected_subchannel, "pf_update_includes_selected");
}
@@ -321,11 +321,11 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
// If there was a previously pending update (which may or may
// not have contained the currently selected subchannel), drop
// it, so that it doesn't override what we've done here.
- if (p->latest_pending_subchannel_list != NULL) {
+ if (p->latest_pending_subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
"pf_update_includes_selected+outdated");
- p->latest_pending_subchannel_list = NULL;
+ p->latest_pending_subchannel_list = nullptr;
}
return;
}
@@ -334,7 +334,7 @@ static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
// pending subchannel list to the new subchannel list. We will wait
// for it to report READY before swapping it into the current
// subchannel list.
- if (p->latest_pending_subchannel_list != NULL) {
+ if (p->latest_pending_subchannel_list != nullptr) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG,
"Pick First %p Shutting down latest pending subchannel list "
@@ -402,12 +402,12 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
// If the new state is anything other than READY and there is a
// pending update, switch to the pending update.
if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
- p->latest_pending_subchannel_list != NULL) {
- p->selected = NULL;
+ p->latest_pending_subchannel_list != nullptr) {
+ p->selected = nullptr;
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "selected_not_ready+switch_to_update");
p->subchannel_list = p->latest_pending_subchannel_list;
- p->latest_pending_subchannel_list = NULL;
+ p->latest_pending_subchannel_list = nullptr;
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
@@ -450,11 +450,11 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
// Case 2. Promote p->latest_pending_subchannel_list to
// p->subchannel_list.
if (sd->subchannel_list == p->latest_pending_subchannel_list) {
- GPR_ASSERT(p->subchannel_list != NULL);
+ GPR_ASSERT(p->subchannel_list != nullptr);
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "finish_update");
p->subchannel_list = p->latest_pending_subchannel_list;
- p->latest_pending_subchannel_list = NULL;
+ p->latest_pending_subchannel_list = nullptr;
}
// Cases 1 and 2.
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
@@ -495,7 +495,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
sd->subchannel_list->num_subchannels;
sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
- } while (sd->subchannel == NULL);
+ } while (sd->subchannel == nullptr);
// Case 1: Only set state to TRANSIENT_FAILURE if we've tried
// all subchannels.
if (sd->subchannel_list->checking_subchannel == 0 &&
@@ -537,7 +537,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
sd->subchannel_list->num_subchannels;
sd = &sd->subchannel_list
->subchannels[sd->subchannel_list->checking_subchannel];
- } while (sd->subchannel == NULL && sd != original_sd);
+ } while (sd->subchannel == nullptr && sd != original_sd);
if (sd == original_sd) {
grpc_lb_subchannel_list_unref_for_connectivity_watch(
exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
@@ -585,7 +585,7 @@ static void pick_first_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy* create_pick_first(grpc_exec_ctx* exec_ctx,
grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
- GPR_ASSERT(args->client_channel_factory != NULL);
+ GPR_ASSERT(args->client_channel_factory != nullptr);
pick_first_lb_policy* p = (pick_first_lb_policy*)gpr_zalloc(sizeof(*p));
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p created.", (void*)p);
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index df235922c8..6ea1f025df 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -100,7 +100,7 @@ typedef struct round_robin_lb_policy {
* The caller must do that if it returns a pick. */
static size_t get_next_ready_subchannel_index_locked(
const round_robin_lb_policy* p) {
- GPR_ASSERT(p->subchannel_list != NULL);
+ GPR_ASSERT(p->subchannel_list != nullptr);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p] getting next ready subchannel (out of %lu), "
@@ -161,8 +161,8 @@ static void rr_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
(void*)pol, (void*)pol);
}
- GPR_ASSERT(p->subchannel_list == NULL);
- GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
+ GPR_ASSERT(p->subchannel_list == nullptr);
+ GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
grpc_subchannel_index_unref();
gpr_free(p);
@@ -175,25 +175,25 @@ static void shutdown_locked(grpc_exec_ctx* exec_ctx, round_robin_lb_policy* p,
}
p->shutdown = true;
pending_pick* pp;
- while ((pp = p->pending_picks) != NULL) {
+ while ((pp = p->pending_picks) != nullptr) {
p->pending_picks = pp->next;
- *pp->target = NULL;
+ *pp->target = nullptr;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"rr_shutdown");
- if (p->subchannel_list != NULL) {
+ if (p->subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_rr_shutdown");
- p->subchannel_list = NULL;
+ p->subchannel_list = nullptr;
}
- if (p->latest_pending_subchannel_list != NULL) {
+ if (p->latest_pending_subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
"sl_shutdown_pending_rr_shutdown");
- p->latest_pending_subchannel_list = NULL;
+ p->latest_pending_subchannel_list = nullptr;
}
GRPC_ERROR_UNREF(error);
}
@@ -209,11 +209,11 @@ static void rr_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
- p->pending_picks = NULL;
- while (pp != NULL) {
+ p->pending_picks = nullptr;
+ while (pp != nullptr) {
pending_pick* next = pp->next;
if (pp->target == target) {
- *target = NULL;
+ *target = nullptr;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
@@ -233,12 +233,12 @@ static void rr_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
grpc_error* error) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
pending_pick* pp = p->pending_picks;
- p->pending_picks = NULL;
- while (pp != NULL) {
+ p->pending_picks = nullptr;
+ while (pp != nullptr) {
pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- *pp->target = NULL;
+ *pp->target = nullptr;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
@@ -281,7 +281,7 @@ static int rr_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
p->shutdown);
}
GPR_ASSERT(!p->shutdown);
- if (p->subchannel_list != NULL) {
+ if (p->subchannel_list != nullptr) {
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
/* readily available, report right away */
@@ -289,7 +289,7 @@ static int rr_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
&p->subchannel_list->subchannels[next_ready_index];
*target =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
- if (user_data != NULL) {
+ if (user_data != nullptr) {
*user_data = sd->user_data;
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
@@ -472,7 +472,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
}
} else { // sd not in SHUTDOWN
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
- if (sd->connected_subchannel == NULL) {
+ if (sd->connected_subchannel == nullptr) {
sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(sd->subchannel),
"connected");
@@ -486,7 +486,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
GPR_ASSERT(!sd->subchannel_list->shutting_down);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
const unsigned long num_subchannels =
- p->subchannel_list != NULL
+ p->subchannel_list != nullptr
? (unsigned long)p->subchannel_list->num_subchannels
: 0;
gpr_log(GPR_DEBUG,
@@ -495,13 +495,13 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
(void*)p, (void*)p->subchannel_list, num_subchannels,
(void*)sd->subchannel_list, num_subchannels);
}
- if (p->subchannel_list != NULL) {
+ if (p->subchannel_list != nullptr) {
// dispose of the current subchannel_list
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "sl_phase_out_shutdown");
}
p->subchannel_list = p->latest_pending_subchannel_list;
- p->latest_pending_subchannel_list = NULL;
+ p->latest_pending_subchannel_list = nullptr;
}
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
@@ -510,7 +510,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
grpc_lb_subchannel_data* selected =
&p->subchannel_list->subchannels[next_ready_index];
- if (p->pending_picks != NULL) {
+ if (p->pending_picks != nullptr) {
// if the selected subchannel is going to be used for the pending
// picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index);
@@ -520,7 +520,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_picked");
- if (pp->user_data != NULL) {
+ if (pp->user_data != nullptr) {
*pp->user_data = selected->user_data;
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
@@ -577,11 +577,11 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+ if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
// If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
// Otherwise, keep using the current subchannel list (ignore this update).
- if (p->subchannel_list == NULL) {
+ if (p->subchannel_list == nullptr) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
@@ -602,7 +602,7 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"rr_update_empty");
- if (p->subchannel_list != NULL) {
+ if (p->subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
"sl_shutdown_empty_update");
}
@@ -610,7 +610,7 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
return;
}
if (p->started_picking) {
- if (p->latest_pending_subchannel_list != NULL) {
+ if (p->latest_pending_subchannel_list != nullptr) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG,
"[RR %p] Shutting down latest pending subchannel list %p, "
@@ -635,7 +635,7 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
} else {
// The policy isn't picking yet. Save the update for later, disposing of
// previous version if any.
- if (p->subchannel_list != NULL) {
+ if (p->subchannel_list != nullptr) {
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->subchannel_list, "rr_update_before_started_picking");
}
@@ -662,7 +662,7 @@ static void round_robin_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy* round_robin_create(grpc_exec_ctx* exec_ctx,
grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
- GPR_ASSERT(args->client_channel_factory != NULL);
+ GPR_ASSERT(args->client_channel_factory != nullptr);
round_robin_lb_policy* p = (round_robin_lb_policy*)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index db38ef5305..f53abb7d96 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -31,7 +31,7 @@
void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
grpc_lb_subchannel_data* sd,
const char* reason) {
- if (sd->subchannel != NULL) {
+ if (sd->subchannel != nullptr) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
@@ -42,16 +42,16 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
- sd->subchannel = NULL;
- if (sd->connected_subchannel != NULL) {
+ sd->subchannel = nullptr;
+ if (sd->connected_subchannel != nullptr) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, sd->connected_subchannel,
reason);
- sd->connected_subchannel = NULL;
+ sd->connected_subchannel = nullptr;
}
- if (sd->user_data != NULL) {
- GPR_ASSERT(sd->user_data_vtable != NULL);
+ if (sd->user_data != nullptr) {
+ GPR_ASSERT(sd->user_data_vtable != nullptr);
sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
- sd->user_data = NULL;
+ sd->user_data = nullptr;
}
}
}
@@ -126,7 +126,7 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
grpc_channel_args_destroy(exec_ctx, new_args);
- if (subchannel == NULL) {
+ if (subchannel == nullptr) {
// Subchannel could not be created.
if (GRPC_TRACER_ON(*tracer)) {
char* address_uri =
@@ -162,7 +162,7 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
sd->pending_connectivity_state_unsafe = GRPC_CHANNEL_IDLE;
sd->user_data_vtable = addresses->user_data_vtable;
- if (sd->user_data_vtable != NULL) {
+ if (sd->user_data_vtable != nullptr) {
sd->user_data =
sd->user_data_vtable->copy(addresses->addresses[i].user_data);
}
@@ -240,7 +240,7 @@ static void subchannel_data_cancel_connectivity_watch(
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
- grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
+ grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, nullptr, nullptr,
&sd->connectivity_changed_closure);
}
@@ -261,7 +261,7 @@ void grpc_lb_subchannel_list_shutdown_and_unref(
// Otherwise, unref the subchannel directly.
if (sd->connectivity_notification_pending) {
subchannel_data_cancel_connectivity_watch(exec_ctx, sd, reason);
- } else if (sd->subchannel != NULL) {
+ } else if (sd->subchannel != nullptr) {
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, reason);
}
}