From 4782d92b2d4fab261b5520a29d79ba97fea9ce7b Mon Sep 17 00:00:00 2001
From: Craig Tiller
Date: Fri, 10 Nov 2017 09:53:21 -0800
Subject: s/NULL/nullptr

---
 .../client_channel/lb_policy/grpclb/grpclb.cc      | 208 ++++++++++-----------
 1 file changed, 104 insertions(+), 104 deletions(-)

diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 01b243bc3e..a3282b5d93 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -133,7 +133,7 @@ grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
 static grpc_error* initial_metadata_add_lb_token(
     grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
     grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
-  GPR_ASSERT(lb_token_mdelem_storage != NULL);
+  GPR_ASSERT(lb_token_mdelem_storage != nullptr);
   GPR_ASSERT(!GRPC_MDISNULL(lb_token));
   return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
                                       lb_token_mdelem_storage, lb_token);
@@ -186,14 +186,14 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
                                grpc_error* error) {
   wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
 
-  GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+  GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
   GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
 
-  if (wc_arg->rr_policy != NULL) {
+  if (wc_arg->rr_policy != nullptr) {
     /* if *target is NULL, no pick has been made by the RR policy (eg, all
      * addresses failed to connect). There won't be any user_data/token
      * available */
-    if (*wc_arg->target != NULL) {
+    if (*wc_arg->target != nullptr) {
       if (!GRPC_MDISNULL(wc_arg->lb_token)) {
         initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
                                       wc_arg->lb_token_mdelem_storage,
@@ -206,7 +206,7 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
         abort();
       }
       // Pass on client stats via context. Passes ownership of the reference.
-      GPR_ASSERT(wc_arg->client_stats != NULL);
+      GPR_ASSERT(wc_arg->client_stats != nullptr);
       wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
       wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
     } else {
@@ -217,7 +217,7 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
   }
-  GPR_ASSERT(wc_arg->free_when_done != NULL);
+  GPR_ASSERT(wc_arg->free_when_done != nullptr);
   gpr_free(wc_arg->free_when_done);
 }
 
@@ -454,7 +454,7 @@ static void* lb_token_copy(void* token) {
              : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
 }
 static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
-  if (token != NULL) {
+  if (token != nullptr) {
     GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
   }
 }
@@ -537,7 +537,7 @@ static grpc_lb_addresses* process_serverlist_locked(
     grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
                                   false /* is_balancer */,
-                                  NULL /* balancer_name */, user_data);
+                                  nullptr /* balancer_name */, user_data);
     ++addr_idx;
   }
   GPR_ASSERT(addr_idx == num_valid);
@@ -637,7 +637,7 @@ static bool pick_from_internal_rr_locked(
     const grpc_lb_policy_pick_args* pick_args, bool force_async,
     grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
   // Check for drops if we are not using fallback backend addresses.
-  if (glb_policy->serverlist != NULL) {
+  if (glb_policy->serverlist != nullptr) {
    // Look at the index into the serverlist to see if we should drop this call.
     grpc_grpclb_server* server =
         glb_policy->serverlist->servers[glb_policy->serverlist_index++];
@@ -660,7 +660,7 @@ static bool pick_from_internal_rr_locked(
           server->load_balance_token, wc_arg->client_stats);
       grpc_grpclb_client_stats_unref(wc_arg->client_stats);
       if (force_async) {
-        GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+        GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
         GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
         gpr_free(wc_arg->free_when_done);
         return false;
@@ -685,11 +685,11 @@ static bool pick_from_internal_rr_locked(
                                   pick_args->lb_token_mdelem_storage,
                                   GRPC_MDELEM_REF(wc_arg->lb_token));
     // Pass on client stats via context. Passes ownership of the reference.
-    GPR_ASSERT(wc_arg->client_stats != NULL);
+    GPR_ASSERT(wc_arg->client_stats != nullptr);
     wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
     wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
     if (force_async) {
-      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+      GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
       GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
       gpr_free(wc_arg->free_when_done);
       return false;
@@ -706,7 +706,7 @@ static bool pick_from_internal_rr_locked(
 static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
                                                   glb_lb_policy* glb_policy) {
   grpc_lb_addresses* addresses;
-  if (glb_policy->serverlist != NULL) {
+  if (glb_policy->serverlist != nullptr) {
     GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
     addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
   } else {
@@ -714,10 +714,10 @@ static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
     // serverlist from the balancer, we use the fallback backends returned by
     // the resolver. Note that the fallback backend list may be empty, in which
     // case the new round_robin policy will keep the requested picks pending.
-    GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+    GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
     addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
   }
-  GPR_ASSERT(addresses != NULL);
+  GPR_ASSERT(addresses != nullptr);
   grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
   args->client_channel_factory = glb_policy->cc_factory;
   args->combiner = glb_policy->base.combiner;
@@ -742,11 +742,11 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
                                                void* arg, grpc_error* error);
 static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
                              grpc_lb_policy_args* args) {
-  GPR_ASSERT(glb_policy->rr_policy == NULL);
+  GPR_ASSERT(glb_policy->rr_policy == nullptr);
 
   grpc_lb_policy* new_rr_policy =
       grpc_lb_policy_create(exec_ctx, "round_robin", args);
-  if (new_rr_policy == NULL) {
+  if (new_rr_policy == nullptr) {
     gpr_log(GPR_ERROR,
             "Failure creating a RoundRobin policy for serverlist update with "
             "%lu entries. The previous RR instance (%p), if any, will continue "
@@ -757,7 +757,7 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
     return;
   }
   glb_policy->rr_policy = new_rr_policy;
-  grpc_error* rr_state_error = NULL;
+  grpc_error* rr_state_error = nullptr;
   const grpc_connectivity_state rr_state =
       grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
                                                &rr_state_error);
@@ -824,8 +824,8 @@ static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
                                glb_lb_policy* glb_policy) {
   if (glb_policy->shutting_down) return;
   grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
-  GPR_ASSERT(args != NULL);
-  if (glb_policy->rr_policy != NULL) {
+  GPR_ASSERT(args != nullptr);
+  if (glb_policy->rr_policy != nullptr) {
     if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
               (void*)glb_policy->rr_policy);
@@ -857,7 +857,7 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
      * sink, policies can't transition back from it. .*/
     GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
                          "rr_connectivity_shutdown");
-    glb_policy->rr_policy = NULL;
+    glb_policy->rr_policy = nullptr;
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                               "glb_rr_connectivity_cb");
     gpr_free(rr_connectivity);
@@ -912,7 +912,7 @@ static grpc_channel_args* build_lb_channel_args(
    * instantiated and used in that case. Otherwise, something has gone wrong. */
   GPR_ASSERT(num_grpclb_addrs > 0);
   grpc_lb_addresses* lb_addresses =
-      grpc_lb_addresses_create(num_grpclb_addrs, NULL);
+      grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
   grpc_slice_hash_table_entry* targets_info_entries =
       (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
                                                num_grpclb_addrs);
@@ -920,7 +920,7 @@ static grpc_channel_args* build_lb_channel_args(
   size_t lb_addresses_idx = 0;
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
     if (!addresses->addresses[i].is_balancer) continue;
-    if (addresses->addresses[i].user_data != NULL) {
+    if (addresses->addresses[i].user_data != nullptr) {
       gpr_log(GPR_ERROR,
              "This LB policy doesn't support user data. It will be ignored");
     }
@@ -934,7 +934,7 @@ static grpc_channel_args* build_lb_channel_args(
     grpc_lb_addresses_set_address(
         lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
         addresses->addresses[i].address.len, false /* is balancer */,
-        addresses->addresses[i].balancer_name, NULL /* user data */);
+        addresses->addresses[i].balancer_name, nullptr /* user data */);
   }
   GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
   grpc_slice_hash_table* targets_info =
@@ -959,18 +959,18 @@ static grpc_channel_args* build_lb_channel_args(
 
 static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
-  GPR_ASSERT(glb_policy->pending_picks == NULL);
-  GPR_ASSERT(glb_policy->pending_pings == NULL);
+  GPR_ASSERT(glb_policy->pending_picks == nullptr);
+  GPR_ASSERT(glb_policy->pending_pings == nullptr);
   gpr_free((void*)glb_policy->server_name);
   grpc_channel_args_destroy(exec_ctx, glb_policy->args);
-  if (glb_policy->client_stats != NULL) {
+  if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
   }
   grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
-  if (glb_policy->serverlist != NULL) {
+  if (glb_policy->serverlist != nullptr) {
     grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
   }
-  if (glb_policy->fallback_backend_addresses != NULL) {
+  if (glb_policy->fallback_backend_addresses != nullptr) {
     grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
   }
   grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
@@ -991,8 +991,8 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
    * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
    * of query_for_backends_locked, which can only be invoked while
    * glb_policy->shutting_down is false. */
-  if (lb_call != NULL) {
-    grpc_call_cancel(lb_call, NULL);
+  if (lb_call != nullptr) {
+    grpc_call_cancel(lb_call, nullptr);
     /* lb_on_server_status_received will pick up the cancel and clean up */
   }
   if (glb_policy->retry_timer_active) {
@@ -1005,27 +1005,27 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
   }
 
   pending_pick* pp = glb_policy->pending_picks;
-  glb_policy->pending_picks = NULL;
+  glb_policy->pending_picks = nullptr;
   pending_ping* pping = glb_policy->pending_pings;
-  glb_policy->pending_pings = NULL;
-  if (glb_policy->rr_policy != NULL) {
+  glb_policy->pending_pings = nullptr;
+  if (glb_policy->rr_policy != nullptr) {
     GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
   }
   // We destroy the LB channel here because
   // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
   // instance. Destroying the lb channel in glb_destroy would likely result in
   // a callback invocation without a valid glb_policy arg.
-  if (glb_policy->lb_channel != NULL) {
+  if (glb_policy->lb_channel != nullptr) {
     grpc_channel_destroy(glb_policy->lb_channel);
-    glb_policy->lb_channel = NULL;
+    glb_policy->lb_channel = nullptr;
   }
   grpc_connectivity_state_set(
       exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
 
-  while (pp != NULL) {
+  while (pp != nullptr) {
     pending_pick* next = pp->next;
-    *pp->target = NULL;
+    *pp->target = nullptr;
     GRPC_CLOSURE_SCHED(
         exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
@@ -1033,7 +1033,7 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
     pp = next;
   }
 
-  while (pping != NULL) {
+  while (pping != nullptr) {
     pending_ping* next = pping->next;
     GRPC_CLOSURE_SCHED(
         exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
@@ -1058,11 +1058,11 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
                                    grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   pending_pick* pp = glb_policy->pending_picks;
-  glb_policy->pending_picks = NULL;
-  while (pp != NULL) {
+  glb_policy->pending_picks = nullptr;
+  while (pp != nullptr) {
     pending_pick* next = pp->next;
     if (pp->target == target) {
-      *target = NULL;
+      *target = nullptr;
       GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick Cancelled", &error, 1));
@@ -1072,7 +1072,7 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
     }
     pp = next;
   }
-  if (glb_policy->rr_policy != NULL) {
+  if (glb_policy->rr_policy != nullptr) {
    grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
                                       GRPC_ERROR_REF(error));
   }
@@ -1096,8 +1096,8 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
                                     grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   pending_pick* pp = glb_policy->pending_picks;
-  glb_policy->pending_picks = NULL;
-  while (pp != NULL) {
+  glb_policy->pending_picks = nullptr;
+  while (pp != nullptr) {
     pending_pick* next = pp->next;
     if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
@@ -1110,7 +1110,7 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
     }
     pp = next;
   }
-  if (glb_policy->rr_policy != NULL) {
+  if (glb_policy->rr_policy != nullptr) {
     grpc_lb_policy_cancel_picks_locked(
         exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
         initial_metadata_flags_eq, GRPC_ERROR_REF(error));
@@ -1126,7 +1126,7 @@ static void start_picking_locked(grpc_exec_ctx* exec_ctx,
                                  glb_lb_policy* glb_policy) {
   /* start a timer to fall back */
   if (glb_policy->lb_fallback_timeout_ms > 0 &&
-      glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
+      glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
     grpc_millis deadline =
         grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
     GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
@@ -1155,8 +1155,8 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
                            grpc_connected_subchannel** target,
                            grpc_call_context_element* context, void** user_data,
                            grpc_closure* on_complete) {
-  if (pick_args->lb_token_mdelem_storage == NULL) {
-    *target = NULL;
+  if (pick_args->lb_token_mdelem_storage == nullptr) {
+    *target = nullptr;
     GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "No mdelem storage for the LB token. Load reporting "
@@ -1165,10 +1165,10 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
   }
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   bool pick_done = false;
-  if (glb_policy->rr_policy != NULL) {
+  if (glb_policy->rr_policy != nullptr) {
     const grpc_connectivity_state rr_connectivity_state =
         grpc_lb_policy_check_connectivity_locked(exec_ctx,
-                                                 glb_policy->rr_policy, NULL);
+                                                 glb_policy->rr_policy, nullptr);
     // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
     // callback registered to capture this event
     // (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
@@ -1197,7 +1197,7 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
       wc_arg->rr_policy = glb_policy->rr_policy;
       wc_arg->target = target;
       wc_arg->context = context;
-      GPR_ASSERT(glb_policy->client_stats != NULL);
+      GPR_ASSERT(glb_policy->client_stats != nullptr);
       wc_arg->client_stats =
           grpc_grpclb_client_stats_ref(glb_policy->client_stats);
       wc_arg->wrapped_closure = on_complete;
@@ -1264,7 +1264,7 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
       gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
               (void*)glb_policy);
     }
-    GPR_ASSERT(glb_policy->lb_call == NULL);
+    GPR_ASSERT(glb_policy->lb_call == nullptr);
     query_for_backends_locked(exec_ctx, glb_policy);
   }
   GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
@@ -1325,8 +1325,8 @@ static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
                                            grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
-  glb_policy->client_load_report_payload = NULL;
-  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
+  glb_policy->client_load_report_payload = nullptr;
+  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
     glb_policy->client_load_report_timer_pending = false;
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                               "client_load_report");
@@ -1344,23 +1344,23 @@ static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
          request->client_stats.num_calls_finished_with_client_failed_to_send ==
              0 &&
          request->client_stats.num_calls_finished_known_received == 0 &&
-         (drop_entries == NULL || drop_entries->num_entries == 0);
+         (drop_entries == nullptr || drop_entries->num_entries == 0);
 }
 
 static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
                                            grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
-  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
+  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
     glb_policy->client_load_report_timer_pending = false;
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                               "client_load_report");
-    if (glb_policy->lb_call == NULL) {
+    if (glb_policy->lb_call == nullptr) {
      maybe_restart_lb_call(exec_ctx, glb_policy);
     }
     return;
   }
   // Construct message payload.
-  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
+  GPR_ASSERT(glb_policy->client_load_report_payload == nullptr);
   grpc_grpclb_request* request =
       grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
   // Skip client load report if the counters were all zero in the last
@@ -1403,9 +1403,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
                                            grpc_error* error);
 static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
                                 glb_lb_policy* glb_policy) {
-  GPR_ASSERT(glb_policy->server_name != NULL);
+  GPR_ASSERT(glb_policy->server_name != nullptr);
   GPR_ASSERT(glb_policy->server_name[0] != '\0');
-  GPR_ASSERT(glb_policy->lb_call == NULL);
+  GPR_ASSERT(glb_policy->lb_call == nullptr);
   GPR_ASSERT(!glb_policy->shutting_down);
 
   /* Note the following LB call progresses every time there's activity in \a
@@ -1417,13 +1417,13 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
           ? GRPC_MILLIS_INF_FUTURE
           : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
   glb_policy->lb_call = grpc_channel_create_pollset_set_call(
-      exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
+      exec_ctx, glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
       glb_policy->base.interested_parties,
       GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
-      &host, deadline, NULL);
+      &host, deadline, nullptr);
   grpc_slice_unref_internal(exec_ctx, host);
 
-  if (glb_policy->client_stats != NULL) {
+  if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
   }
   glb_policy->client_stats = grpc_grpclb_client_stats_create();
@@ -1459,9 +1459,9 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
 
 static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
                                    glb_lb_policy* glb_policy) {
-  GPR_ASSERT(glb_policy->lb_call != NULL);
+  GPR_ASSERT(glb_policy->lb_call != nullptr);
   grpc_call_unref(glb_policy->lb_call);
-  glb_policy->lb_call = NULL;
+  glb_policy->lb_call = nullptr;
 
   grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
   grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
@@ -1479,7 +1479,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
  */
 static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
                                       glb_lb_policy* glb_policy) {
-  GPR_ASSERT(glb_policy->lb_channel != NULL);
+  GPR_ASSERT(glb_policy->lb_channel != nullptr);
   if (glb_policy->shutting_down) return;
 
   lb_call_init_locked(exec_ctx, glb_policy);
@@ -1490,7 +1490,7 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
             (void*)glb_policy, (void*)glb_policy->lb_channel,
             (void*)glb_policy->lb_call);
   }
-  GPR_ASSERT(glb_policy->lb_call != NULL);
+  GPR_ASSERT(glb_policy->lb_call != nullptr);
 
   grpc_call_error call_error;
   grpc_op ops[3];
@@ -1500,22 +1500,22 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
   op->op = GRPC_OP_SEND_INITIAL_METADATA;
   op->data.send_initial_metadata.count = 0;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
   op->op = GRPC_OP_RECV_INITIAL_METADATA;
   op->data.recv_initial_metadata.recv_initial_metadata =
       &glb_policy->lb_initial_metadata_recv;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
-  GPR_ASSERT(glb_policy->lb_request_payload != NULL);
+  GPR_ASSERT(glb_policy->lb_request_payload != nullptr);
   op->op = GRPC_OP_SEND_MESSAGE;
   op->data.send_message.send_message = glb_policy->lb_request_payload;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
   call_error =
       grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
-                                        ops, (size_t)(op - ops), NULL);
+                                        ops, (size_t)(op - ops), nullptr);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 
   op = ops;
@@ -1526,7 +1526,7 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
   op->data.recv_status_on_client.status_details =
       &glb_policy->lb_call_status_details;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
   /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
    * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
@@ -1541,7 +1541,7 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
   op->op = GRPC_OP_RECV_MESSAGE;
   op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
   /* take another weak ref to be unref'd/reused in
    * lb_on_response_received_locked */
@@ -1558,7 +1558,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
   grpc_op ops[2];
   memset(ops, 0, sizeof(ops));
   grpc_op* op = ops;
-  if (glb_policy->lb_response_payload != NULL) {
+  if (glb_policy->lb_response_payload != nullptr) {
     grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
     /* Received data from the LB server. Look inside
      * glb_policy->lb_response_payload, for a serverlist. */
@@ -1568,10 +1568,10 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
     grpc_byte_buffer_reader_destroy(&bbr);
     grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
 
-    grpc_grpclb_initial_response* response = NULL;
+    grpc_grpclb_initial_response* response = nullptr;
     if (!glb_policy->seen_initial_response &&
         (response = grpc_grpclb_initial_response_parse(response_slice)) !=
-            NULL) {
+            nullptr) {
       if (response->has_client_stats_report_interval) {
         glb_policy->client_stats_report_interval = GPR_MAX(
             GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
@@ -1598,8 +1598,8 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
     } else {
       grpc_grpclb_serverlist* serverlist =
           grpc_grpclb_response_parse_serverlist(response_slice);
-      if (serverlist != NULL) {
-        GPR_ASSERT(glb_policy->lb_call != NULL);
+      if (serverlist != nullptr) {
+        GPR_ASSERT(glb_policy->lb_call != nullptr);
         if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
           gpr_log(GPR_INFO, "Serverlist with %lu servers received",
                   (unsigned long)serverlist->num_servers);
@@ -1622,14 +1622,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
           }
           grpc_grpclb_destroy_serverlist(serverlist);
         } else { /* new serverlist */
-          if (glb_policy->serverlist != NULL) {
+          if (glb_policy->serverlist != nullptr) {
             /* dispose of the old serverlist */
             grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
           } else { /* or dispose of the fallback */
             grpc_lb_addresses_destroy(exec_ctx,
                                       glb_policy->fallback_backend_addresses);
-            glb_policy->fallback_backend_addresses = NULL;
+            glb_policy->fallback_backend_addresses = nullptr;
             if (glb_policy->fallback_timer_active) {
               grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
               glb_policy->fallback_timer_active = false;
@@ -1659,7 +1659,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
     op->op = GRPC_OP_RECV_MESSAGE;
     op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
     op->flags = 0;
-    op->reserved = NULL;
+    op->reserved = nullptr;
     op++;
     /* reuse the "lb_on_response_received_locked" weak ref taken in
      * query_for_backends_locked() */
@@ -1685,14 +1685,14 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
   glb_policy->fallback_timer_active = false;
   /* If we receive a serverlist after the timer fires but before this callback
    * actually runs, don't fall back. */
-  if (glb_policy->serverlist == NULL) {
+  if (glb_policy->serverlist == nullptr) {
     if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
       if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
         gpr_log(GPR_INFO,
                 "Falling back to use backends from resolver (grpclb %p)",
                 (void*)glb_policy);
       }
-      GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+      GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
       rr_handover_locked(exec_ctx, glb_policy);
     }
   }
@@ -1703,7 +1703,7 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
 static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
                                                 void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
-  GPR_ASSERT(glb_policy->lb_call != NULL);
+  GPR_ASSERT(glb_policy->lb_call != nullptr);
   if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     char* status_details =
         grpc_slice_to_c_string(glb_policy->lb_call_status_details);
@@ -1727,7 +1727,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
 static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
                                    glb_lb_policy* glb_policy,
                                    const grpc_lb_addresses* addresses) {
-  GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+  GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
   grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
   glb_policy->fallback_backend_addresses =
       extract_backend_addresses_locked(exec_ctx, addresses);
@@ -1742,8 +1742,8 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
   glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
   const grpc_arg* arg =
       grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
-  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
-    if (glb_policy->lb_channel == NULL) {
+  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+    if (glb_policy->lb_channel == nullptr) {
       // If we don't have a current channel to the LB, go into TRANSIENT
       // FAILURE.
       grpc_connectivity_state_set(
@@ -1763,10 +1763,10 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
       (const grpc_lb_addresses*)arg->value.pointer.p;
   // If a non-empty serverlist hasn't been received from the balancer,
   // propagate the update to fallback_backend_addresses.
-  if (glb_policy->serverlist == NULL) {
+  if (glb_policy->serverlist == nullptr) {
     fallback_update_locked(exec_ctx, glb_policy, addresses);
   }
-  GPR_ASSERT(glb_policy->lb_channel != NULL);
+  GPR_ASSERT(glb_policy->lb_channel != nullptr);
   // Propagate updates to the LB channel (pick_first) through the fake
   // resolver.
   grpc_channel_args* lb_channel_args = build_lb_channel_args(
@@ -1789,7 +1789,7 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
         grpc_polling_entity_create_from_pollset_set(
             glb_policy->base.interested_parties),
         &glb_policy->lb_channel_connectivity,
-        &glb_policy->lb_channel_on_connectivity_changed, NULL);
+        &glb_policy->lb_channel_on_connectivity_changed, nullptr);
   }
 }
@@ -1817,7 +1817,7 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
           grpc_polling_entity_create_from_pollset_set(
               glb_policy->base.interested_parties),
           &glb_policy->lb_channel_connectivity,
-          &glb_policy->lb_channel_on_connectivity_changed, NULL);
+          &glb_policy->lb_channel_on_connectivity_changed, nullptr);
       break;
     }
     case GRPC_CHANNEL_IDLE:
@@ -1825,9 +1825,9 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
       // lb channel inactive (probably shutdown prior to update). Restart lb
      // call to kick the lb channel into gear.
     /* fallthrough */
     case GRPC_CHANNEL_READY:
-      if (glb_policy->lb_call != NULL) {
+      if (glb_policy->lb_call != nullptr) {
         glb_policy->updating_lb_call = true;
-        grpc_call_cancel(glb_policy->lb_call, NULL);
+        grpc_call_cancel(glb_policy->lb_call, nullptr);
         // lb_on_server_status_received() will pick up the cancel and reinit
         // lb_call.
       } else if (glb_policy->started_picking && !glb_policy->shutting_down) {
@@ -1866,21 +1866,21 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
   /* Count the number of gRPC-LB addresses. There must be at least one. */
   const grpc_arg* arg =
       grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
-  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
-    return NULL;
+  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+    return nullptr;
   }
   grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
   size_t num_grpclb_addrs = 0;
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
     if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
   }
-  if (num_grpclb_addrs == 0) return NULL;
+  if (num_grpclb_addrs == 0) return nullptr;
 
   glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
 
   /* Get server name. */
   arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
-  GPR_ASSERT(arg != NULL);
+  GPR_ASSERT(arg != nullptr);
   GPR_ASSERT(arg->type == GRPC_ARG_STRING);
   grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
   GPR_ASSERT(uri->path[0] != '\0');
@@ -1893,7 +1893,7 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
   grpc_uri_destroy(uri);
 
   glb_policy->cc_factory = args->client_channel_factory;
-  GPR_ASSERT(glb_policy->cc_factory != NULL);
+  GPR_ASSERT(glb_policy->cc_factory != nullptr);
 
   arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
   glb_policy->lb_call_timeout_ms =
@@ -1931,11 +1931,11 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
       exec_ctx, glb_policy->response_generator, lb_channel_args);
   grpc_channel_args_destroy(exec_ctx, lb_channel_args);
   gpr_free(uri_str);
-  if (glb_policy->lb_channel == NULL) {
+  if (glb_policy->lb_channel == nullptr) {
     gpr_free((void*)glb_policy->server_name);
     grpc_channel_args_destroy(exec_ctx, glb_policy->args);
     gpr_free(glb_policy);
-    return NULL;
+    return nullptr;
   }
   grpc_subchannel_index_ref();
   GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
@@ -1969,10 +1969,10 @@ static bool maybe_add_client_load_reporting_filter(
       grpc_channel_stack_builder_get_channel_arguments(builder);
   const grpc_arg* channel_arg =
       grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
-  if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
+  if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
       strcmp(channel_arg->value.string, "grpclb") == 0) {
     return grpc_channel_stack_builder_append_filter(
-        builder, (const grpc_channel_filter*)arg, NULL, NULL);
+        builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
   }
   return true;
 }
--
cgit v1.2.3
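
Context for this mechanical conversion (editor's illustration, not part of the commit): NULL is an
integer constant, so it can silently participate in integer overload resolution, whereas nullptr
has its own type (std::nullptr_t) and converts only to pointer types. A minimal standalone C++
sketch of the difference; the function name "take" is hypothetical and not from grpclb.cc:

    #include <cstdio>

    // With NULL (an integer constant), a call like take(NULL) may bind to the
    // int overload or be ambiguous; take(nullptr) can only match the pointer one.
    static void take(int) { std::puts("int overload"); }
    static void take(const char*) { std::puts("pointer overload"); }

    int main() {
      take(nullptr);  // unambiguous: prints "pointer overload"
      // take(NULL);  // picks take(int) or is ambiguous, depending on how the
                      // implementation defines NULL
      return 0;
    }

The same reasoning applies to the GPR_ASSERT comparisons above: comparing a pointer against
nullptr states the intent in the type system rather than relying on an integer constant.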