author     Noah Eisen <ncteisen@gmail.com>   2018-02-10 08:58:46 -0800
committer  GitHub <noreply@github.com>       2018-02-10 08:58:46 -0800
commit     10a9e4e4fb5fb6aaacf2d02f10dc7690b98197e7 (patch)
tree       ad77e09e79127aaf26911001f884b7ca8c92edc7 /src/core/ext/filters/client_channel
parent     2650c9730f6b6288a57c5126f629f1e42c2a282c (diff)
parent     e1bec40dfb8ff7562fc6a6dafb6d0369294e852f (diff)
Merge pull request #14150 from ncteisen/more-clang-tidy
Remove and Prohibit C Style Casts
Diffstat (limited to 'src/core/ext/filters/client_channel')
-rw-r--r--  src/core/ext/filters/client_channel/backup_poller.cc                                        9
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.cc                                14
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.cc                                     156
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_factory.cc                               6
-rw-r--r--  src/core/ext/filters/client_channel/client_channel_plugin.cc                                2
-rw-r--r--  src/core/ext/filters/client_channel/http_connect_handshaker.cc                             21
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc       16
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc                            105
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc                14
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc                  54
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc                     34
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc                   54
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc                           24
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_factory.cc                                   15
-rw-r--r--  src/core/ext/filters/client_channel/parse_address.cc                                       15
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper_registry.cc                                4
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc                3
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc        9
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc               53
-rw-r--r--  src/core/ext/filters/client_channel/retry_throttle.cc                                      40
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.cc                                          43
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.cc                                    30
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.cc                                           6
23 files changed, 391 insertions, 336 deletions
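
The pattern applied throughout the diff below is mechanical: casts from void* (closure/callback arguments and gpr_malloc/gpr_zalloc results) become static_cast, casts between unrelated pointer types (a base grpc_lb_policy* viewed as the concrete glb_lb_policy*, or sockaddr storage viewed as struct sockaddr_in*) become reinterpret_cast, and numeric narrowing such as (uint16_t)server->port becomes an explicit static_cast<uint16_t>(...). A minimal standalone sketch of the void* case follows; the names poller, make_poller, and on_ready are hypothetical and are not gRPC APIs.

#include <cstdio>
#include <cstdlib>

// Illustrative type only; stands in for structs like backup_poller above.
struct poller {
  int fd;
};

// C allocators return void*, which C++ will not convert implicitly; the diff
// replaces (poller*)-style casts of gpr_zalloc() results with static_cast.
static poller* make_poller() {
  poller* p = static_cast<poller*>(std::calloc(1, sizeof(poller)));
  p->fd = 42;
  return p;
}

// Closure-style callbacks receive their state as void*; the typed pointer is
// recovered with static_cast instead of a C-style cast.
static void on_ready(void* arg) {
  poller* p = static_cast<poller*>(arg);
  std::printf("poller fd=%d\n", p->fd);
}

int main() {
  poller* p = make_poller();
  on_ready(p);  // a closure scheduler would normally hand the void* back
  std::free(p);
  return 0;
}

reinterpret_cast is reserved in the diff for conversions where no implicit path exists at all, which makes the intentionally unchecked reinterpretation visible at the call site and lets clang-tidy flag any remaining C-style casts.
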
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
index 906a72b662..ee90b499eb 100644
--- a/src/core/ext/filters/client_channel/backup_poller.cc
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -80,7 +80,7 @@ static void backup_poller_shutdown_unref(backup_poller* p) {
}
static void done_poller(void* arg, grpc_error* error) {
- backup_poller_shutdown_unref((backup_poller*)arg);
+ backup_poller_shutdown_unref(static_cast<backup_poller*>(arg));
}
static void g_poller_unref() {
@@ -102,7 +102,7 @@ static void g_poller_unref() {
}
static void run_poller(void* arg, grpc_error* error) {
- backup_poller* p = (backup_poller*)arg;
+ backup_poller* p = static_cast<backup_poller*>(arg);
if (error != GRPC_ERROR_NONE) {
if (error != GRPC_ERROR_CANCELLED) {
GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error));
@@ -133,8 +133,9 @@ void grpc_client_channel_start_backup_polling(
}
gpr_mu_lock(&g_poller_mu);
if (g_poller == nullptr) {
- g_poller = (backup_poller*)gpr_zalloc(sizeof(backup_poller));
- g_poller->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
+ g_poller = static_cast<backup_poller*>(gpr_zalloc(sizeof(backup_poller)));
+ g_poller->pollset =
+ static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
g_poller->shutting_down = false;
grpc_pollset_init(g_poller->pollset, &g_poller->pollset_mu);
gpr_ref_init(&g_poller->refs, 0);
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index a827aa30ec..31a5c31124 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -89,7 +89,7 @@ static void delete_state_watcher(state_watcher* w) {
static void finished_completion(void* pw, grpc_cq_completion* ignored) {
bool should_delete = false;
- state_watcher* w = (state_watcher*)pw;
+ state_watcher* w = static_cast<state_watcher*>(pw);
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
@@ -162,11 +162,11 @@ static void partly_done(state_watcher* w, bool due_to_completion,
}
static void watch_complete(void* pw, grpc_error* error) {
- partly_done((state_watcher*)pw, true, GRPC_ERROR_REF(error));
+ partly_done(static_cast<state_watcher*>(pw), true, GRPC_ERROR_REF(error));
}
static void timeout_complete(void* pw, grpc_error* error) {
- partly_done((state_watcher*)pw, false, GRPC_ERROR_REF(error));
+ partly_done(static_cast<state_watcher*>(pw), false, GRPC_ERROR_REF(error));
}
int grpc_channel_num_external_connectivity_watchers(grpc_channel* channel) {
@@ -182,7 +182,7 @@ typedef struct watcher_timer_init_arg {
} watcher_timer_init_arg;
static void watcher_timer_init(void* arg, grpc_error* error_ignored) {
- watcher_timer_init_arg* wa = (watcher_timer_init_arg*)arg;
+ watcher_timer_init_arg* wa = static_cast<watcher_timer_init_arg*>(arg);
grpc_timer_init(&wa->w->alarm, grpc_timespec_to_millis_round_up(wa->deadline),
&wa->w->on_timeout);
@@ -201,7 +201,7 @@ void grpc_channel_watch_connectivity_state(
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_core::ExecCtx exec_ctx;
- state_watcher* w = (state_watcher*)gpr_malloc(sizeof(*w));
+ state_watcher* w = static_cast<state_watcher*>(gpr_malloc(sizeof(*w)));
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
@@ -227,8 +227,8 @@ void grpc_channel_watch_connectivity_state(
w->channel = channel;
w->error = nullptr;
- watcher_timer_init_arg* wa =
- (watcher_timer_init_arg*)gpr_malloc(sizeof(watcher_timer_init_arg));
+ watcher_timer_init_arg* wa = static_cast<watcher_timer_init_arg*>(
+ gpr_malloc(sizeof(watcher_timer_init_arg)));
wa->w = w;
wa->deadline = deadline;
GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index cbee74c22d..50d562f946 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -88,10 +88,10 @@ static void method_parameters_unref(method_parameters* method_params) {
// Wrappers to pass to grpc_service_config_create_method_config_table().
static void* method_parameters_ref_wrapper(void* value) {
- return method_parameters_ref((method_parameters*)value);
+ return method_parameters_ref(static_cast<method_parameters*>(value));
}
static void method_parameters_unref_wrapper(void* value) {
- method_parameters_unref((method_parameters*)value);
+ method_parameters_unref(static_cast<method_parameters*>(value));
}
static bool parse_wait_for_ready(grpc_json* field,
@@ -119,7 +119,7 @@ static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
gpr_free(buf);
return false;
}
- int num_digits = (int)strlen(decimal_point + 1);
+ int num_digits = static_cast<int>(strlen(decimal_point + 1));
if (num_digits > 9) { // We don't accept greater precision than nanos.
gpr_free(buf);
return false;
@@ -149,7 +149,7 @@ static void* method_parameters_create_from_json(const grpc_json* json) {
}
}
method_parameters* value =
- (method_parameters*)gpr_malloc(sizeof(method_parameters));
+ static_cast<method_parameters*>(gpr_malloc(sizeof(method_parameters)));
gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
@@ -260,7 +260,8 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
}
static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
- lb_policy_connectivity_watcher* w = (lb_policy_connectivity_watcher*)arg;
+ lb_policy_connectivity_watcher* w =
+ static_cast<lb_policy_connectivity_watcher*>(arg);
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
if (grpc_client_channel_trace.enabled()) {
@@ -281,7 +282,7 @@ static void watch_lb_policy_locked(channel_data* chand,
grpc_lb_policy* lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher* w =
- (lb_policy_connectivity_watcher*)gpr_malloc(sizeof(*w));
+ static_cast<lb_policy_connectivity_watcher*>(gpr_malloc(sizeof(*w)));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -310,7 +311,7 @@ typedef struct {
static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
service_config_parsing_state* parsing_state =
- (service_config_parsing_state*)arg;
+ static_cast<service_config_parsing_state*>(arg);
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
@@ -334,7 +335,7 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
uint32_t decimal_value = 0;
const char* decimal_point = strchr(sub_field->value, '.');
if (decimal_point != nullptr) {
- whole_len = (size_t)(decimal_point - sub_field->value);
+ whole_len = static_cast<size_t>(decimal_point - sub_field->value);
multiplier = 1000;
size_t decimal_len = strlen(decimal_point + 1);
if (decimal_len > 3) decimal_len = 3;
@@ -353,7 +354,8 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
&whole_value)) {
return;
}
- milli_token_ratio = (int)((whole_value * multiplier) + decimal_value);
+ milli_token_ratio =
+ static_cast<int>((whole_value * multiplier) + decimal_value);
if (milli_token_ratio <= 0) return;
}
}
@@ -364,7 +366,8 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
}
static void request_reresolution_locked(void* arg, grpc_error* error) {
- reresolution_request_args* args = (reresolution_request_args*)arg;
+ reresolution_request_args* args =
+ static_cast<reresolution_request_args*>(arg);
channel_data* chand = args->chand;
// If this invocation is for a stale LB policy, treat it as an LB shutdown
// signal.
@@ -383,7 +386,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
}
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
@@ -412,7 +415,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
grpc_lb_addresses* addresses =
- (grpc_lb_addresses*)channel_arg->value.pointer.p;
+ static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
@@ -458,7 +461,8 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
lb_policy_name);
} else {
reresolution_request_args* args =
- (reresolution_request_args*)gpr_zalloc(sizeof(*args));
+ static_cast<reresolution_request_args*>(
+ gpr_zalloc(sizeof(*args)));
args->chand = chand;
args->lb_policy = new_lb_policy;
GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
@@ -610,10 +614,10 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
}
static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
- grpc_transport_op* op = (grpc_transport_op*)arg;
+ grpc_transport_op* op = static_cast<grpc_transport_op*>(arg);
grpc_channel_element* elem =
- (grpc_channel_element*)op->handler_private.extra_arg;
- channel_data* chand = (channel_data*)elem->channel_data;
+ static_cast<grpc_channel_element*>(op->handler_private.extra_arg);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (op->on_connectivity_state_change != nullptr) {
grpc_connectivity_state_notify_on_state_change(
@@ -668,7 +672,7 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
static void cc_start_transport_op(grpc_channel_element* elem,
grpc_transport_op* op) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != nullptr) {
@@ -685,7 +689,7 @@ static void cc_start_transport_op(grpc_channel_element* elem,
static void cc_get_channel_info(grpc_channel_element* elem,
const grpc_channel_info* info) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != nullptr) {
*info->lb_policy_name = chand->info_lb_policy_name == nullptr
@@ -704,7 +708,7 @@ static void cc_get_channel_info(grpc_channel_element* elem,
/* Constructor for channel_data */
static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
grpc_channel_element_args* args) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@@ -736,9 +740,9 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
"client channel factory arg must be a pointer");
}
grpc_client_channel_factory_ref(
- (grpc_client_channel_factory*)arg->value.pointer.p);
+ static_cast<grpc_client_channel_factory*>(arg->value.pointer.p));
chand->client_channel_factory =
- (grpc_client_channel_factory*)arg->value.pointer.p;
+ static_cast<grpc_client_channel_factory*>(arg->value.pointer.p);
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == nullptr) {
@@ -775,7 +779,7 @@ static void shutdown_resolver_locked(void* arg, grpc_error* error) {
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_channel_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (chand->resolver != nullptr) {
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver.release(),
@@ -867,7 +871,7 @@ typedef struct client_channel_call_data {
grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
grpc_call_element* elem) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
return calld->subchannel_call;
}
@@ -886,7 +890,7 @@ static void waiting_for_pick_batches_add(
// This is called via the call combiner, so access to calld is synchronized.
static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
- call_data* calld = (call_data*)arg;
+ call_data* calld = static_cast<call_data*>(arg);
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_transport_stream_op_batch_finish_with_failure(
@@ -898,7 +902,7 @@ static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_fail(grpc_call_element* elem,
grpc_error* error) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
@@ -926,7 +930,7 @@ static void waiting_for_pick_batches_fail(grpc_call_element* elem,
// This is called via the call combiner, so access to calld is synchronized.
static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
- call_data* calld = (call_data*)arg;
+ call_data* calld = static_cast<call_data*>(arg);
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_subchannel_call_process_op(
@@ -937,8 +941,8 @@ static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending %" PRIuPTR
@@ -962,8 +966,8 @@ static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
// Applies service config to the call. Must be invoked once we know
// that the resolver has returned results to the channel.
static void apply_service_config_to_call_locked(grpc_call_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
@@ -973,8 +977,8 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
if (chand->method_params_table != nullptr) {
- calld->method_params = (method_parameters*)grpc_method_config_table_get(
- chand->method_params_table, calld->path);
+ calld->method_params = static_cast<method_parameters*>(
+ grpc_method_config_table_get(chand->method_params_table, calld->path));
if (calld->method_params != nullptr) {
method_parameters_ref(calld->method_params);
// If the deadline from the service config is shorter than the one
@@ -995,8 +999,8 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
static void create_subchannel_call_locked(grpc_call_element* elem,
grpc_error* error) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
const grpc_core::ConnectedSubchannel::CallArgs call_args = {
calld->pollent, // pollent
calld->path, // path
@@ -1023,8 +1027,8 @@ static void create_subchannel_call_locked(grpc_call_element* elem,
// Invoked when a pick is completed, on both success or failure.
static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (calld->pick.connected_subchannel == nullptr) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
@@ -1051,8 +1055,8 @@ static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
// pick was done asynchronously. Removes the call's polling entity from
// chand->interested_parties before invoking pick_done_locked().
static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_polling_entity_del_from_pollset_set(calld->pollent,
chand->interested_parties);
pick_done_locked(elem, error);
@@ -1061,9 +1065,9 @@ static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
// Note: chand->lb_policy may have changed since we started our pick,
// in which case we will be cancelling the pick on a policy other than
// the one we started it on. However, this will just be a no-op.
@@ -1081,9 +1085,9 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
// Unrefs the LB policy and invokes async_pick_done_locked().
static void pick_callback_done_locked(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
@@ -1096,8 +1100,8 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
// If the pick was completed synchronously, unrefs the LB policy and
// returns true.
static bool pick_callback_start_locked(grpc_call_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
@@ -1161,7 +1165,8 @@ typedef struct {
// holding the call combiner.
static void pick_after_resolver_result_cancel_locked(void* arg,
grpc_error* error) {
- pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
+ pick_after_resolver_result_args* args =
+ static_cast<pick_after_resolver_result_args*>(arg);
if (args->finished) {
gpr_free(args);
return;
@@ -1175,8 +1180,8 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
// async_pick_done_locked() to propagate the error back to the caller.
args->finished = true;
grpc_call_element* elem = args->elem;
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
@@ -1195,7 +1200,8 @@ static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
static void pick_after_resolver_result_done_locked(void* arg,
grpc_error* error) {
- pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
+ pick_after_resolver_result_args* args =
+ static_cast<pick_after_resolver_result_args*>(arg);
if (args->finished) {
/* cancelled, do nothing */
if (grpc_client_channel_trace.enabled()) {
@@ -1206,8 +1212,8 @@ static void pick_after_resolver_result_done_locked(void* arg,
}
args->finished = true;
grpc_call_element* elem = args->elem;
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
@@ -1255,15 +1261,15 @@ static void pick_after_resolver_result_done_locked(void* arg,
}
static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
- call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
}
pick_after_resolver_result_args* args =
- (pick_after_resolver_result_args*)gpr_zalloc(sizeof(*args));
+ static_cast<pick_after_resolver_result_args*>(gpr_zalloc(sizeof(*args)));
args->elem = elem;
GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
args, grpc_combiner_scheduler(chand->combiner));
@@ -1277,9 +1283,9 @@ static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
}
static void start_pick_locked(void* arg, grpc_error* ignored) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
if (chand->lb_policy != nullptr) {
// We already have an LB policy, so ask it for a pick.
@@ -1310,8 +1316,8 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
}
static void on_complete(void* arg, grpc_error* error) {
- grpc_call_element* elem = (grpc_call_element*)arg;
- call_data* calld = (call_data*)elem->call_data;
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->retry_throttle_data != nullptr) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
@@ -1331,8 +1337,8 @@ static void on_complete(void* arg, grpc_error* error) {
static void cc_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
}
@@ -1419,8 +1425,8 @@ static void cc_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* cc_init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
@@ -1439,8 +1445,8 @@ static grpc_error* cc_init_call_elem(grpc_call_element* elem,
static void cc_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* then_schedule_closure) {
- call_data* calld = (call_data*)elem->call_data;
- channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (chand->deadline_checking_enabled) {
grpc_deadline_state_destroy(elem);
}
@@ -1471,7 +1477,7 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
static void cc_set_pollset_or_pollset_set(grpc_call_element* elem,
grpc_polling_entity* pollent) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
calld->pollent = pollent;
}
@@ -1494,7 +1500,7 @@ const grpc_channel_filter grpc_client_channel_filter = {
};
static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
- channel_data* chand = (channel_data*)arg;
+ channel_data* chand = static_cast<channel_data*>(arg);
if (chand->lb_policy != nullptr) {
grpc_lb_policy_exit_idle_locked(chand->lb_policy);
} else {
@@ -1508,7 +1514,7 @@ static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_channel_element* elem, int try_to_connect) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
@@ -1579,7 +1585,7 @@ static void external_connectivity_watcher_list_remove(
int grpc_client_channel_num_external_connectivity_watchers(
grpc_channel_element* elem) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
int count = 0;
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1595,7 +1601,8 @@ int grpc_client_channel_num_external_connectivity_watchers(
}
static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
- external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+ external_connectivity_watcher* w =
+ static_cast<external_connectivity_watcher*>(arg);
grpc_closure* follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(&w->pollent,
w->chand->interested_parties);
@@ -1608,7 +1615,8 @@ static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
static void watch_connectivity_state_locked(void* arg,
grpc_error* error_ignored) {
- external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+ external_connectivity_watcher* w =
+ static_cast<external_connectivity_watcher*>(arg);
external_connectivity_watcher* found = nullptr;
if (w->state != nullptr) {
external_connectivity_watcher_list_append(w->chand, w);
@@ -1637,9 +1645,9 @@ void grpc_client_channel_watch_connectivity_state(
grpc_channel_element* elem, grpc_polling_entity pollent,
grpc_connectivity_state* state, grpc_closure* closure,
grpc_closure* watcher_timer_init) {
- channel_data* chand = (channel_data*)elem->channel_data;
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
external_connectivity_watcher* w =
- (external_connectivity_watcher*)gpr_zalloc(sizeof(*w));
+ static_cast<external_connectivity_watcher*>(gpr_zalloc(sizeof(*w)));
w->chand = chand;
w->pollent = pollent;
w->on_complete = closure;
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.cc b/src/core/ext/filters/client_channel/client_channel_factory.cc
index 60c95d7dc9..3baf5b31ab 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.cc
+++ b/src/core/ext/filters/client_channel/client_channel_factory.cc
@@ -39,12 +39,14 @@ grpc_channel* grpc_client_channel_factory_create_channel(
}
static void* factory_arg_copy(void* factory) {
- grpc_client_channel_factory_ref((grpc_client_channel_factory*)factory);
+ grpc_client_channel_factory_ref(
+ static_cast<grpc_client_channel_factory*>(factory));
return factory;
}
static void factory_arg_destroy(void* factory) {
- grpc_client_channel_factory_unref((grpc_client_channel_factory*)factory);
+ grpc_client_channel_factory_unref(
+ static_cast<grpc_client_channel_factory*>(factory));
}
static int factory_arg_cmp(void* factory1, void* factory2) {
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.cc b/src/core/ext/filters/client_channel/client_channel_plugin.cc
index d756d9cc05..9172fa781c 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.cc
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.cc
@@ -36,7 +36,7 @@
static bool append_filter(grpc_channel_stack_builder* builder, void* arg) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
+ builder, static_cast<const grpc_channel_filter*>(arg), nullptr, nullptr);
}
static bool set_default_host_if_unset(grpc_channel_stack_builder* builder,
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.cc b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
index 6bfd038887..6bb4cefe73 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.cc
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -119,7 +119,8 @@ static void handshake_failed_locked(http_connect_handshaker* handshaker,
// Callback invoked when finished writing HTTP CONNECT request.
static void on_write_done(void* arg, grpc_error* error) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
+ http_connect_handshaker* handshaker =
+ static_cast<http_connect_handshaker*>(arg);
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the write failed or we're shutting down, clean up and invoke the
@@ -139,7 +140,8 @@ static void on_write_done(void* arg, grpc_error* error) {
// Callback invoked for reading HTTP CONNECT response.
static void on_read_done(void* arg, grpc_error* error) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
+ http_connect_handshaker* handshaker =
+ static_cast<http_connect_handshaker*>(arg);
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the read failed or we're shutting down, clean up and invoke the
@@ -224,13 +226,15 @@ done:
//
static void http_connect_handshaker_destroy(grpc_handshaker* handshaker_in) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
+ http_connect_handshaker* handshaker =
+ reinterpret_cast<http_connect_handshaker*>(handshaker_in);
http_connect_handshaker_unref(handshaker);
}
static void http_connect_handshaker_shutdown(grpc_handshaker* handshaker_in,
grpc_error* why) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
+ http_connect_handshaker* handshaker =
+ reinterpret_cast<http_connect_handshaker*>(handshaker_in);
gpr_mu_lock(&handshaker->mu);
if (!handshaker->shutdown) {
handshaker->shutdown = true;
@@ -244,7 +248,8 @@ static void http_connect_handshaker_shutdown(grpc_handshaker* handshaker_in,
static void http_connect_handshaker_do_handshake(
grpc_handshaker* handshaker_in, grpc_tcp_server_acceptor* acceptor,
grpc_closure* on_handshake_done, grpc_handshaker_args* args) {
- http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
+ http_connect_handshaker* handshaker =
+ reinterpret_cast<http_connect_handshaker*>(handshaker_in);
// Check for HTTP CONNECT channel arg.
// If not found, invoke on_handshake_done without doing anything.
const grpc_arg* arg =
@@ -270,8 +275,8 @@ static void http_connect_handshaker_do_handshake(
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
gpr_string_split(arg->value.string, "\n", &header_strings,
&num_header_strings);
- headers = (grpc_http_header*)gpr_malloc(sizeof(grpc_http_header) *
- num_header_strings);
+ headers = static_cast<grpc_http_header*>(
+ gpr_malloc(sizeof(grpc_http_header) * num_header_strings));
for (size_t i = 0; i < num_header_strings; ++i) {
char* sep = strchr(header_strings[i], ':');
if (sep == nullptr) {
@@ -324,7 +329,7 @@ static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
static grpc_handshaker* grpc_http_connect_handshaker_create() {
http_connect_handshaker* handshaker =
- (http_connect_handshaker*)gpr_malloc(sizeof(*handshaker));
+ static_cast<http_connect_handshaker*>(gpr_malloc(sizeof(*handshaker)));
memset(handshaker, 0, sizeof(*handshaker));
grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
gpr_mu_init(&handshaker->mu);
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index d6b759227e..1a3a1f029c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -48,7 +48,7 @@ struct call_data {
} // namespace
static void on_complete_for_send(void* arg, grpc_error* error) {
- call_data* calld = (call_data*)arg;
+ call_data* calld = static_cast<call_data*>(arg);
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
@@ -56,7 +56,7 @@ static void on_complete_for_send(void* arg, grpc_error* error) {
}
static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
- call_data* calld = (call_data*)arg;
+ call_data* calld = static_cast<call_data*>(arg);
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
@@ -66,13 +66,13 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != nullptr);
if (args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
- calld->client_stats = grpc_grpclb_client_stats_ref(
- (grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS]
- .value);
+ calld->client_stats =
+ grpc_grpclb_client_stats_ref(static_cast<grpc_grpclb_client_stats*>(
+ args->context[GRPC_GRPCLB_CLIENT_STATS].value));
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
}
@@ -82,7 +82,7 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->client_stats != nullptr) {
// Record call finished, optionally setting client_failed_to_send and
// received.
@@ -97,7 +97,7 @@ static void destroy_call_elem(grpc_call_element* elem,
static void start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
- call_data* calld = (call_data*)elem->call_data;
+ call_data* calld = static_cast<call_data*>(elem->call_data);
GPR_TIMER_SCOPE("clr_start_transport_stream_op_batch", 0);
if (calld->client_stats != nullptr) {
// Intercept send_initial_metadata.
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index ac47990478..04bf798267 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -320,7 +320,8 @@ static void glb_lb_call_data_ref(glb_lb_call_data* lb_calld,
const gpr_atm count = gpr_atm_acq_load(&lb_calld->refs.count);
gpr_log(GPR_DEBUG, "[%s %p] lb_calld %p REF %lu->%lu (%s)",
grpc_lb_glb_trace.name(), lb_calld->glb_policy, lb_calld,
- (unsigned long)(count - 1), (unsigned long)count, reason);
+ static_cast<unsigned long>(count - 1),
+ static_cast<unsigned long>(count), reason);
}
}
@@ -331,7 +332,8 @@ static void glb_lb_call_data_unref(glb_lb_call_data* lb_calld,
const gpr_atm count = gpr_atm_acq_load(&lb_calld->refs.count);
gpr_log(GPR_DEBUG, "[%s %p] lb_calld %p UNREF %lu->%lu (%s)",
grpc_lb_glb_trace.name(), lb_calld->glb_policy, lb_calld,
- (unsigned long)(count + 1), (unsigned long)count, reason);
+ static_cast<unsigned long>(count + 1),
+ static_cast<unsigned long>(count), reason);
}
if (done) {
GPR_ASSERT(lb_calld->lb_call != nullptr);
@@ -372,7 +374,7 @@ static grpc_error* initial_metadata_add_lb_token(
}
static void destroy_client_stats(void* arg) {
- grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
+ grpc_grpclb_client_stats_unref(static_cast<grpc_grpclb_client_stats*>(arg));
}
static void pending_pick_set_metadata_and_context(pending_pick* pp) {
@@ -408,7 +410,7 @@ static void pending_pick_set_metadata_and_context(pending_pick* pp) {
* reference to its associated round robin instance. We wrap this closure in
* order to unref the round robin instance upon its invocation */
static void pending_pick_complete(void* arg, grpc_error* error) {
- pending_pick* pp = (pending_pick*)arg;
+ pending_pick* pp = static_cast<pending_pick*>(arg);
pending_pick_set_metadata_and_context(pp);
GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
@@ -416,7 +418,7 @@ static void pending_pick_complete(void* arg, grpc_error* error) {
static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
grpc_lb_policy_pick_state* pick) {
- pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
+ pending_pick* pp = static_cast<pending_pick*>(gpr_zalloc(sizeof(*pp)));
pp->pick = pick;
pp->glb_policy = glb_policy;
GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
@@ -433,7 +435,7 @@ static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
grpc_closure* on_ack) {
- pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
+ pending_ping* pping = static_cast<pending_ping*>(gpr_zalloc(sizeof(*pping)));
pping->on_initiate = on_initiate;
pping->on_ack = on_ack;
pping->next = *root;
@@ -448,7 +450,7 @@ static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
if (log) {
gpr_log(GPR_ERROR,
"Invalid port '%d' at index %lu of serverlist. Ignoring.",
- server->port, (unsigned long)idx);
+ server->port, static_cast<unsigned long>(idx));
}
return false;
}
@@ -457,7 +459,7 @@ static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
gpr_log(GPR_ERROR,
"Expected IP to be 4 or 16 bytes, got %d at index %lu of "
"serverlist. Ignoring",
- ip->size, (unsigned long)idx);
+ ip->size, static_cast<unsigned long>(idx));
}
return false;
}
@@ -487,19 +489,21 @@ static void parse_server(const grpc_grpclb_server* server,
grpc_resolved_address* addr) {
memset(addr, 0, sizeof(*addr));
if (server->drop) return;
- const uint16_t netorder_port = htons((uint16_t)server->port);
+ const uint16_t netorder_port = htons(static_cast<uint16_t>(server->port));
/* the addresses are given in binary format (a in(6)_addr struct) in
* server->ip_address.bytes. */
const grpc_grpclb_ip_address* ip = &server->ip_address;
if (ip->size == 4) {
addr->len = sizeof(struct sockaddr_in);
- struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
+ struct sockaddr_in* addr4 =
+ reinterpret_cast<struct sockaddr_in*>(&addr->addr);
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, ip->bytes, ip->size);
addr4->sin_port = netorder_port;
} else if (ip->size == 16) {
addr->len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
+ struct sockaddr_in6* addr6 =
+ reinterpret_cast<struct sockaddr_in6*>(&addr->addr);
addr6->sin6_family = AF_INET6;
memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
addr6->sin6_port = netorder_port;
@@ -684,7 +688,7 @@ static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
grpc_grpclb_client_stats_ref(glb_policy->lb_calld->client_stats);
}
GPR_ASSERT(pp->pick->user_data == nullptr);
- pp->pick->user_data = (void**)&pp->lb_token;
+ pp->pick->user_data = reinterpret_cast<void**>(&pp->lb_token);
// Pick via the RR policy.
bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
if (pick_done) {
@@ -716,7 +720,8 @@ static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
GPR_ASSERT(addresses != nullptr);
- grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
+ grpc_lb_policy_args* args =
+ static_cast<grpc_lb_policy_args*>(gpr_zalloc(sizeof(*args)));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
// Replace the LB addresses in the channel args that we pass down to
@@ -823,7 +828,7 @@ static void rr_handover_locked(glb_lb_policy* glb_policy) {
}
static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+ glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
if (glb_policy->shutting_down) {
GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
return;
@@ -859,8 +864,8 @@ static grpc_slice_hash_table_entry targets_info_entry_create(
}
static int balancer_name_cmp_fn(void* a, void* b) {
- const char* a_str = (const char*)a;
- const char* b_str = (const char*)b;
+ const char* a_str = static_cast<const char*>(a);
+ const char* b_str = static_cast<const char*>(b);
return strcmp(a_str, b_str);
}
@@ -887,8 +892,8 @@ static grpc_channel_args* build_lb_channel_args(
grpc_lb_addresses* lb_addresses =
grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
grpc_slice_hash_table_entry* targets_info_entries =
- (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
- num_grpclb_addrs);
+ static_cast<grpc_slice_hash_table_entry*>(
+ gpr_zalloc(sizeof(*targets_info_entries) * num_grpclb_addrs));
size_t lb_addresses_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -931,7 +936,7 @@ static grpc_channel_args* build_lb_channel_args(
}
static void glb_destroy(grpc_lb_policy* pol) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
GPR_ASSERT(glb_policy->pending_picks == nullptr);
GPR_ASSERT(glb_policy->pending_pings == nullptr);
gpr_free((void*)glb_policy->server_name);
@@ -951,7 +956,7 @@ static void glb_destroy(grpc_lb_policy* pol) {
static void glb_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
glb_policy->shutting_down = true;
if (glb_policy->lb_calld != nullptr) {
@@ -1027,7 +1032,7 @@ static void glb_shutdown_locked(grpc_lb_policy* pol,
static void glb_cancel_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
@@ -1064,7 +1069,7 @@ static void glb_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = nullptr;
while (pp != nullptr) {
@@ -1111,7 +1116,7 @@ static void start_picking_locked(glb_lb_policy* glb_policy) {
}
static void glb_exit_idle_locked(grpc_lb_policy* pol) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
if (!glb_policy->started_picking) {
start_picking_locked(glb_policy);
}
@@ -1119,7 +1124,7 @@ static void glb_exit_idle_locked(grpc_lb_policy* pol) {
static int glb_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
pending_pick* pp = pending_pick_create(glb_policy, pick);
bool pick_done = false;
if (glb_policy->rr_policy != nullptr) {
@@ -1165,14 +1170,14 @@ static int glb_pick_locked(grpc_lb_policy* pol,
static grpc_connectivity_state glb_check_connectivity_locked(
grpc_lb_policy* pol, grpc_error** connectivity_error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
return grpc_connectivity_state_get(&glb_policy->state_tracker,
connectivity_error);
}
static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
if (glb_policy->rr_policy) {
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
} else {
@@ -1186,13 +1191,13 @@ static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
grpc_connectivity_state* current,
grpc_closure* notify) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(pol);
grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
current, notify);
}
static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+ glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
glb_policy->retry_timer_callback_pending = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE &&
glb_policy->lb_calld == nullptr) {
@@ -1244,7 +1249,7 @@ static void schedule_next_client_load_report(glb_lb_call_data* lb_calld) {
}
static void client_load_report_done_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
glb_lb_policy* glb_policy = lb_calld->glb_policy;
grpc_byte_buffer_destroy(lb_calld->send_message_payload);
lb_calld->send_message_payload = nullptr;
@@ -1257,8 +1262,8 @@ static void client_load_report_done_locked(void* arg, grpc_error* error) {
static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
grpc_grpclb_dropped_call_counts* drop_entries =
- (grpc_grpclb_dropped_call_counts*)
- request->client_stats.calls_finished_with_drop.arg;
+ static_cast<grpc_grpclb_dropped_call_counts*>(
+ request->client_stats.calls_finished_with_drop.arg);
return request->client_stats.num_calls_started == 0 &&
request->client_stats.num_calls_finished == 0 &&
request->client_stats.num_calls_finished_with_client_failed_to_send ==
@@ -1307,7 +1312,7 @@ static void send_client_load_report_locked(glb_lb_call_data* lb_calld) {
}
static void maybe_send_client_load_report_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
glb_lb_policy* glb_policy = lb_calld->glb_policy;
lb_calld->client_load_report_timer_callback_pending = false;
if (error != GRPC_ERROR_NONE || lb_calld != glb_policy->lb_calld) {
@@ -1339,7 +1344,8 @@ static glb_lb_call_data* lb_call_data_create_locked(glb_lb_policy* glb_policy) {
glb_policy->lb_call_timeout_ms == 0
? GRPC_MILLIS_INF_FUTURE
: grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)gpr_zalloc(sizeof(*lb_calld));
+ glb_lb_call_data* lb_calld =
+ static_cast<glb_lb_call_data*>(gpr_zalloc(sizeof(*lb_calld)));
lb_calld->lb_call = grpc_channel_create_pollset_set_call(
glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
@@ -1413,7 +1419,7 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
glb_lb_call_data_ref(glb_policy->lb_calld,
"lb_on_sent_initial_request_locked");
call_error = grpc_call_start_batch_and_execute(
- glb_policy->lb_calld->lb_call, ops, (size_t)(op - ops),
+ glb_policy->lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
&glb_policy->lb_calld->lb_on_sent_initial_request);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv initial metadata.
@@ -1433,7 +1439,7 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
op++;
glb_lb_call_data_ref(glb_policy->lb_calld, "lb_on_response_received_locked");
call_error = grpc_call_start_batch_and_execute(
- glb_policy->lb_calld->lb_call, ops, (size_t)(op - ops),
+ glb_policy->lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
&glb_policy->lb_calld->lb_on_response_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv server status.
@@ -1451,13 +1457,13 @@ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
// ref instead of a new ref. When it's invoked, it's the initial ref that is
// unreffed.
call_error = grpc_call_start_batch_and_execute(
- glb_policy->lb_calld->lb_call, ops, (size_t)(op - ops),
+ glb_policy->lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
&glb_policy->lb_calld->lb_on_server_status_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
grpc_byte_buffer_destroy(lb_calld->send_message_payload);
lb_calld->send_message_payload = nullptr;
// If we attempted to send a client load report before the initial request was
@@ -1471,7 +1477,7 @@ static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
}
static void lb_on_response_received_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
glb_lb_policy* glb_policy = lb_calld->glb_policy;
// Empty payload means the LB call was cancelled.
if (lb_calld != glb_policy->lb_calld ||
@@ -1594,7 +1600,7 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
// Reuse the "lb_on_response_received_locked" ref taken in
// query_for_backends_locked().
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
- lb_calld->lb_call, ops, (size_t)(op - ops),
+ lb_calld->lb_call, ops, static_cast<size_t>(op - ops),
&lb_calld->lb_on_response_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else {
@@ -1604,7 +1610,7 @@ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
}
static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
- glb_lb_call_data* lb_calld = (glb_lb_call_data*)arg;
+ glb_lb_call_data* lb_calld = static_cast<glb_lb_call_data*>(arg);
glb_lb_policy* glb_policy = lb_calld->glb_policy;
GPR_ASSERT(lb_calld->lb_call != nullptr);
if (grpc_lb_glb_trace.enabled()) {
@@ -1641,7 +1647,7 @@ static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
}
static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+ glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
glb_policy->fallback_timer_callback_pending = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
@@ -1673,7 +1679,7 @@ static void fallback_update_locked(glb_lb_policy* glb_policy,
static void glb_update_locked(grpc_lb_policy* policy,
const grpc_lb_policy_args* args) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(policy);
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
@@ -1694,7 +1700,7 @@ static void glb_update_locked(grpc_lb_policy* policy,
return;
}
const grpc_lb_addresses* addresses =
- (const grpc_lb_addresses*)arg->value.pointer.p;
+ static_cast<const grpc_lb_addresses*>(arg->value.pointer.p);
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
if (glb_policy->serverlist == nullptr) {
@@ -1731,7 +1737,7 @@ static void glb_update_locked(grpc_lb_policy* policy,
// stayed READY throughout the update (for example if the update is identical).
static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
grpc_error* error) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+ glb_lb_policy* glb_policy = static_cast<glb_lb_policy*>(arg);
if (glb_policy->shutting_down) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
@@ -1777,7 +1783,7 @@ static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
static void glb_set_reresolve_closure_locked(
grpc_lb_policy* policy, grpc_closure* request_reresolution) {
- glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+ glb_lb_policy* glb_policy = reinterpret_cast<glb_lb_policy*>(policy);
GPR_ASSERT(!glb_policy->shutting_down);
GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
if (glb_policy->rr_policy != nullptr) {
@@ -1810,14 +1816,16 @@ static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
return nullptr;
}
- grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
+ grpc_lb_addresses* addresses =
+ static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
if (num_grpclb_addrs == 0) return nullptr;
- glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
+ glb_lb_policy* glb_policy =
+ static_cast<glb_lb_policy*>(gpr_zalloc(sizeof(*glb_policy)));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
@@ -1921,7 +1929,8 @@ static bool maybe_add_client_load_reporting_filter(
if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
strcmp(channel_arg->value.string, "grpclb") == 0) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
+ builder, static_cast<const grpc_channel_filter*>(arg), nullptr,
+ nullptr);
}
return true;
}
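
Most of the grpclb.cc changes are the two patterns seen above: typing the void* handed to a combiner callback, and typing the result of gpr_zalloc. A minimal, self-contained sketch of both outside the gRPC tree — my_policy, my_zalloc and on_done are illustrative stand-ins, with plain malloc used in place of gpr_zalloc:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    struct my_policy {
      int refs;
    };

    // Stand-in for gpr_zalloc: allocate and zero-fill.
    static void* my_zalloc(size_t size) {
      void* p = malloc(size);
      if (p != nullptr) memset(p, 0, size);
      return p;
    }

    // Callback that receives its state as a void*, the way grpc_closure
    // callbacks do.
    static void on_done(void* arg) {
      // static_cast documents that arg really is a my_policy*; unlike a
      // C-style cast it cannot silently drop const or reinterpret the
      // pointee type.
      my_policy* p = static_cast<my_policy*>(arg);
      printf("refs=%d\n", p->refs);
    }

    int main() {
      my_policy* p = static_cast<my_policy*>(my_zalloc(sizeof(*p)));
      p->refs = 1;
      on_done(p);
      free(p);
    }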
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
index 45c9f17301..0b5a798be3 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
@@ -42,7 +42,7 @@ struct grpc_grpclb_client_stats {
grpc_grpclb_client_stats* grpc_grpclb_client_stats_create() {
grpc_grpclb_client_stats* client_stats =
- (grpc_grpclb_client_stats*)gpr_zalloc(sizeof(*client_stats));
+ static_cast<grpc_grpclb_client_stats*>(gpr_zalloc(sizeof(*client_stats)));
gpr_ref_init(&client_stats->refs, 1);
return client_stats;
}
@@ -88,8 +88,8 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
// Record the drop.
if (client_stats->drop_token_counts == nullptr) {
client_stats->drop_token_counts =
- (grpc_grpclb_dropped_call_counts*)gpr_zalloc(
- sizeof(grpc_grpclb_dropped_call_counts));
+ static_cast<grpc_grpclb_dropped_call_counts*>(
+ gpr_zalloc(sizeof(grpc_grpclb_dropped_call_counts)));
}
grpc_grpclb_dropped_call_counts* drop_token_counts =
client_stats->drop_token_counts;
@@ -104,9 +104,9 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
while (new_num_entries < drop_token_counts->num_entries + 1) {
new_num_entries *= 2;
}
- drop_token_counts->token_counts = (grpc_grpclb_drop_token_count*)gpr_realloc(
- drop_token_counts->token_counts,
- new_num_entries * sizeof(grpc_grpclb_drop_token_count));
+ drop_token_counts->token_counts = static_cast<grpc_grpclb_drop_token_count*>(
+ gpr_realloc(drop_token_counts->token_counts,
+ new_num_entries * sizeof(grpc_grpclb_drop_token_count)));
grpc_grpclb_drop_token_count* new_entry =
&drop_token_counts->token_counts[drop_token_counts->num_entries++];
new_entry->token = gpr_strdup(token);
@@ -114,7 +114,7 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
}
static void atomic_get_and_reset_counter(int64_t* value, gpr_atm* counter) {
- *value = (int64_t)gpr_atm_acq_load(counter);
+ *value = static_cast<int64_t>(gpr_atm_acq_load(counter));
gpr_atm_full_fetch_add(counter, (gpr_atm)(-*value));
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index fc781da330..c388b6ba77 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -25,7 +25,7 @@
/* invoked once for every Server in ServerList */
static bool count_serverlist(pb_istream_t* stream, const pb_field_t* field,
void** arg) {
- grpc_grpclb_serverlist* sl = (grpc_grpclb_serverlist*)*arg;
+ grpc_grpclb_serverlist* sl = static_cast<grpc_grpclb_serverlist*>(*arg);
grpc_grpclb_server server;
if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -46,10 +46,10 @@ typedef struct decode_serverlist_arg {
/* invoked once for every Server in ServerList */
static bool decode_serverlist(pb_istream_t* stream, const pb_field_t* field,
void** arg) {
- decode_serverlist_arg* dec_arg = (decode_serverlist_arg*)*arg;
+ decode_serverlist_arg* dec_arg = static_cast<decode_serverlist_arg*>(*arg);
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
grpc_grpclb_server* server =
- (grpc_grpclb_server*)gpr_zalloc(sizeof(grpc_grpclb_server));
+ static_cast<grpc_grpclb_server*>(gpr_zalloc(sizeof(grpc_grpclb_server)));
if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
gpr_free(server);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -60,8 +60,8 @@ static bool decode_serverlist(pb_istream_t* stream, const pb_field_t* field,
}
grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name) {
- grpc_grpclb_request* req =
- (grpc_grpclb_request*)gpr_malloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request* req = static_cast<grpc_grpclb_request*>(
+ gpr_malloc(sizeof(grpc_grpclb_request)));
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
@@ -80,15 +80,15 @@ static void populate_timestamp(gpr_timespec timestamp,
static bool encode_string(pb_ostream_t* stream, const pb_field_t* field,
void* const* arg) {
- char* str = (char*)*arg;
+ char* str = static_cast<char*>(*arg);
if (!pb_encode_tag_for_field(stream, field)) return false;
- return pb_encode_string(stream, (uint8_t*)str, strlen(str));
+ return pb_encode_string(stream, reinterpret_cast<uint8_t*>(str), strlen(str));
}
static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field,
void* const* arg) {
grpc_grpclb_dropped_call_counts* drop_entries =
- (grpc_grpclb_dropped_call_counts*)*arg;
+ static_cast<grpc_grpclb_dropped_call_counts*>(*arg);
if (drop_entries == nullptr) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
@@ -107,8 +107,8 @@ static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field,
grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats* client_stats) {
- grpc_grpclb_request* req =
- (grpc_grpclb_request*)gpr_zalloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request* req = static_cast<grpc_grpclb_request*>(
+ gpr_zalloc(sizeof(grpc_grpclb_request)));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
@@ -123,8 +123,8 @@ grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
&req->client_stats.num_calls_finished,
&req->client_stats.num_calls_finished_with_client_failed_to_send,
&req->client_stats.num_calls_finished_known_received,
- (grpc_grpclb_dropped_call_counts**)&req->client_stats
- .calls_finished_with_drop.arg);
+ reinterpret_cast<grpc_grpclb_dropped_call_counts**>(
+ &req->client_stats.calls_finished_with_drop.arg));
return req;
}
@@ -148,8 +148,8 @@ grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request) {
void grpc_grpclb_request_destroy(grpc_grpclb_request* request) {
if (request->has_client_stats) {
grpc_grpclb_dropped_call_counts* drop_entries =
- (grpc_grpclb_dropped_call_counts*)
- request->client_stats.calls_finished_with_drop.arg;
+ static_cast<grpc_grpclb_dropped_call_counts*>(
+ request->client_stats.calls_finished_with_drop.arg);
grpc_grpclb_dropped_call_counts_destroy(drop_entries);
}
gpr_free(request);
@@ -171,8 +171,8 @@ grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
if (!res.has_initial_response) return nullptr;
grpc_grpclb_initial_response* initial_res =
- (grpc_grpclb_initial_response*)gpr_malloc(
- sizeof(grpc_grpclb_initial_response));
+ static_cast<grpc_grpclb_initial_response*>(
+ gpr_malloc(sizeof(grpc_grpclb_initial_response)));
memcpy(initial_res, &res.initial_response,
sizeof(grpc_grpclb_initial_response));
@@ -185,8 +185,8 @@ grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
- grpc_grpclb_serverlist* sl =
- (grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist* sl = static_cast<grpc_grpclb_serverlist*>(
+ gpr_zalloc(sizeof(grpc_grpclb_serverlist)));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
// First pass: count number of servers.
@@ -200,8 +200,8 @@ grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
- sl->servers = (grpc_grpclb_server**)gpr_zalloc(sizeof(grpc_grpclb_server*) *
- sl->num_servers);
+ sl->servers = static_cast<grpc_grpclb_server**>(
+ gpr_zalloc(sizeof(grpc_grpclb_server*) * sl->num_servers));
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@@ -231,14 +231,14 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist) {
grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist* sl) {
- grpc_grpclb_serverlist* copy =
- (grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist* copy = static_cast<grpc_grpclb_serverlist*>(
+ gpr_zalloc(sizeof(grpc_grpclb_serverlist)));
copy->num_servers = sl->num_servers;
- copy->servers = (grpc_grpclb_server**)gpr_malloc(sizeof(grpc_grpclb_server*) *
- sl->num_servers);
+ copy->servers = static_cast<grpc_grpclb_server**>(
+ gpr_malloc(sizeof(grpc_grpclb_server*) * sl->num_servers));
for (size_t i = 0; i < sl->num_servers; i++) {
- copy->servers[i] =
- (grpc_grpclb_server*)gpr_malloc(sizeof(grpc_grpclb_server));
+ copy->servers[i] = static_cast<grpc_grpclb_server*>(
+ gpr_malloc(sizeof(grpc_grpclb_server)));
memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server));
}
return copy;
@@ -291,7 +291,7 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs,
}
grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb) {
- return (grpc_millis)(
+ return static_cast<grpc_millis>(
(duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC +
(duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS);
}
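
One hunk above is deliberately not a static_cast: encode_string() now uses reinterpret_cast<uint8_t*>, because char* and uint8_t* are unrelated pointer types and only reinterpret_cast (or a C-style cast) converts between them. A small sketch of that distinction — write_bytes is a hypothetical byte sink, not a nanopb API:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Hypothetical byte-oriented sink, standing in for pb_encode_string().
    static void write_bytes(const uint8_t* data, size_t len) {
      for (size_t i = 0; i < len; ++i) {
        printf("%02x", static_cast<unsigned>(data[i]));
      }
      printf("\n");
    }

    int main() {
      const char* token = "rate_limiting";
      // Viewing char storage as uint8_t is a byte-level reinterpretation,
      // so the named cast is reinterpret_cast, not static_cast.
      write_bytes(reinterpret_cast<const uint8_t*>(token), strlen(token));
    }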
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 725b78d478..1485f7caf5 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -51,7 +51,7 @@ typedef struct {
} pick_first_lb_policy;
static void pf_destroy(grpc_lb_policy* pol) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
GPR_ASSERT(p->subchannel_list == nullptr);
GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
GPR_ASSERT(p->pending_picks == nullptr);
@@ -65,7 +65,7 @@ static void pf_destroy(grpc_lb_policy* pol) {
static void pf_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
@@ -105,7 +105,7 @@ static void pf_shutdown_locked(grpc_lb_policy* pol,
static void pf_cancel_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
@@ -128,7 +128,7 @@ static void pf_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
while (pick != nullptr) {
@@ -165,7 +165,7 @@ static void start_picking_locked(pick_first_lb_policy* p) {
}
static void pf_exit_idle_locked(grpc_lb_policy* pol) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
if (!p->started_picking) {
start_picking_locked(p);
}
@@ -173,7 +173,7 @@ static void pf_exit_idle_locked(grpc_lb_policy* pol) {
static int pf_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
// If we have a selected subchannel already, return synchronously.
if (p->selected != nullptr) {
pick->connected_subchannel = p->selected->connected_subchannel;
@@ -200,21 +200,21 @@ static void destroy_unselected_subchannels_locked(pick_first_lb_policy* p) {
static grpc_connectivity_state pf_check_connectivity_locked(
grpc_lb_policy* pol, grpc_error** error) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
return grpc_connectivity_state_get(&p->state_tracker, error);
}
static void pf_notify_on_state_change_locked(grpc_lb_policy* pol,
grpc_connectivity_state* current,
grpc_closure* notify) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
notify);
}
static void pf_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(pol);
if (p->selected) {
p->selected->connected_subchannel->Ping(on_initiate, on_ack);
} else {
@@ -229,7 +229,7 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error);
static void pf_update_locked(grpc_lb_policy* policy,
const grpc_lb_policy_args* args) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(policy);
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
@@ -249,10 +249,10 @@ static void pf_update_locked(grpc_lb_policy* policy,
return;
}
const grpc_lb_addresses* addresses =
- (const grpc_lb_addresses*)arg->value.pointer.p;
+ static_cast<const grpc_lb_addresses*>(arg->value.pointer.p);
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
- (void*)p, (unsigned long)addresses->num_addresses);
+ (void*)p, static_cast<unsigned long>(addresses->num_addresses));
}
grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
&p->base, &grpc_lb_pick_first_trace, addresses, args,
@@ -347,8 +347,9 @@ static void pf_update_locked(grpc_lb_policy* policy,
}
static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
- grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
- pick_first_lb_policy* p = (pick_first_lb_policy*)sd->subchannel_list->policy;
+ grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
+ pick_first_lb_policy* p =
+ reinterpret_cast<pick_first_lb_policy*>(sd->subchannel_list->policy);
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
@@ -521,7 +522,7 @@ static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
static void pf_set_reresolve_closure_locked(
grpc_lb_policy* policy, grpc_closure* request_reresolution) {
- pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
+ pick_first_lb_policy* p = reinterpret_cast<pick_first_lb_policy*>(policy);
GPR_ASSERT(!p->shutdown);
GPR_ASSERT(policy->request_reresolution == nullptr);
policy->request_reresolution = request_reresolution;
@@ -547,7 +548,8 @@ static void pick_first_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy* create_pick_first(grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != nullptr);
- pick_first_lb_policy* p = (pick_first_lb_policy*)gpr_zalloc(sizeof(*p));
+ pick_first_lb_policy* p =
+ static_cast<pick_first_lb_policy*>(gpr_zalloc(sizeof(*p)));
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p created.", (void*)p);
}
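
Note why the pick_first.cc hunks use reinterpret_cast rather than static_cast: the concrete policy embeds the generic policy struct as its first member, and the vtable only ever sees a pointer to that base, so converting back crosses two unrelated struct types. A minimal sketch of the idiom, using illustrative names (lb_policy, pick_first_policy, policy_destroy) rather than the real gRPC types:

    #include <cstdio>

    struct lb_policy {
      void (*destroy)(lb_policy* pol);
    };

    struct pick_first_policy {
      lb_policy base;  // must be the first member for the cast below
      int num_picks;
    };

    static void policy_destroy(lb_policy* pol) {
      // Well defined for a standard-layout struct whose first member is the
      // base, but the types are unrelated to the compiler, hence
      // reinterpret_cast.
      pick_first_policy* p = reinterpret_cast<pick_first_policy*>(pol);
      printf("destroying policy after %d picks\n", p->num_picks);
    }

    int main() {
      pick_first_policy p{{policy_destroy}, 3};
      lb_policy* as_base = &p.base;
      as_base->destroy(as_base);
    }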
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 6d0485f395..cefd0d8d7d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -82,8 +82,9 @@ static size_t get_next_ready_subchannel_index_locked(
gpr_log(GPR_INFO,
"[RR %p] getting next ready subchannel (out of %lu), "
"last_ready_subchannel_index=%lu",
- (void*)p, (unsigned long)p->subchannel_list->num_subchannels,
- (unsigned long)p->last_ready_subchannel_index);
+ (void*)p,
+ static_cast<unsigned long>(p->subchannel_list->num_subchannels),
+ static_cast<unsigned long>(p->last_ready_subchannel_index));
}
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
const size_t index = (i + p->last_ready_subchannel_index + 1) %
@@ -94,7 +95,7 @@ static size_t get_next_ready_subchannel_index_locked(
"[RR %p] checking subchannel %p, subchannel_list %p, index %lu: "
"state=%s",
(void*)p, (void*)p->subchannel_list->subchannels[index].subchannel,
- (void*)p->subchannel_list, (unsigned long)index,
+ (void*)p->subchannel_list, static_cast<unsigned long>(index),
grpc_connectivity_state_name(
p->subchannel_list->subchannels[index].curr_connectivity_state));
}
@@ -106,7 +107,7 @@ static size_t get_next_ready_subchannel_index_locked(
"subchannel_list %p",
(void*)p,
(void*)p->subchannel_list->subchannels[index].subchannel,
- (unsigned long)index, (void*)p->subchannel_list);
+ static_cast<unsigned long>(index), (void*)p->subchannel_list);
}
return index;
}
@@ -125,7 +126,7 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
- (void*)p, (unsigned long)last_ready_index,
+ (void*)p, static_cast<unsigned long>(last_ready_index),
(void*)p->subchannel_list->subchannels[last_ready_index].subchannel,
(void*)p->subchannel_list->subchannels[last_ready_index]
.connected_subchannel.get());
@@ -133,7 +134,7 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
}
static void rr_destroy(grpc_lb_policy* pol) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
(void*)pol, (void*)pol);
@@ -147,7 +148,7 @@ static void rr_destroy(grpc_lb_policy* pol) {
static void rr_shutdown_locked(grpc_lb_policy* pol,
grpc_lb_policy* new_policy) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
@@ -187,7 +188,7 @@ static void rr_shutdown_locked(grpc_lb_policy* pol,
static void rr_cancel_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick,
grpc_error* error) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
grpc_lb_policy_pick_state* pp = p->pending_picks;
p->pending_picks = nullptr;
while (pp != nullptr) {
@@ -210,7 +211,7 @@ static void rr_cancel_picks_locked(grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
grpc_lb_policy_pick_state* pick = p->pending_picks;
p->pending_picks = nullptr;
while (pick != nullptr) {
@@ -243,7 +244,7 @@ static void start_picking_locked(round_robin_lb_policy* p) {
}
static void rr_exit_idle_locked(grpc_lb_policy* pol) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
if (!p->started_picking) {
start_picking_locked(p);
}
@@ -251,7 +252,7 @@ static void rr_exit_idle_locked(grpc_lb_policy* pol) {
static int rr_pick_locked(grpc_lb_policy* pol,
grpc_lb_policy_pick_state* pick) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", pol,
p->shutdown);
@@ -334,7 +335,8 @@ static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
* subchannel_list->num_subchannels.
*/
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
- round_robin_lb_policy* p = (round_robin_lb_policy*)subchannel_list->policy;
+ round_robin_lb_policy* p =
+ reinterpret_cast<round_robin_lb_policy*>(subchannel_list->policy);
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_IDLE);
if (subchannel_list->num_ready > 0) {
/* 1) READY */
@@ -355,9 +357,9 @@ static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
}
static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
- grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
+ grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
round_robin_lb_policy* p =
- (round_robin_lb_policy*)sd->subchannel_list->policy;
+ reinterpret_cast<round_robin_lb_policy*>(sd->subchannel_list->policy);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,
@@ -426,7 +428,8 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
if (grpc_lb_round_robin_trace.enabled()) {
const unsigned long num_subchannels =
p->subchannel_list != nullptr
- ? (unsigned long)p->subchannel_list->num_subchannels
+ ? static_cast<unsigned long>(
+ p->subchannel_list->num_subchannels)
: 0;
gpr_log(GPR_DEBUG,
"[RR %p] phasing out subchannel list %p (size %lu) in favor "
@@ -467,7 +470,8 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
"[RR %p] Fulfilling pending pick. Target <-- subchannel %p "
"(subchannel_list %p, index %lu)",
(void*)p, (void*)selected->subchannel,
- (void*)p->subchannel_list, (unsigned long)next_ready_index);
+ (void*)p->subchannel_list,
+ static_cast<unsigned long>(next_ready_index));
}
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
@@ -490,21 +494,21 @@ static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
static grpc_connectivity_state rr_check_connectivity_locked(
grpc_lb_policy* pol, grpc_error** error) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
return grpc_connectivity_state_get(&p->state_tracker, error);
}
static void rr_notify_on_state_change_locked(grpc_lb_policy* pol,
grpc_connectivity_state* current,
grpc_closure* notify) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
notify);
}
static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(pol);
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
grpc_lb_subchannel_data* selected =
@@ -522,7 +526,7 @@ static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
static void rr_update_locked(grpc_lb_policy* policy,
const grpc_lb_policy_args* args) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(policy);
const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
@@ -537,7 +541,8 @@ static void rr_update_locked(grpc_lb_policy* policy,
}
return;
}
- grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
+ grpc_lb_addresses* addresses =
+ static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
addresses->num_addresses);
@@ -617,7 +622,7 @@ static void rr_update_locked(grpc_lb_policy* policy,
static void rr_set_reresolve_closure_locked(
grpc_lb_policy* policy, grpc_closure* request_reresolution) {
- round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
+ round_robin_lb_policy* p = reinterpret_cast<round_robin_lb_policy*>(policy);
GPR_ASSERT(!p->shutdown);
GPR_ASSERT(policy->request_reresolution == nullptr);
policy->request_reresolution = request_reresolution;
@@ -643,7 +648,8 @@ static void round_robin_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy* round_robin_create(grpc_lb_policy_factory* factory,
grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != nullptr);
- round_robin_lb_policy* p = (round_robin_lb_policy*)gpr_zalloc(sizeof(*p));
+ round_robin_lb_policy* p =
+ static_cast<round_robin_lb_policy*>(gpr_zalloc(sizeof(*p)));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
@@ -651,7 +657,7 @@ static grpc_lb_policy* round_robin_create(grpc_lb_policy_factory* factory,
rr_update_locked(&p->base, args);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void*)p,
- (unsigned long)p->subchannel_list->num_subchannels);
+ static_cast<unsigned long>(p->subchannel_list->num_subchannels));
}
return &p->base;
}
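
Many of the round_robin.cc hunks only touch logging: size_t values are cast to unsigned long before being handed to a %lu format. A short sketch of why the explicit conversion matters with a varargs logger — log_line is an illustrative stand-in for gpr_log:

    #include <cstdarg>
    #include <cstddef>
    #include <cstdio>

    static void log_line(const char* fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      vprintf(fmt, ap);
      va_end(ap);
      printf("\n");
    }

    int main() {
      size_t num_subchannels = 4;
      // size_t may differ from unsigned long across platforms; the cast pins
      // the vararg to the type %lu expects.
      log_line("[RR] created with %lu subchannels",
               static_cast<unsigned long>(num_subchannels));
    }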
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index 75f7ca2d12..e35c5e8db3 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -37,7 +37,7 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_lb_subchannel_data* sd,
" (subchannel %p): unreffing subchannel",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
- (size_t)(sd - sd->subchannel_list->subchannels),
+ static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(sd->subchannel, reason);
@@ -60,7 +60,8 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
" (subchannel %p): requesting connectivity change "
"notification (from %s)",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
- sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list,
+ static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe));
}
@@ -79,7 +80,7 @@ void grpc_lb_subchannel_data_stop_connectivity_watch(
" (subchannel %p): stopping connectivity watch",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
- (size_t)(sd - sd->subchannel_list->subchannels),
+ static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GPR_ASSERT(sd->connectivity_notification_pending);
@@ -91,7 +92,8 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb) {
grpc_lb_subchannel_list* subchannel_list =
- (grpc_lb_subchannel_list*)gpr_zalloc(sizeof(*subchannel_list));
+ static_cast<grpc_lb_subchannel_list*>(
+ gpr_zalloc(sizeof(*subchannel_list)));
if (tracer->enabled()) {
gpr_log(GPR_DEBUG,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
@@ -100,8 +102,8 @@ grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
subchannel_list->policy = p;
subchannel_list->tracer = tracer;
gpr_ref_init(&subchannel_list->refcount, 1);
- subchannel_list->subchannels = (grpc_lb_subchannel_data*)gpr_zalloc(
- sizeof(grpc_lb_subchannel_data) * addresses->num_addresses);
+ subchannel_list->subchannels = static_cast<grpc_lb_subchannel_data*>(
+ gpr_zalloc(sizeof(grpc_lb_subchannel_data) * addresses->num_addresses));
// We need to remove the LB addresses in order to be able to compare the
// subchannel keys of subchannels from a different batch of addresses.
static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
@@ -190,8 +192,8 @@ void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
- subchannel_list, (unsigned long)(count - 1), (unsigned long)count,
- reason);
+ subchannel_list, static_cast<unsigned long>(count - 1),
+ static_cast<unsigned long>(count), reason);
}
}
@@ -202,8 +204,8 @@ void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
subchannel_list->tracer->name(), subchannel_list->policy,
- subchannel_list, (unsigned long)(count + 1), (unsigned long)count,
- reason);
+ subchannel_list, static_cast<unsigned long>(count + 1),
+ static_cast<unsigned long>(count), reason);
}
if (done) {
subchannel_list_destroy(subchannel_list);
@@ -230,7 +232,7 @@ static void subchannel_data_cancel_connectivity_watch(
" (subchannel %p): canceling connectivity watch (%s)",
sd->subchannel_list->tracer->name(), sd->subchannel_list->policy,
sd->subchannel_list,
- (size_t)(sd - sd->subchannel_list->subchannels),
+ static_cast<size_t>(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
grpc_subchannel_notify_on_state_change(sd->subchannel, nullptr, nullptr,
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.cc b/src/core/ext/filters/client_channel/lb_policy_factory.cc
index dbf69fdcba..f2a800b221 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.cc
@@ -29,11 +29,12 @@
grpc_lb_addresses* grpc_lb_addresses_create(
size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable) {
grpc_lb_addresses* addresses =
- (grpc_lb_addresses*)gpr_zalloc(sizeof(grpc_lb_addresses));
+ static_cast<grpc_lb_addresses*>(gpr_zalloc(sizeof(grpc_lb_addresses)));
addresses->num_addresses = num_addresses;
addresses->user_data_vtable = user_data_vtable;
const size_t addresses_size = sizeof(grpc_lb_address) * num_addresses;
- addresses->addresses = (grpc_lb_address*)gpr_zalloc(addresses_size);
+ addresses->addresses =
+ static_cast<grpc_lb_address*>(gpr_zalloc(addresses_size));
return addresses;
}
@@ -124,14 +125,14 @@ void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses) {
}
static void* lb_addresses_copy(void* addresses) {
- return grpc_lb_addresses_copy((grpc_lb_addresses*)addresses);
+ return grpc_lb_addresses_copy(static_cast<grpc_lb_addresses*>(addresses));
}
static void lb_addresses_destroy(void* addresses) {
- grpc_lb_addresses_destroy((grpc_lb_addresses*)addresses);
+ grpc_lb_addresses_destroy(static_cast<grpc_lb_addresses*>(addresses));
}
static int lb_addresses_cmp(void* addresses1, void* addresses2) {
- return grpc_lb_addresses_cmp((grpc_lb_addresses*)addresses1,
- (grpc_lb_addresses*)addresses2);
+ return grpc_lb_addresses_cmp(static_cast<grpc_lb_addresses*>(addresses1),
+ static_cast<grpc_lb_addresses*>(addresses2));
}
static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
lb_addresses_copy, lb_addresses_destroy, lb_addresses_cmp};
@@ -148,7 +149,7 @@ grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
grpc_channel_args_find(channel_args, GRPC_ARG_LB_ADDRESSES);
if (lb_addresses_arg == nullptr || lb_addresses_arg->type != GRPC_ARG_POINTER)
return nullptr;
- return (grpc_lb_addresses*)lb_addresses_arg->value.pointer.p;
+ return static_cast<grpc_lb_addresses*>(lb_addresses_arg->value.pointer.p);
}
void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {
diff --git a/src/core/ext/filters/client_channel/parse_address.cc b/src/core/ext/filters/client_channel/parse_address.cc
index 4b6905eaa3..473c7542df 100644
--- a/src/core/ext/filters/client_channel/parse_address.cc
+++ b/src/core/ext/filters/client_channel/parse_address.cc
@@ -40,7 +40,8 @@ bool grpc_parse_unix(const grpc_uri* uri,
gpr_log(GPR_ERROR, "Expected 'unix' scheme, got '%s'", uri->scheme);
return false;
}
- struct sockaddr_un* un = (struct sockaddr_un*)resolved_addr->addr;
+ struct sockaddr_un* un =
+ reinterpret_cast<struct sockaddr_un*>(resolved_addr->addr);
const size_t maxlen = sizeof(un->sun_path);
const size_t path_len = strnlen(uri->path, maxlen);
if (path_len == maxlen) return false;
@@ -69,7 +70,7 @@ bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in);
- struct sockaddr_in* in = (struct sockaddr_in*)addr->addr;
+ struct sockaddr_in* in = reinterpret_cast<struct sockaddr_in*>(addr->addr);
in->sin_family = AF_INET;
if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
@@ -85,7 +86,7 @@ bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 port: '%s'", port);
goto done;
}
- in->sin_port = htons((uint16_t)port_num);
+ in->sin_port = htons(static_cast<uint16_t>(port_num));
success = true;
done:
gpr_free(host);
@@ -115,14 +116,14 @@ bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr->addr;
+ struct sockaddr_in6* in6 = reinterpret_cast<struct sockaddr_in6*>(addr->addr);
in6->sin6_family = AF_INET6;
// Handle the RFC6874 syntax for IPv6 zone identifiers.
- char* host_end = (char*)gpr_memrchr(host, '%', strlen(host));
+ char* host_end = static_cast<char*>(gpr_memrchr(host, '%', strlen(host)));
if (host_end != nullptr) {
GPR_ASSERT(host_end >= host);
char host_without_scope[INET6_ADDRSTRLEN];
- size_t host_without_scope_len = (size_t)(host_end - host);
+ size_t host_without_scope_len = static_cast<size_t>(host_end - host);
uint32_t sin6_scope_id = 0;
strncpy(host_without_scope, host, host_without_scope_len);
host_without_scope[host_without_scope_len] = '\0';
@@ -154,7 +155,7 @@ bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv6 port: '%s'", port);
goto done;
}
- in6->sin6_port = htons((uint16_t)port_num);
+ in6->sin6_port = htons(static_cast<uint16_t>(port_num));
success = true;
done:
gpr_free(host);
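
parse_address.cc shows the two flavours side by side: reinterpret_cast to view raw address storage as a sockaddr_in, and static_cast<uint16_t> to narrow the parsed port before htons(). A POSIX-only sketch, with sockaddr_storage standing in for the grpc_resolved_address buffer:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      sockaddr_storage storage;  // stand-in for resolved_addr->addr
      memset(&storage, 0, sizeof(storage));
      // Unrelated pointer types (sockaddr_storage* vs sockaddr_in*), so this
      // is a reinterpret_cast rather than a static_cast.
      sockaddr_in* in = reinterpret_cast<sockaddr_in*>(&storage);
      in->sin_family = AF_INET;
      inet_pton(AF_INET, "10.0.0.1", &in->sin_addr);
      int port_num = 443;  // e.g. parsed from a URI string
      // Narrowing int -> uint16_t is a value conversion: static_cast.
      in->sin_port = htons(static_cast<uint16_t>(port_num));
      char buf[INET_ADDRSTRLEN];
      inet_ntop(AF_INET, &in->sin_addr, buf, sizeof(buf));
      printf("%s:%d\n", buf, ntohs(in->sin_port));
    }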
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.cc b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
index 51778a20cc..b42597e363 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
@@ -34,8 +34,8 @@ typedef struct {
static void grpc_proxy_mapper_list_register(grpc_proxy_mapper_list* list,
bool at_start,
grpc_proxy_mapper* mapper) {
- list->list = (grpc_proxy_mapper**)gpr_realloc(
- list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*));
+ list->list = static_cast<grpc_proxy_mapper**>(gpr_realloc(
+ list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*)));
if (at_start) {
memmove(list->list + 1, list->list,
sizeof(grpc_proxy_mapper*) * list->num_mappers);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 6e03ae447f..0442b1e496 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -312,7 +312,8 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
if (lb_policy_name != nullptr) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- (char*)GRPC_ARG_LB_POLICY_NAME, (char*)lb_policy_name);
+ (char*)GRPC_ARG_LB_POLICY_NAME,
+ const_cast<char*>(lb_policy_name));
}
}
}
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
index 2bf86f82b0..10bc8f6074 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@@ -124,7 +124,8 @@ static void fd_node_shutdown(fd_node* fdn) {
grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set) {
- *ev_driver = (grpc_ares_ev_driver*)gpr_malloc(sizeof(grpc_ares_ev_driver));
+ *ev_driver = static_cast<grpc_ares_ev_driver*>(
+ gpr_malloc(sizeof(grpc_ares_ev_driver)));
int status = ares_init(&(*ev_driver)->channel);
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
if (status != ARES_SUCCESS) {
@@ -195,7 +196,7 @@ static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver* ev_driver,
}
static void on_readable_cb(void* arg, grpc_error* error) {
- fd_node* fdn = (fd_node*)arg;
+ fd_node* fdn = static_cast<fd_node*>(arg);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
@@ -229,7 +230,7 @@ static void on_readable_cb(void* arg, grpc_error* error) {
}
static void on_writable_cb(void* arg, grpc_error* error) {
- fd_node* fdn = (fd_node*)arg;
+ fd_node* fdn = static_cast<fd_node*>(arg);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
@@ -280,7 +281,7 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
if (fdn == nullptr) {
char* fd_name;
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
- fdn = (fd_node*)gpr_malloc(sizeof(fd_node));
+ fdn = static_cast<fd_node*>(gpr_malloc(sizeof(fd_node)));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
fdn->fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 3ec1b6f9fe..82b5545601 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -88,7 +88,7 @@ static uint16_t strhtons(const char* port) {
} else if (strcmp(port, "https") == 0) {
return htons(443);
}
- return htons((unsigned short)atoi(port));
+ return htons(static_cast<unsigned short>(atoi(port)));
}
static void grpc_ares_request_ref(grpc_ares_request* r) {
@@ -110,8 +110,8 @@ static void grpc_ares_request_unref(grpc_ares_request* r) {
static grpc_ares_hostbyname_request* create_hostbyname_request(
grpc_ares_request* parent_request, char* host, uint16_t port,
bool is_balancer) {
- grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)gpr_zalloc(
- sizeof(grpc_ares_hostbyname_request));
+ grpc_ares_hostbyname_request* hr = static_cast<grpc_ares_hostbyname_request*>(
+ gpr_zalloc(sizeof(grpc_ares_hostbyname_request)));
hr->parent_request = parent_request;
hr->host = gpr_strdup(host);
hr->port = port;
@@ -128,7 +128,8 @@ static void destroy_hostbyname_request(grpc_ares_hostbyname_request* hr) {
static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
struct hostent* hostent) {
- grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)arg;
+ grpc_ares_hostbyname_request* hr =
+ static_cast<grpc_ares_hostbyname_request*>(arg);
grpc_ares_request* r = hr->parent_request;
gpr_mu_lock(&r->mu);
if (status == ARES_SUCCESS) {
@@ -144,9 +145,9 @@ static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
for (i = 0; hostent->h_addr_list[i] != nullptr; i++) {
}
(*lb_addresses)->num_addresses += i;
- (*lb_addresses)->addresses = (grpc_lb_address*)gpr_realloc(
- (*lb_addresses)->addresses,
- sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
+ (*lb_addresses)->addresses = static_cast<grpc_lb_address*>(
+ gpr_realloc((*lb_addresses)->addresses,
+ sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses));
for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
switch (hostent->h_addrtype) {
case AF_INET6: {
@@ -155,7 +156,7 @@ static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
memset(&addr, 0, addr_len);
memcpy(&addr.sin6_addr, hostent->h_addr_list[i - prev_naddr],
sizeof(struct in6_addr));
- addr.sin6_family = (sa_family_t)hostent->h_addrtype;
+ addr.sin6_family = static_cast<sa_family_t>(hostent->h_addrtype);
addr.sin6_port = hr->port;
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
@@ -176,7 +177,7 @@ static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
memset(&addr, 0, addr_len);
memcpy(&addr.sin_addr, hostent->h_addr_list[i - prev_naddr],
sizeof(struct in_addr));
- addr.sin_family = (sa_family_t)hostent->h_addrtype;
+ addr.sin_family = static_cast<sa_family_t>(hostent->h_addrtype);
addr.sin_port = hr->port;
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
@@ -211,7 +212,7 @@ static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
static void on_srv_query_done_cb(void* arg, int status, int timeouts,
unsigned char* abuf, int alen) {
- grpc_ares_request* r = (grpc_ares_request*)arg;
+ grpc_ares_request* r = static_cast<grpc_ares_request*>(arg);
grpc_core::ExecCtx exec_ctx;
gpr_log(GPR_DEBUG, "on_query_srv_done_cb");
if (status == ARES_SUCCESS) {
@@ -259,7 +260,7 @@ static void on_txt_done_cb(void* arg, int status, int timeouts,
unsigned char* buf, int len) {
gpr_log(GPR_DEBUG, "on_txt_done_cb");
char* error_msg;
- grpc_ares_request* r = (grpc_ares_request*)arg;
+ grpc_ares_request* r = static_cast<grpc_ares_request*>(arg);
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
struct ares_txt_ext* result = nullptr;
struct ares_txt_ext* reply = nullptr;
@@ -279,13 +280,15 @@ static void on_txt_done_cb(void* arg, int status, int timeouts,
// Found a service config record.
if (result != nullptr) {
size_t service_config_len = result->length - prefix_len;
- *r->service_config_json_out = (char*)gpr_malloc(service_config_len + 1);
+ *r->service_config_json_out =
+ static_cast<char*>(gpr_malloc(service_config_len + 1));
memcpy(*r->service_config_json_out, result->txt + prefix_len,
service_config_len);
for (result = result->next; result != nullptr && !result->record_start;
result = result->next) {
- *r->service_config_json_out = (char*)gpr_realloc(
- *r->service_config_json_out, service_config_len + result->length + 1);
+ *r->service_config_json_out = static_cast<char*>(
+ gpr_realloc(*r->service_config_json_out,
+ service_config_len + result->length + 1));
memcpy(*r->service_config_json_out + service_config_len, result->txt,
result->length);
service_config_len += result->length;
@@ -348,7 +351,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_impl(
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
- r = (grpc_ares_request*)gpr_zalloc(sizeof(grpc_ares_request));
+ r = static_cast<grpc_ares_request*>(gpr_zalloc(sizeof(grpc_ares_request)));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@@ -364,7 +367,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_impl(
grpc_resolved_address addr;
if (grpc_parse_ipv4_hostport(dns_server, &addr, false /* log_errors */)) {
r->dns_server_addr.family = AF_INET;
- struct sockaddr_in* in = (struct sockaddr_in*)addr.addr;
+ struct sockaddr_in* in = reinterpret_cast<struct sockaddr_in*>(addr.addr);
memcpy(&r->dns_server_addr.addr.addr4, &in->sin_addr,
sizeof(struct in_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@@ -372,7 +375,8 @@ static grpc_ares_request* grpc_dns_lookup_ares_impl(
} else if (grpc_parse_ipv6_hostport(dns_server, &addr,
false /* log_errors */)) {
r->dns_server_addr.family = AF_INET6;
- struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr.addr;
+ struct sockaddr_in6* in6 =
+ reinterpret_cast<struct sockaddr_in6*>(addr.addr);
memcpy(&r->dns_server_addr.addr.addr6, &in6->sin6_addr,
sizeof(struct in6_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@@ -487,16 +491,17 @@ typedef struct grpc_resolve_address_ares_request {
static void on_dns_lookup_done_cb(void* arg, grpc_error* error) {
grpc_resolve_address_ares_request* r =
- (grpc_resolve_address_ares_request*)arg;
+ static_cast<grpc_resolve_address_ares_request*>(arg);
grpc_resolved_addresses** resolved_addresses = r->addrs_out;
if (r->lb_addrs == nullptr || r->lb_addrs->num_addresses == 0) {
*resolved_addresses = nullptr;
} else {
- *resolved_addresses =
- (grpc_resolved_addresses*)gpr_zalloc(sizeof(grpc_resolved_addresses));
+ *resolved_addresses = static_cast<grpc_resolved_addresses*>(
+ gpr_zalloc(sizeof(grpc_resolved_addresses)));
(*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
- (*resolved_addresses)->addrs = (grpc_resolved_address*)gpr_zalloc(
- sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs);
+ (*resolved_addresses)->addrs =
+ static_cast<grpc_resolved_address*>(gpr_zalloc(
+ sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs));
for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
memcpy(&(*resolved_addresses)->addrs[i],
@@ -514,8 +519,8 @@ static void grpc_resolve_address_ares_impl(const char* name,
grpc_closure* on_done,
grpc_resolved_addresses** addrs) {
grpc_resolve_address_ares_request* r =
- (grpc_resolve_address_ares_request*)gpr_zalloc(
- sizeof(grpc_resolve_address_ares_request));
+ static_cast<grpc_resolve_address_ares_request*>(
+ gpr_zalloc(sizeof(grpc_resolve_address_ares_request)));
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,
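
Several hunks in this file wrap gpr_realloc in a static_cast while growing an array in place (the lb_address list and the service-config buffer). The same shape with plain realloc — token_count and grow_tokens are illustrative names, not gRPC symbols, and allocation-failure handling is omitted:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct token_count {
      const char* token;
      int count;
    };

    // Append one entry, reallocating the array to fit.
    static token_count* grow_tokens(token_count* tokens, size_t* num,
                                    const char* token) {
      // realloc returns void*; in C++ the conversion back to token_count*
      // must be spelled out, and static_cast is the narrowest cast that
      // does it.
      tokens = static_cast<token_count*>(
          realloc(tokens, (*num + 1) * sizeof(token_count)));
      tokens[*num].token = token;
      tokens[*num].count = 1;
      ++*num;
      return tokens;
    }

    int main() {
      token_count* tokens = nullptr;
      size_t num = 0;
      tokens = grow_tokens(tokens, &num, "load_shedding");
      tokens = grow_tokens(tokens, &num, "rate_limiting");
      for (size_t i = 0; i < num; ++i) {
        printf("%s: %d\n", tokens[i].token, tokens[i].count);
      }
      free(tokens);
    }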
diff --git a/src/core/ext/filters/client_channel/retry_throttle.cc b/src/core/ext/filters/client_channel/retry_throttle.cc
index 0d38feb242..a98e27860a 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.cc
+++ b/src/core/ext/filters/client_channel/retry_throttle.cc
@@ -59,9 +59,10 @@ bool grpc_server_retry_throttle_data_record_failure(
// First, check if we are stale and need to be replaced.
get_replacement_throttle_data_if_needed(&throttle_data);
// We decrement milli_tokens by 1000 (1 token) for each failure.
- const int new_value = (int)gpr_atm_no_barrier_clamped_add(
- &throttle_data->milli_tokens, (gpr_atm)-1000, (gpr_atm)0,
- (gpr_atm)throttle_data->max_milli_tokens);
+ const int new_value = static_cast<int>(gpr_atm_no_barrier_clamped_add(
+ &throttle_data->milli_tokens, static_cast<gpr_atm>(-1000),
+ static_cast<gpr_atm>(0),
+ static_cast<gpr_atm>(throttle_data->max_milli_tokens)));
// Retries are allowed as long as the new value is above the threshold
// (max_milli_tokens / 2).
return new_value > throttle_data->max_milli_tokens / 2;
@@ -73,8 +74,10 @@ void grpc_server_retry_throttle_data_record_success(
get_replacement_throttle_data_if_needed(&throttle_data);
// We increment milli_tokens by milli_token_ratio for each success.
gpr_atm_no_barrier_clamped_add(
- &throttle_data->milli_tokens, (gpr_atm)throttle_data->milli_token_ratio,
- (gpr_atm)0, (gpr_atm)throttle_data->max_milli_tokens);
+ &throttle_data->milli_tokens,
+ static_cast<gpr_atm>(throttle_data->milli_token_ratio),
+ static_cast<gpr_atm>(0),
+ static_cast<gpr_atm>(throttle_data->max_milli_tokens));
}
grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_ref(
@@ -100,7 +103,8 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
int max_milli_tokens, int milli_token_ratio,
grpc_server_retry_throttle_data* old_throttle_data) {
grpc_server_retry_throttle_data* throttle_data =
- (grpc_server_retry_throttle_data*)gpr_malloc(sizeof(*throttle_data));
+ static_cast<grpc_server_retry_throttle_data*>(
+ gpr_malloc(sizeof(*throttle_data)));
memset(throttle_data, 0, sizeof(*throttle_data));
gpr_ref_init(&throttle_data->refs, 1);
throttle_data->max_milli_tokens = max_milli_tokens;
@@ -112,9 +116,9 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
// we will start out doing the same thing on the new one.
if (old_throttle_data != nullptr) {
double token_fraction =
- (int)gpr_atm_acq_load(&old_throttle_data->milli_tokens) /
- (double)old_throttle_data->max_milli_tokens;
- initial_milli_tokens = (int)(token_fraction * max_milli_tokens);
+ static_cast<int>(gpr_atm_acq_load(&old_throttle_data->milli_tokens)) /
+ static_cast<double>(old_throttle_data->max_milli_tokens);
+ initial_milli_tokens = static_cast<int>(token_fraction * max_milli_tokens);
}
gpr_atm_rel_store(&throttle_data->milli_tokens,
(gpr_atm)initial_milli_tokens);
@@ -132,22 +136,22 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
//
static void* copy_server_name(void* key, void* unused) {
- return gpr_strdup((const char*)key);
+ return gpr_strdup(static_cast<const char*>(key));
}
static long compare_server_name(void* key1, void* key2, void* unused) {
- return strcmp((const char*)key1, (const char*)key2);
+ return strcmp(static_cast<const char*>(key1), static_cast<const char*>(key2));
}
static void destroy_server_retry_throttle_data(void* value, void* unused) {
grpc_server_retry_throttle_data* throttle_data =
- (grpc_server_retry_throttle_data*)value;
+ static_cast<grpc_server_retry_throttle_data*>(value);
grpc_server_retry_throttle_data_unref(throttle_data);
}
static void* copy_server_retry_throttle_data(void* value, void* unused) {
grpc_server_retry_throttle_data* throttle_data =
- (grpc_server_retry_throttle_data*)value;
+ static_cast<grpc_server_retry_throttle_data*>(value);
return grpc_server_retry_throttle_data_ref(throttle_data);
}
@@ -178,13 +182,14 @@ grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
const char* server_name, int max_milli_tokens, int milli_token_ratio) {
gpr_mu_lock(&g_mu);
grpc_server_retry_throttle_data* throttle_data =
- (grpc_server_retry_throttle_data*)grpc_avl_get(g_avl, (char*)server_name,
- nullptr);
+ static_cast<grpc_server_retry_throttle_data*>(
+ grpc_avl_get(g_avl, const_cast<char*>(server_name), nullptr));
if (throttle_data == nullptr) {
// Entry not found. Create a new one.
throttle_data = grpc_server_retry_throttle_data_create(
max_milli_tokens, milli_token_ratio, nullptr);
- g_avl = grpc_avl_add(g_avl, (char*)server_name, throttle_data, nullptr);
+ g_avl = grpc_avl_add(g_avl, const_cast<char*>(server_name), throttle_data,
+ nullptr);
} else {
if (throttle_data->max_milli_tokens != max_milli_tokens ||
throttle_data->milli_token_ratio != milli_token_ratio) {
@@ -192,7 +197,8 @@ grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
// the original one.
throttle_data = grpc_server_retry_throttle_data_create(
max_milli_tokens, milli_token_ratio, throttle_data);
- g_avl = grpc_avl_add(g_avl, (char*)server_name, throttle_data, nullptr);
+ g_avl = grpc_avl_add(g_avl, const_cast<char*>(server_name), throttle_data,
+ nullptr);
} else {
// Entry found. Increase refcount.
grpc_server_retry_throttle_data_ref(throttle_data);
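
The retry_throttle.cc map code is the one place that needs const_cast: the AVL interface takes a non-const key pointer even though the lookup never writes through it. A minimal sketch of that situation, using a hypothetical lookup_key() rather than the grpc_avl API:

    #include <cstdio>
    #include <cstring>

    // Legacy-style interface: takes char* although it only reads the key.
    static int lookup_key(char* key) { return static_cast<int>(strlen(key)); }

    int main() {
      const char* server_name = "lb.example.com";
      // const_cast states exactly what is being dropped (constness) and
      // nothing else; a C-style cast here could silently change the pointer
      // type as well.
      int n = lookup_key(const_cast<char*>(server_name));
      printf("key length: %d\n", n);
    }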
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index 6d4c4f931e..179e3f27ac 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -159,7 +159,7 @@ static void on_subchannel_connected(void* subchannel, grpc_error* error);
*/
static void connection_destroy(void* arg, grpc_error* error) {
- grpc_channel_stack* stk = (grpc_channel_stack*)arg;
+ grpc_channel_stack* stk = static_cast<grpc_channel_stack*>(arg);
grpc_channel_stack_destroy(stk);
gpr_free(stk);
}
@@ -169,7 +169,7 @@ static void connection_destroy(void* arg, grpc_error* error) {
*/
static void subchannel_destroy(void* arg, grpc_error* error) {
- grpc_subchannel* c = (grpc_subchannel*)arg;
+ grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
gpr_free((void*)c->filters);
grpc_channel_args_destroy(c->args);
grpc_connectivity_state_destroy(&c->state_tracker);
@@ -241,8 +241,9 @@ static void disconnect(grpc_subchannel* c) {
void grpc_subchannel_unref(grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
// add a weak ref and subtract a strong ref (atomically)
- old_refs = ref_mutate(c, (gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS),
- 1 REF_MUTATE_PURPOSE("STRONG_UNREF"));
+ old_refs = ref_mutate(
+ c, static_cast<gpr_atm>(1) - static_cast<gpr_atm>(1 << INTERNAL_REF_BITS),
+ 1 REF_MUTATE_PURPOSE("STRONG_UNREF"));
if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) {
disconnect(c);
}
@@ -252,7 +253,8 @@ void grpc_subchannel_unref(grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
void grpc_subchannel_weak_unref(
grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
- old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
+ old_refs = ref_mutate(c, -static_cast<gpr_atm>(1),
+ 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_CREATE(subchannel_destroy, c, grpc_schedule_on_exec_ctx),
@@ -318,15 +320,15 @@ grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
}
GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED();
- c = (grpc_subchannel*)gpr_zalloc(sizeof(*c));
+ c = static_cast<grpc_subchannel*>(gpr_zalloc(sizeof(*c)));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
if (c->num_filters > 0) {
- c->filters = (const grpc_channel_filter**)gpr_malloc(
- sizeof(grpc_channel_filter*) * c->num_filters);
+ c->filters = static_cast<const grpc_channel_filter**>(
+ gpr_malloc(sizeof(grpc_channel_filter*) * c->num_filters));
memcpy((void*)c->filters, args->filters,
sizeof(grpc_channel_filter*) * c->num_filters);
} else {
@@ -334,7 +336,7 @@ grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
}
c->pollset_set = grpc_pollset_set_create();
grpc_resolved_address* addr =
- (grpc_resolved_address*)gpr_malloc(sizeof(*addr));
+ static_cast<grpc_resolved_address*>(gpr_malloc(sizeof(*addr)));
grpc_get_subchannel_address_arg(args->args, addr);
grpc_resolved_address* new_address = nullptr;
grpc_channel_args* new_args = nullptr;
@@ -391,7 +393,7 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel* c,
}
static void on_external_state_watcher_done(void* arg, grpc_error* error) {
- external_state_watcher* w = (external_state_watcher*)arg;
+ external_state_watcher* w = static_cast<external_state_watcher*>(arg);
grpc_closure* follow_up = w->notify;
if (w->pollset_set != nullptr) {
grpc_pollset_set_del_pollset_set(w->subchannel->pollset_set,
@@ -407,7 +409,7 @@ static void on_external_state_watcher_done(void* arg, grpc_error* error) {
}
static void on_alarm(void* arg, grpc_error* error) {
- grpc_subchannel* c = (grpc_subchannel*)arg;
+ grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
@@ -486,7 +488,7 @@ void grpc_subchannel_notify_on_state_change(
}
gpr_mu_unlock(&c->mu);
} else {
- w = (external_state_watcher*)gpr_malloc(sizeof(*w));
+ w = static_cast<external_state_watcher*>(gpr_malloc(sizeof(*w)));
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
@@ -509,7 +511,7 @@ void grpc_subchannel_notify_on_state_change(
static void on_connected_subchannel_connectivity_changed(void* p,
grpc_error* error) {
- state_watcher* connected_subchannel_watcher = (state_watcher*)p;
+ state_watcher* connected_subchannel_watcher = static_cast<state_watcher*>(p);
grpc_subchannel* c = connected_subchannel_watcher->subchannel;
gpr_mu* mu = &c->mu;
@@ -570,7 +572,8 @@ static bool publish_transport_locked(grpc_subchannel* c) {
}
grpc_channel_stack* stk;
grpc_error* error = grpc_channel_stack_builder_finish(
- builder, 0, 1, connection_destroy, nullptr, (void**)&stk);
+ builder, 0, 1, connection_destroy, nullptr,
+ reinterpret_cast<void**>(&stk));
if (error != GRPC_ERROR_NONE) {
grpc_transport_destroy(c->connecting_result.transport);
gpr_log(GPR_ERROR, "error initializing subchannel stack: %s",
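reinterpret_cast<void**>(&stk) appears above because a grpc_channel_stack** converts to void** neither implicitly nor via static_cast. A minimal sketch of the same out-parameter pattern, with a hypothetical finish() standing in for grpc_channel_stack_builder_finish:

#include <cassert>
#include <cstdlib>

struct stack { int element_count; };

// Hypothetical builder-style finish(): it writes the allocated object through
// a void** so the builder can stay type-agnostic, as in the hunk above.
static void finish(void** result) {
  stack* s = static_cast<stack*>(std::malloc(sizeof(stack)));
  s->element_count = 3;
  *result = s;
}

int main() {
  stack* stk = nullptr;
  // A stack** is not a void**, so the out-parameter needs reinterpret_cast;
  // this relies on stack* and void* sharing a representation, exactly as the
  // original (void**)&stk did.
  finish(reinterpret_cast<void**>(&stk));
  assert(stk != nullptr && stk->element_count == 3);
  std::free(stk);
  return 0;
}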
@@ -581,8 +584,8 @@ static bool publish_transport_locked(grpc_subchannel* c) {
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
- state_watcher* connected_subchannel_watcher =
- (state_watcher*)gpr_zalloc(sizeof(*connected_subchannel_watcher));
+ state_watcher* connected_subchannel_watcher = static_cast<state_watcher*>(
+ gpr_zalloc(sizeof(*connected_subchannel_watcher)));
connected_subchannel_watcher->subchannel = c;
connected_subchannel_watcher->connectivity_state = GRPC_CHANNEL_READY;
GRPC_CLOSURE_INIT(&connected_subchannel_watcher->closure,
@@ -617,7 +620,7 @@ static bool publish_transport_locked(grpc_subchannel* c) {
}
static void on_subchannel_connected(void* arg, grpc_error* error) {
- grpc_subchannel* c = (grpc_subchannel*)arg;
+ grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
grpc_channel_args* delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "on_subchannel_connected");
@@ -653,7 +656,7 @@ static void on_subchannel_connected(void* arg, grpc_error* error) {
static void subchannel_call_destroy(void* call, grpc_error* error) {
GPR_TIMER_SCOPE("grpc_subchannel_call_unref.destroy", 0);
- grpc_subchannel_call* c = (grpc_subchannel_call*)call;
+ grpc_subchannel_call* c = static_cast<grpc_subchannel_call*>(call);
GPR_ASSERT(c->schedule_closure_after_destroy != nullptr);
grpc_core::ConnectedSubchannel* connection = c->connection;
grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
@@ -770,9 +773,9 @@ void ConnectedSubchannel::Ping(grpc_closure* on_initiate,
grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
grpc_subchannel_call** call) {
- *call = (grpc_subchannel_call*)gpr_arena_alloc(
+ *call = static_cast<grpc_subchannel_call*>(gpr_arena_alloc(
args.arena,
- sizeof(grpc_subchannel_call) + channel_stack_->call_stack_size);
+ sizeof(grpc_subchannel_call) + channel_stack_->call_stack_size));
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
RefCountedPtr<ConnectedSubchannel> connection =
Ref(DEBUG_LOCATION, "subchannel_call");
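The gpr_arena_alloc call above sizes one block for the subchannel call header plus its call stack, and SUBCHANNEL_CALL_TO_CALL_STACK presumably locates the stack by stepping past the header. A minimal sketch of that single-allocation layout with the casts spelled out; the struct names below are illustrative, not the real gRPC types:

#include <cassert>
#include <cstddef>
#include <cstdlib>

// Illustrative stand-ins, not the real gRPC structs.
struct call_header { int refs; };
struct call_stack  { int element_count; };

// One zeroed block holds the header followed by the call stack, in the
// spirit of the gpr_arena_alloc call above.
static call_header* create_call(std::size_t stack_size) {
  void* mem = std::calloc(1, sizeof(call_header) + stack_size);
  return static_cast<call_header*>(mem);  // was a (grpc_subchannel_call*) C cast
}

// The stack lives immediately after the header, as SUBCHANNEL_CALL_TO_CALL_STACK
// presumably arranges.
static call_stack* stack_of(call_header* c) {
  return reinterpret_cast<call_stack*>(c + 1);
}

int main() {
  call_header* c = create_call(sizeof(call_stack));
  stack_of(c)->element_count = 5;
  assert(stack_of(c)->element_count == 5);
  std::free(c);
  return 0;
}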
diff --git a/src/core/ext/filters/client_channel/subchannel_index.cc b/src/core/ext/filters/client_channel/subchannel_index.cc
index 69dbdccc18..d1dc5ee970 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.cc
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -45,13 +45,14 @@ static bool g_force_creation = false;
static grpc_subchannel_key* create_key(
const grpc_subchannel_args* args,
grpc_channel_args* (*copy_channel_args)(const grpc_channel_args* args)) {
- grpc_subchannel_key* k = (grpc_subchannel_key*)gpr_malloc(sizeof(*k));
+ grpc_subchannel_key* k =
+ static_cast<grpc_subchannel_key*>(gpr_malloc(sizeof(*k)));
k->args.filter_count = args->filter_count;
if (k->args.filter_count > 0) {
- k->args.filters = (const grpc_channel_filter**)gpr_malloc(
- sizeof(*k->args.filters) * k->args.filter_count);
- memcpy((grpc_channel_filter*)k->args.filters, args->filters,
- sizeof(*k->args.filters) * k->args.filter_count);
+ k->args.filters = static_cast<const grpc_channel_filter**>(
+ gpr_malloc(sizeof(*k->args.filters) * k->args.filter_count));
+ memcpy(reinterpret_cast<grpc_channel_filter*>(k->args.filters),
+ args->filters, sizeof(*k->args.filters) * k->args.filter_count);
} else {
k->args.filters = nullptr;
}
@@ -82,22 +83,22 @@ int grpc_subchannel_key_compare(const grpc_subchannel_key* a,
}
void grpc_subchannel_key_destroy(grpc_subchannel_key* k) {
- gpr_free((grpc_channel_args*)k->args.filters);
- grpc_channel_args_destroy((grpc_channel_args*)k->args.args);
+ gpr_free(reinterpret_cast<grpc_channel_args*>(k->args.filters));
+ grpc_channel_args_destroy(const_cast<grpc_channel_args*>(k->args.args));
gpr_free(k);
}
static void sck_avl_destroy(void* p, void* user_data) {
- grpc_subchannel_key_destroy((grpc_subchannel_key*)p);
+ grpc_subchannel_key_destroy(static_cast<grpc_subchannel_key*>(p));
}
static void* sck_avl_copy(void* p, void* unused) {
- return subchannel_key_copy((grpc_subchannel_key*)p);
+ return subchannel_key_copy(static_cast<grpc_subchannel_key*>(p));
}
static long sck_avl_compare(void* a, void* b, void* unused) {
- return grpc_subchannel_key_compare((grpc_subchannel_key*)a,
- (grpc_subchannel_key*)b);
+ return grpc_subchannel_key_compare(static_cast<grpc_subchannel_key*>(a),
+ static_cast<grpc_subchannel_key*>(b));
}
static void scv_avl_destroy(void* p, void* user_data) {
@@ -170,7 +171,8 @@ grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
- c = (grpc_subchannel*)grpc_avl_get(index, key, grpc_core::ExecCtx::Get());
+ c = static_cast<grpc_subchannel*>(
+ grpc_avl_get(index, key, grpc_core::ExecCtx::Get()));
if (c != nullptr) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@@ -220,8 +222,8 @@ void grpc_subchannel_index_unregister(grpc_subchannel_key* key,
// Check to see if this key still refers to the previously
// registered subchannel
- grpc_subchannel* c =
- (grpc_subchannel*)grpc_avl_get(index, key, grpc_core::ExecCtx::Get());
+ grpc_subchannel* c = static_cast<grpc_subchannel*>(
+ grpc_avl_get(index, key, grpc_core::ExecCtx::Get()));
if (c != constructed) {
grpc_avl_unref(index, grpc_core::ExecCtx::Get());
break;
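The sck_avl_* and scv_avl_* callbacks take untyped void* nodes because the AVL container is C-style, so each thin wrapper recovers the concrete type with static_cast before delegating. A minimal sketch of that wrapper pattern, with a stand-in key type rather than grpc_subchannel_key:

#include <cstring>

// Stand-in key type; the real one is grpc_subchannel_key.
struct key { const char* name; };

static int key_compare(const key* a, const key* b) {
  return std::strcmp(a->name, b->name);
}

// Callback signatures dictated by the C-style container: everything is void*,
// so each wrapper restores the concrete type with static_cast and delegates.
static long avl_compare(void* a, void* b, void* /*unused*/) {
  return key_compare(static_cast<key*>(a), static_cast<key*>(b));
}

static void avl_destroy(void* p, void* /*user_data*/) {
  delete static_cast<key*>(p);
}

int main() {
  key* a = new key{"one"};
  key* b = new key{"two"};
  long cmp = avl_compare(a, b, nullptr);
  avl_destroy(a, nullptr);
  avl_destroy(b, nullptr);
  return cmp < 0 ? 0 : 1;
}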
diff --git a/src/core/ext/filters/client_channel/uri_parser.cc b/src/core/ext/filters/client_channel/uri_parser.cc
index c5f2d6822c..cd07a6fbf5 100644
--- a/src/core/ext/filters/client_channel/uri_parser.cc
+++ b/src/core/ext/filters/client_channel/uri_parser.cc
@@ -45,7 +45,7 @@ static grpc_uri* bad_uri(const char* uri_text, size_t pos, const char* section,
gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
gpr_free(line_prefix);
- line_prefix = (char*)gpr_malloc(pfx_len + 1);
+ line_prefix = static_cast<char*>(gpr_malloc(pfx_len + 1));
memset(line_prefix, ' ', pfx_len);
line_prefix[pfx_len] = 0;
gpr_log(GPR_ERROR, "%s^ here", line_prefix);
@@ -159,7 +159,7 @@ static void parse_query_parts(grpc_uri* uri) {
gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
&uri->num_query_parts);
uri->query_parts_values =
- (char**)gpr_malloc(uri->num_query_parts * sizeof(char**));
+ static_cast<char**>(gpr_malloc(uri->num_query_parts * sizeof(char**)));
for (size_t i = 0; i < uri->num_query_parts; i++) {
char** query_param_parts;
size_t num_query_param_parts;
@@ -271,7 +271,7 @@ grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors) {
fragment_end = i;
}
- uri = (grpc_uri*)gpr_zalloc(sizeof(*uri));
+ uri = static_cast<grpc_uri*>(gpr_zalloc(sizeof(*uri)));
uri->scheme = decode_and_copy_component(uri_text, scheme_begin, scheme_end);
uri->authority =
decode_and_copy_component(uri_text, authority_begin, authority_end);
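Most sites in this diff follow the same mechanical rule: a C cast on the result of gpr_malloc/gpr_zalloc becomes a static_cast from void*, which the compiler still checks, while reinterpret_cast and const_cast are reserved for the few places that genuinely reinterpret bits or shed qualifiers. A minimal sketch, assuming gpr_zalloc behaves like calloc(1, n):

#include <cstdlib>

struct uri { char* scheme; char* authority; };

int main() {
  // Before: uri* u = (uri*)calloc(1, sizeof(*u));
  uri* u = static_cast<uri*>(std::calloc(1, sizeof(*u)));
  if (u == nullptr) return 1;
  // static_cast still rejects unrelated pointer conversions:
  // static_cast<int*>(u) would not compile, while (int*)u silently would.
  std::free(u);
  return 0;
}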