author     Noah Eisen <ncteisen@google.com>   2018-02-09 09:16:55 -0800
committer  Noah Eisen <ncteisen@google.com>   2018-02-09 09:16:55 -0800
commit     be82e64b3debcdb1d9ec6a149fc85af0d46bfb7e (patch)
tree       cc5e1234073eb250a2c319b5a4db2919fce060ea /src/core/lib/iomgr
parent     194436342137924b4fb7429bede037a4b5ec7edb (diff)
Autofix c casts to c++ casts
Diffstat (limited to 'src/core/lib/iomgr')
-rw-r--r--  src/core/lib/iomgr/call_combiner.cc                    | 14
-rw-r--r--  src/core/lib/iomgr/closure.h                           |  4
-rw-r--r--  src/core/lib/iomgr/combiner.cc                         |  8
-rw-r--r--  src/core/lib/iomgr/error.cc                            | 66
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.cc                  | 20
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.cc                 | 34
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.cc                | 22
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.cc                    | 52
-rw-r--r--  src/core/lib/iomgr/ev_posix.cc                         |  6
-rw-r--r--  src/core/lib/iomgr/exec_ctx.cc                         | 12
-rw-r--r--  src/core/lib/iomgr/executor.cc                         | 18
-rw-r--r--  src/core/lib/iomgr/gethostname_host_name_max.cc        |  2
-rw-r--r--  src/core/lib/iomgr/is_epollexclusive_available.cc      |  2
-rw-r--r--  src/core/lib/iomgr/load_file.cc                        |  4
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.cc            | 10
-rw-r--r--  src/core/lib/iomgr/resource_quota.cc                   | 64
-rw-r--r--  src/core/lib/iomgr/sockaddr_utils.cc                   | 44
-rw-r--r--  src/core/lib/iomgr/socket_factory_posix.cc             |  8
-rw-r--r--  src/core/lib/iomgr/socket_mutator.cc                   |  8
-rw-r--r--  src/core/lib/iomgr/socket_utils_common_posix.cc        |  6
-rw-r--r--  src/core/lib/iomgr/socket_utils_linux.cc               |  4
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.cc                 | 10
-rw-r--r--  src/core/lib/iomgr/tcp_posix.cc                        | 74
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.cc                 | 18
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix_common.cc    | 10
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc   |  4
-rw-r--r--  src/core/lib/iomgr/timer_generic.cc                    | 20
-rw-r--r--  src/core/lib/iomgr/timer_heap.cc                       | 12
-rw-r--r--  src/core/lib/iomgr/timer_manager.cc                    |  4
-rw-r--r--  src/core/lib/iomgr/udp_server.cc                       | 30
-rw-r--r--  src/core/lib/iomgr/unix_sockets_posix.cc               | 14
-rw-r--r--  src/core/lib/iomgr/wakeup_fd_cv.cc                     |  6
32 files changed, 305 insertions, 305 deletions
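The change is mechanical: each C-style cast in these files is rewritten as the narrowest equivalent C++ named cast. static_cast covers value conversions (size_t, int, double, uint8_t, enums) and void*-to-typed-pointer conversions out of gpr_malloc/gpr_zalloc/gpr_realloc and closure callback arguments; reinterpret_cast is used where two unrelated pointer types alias the same storage, such as a grpc_closure* pushed as a gpr_mpscq_node* or error-arena slots read back as grpc_slice* / grpc_linked_error*. A minimal compilable sketch of the patterns follows; the Node and Closure types are hypothetical stand-ins for gpr_mpscq_node and grpc_closure and are not code from this commit.

// Hypothetical illustration of the cast rewrite; Node/Closure stand in for
// gpr_mpscq_node/grpc_closure and are not part of this commit.
#include <cstdint>
#include <cstdlib>

struct Node { Node* next; };
struct Closure { Node node; void (*cb)(void* arg); void* arg; };

int main() {
  // Before: Closure* c = (Closure*)malloc(sizeof(Closure));
  // void* -> typed pointer: static_cast is enough.
  Closure* c = static_cast<Closure*>(malloc(sizeof(Closure)));

  // Before: Node* n = (Node*)c;
  // Unrelated pointer types sharing storage: reinterpret_cast makes it explicit.
  Node* n = reinterpret_cast<Node*>(c);

  // Before: intptr_t tagged = (intptr_t)c | (intptr_t)1;
  // Pointer <-> integer uses reinterpret_cast; plain value conversion uses static_cast.
  intptr_t tagged = reinterpret_cast<intptr_t>(n) | static_cast<intptr_t>(1);

  // Mask the tag bit back off (malloc alignment keeps bit 0 clear) and release.
  free(reinterpret_cast<void*>(tagged & ~static_cast<intptr_t>(1)));
  return 0;
}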
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
index 0e4c9cb3b3..f180594545 100644
--- a/src/core/lib/iomgr/call_combiner.cc
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -28,13 +28,13 @@ grpc_core::TraceFlag grpc_call_combiner_trace(false, "call_combiner");
static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) {
if (cancel_state & 1) {
- return (grpc_error*)(cancel_state & ~(gpr_atm)1);
+ return (grpc_error*)(cancel_state & ~static_cast<gpr_atm>(1));
}
return GRPC_ERROR_NONE;
}
static gpr_atm encode_cancel_state_error(grpc_error* error) {
- return (gpr_atm)1 | (gpr_atm)error;
+ return static_cast<gpr_atm>(1) | (gpr_atm)error;
}
void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
@@ -69,7 +69,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
grpc_error_string(error));
}
size_t prev_size =
- (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1);
+ static_cast<size_t>(gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1));
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1);
@@ -90,7 +90,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
}
// Queue was not empty, so add closure to queue.
closure->error_data.error = error;
- gpr_mpscq_push(&call_combiner->queue, (gpr_mpscq_node*)closure);
+ gpr_mpscq_push(&call_combiner->queue, reinterpret_cast<gpr_mpscq_node*>(closure));
}
}
@@ -103,7 +103,7 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
call_combiner DEBUG_FMT_ARGS, reason);
}
size_t prev_size =
- (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1);
+ static_cast<size_t>(gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1));
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size - 1);
@@ -115,8 +115,8 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
gpr_log(GPR_DEBUG, " checking queue");
}
bool empty;
- grpc_closure* closure = (grpc_closure*)gpr_mpscq_pop_and_check_end(
- &call_combiner->queue, &empty);
+ grpc_closure* closure = reinterpret_cast<grpc_closure*>(gpr_mpscq_pop_and_check_end(
+ &call_combiner->queue, &empty));
if (closure == nullptr) {
// This can happen either due to a race condition within the mpscq
// code or because of a race with grpc_call_combiner_start().
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 2bf2578c63..64527d6bb1 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -143,7 +143,7 @@ typedef struct {
} wrapped_closure;
inline void closure_wrapper(void* arg, grpc_error* error) {
- wrapped_closure* wc = (wrapped_closure*)arg;
+ wrapped_closure* wc = static_cast<wrapped_closure*>(arg);
grpc_iomgr_cb_func cb = wc->cb;
void* cb_arg = wc->cb_arg;
gpr_free(wc);
@@ -161,7 +161,7 @@ inline grpc_closure* grpc_closure_create(grpc_iomgr_cb_func cb, void* cb_arg,
grpc_closure_scheduler* scheduler) {
#endif
closure_impl::wrapped_closure* wc =
- (closure_impl::wrapped_closure*)gpr_malloc(sizeof(*wc));
+ static_cast<closure_impl::wrapped_closure*>(gpr_malloc(sizeof(*wc)));
wc->cb = cb;
wc->cb_arg = cb_arg;
#ifndef NDEBUG
diff --git a/src/core/lib/iomgr/combiner.cc b/src/core/lib/iomgr/combiner.cc
index 2965d807c1..6ff6fccf03 100644
--- a/src/core/lib/iomgr/combiner.cc
+++ b/src/core/lib/iomgr/combiner.cc
@@ -72,7 +72,7 @@ static const grpc_closure_scheduler_vtable finally_scheduler = {
static void offload(void* arg, grpc_error* error);
grpc_combiner* grpc_combiner_create(void) {
- grpc_combiner* lock = (grpc_combiner*)gpr_zalloc(sizeof(*lock));
+ grpc_combiner* lock = static_cast<grpc_combiner*>(gpr_zalloc(sizeof(*lock)));
gpr_ref_init(&lock->refs, 1);
lock->scheduler.vtable = &scheduler;
lock->finally_scheduler.vtable = &finally_scheduler;
@@ -194,7 +194,7 @@ static void move_next() {
}
static void offload(void* arg, grpc_error* error) {
- grpc_combiner* lock = (grpc_combiner*)arg;
+ grpc_combiner* lock = static_cast<grpc_combiner*>(arg);
push_last_on_exec_ctx(lock);
}
@@ -249,7 +249,7 @@ bool grpc_combiner_continue_exec_ctx() {
return true;
}
GPR_TIMER_SCOPE("combiner.exec1", 0);
- grpc_closure* cl = (grpc_closure*)n;
+ grpc_closure* cl = reinterpret_cast<grpc_closure*>(n);
grpc_error* cl_err = cl->error_data.error;
#ifndef NDEBUG
cl->scheduled = false;
@@ -342,7 +342,7 @@ static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
}
static void enqueue_finally(void* closure, grpc_error* error) {
- combiner_finally_exec((grpc_closure*)closure, GRPC_ERROR_REF(error));
+ combiner_finally_exec(static_cast<grpc_closure*>(closure), GRPC_ERROR_REF(error));
}
grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* combiner) {
diff --git a/src/core/lib/iomgr/error.cc b/src/core/lib/iomgr/error.cc
index d5b5013269..0ba0617f75 100644
--- a/src/core/lib/iomgr/error.cc
+++ b/src/core/lib/iomgr/error.cc
@@ -148,7 +148,7 @@ grpc_error* grpc_error_ref(grpc_error* err) {
static void unref_errs(grpc_error* err) {
uint8_t slot = err->first_err;
while (slot != UINT8_MAX) {
- grpc_linked_error* lerr = (grpc_linked_error*)(err->arena + slot);
+ grpc_linked_error* lerr = reinterpret_cast<grpc_linked_error*>(err->arena + slot);
GRPC_ERROR_UNREF(lerr->err);
GPR_ASSERT(err->last_err == slot ? lerr->next == UINT8_MAX
: lerr->next != UINT8_MAX);
@@ -162,7 +162,7 @@ static void unref_strs(grpc_error* err) {
for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) {
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
- unref_slice(*(grpc_slice*)(err->arena + slot));
+ unref_slice(*reinterpret_cast<grpc_slice*>(err->arena + slot));
}
}
}
@@ -198,18 +198,18 @@ void grpc_error_unref(grpc_error* err) {
static uint8_t get_placement(grpc_error** err, size_t size) {
GPR_ASSERT(*err);
- uint8_t slots = (uint8_t)(size / sizeof(intptr_t));
+ uint8_t slots = static_cast<uint8_t>(size / sizeof(intptr_t));
if ((*err)->arena_size + slots > (*err)->arena_capacity) {
(*err)->arena_capacity =
- (uint8_t)GPR_MIN(UINT8_MAX - 1, (3 * (*err)->arena_capacity / 2));
+ static_cast<uint8_t>GPR_MIN(UINT8_MAX - 1, (3 * (*err)->arena_capacity / 2));
if ((*err)->arena_size + slots > (*err)->arena_capacity) {
return UINT8_MAX;
}
#ifndef NDEBUG
grpc_error* orig = *err;
#endif
- *err = (grpc_error*)gpr_realloc(
- *err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
+ *err = static_cast<grpc_error*>(gpr_realloc(
+ *err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t)));
#ifndef NDEBUG
if (grpc_trace_error_refcount.enabled()) {
if (*err != orig) {
@@ -219,7 +219,7 @@ static uint8_t get_placement(grpc_error** err, size_t size) {
#endif
}
uint8_t placement = (*err)->arena_size;
- (*err)->arena_size = (uint8_t)((*err)->arena_size + slots);
+ (*err)->arena_size = static_cast<uint8_t>((*err)->arena_size + slots);
return placement;
}
@@ -251,7 +251,7 @@ static void internal_set_str(grpc_error** err, grpc_error_strs which,
return;
}
} else {
- unref_slice(*(grpc_slice*)((*err)->arena + slot));
+ unref_slice(*reinterpret_cast<grpc_slice*>((*err)->arena + slot));
}
(*err)->strs[which] = slot;
memcpy((*err)->arena + slot, &value, sizeof(value));
@@ -291,7 +291,7 @@ static void internal_add_error(grpc_error** err, grpc_error* new_err) {
} else {
GPR_ASSERT((*err)->last_err != UINT8_MAX);
grpc_linked_error* old_last =
- (grpc_linked_error*)((*err)->arena + (*err)->last_err);
+ reinterpret_cast<grpc_linked_error*>((*err)->arena + (*err)->last_err);
old_last->next = slot;
(*err)->last_err = slot;
}
@@ -315,11 +315,11 @@ grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc,
grpc_error** referencing,
size_t num_referencing) {
GPR_TIMER_SCOPE("grpc_error_create", 0);
- uint8_t initial_arena_capacity = (uint8_t)(
+ uint8_t initial_arena_capacity = static_cast<uint8_t>(
DEFAULT_ERROR_CAPACITY +
- (uint8_t)(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY);
- grpc_error* err = (grpc_error*)gpr_malloc(
- sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
+ static_cast<uint8_t>(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY);
+ grpc_error* err = static_cast<grpc_error*>(gpr_malloc(
+ sizeof(*err) + initial_arena_capacity * sizeof(intptr_t)));
if (err == nullptr) { // TODO(ctiller): make gpr_malloc return NULL
return GRPC_ERROR_OOM;
}
@@ -362,7 +362,7 @@ static void ref_strs(grpc_error* err) {
for (size_t i = 0; i < GRPC_ERROR_STR_MAX; ++i) {
uint8_t slot = err->strs[i];
if (slot != UINT8_MAX) {
- grpc_slice_ref_internal(*(grpc_slice*)(err->arena + slot));
+ grpc_slice_ref_internal(*reinterpret_cast<grpc_slice*>(err->arena + slot));
}
}
}
@@ -370,7 +370,7 @@ static void ref_strs(grpc_error* err) {
static void ref_errs(grpc_error* err) {
uint8_t slot = err->first_err;
while (slot != UINT8_MAX) {
- grpc_linked_error* lerr = (grpc_linked_error*)(err->arena + slot);
+ grpc_linked_error* lerr = reinterpret_cast<grpc_linked_error*>(err->arena + slot);
GRPC_ERROR_REF(lerr->err);
slot = lerr->next;
}
@@ -399,11 +399,11 @@ static grpc_error* copy_error_and_unref(grpc_error* in) {
uint8_t new_arena_capacity = in->arena_capacity;
// the returned err will be added to, so we ensure this is room to avoid
// unneeded allocations.
- if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) {
- new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
+ if (in->arena_capacity - in->arena_size < static_cast<uint8_t>SLOTS_PER_STR) {
+ new_arena_capacity = static_cast<uint8_t>(3 * new_arena_capacity / 2);
}
- out = (grpc_error*)gpr_malloc(sizeof(*in) +
- new_arena_capacity * sizeof(intptr_t));
+ out = static_cast<grpc_error*>(gpr_malloc(sizeof(*in) +
+ new_arena_capacity * sizeof(intptr_t)));
#ifndef NDEBUG
if (grpc_trace_error_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
@@ -487,7 +487,7 @@ bool grpc_error_get_str(grpc_error* err, grpc_error_strs which,
}
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
- *str = *(grpc_slice*)(err->arena + slot);
+ *str = *reinterpret_cast<grpc_slice*>(err->arena + slot);
return true;
} else {
return false;
@@ -519,7 +519,7 @@ typedef struct {
static void append_chr(char c, char** s, size_t* sz, size_t* cap) {
if (*sz == *cap) {
*cap = GPR_MAX(8, 3 * *cap / 2);
- *s = (char*)gpr_realloc(*s, *cap);
+ *s = static_cast<char*>(gpr_realloc(*s, *cap));
}
(*s)[(*sz)++] = c;
}
@@ -562,7 +562,7 @@ static void append_esc_str(const uint8_t* str, size_t len, char** s, size_t* sz,
break;
}
} else {
- append_chr((char)*str, s, sz, cap);
+ append_chr(static_cast<char>(*str), s, sz, cap);
}
}
append_chr('"', s, sz, cap);
@@ -572,7 +572,7 @@ static void append_kv(kv_pairs* kvs, char* key, char* value) {
if (kvs->num_kvs == kvs->cap_kvs) {
kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4);
kvs->kvs =
- (kv_pair*)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
+ static_cast<kv_pair*>(gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs));
}
kvs->kvs[kvs->num_kvs].key = key;
kvs->kvs[kvs->num_kvs].value = value;
@@ -593,7 +593,7 @@ static void collect_ints_kvs(grpc_error* err, kv_pairs* kvs) {
for (size_t which = 0; which < GRPC_ERROR_INT_MAX; ++which) {
uint8_t slot = err->ints[which];
if (slot != UINT8_MAX) {
- append_kv(kvs, key_int((grpc_error_ints)which),
+ append_kv(kvs, key_int(static_cast<grpc_error_ints>(which)),
fmt_int(err->arena[slot]));
}
}
@@ -617,8 +617,8 @@ static void collect_strs_kvs(grpc_error* err, kv_pairs* kvs) {
for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) {
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
- append_kv(kvs, key_str((grpc_error_strs)which),
- fmt_str(*(grpc_slice*)(err->arena + slot)));
+ append_kv(kvs, key_str(static_cast<grpc_error_strs>(which)),
+ fmt_str(*reinterpret_cast<grpc_slice*>(err->arena + slot)));
}
}
}
@@ -652,8 +652,8 @@ static void collect_times_kvs(grpc_error* err, kv_pairs* kvs) {
for (size_t which = 0; which < GRPC_ERROR_TIME_MAX; ++which) {
uint8_t slot = err->times[which];
if (slot != UINT8_MAX) {
- append_kv(kvs, key_time((grpc_error_times)which),
- fmt_time(*(gpr_timespec*)(err->arena + slot)));
+ append_kv(kvs, key_time(static_cast<grpc_error_times>(which)),
+ fmt_time(*reinterpret_cast<gpr_timespec*>(err->arena + slot)));
}
}
}
@@ -662,7 +662,7 @@ static void add_errs(grpc_error* err, char** s, size_t* sz, size_t* cap) {
uint8_t slot = err->first_err;
bool first = true;
while (slot != UINT8_MAX) {
- grpc_linked_error* lerr = (grpc_linked_error*)(err->arena + slot);
+ grpc_linked_error* lerr = reinterpret_cast<grpc_linked_error*>(err->arena + slot);
if (!first) append_chr(',', s, sz, cap);
first = false;
const char* e = grpc_error_string(lerr->err);
@@ -685,8 +685,8 @@ static char* errs_string(grpc_error* err) {
}
static int cmp_kvs(const void* a, const void* b) {
- const kv_pair* ka = (const kv_pair*)a;
- const kv_pair* kb = (const kv_pair*)b;
+ const kv_pair* ka = static_cast<const kv_pair*>(a);
+ const kv_pair* kb = static_cast<const kv_pair*>(b);
return strcmp(ka->key, kb->key);
}
@@ -698,7 +698,7 @@ static char* finish_kvs(kv_pairs* kvs) {
append_chr('{', &s, &sz, &cap);
for (size_t i = 0; i < kvs->num_kvs; i++) {
if (i != 0) append_chr(',', &s, &sz, &cap);
- append_esc_str((const uint8_t*)kvs->kvs[i].key, strlen(kvs->kvs[i].key), &s,
+ append_esc_str(reinterpret_cast<const uint8_t*>(kvs->kvs[i].key), strlen(kvs->kvs[i].key), &s,
&sz, &cap);
gpr_free(kvs->kvs[i].key);
append_chr(':', &s, &sz, &cap);
@@ -720,7 +720,7 @@ const char* grpc_error_string(grpc_error* err) {
void* p = (void*)gpr_atm_acq_load(&err->atomics.error_string);
if (p != nullptr) {
- return (const char*)p;
+ return static_cast<const char*>(p);
}
kv_pairs kvs;
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index a05d74c50a..ad013c01b6 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -281,7 +281,7 @@ static grpc_fd* fd_create(int fd, const char* name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == nullptr) {
- new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
+ new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
new_fd->read_closure.Init();
new_fd->write_closure.Init();
}
@@ -304,7 +304,7 @@ static grpc_fd* fd_create(int fd, const char* name) {
gpr_free(fd_name);
struct epoll_event ev;
- ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
+ ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLOUT | EPOLLET);
ev.data.ptr = new_fd;
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
@@ -440,7 +440,7 @@ static worker_remove_result worker_remove(grpc_pollset* pollset,
}
static size_t choose_neighborhood(void) {
- return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
+ return static_cast<size_t>(gpr_cpu_current_cpu()) % g_num_neighborhoods;
}
static grpc_error* pollset_global_init(void) {
@@ -451,15 +451,15 @@ static grpc_error* pollset_global_init(void) {
grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
if (err != GRPC_ERROR_NONE) return err;
struct epoll_event ev;
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
ev.data.ptr = &global_wakeup_fd;
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
&ev) != 0) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
- g_neighborhoods = (pollset_neighborhood*)gpr_zalloc(sizeof(*g_neighborhoods) *
- g_num_neighborhoods);
+ g_neighborhoods = static_cast<pollset_neighborhood*>(gpr_zalloc(sizeof(*g_neighborhoods) *
+ g_num_neighborhoods));
for (size_t i = 0; i < g_num_neighborhoods; i++) {
gpr_mu_init(&g_neighborhoods[i].mu);
}
@@ -579,7 +579,7 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) {
} else if (delta < 0) {
return 0;
} else {
- return (int)delta;
+ return static_cast<int>(delta);
}
}
@@ -609,7 +609,7 @@ static grpc_error* process_epoll_events(grpc_pollset* pollset) {
append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
} else {
- grpc_fd* fd = (grpc_fd*)(data_ptr);
+ grpc_fd* fd = static_cast<grpc_fd*>(data_ptr);
bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (ev->events & EPOLLOUT) != 0;
@@ -881,7 +881,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
} else {
gpr_atm_no_barrier_store(&g_active_poller, 0);
size_t poller_neighborhood_idx =
- (size_t)(pollset->neighborhood - g_neighborhoods);
+ static_cast<size_t>(pollset->neighborhood - g_neighborhoods);
gpr_mu_unlock(&pollset->mu);
bool found_worker = false;
bool scan_state[MAX_NEIGHBORHOODS];
@@ -1150,7 +1150,7 @@ static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
*/
static grpc_pollset_set* pollset_set_create(void) {
- return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
+ return (grpc_pollset_set*)(static_cast<intptr_t>(0xdeafbeef));
}
static void pollset_set_destroy(grpc_pollset_set* pss) {}
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index 000faed1d9..e201845cd2 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -276,7 +276,7 @@ static void ref_by(grpc_fd* fd, int n) {
}
static void fd_destroy(void* arg, grpc_error* error) {
- grpc_fd* fd = (grpc_fd*)arg;
+ grpc_fd* fd = static_cast<grpc_fd*>(arg);
/* Add the fd to the freelist */
grpc_iomgr_unregister_object(&fd->iomgr_object);
POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
@@ -338,7 +338,7 @@ static grpc_fd* fd_create(int fd, const char* name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == nullptr) {
- new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
+ new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
new_fd->read_closure.Init();
new_fd->write_closure.Init();
}
@@ -441,7 +441,7 @@ static grpc_error* pollable_create(pollable_type type, pollable** p) {
if (epfd == -1) {
return GRPC_OS_ERROR(errno, "epoll_create1");
}
- *p = (pollable*)gpr_malloc(sizeof(**p));
+ *p = static_cast<pollable*>(gpr_malloc(sizeof(**p)));
grpc_error* err = grpc_wakeup_fd_init(&(*p)->wakeup);
if (err != GRPC_ERROR_NONE) {
close(epfd);
@@ -450,7 +450,7 @@ static grpc_error* pollable_create(pollable_type type, pollable** p) {
return err;
}
struct epoll_event ev;
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
ev.data.ptr = (void*)(1 | (intptr_t) & (*p)->wakeup);
if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
err = GRPC_OS_ERROR(errno, "epoll_ctl");
@@ -479,7 +479,7 @@ static pollable* pollable_ref(pollable* p) {
#else
static pollable* pollable_ref(pollable* p, int line, const char* reason) {
if (grpc_trace_pollable_refcount.enabled()) {
- int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
+ int r = static_cast<int>gpr_atm_no_barrier_load(&p->refs.count);
gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
"POLLABLE:%p ref %d->%d %s", p, r, r + 1, reason);
}
@@ -494,7 +494,7 @@ static void pollable_unref(pollable* p) {
static void pollable_unref(pollable* p, int line, const char* reason) {
if (p == nullptr) return;
if (grpc_trace_pollable_refcount.enabled()) {
- int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
+ int r = static_cast<int>gpr_atm_no_barrier_load(&p->refs.count);
gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
"POLLABLE:%p unref %d->%d %s", p, r, r - 1, reason);
}
@@ -516,7 +516,7 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
}
struct epoll_event ev_fd;
- ev_fd.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
+ ev_fd.events = static_cast<uint32_t>(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
ev_fd.data.ptr = fd;
if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
switch (errno) {
@@ -699,7 +699,7 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) {
else if (delta < 0)
return 0;
else
- return (int)delta;
+ return static_cast<int>(delta);
}
static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
@@ -768,10 +768,10 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
}
append_error(&error,
grpc_wakeup_fd_consume_wakeup(
- (grpc_wakeup_fd*)((~(intptr_t)1) & (intptr_t)data_ptr)),
+ (grpc_wakeup_fd*)((~static_cast<intptr_t>(1)) & (intptr_t)data_ptr)),
err_desc);
} else {
- grpc_fd* fd = (grpc_fd*)data_ptr;
+ grpc_fd* fd = static_cast<grpc_fd*>(data_ptr);
bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (ev->events & EPOLLOUT) != 0;
@@ -1170,7 +1170,7 @@ static grpc_pollset_set* pss_lock_adam(grpc_pollset_set* pss) {
}
static grpc_pollset_set* pollset_set_create(void) {
- grpc_pollset_set* pss = (grpc_pollset_set*)gpr_zalloc(sizeof(*pss));
+ grpc_pollset_set* pss = static_cast<grpc_pollset_set*>(gpr_zalloc(sizeof(*pss)));
gpr_mu_init(&pss->mu);
gpr_ref_init(&pss->refs, 1);
return pss;
@@ -1211,7 +1211,7 @@ static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
if (pss->fd_count == pss->fd_capacity) {
pss->fd_capacity = GPR_MAX(pss->fd_capacity * 2, 8);
pss->fds =
- (grpc_fd**)gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds));
+ static_cast<grpc_fd**>(gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds)));
}
REF_BY(fd, 2, "pollset_set");
pss->fds[pss->fd_count++] = fd;
@@ -1319,8 +1319,8 @@ static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
err_desc);
if (pss->pollset_count == pss->pollset_capacity) {
pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
- pss->pollsets = (grpc_pollset**)gpr_realloc(
- pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets));
+ pss->pollsets = static_cast<grpc_pollset**>(gpr_realloc(
+ pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets)));
}
pss->pollsets[pss->pollset_count++] = ps;
gpr_mu_unlock(&pss->mu);
@@ -1373,7 +1373,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
b->parent = a;
if (a->fd_capacity < a->fd_count + b->fd_count) {
a->fd_capacity = GPR_MAX(2 * a->fd_capacity, a->fd_count + b->fd_count);
- a->fds = (grpc_fd**)gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds));
+ a->fds = static_cast<grpc_fd**>(gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds)));
}
size_t initial_a_fd_count = a->fd_count;
a->fd_count = 0;
@@ -1390,8 +1390,8 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
a->pollset_capacity =
GPR_MAX(2 * a->pollset_capacity, a->pollset_count + b->pollset_count);
- a->pollsets = (grpc_pollset**)gpr_realloc(
- a->pollsets, a->pollset_capacity * sizeof(*a->pollsets));
+ a->pollsets = static_cast<grpc_pollset**>(gpr_realloc(
+ a->pollsets, a->pollset_capacity * sizeof(*a->pollsets)));
}
if (b->pollset_count > 0) {
memcpy(a->pollsets + a->pollset_count, b->pollsets,
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index 4c26bbd2c6..438b4c8c09 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -349,7 +349,7 @@ static void polling_island_add_fds_locked(polling_island* pi, grpc_fd** fds,
#endif /* defined(GRPC_TSAN) */
for (i = 0; i < fd_count; i++) {
- ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
+ ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLOUT | EPOLLET);
ev.data.ptr = fds[i];
err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev);
@@ -369,7 +369,7 @@ static void polling_island_add_fds_locked(polling_island* pi, grpc_fd** fds,
if (pi->fd_cnt == pi->fd_capacity) {
pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
pi->fds =
- (grpc_fd**)gpr_realloc(pi->fds, sizeof(grpc_fd*) * pi->fd_capacity);
+ static_cast<grpc_fd**>(gpr_realloc(pi->fds, sizeof(grpc_fd*) * pi->fd_capacity));
}
pi->fds[pi->fd_cnt++] = fds[i];
@@ -388,7 +388,7 @@ static void polling_island_add_wakeup_fd_locked(polling_island* pi,
char* err_msg;
const char* err_desc = "polling_island_add_wakeup_fd";
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
ev.data.ptr = wakeup_fd;
err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD,
GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
@@ -471,7 +471,7 @@ static polling_island* polling_island_create(grpc_fd* initial_fd,
*error = GRPC_ERROR_NONE;
- pi = (polling_island*)gpr_malloc(sizeof(*pi));
+ pi = static_cast<polling_island*>(gpr_malloc(sizeof(*pi)));
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
@@ -815,7 +815,7 @@ static grpc_fd* fd_create(int fd, const char* name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == nullptr) {
- new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
+ new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
gpr_mu_init(&new_fd->po.mu);
new_fd->read_closure.Init();
new_fd->write_closure.Init();
@@ -976,7 +976,7 @@ static grpc_error* pollset_worker_kick(grpc_pollset_worker* worker) {
grpc_error* err = GRPC_ERROR_NONE;
/* Kick the worker only if it was not already kicked */
- if (gpr_atm_no_barrier_cas(&worker->is_kicked, (gpr_atm)0, (gpr_atm)1)) {
+ if (gpr_atm_no_barrier_cas(&worker->is_kicked, static_cast<gpr_atm>(0), static_cast<gpr_atm>(1))) {
GRPC_POLLING_TRACE(
"pollset_worker_kick: Kicking worker: %p (thread id: %ld)",
(void*)worker, (long int)worker->pt_id);
@@ -1096,7 +1096,7 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) {
else if (delta < 0)
return 0;
else
- return (int)delta;
+ return static_cast<int>(delta);
}
static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
@@ -1251,7 +1251,7 @@ static void pollset_work_and_unlock(grpc_pollset* pollset,
to the function pollset_work_and_unlock() will pick up the correct
epoll_fd */
} else {
- grpc_fd* fd = (grpc_fd*)data_ptr;
+ grpc_fd* fd = static_cast<grpc_fd*>(data_ptr);
int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
@@ -1538,7 +1538,7 @@ static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
*/
static grpc_pollset_set* pollset_set_create(void) {
- grpc_pollset_set* pss = (grpc_pollset_set*)gpr_malloc(sizeof(*pss));
+ grpc_pollset_set* pss = static_cast<grpc_pollset_set*>(gpr_malloc(sizeof(*pss)));
gpr_mu_init(&pss->po.mu);
pss->po.pi = nullptr;
#ifndef NDEBUG
@@ -1607,8 +1607,8 @@ void* grpc_pollset_get_polling_island(grpc_pollset* ps) {
}
bool grpc_are_polling_islands_equal(void* p, void* q) {
- polling_island* p1 = (polling_island*)p;
- polling_island* p2 = (polling_island*)q;
+ polling_island* p1 = static_cast<polling_island*>(p);
+ polling_island* p2 = static_cast<polling_island*>(q);
/* Note: polling_island_lock_pair() may change p1 and p2 to point to the
latest polling islands in their respective linked lists */
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 9833b47bd3..769fdbc299 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -323,7 +323,7 @@ static void unref_by(grpc_fd* fd, int n) {
}
static grpc_fd* fd_create(int fd, const char* name) {
- grpc_fd* r = (grpc_fd*)gpr_malloc(sizeof(*r));
+ grpc_fd* r = static_cast<grpc_fd*>(gpr_malloc(sizeof(*r)));
gpr_mu_init(&r->mu);
gpr_atm_rel_store(&r->refst, 1);
r->shutdown = 0;
@@ -835,8 +835,8 @@ static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
if (pollset->fd_count == pollset->fd_capacity) {
pollset->fd_capacity =
GPR_MAX(pollset->fd_capacity + 8, pollset->fd_count * 3 / 2);
- pollset->fds = (grpc_fd**)gpr_realloc(
- pollset->fds, sizeof(grpc_fd*) * pollset->fd_capacity);
+ pollset->fds = static_cast<grpc_fd**>(gpr_realloc(
+ pollset->fds, sizeof(grpc_fd*) * pollset->fd_capacity));
}
pollset->fds[pollset->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
@@ -890,7 +890,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
pollset->local_wakeup_cache = worker.wakeup_fd->next;
} else {
worker.wakeup_fd =
- (grpc_cached_wakeup_fd*)gpr_malloc(sizeof(*worker.wakeup_fd));
+ static_cast<grpc_cached_wakeup_fd*>(gpr_malloc(sizeof(*worker.wakeup_fd)));
error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
@@ -945,8 +945,8 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
const size_t pfd_size = sizeof(*pfds) * (pollset->fd_count + 2);
const size_t watch_size = sizeof(*watchers) * (pollset->fd_count + 2);
void* buf = gpr_malloc(pfd_size + watch_size);
- pfds = (struct pollfd*)buf;
- watchers = (grpc_fd_watcher*)(void*)((char*)buf + pfd_size);
+ pfds = static_cast<struct pollfd*>(buf);
+ watchers = static_cast<grpc_fd_watcher*>((void*)(static_cast<char*>(buf) + pfd_size));
}
fd_count = 0;
@@ -972,8 +972,8 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
for (i = 1; i < pfd_count; i++) {
grpc_fd* fd = watchers[i].fd;
- pfds[i].events = (short)fd_begin_poll(fd, pollset, &worker, POLLIN,
- POLLOUT, &watchers[i]);
+ pfds[i].events = static_cast<short>(fd_begin_poll(fd, pollset, &worker, POLLIN,
+ POLLOUT, &watchers[i]));
GRPC_FD_UNREF(fd, "multipoller_start");
}
@@ -1123,7 +1123,7 @@ static int poll_deadline_to_millis_timeout(grpc_millis deadline) {
grpc_millis n = deadline - grpc_core::ExecCtx::Get()->Now();
if (n < 0) return 0;
if (n > INT_MAX) return -1;
- return (int)n;
+ return static_cast<int>(n);
}
/*******************************************************************************
@@ -1132,7 +1132,7 @@ static int poll_deadline_to_millis_timeout(grpc_millis deadline) {
static grpc_pollset_set* pollset_set_create(void) {
grpc_pollset_set* pollset_set =
- (grpc_pollset_set*)gpr_zalloc(sizeof(*pollset_set));
+ static_cast<grpc_pollset_set*>(gpr_zalloc(sizeof(*pollset_set)));
gpr_mu_init(&pollset_set->mu);
return pollset_set;
}
@@ -1173,9 +1173,9 @@ static void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
pollset_set->pollset_capacity =
GPR_MAX(8, 2 * pollset_set->pollset_capacity);
- pollset_set->pollsets = (grpc_pollset**)gpr_realloc(
+ pollset_set->pollsets = static_cast<grpc_pollset**>(gpr_realloc(
pollset_set->pollsets,
- pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets));
+ pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets)));
}
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
@@ -1222,9 +1222,9 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
gpr_mu_lock(&bag->mu);
if (bag->pollset_set_count == bag->pollset_set_capacity) {
bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
- bag->pollset_sets = (grpc_pollset_set**)gpr_realloc(
+ bag->pollset_sets = static_cast<grpc_pollset_set**>(gpr_realloc(
bag->pollset_sets,
- bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
+ bag->pollset_set_capacity * sizeof(*bag->pollset_sets)));
}
bag->pollset_sets[bag->pollset_set_count++] = item;
for (i = 0, j = 0; i < bag->fd_count; i++) {
@@ -1259,8 +1259,8 @@ static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
- pollset_set->fds = (grpc_fd**)gpr_realloc(
- pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
+ pollset_set->fds = static_cast<grpc_fd**>(gpr_realloc(
+ pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds)));
}
GRPC_FD_REF(fd, "pollset_set");
pollset_set->fds[pollset_set->fd_count++] = fd;
@@ -1312,12 +1312,12 @@ static void cache_insert_locked(poll_args* args) {
}
static void init_result(poll_args* pargs) {
- pargs->result = (poll_result*)gpr_malloc(sizeof(poll_result));
+ pargs->result = static_cast<poll_result*>(gpr_malloc(sizeof(poll_result)));
gpr_ref_init(&pargs->result->refcount, 1);
pargs->result->watchers = nullptr;
pargs->result->watchcount = 0;
pargs->result->fds =
- (struct pollfd*)gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
+ static_cast<struct pollfd*>(gpr_malloc(sizeof(struct pollfd) * pargs->nfds));
memcpy(pargs->result->fds, pargs->fds, sizeof(struct pollfd) * pargs->nfds);
pargs->result->nfds = pargs->nfds;
pargs->result->retval = 0;
@@ -1356,7 +1356,7 @@ static poll_args* get_poller_locked(struct pollfd* fds, nfds_t count) {
return pargs;
}
- poll_args* pargs = (poll_args*)gpr_malloc(sizeof(struct poll_args));
+ poll_args* pargs = static_cast<poll_args*>(gpr_malloc(sizeof(struct poll_args)));
gpr_cv_init(&pargs->trigger);
pargs->fds = fds;
pargs->nfds = count;
@@ -1404,7 +1404,7 @@ static void cache_poller_locked(poll_args* args) {
poll_cache.size = poll_cache.size * 2;
poll_cache.count = 0;
poll_cache.active_pollers =
- (poll_args**)gpr_malloc(sizeof(void*) * poll_cache.size);
+ static_cast<poll_args**>(gpr_malloc(sizeof(void*) * poll_cache.size));
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = nullptr;
}
@@ -1461,7 +1461,7 @@ gpr_timespec thread_grace;
// Poll in a background thread
static void run_poll(void* args) {
- poll_args* pargs = (poll_args*)args;
+ poll_args* pargs = static_cast<poll_args*>(args);
while (1) {
poll_result* result = pargs->result;
int retval = g_cvfds.poll(result->fds, result->nfds, CV_POLL_PERIOD_MS);
@@ -1509,12 +1509,12 @@ static int cvfd_poll(struct pollfd* fds, nfds_t nfds, int timeout) {
nfds_t nsockfds = 0;
poll_result* result = nullptr;
gpr_mu_lock(&g_cvfds.mu);
- pollcv = (grpc_cv_node*)gpr_malloc(sizeof(grpc_cv_node));
+ pollcv = static_cast<grpc_cv_node*>(gpr_malloc(sizeof(grpc_cv_node)));
pollcv->next = nullptr;
gpr_cv pollcv_cv;
gpr_cv_init(&pollcv_cv);
pollcv->cv = &pollcv_cv;
- grpc_cv_node* fd_cvs = (grpc_cv_node*)gpr_malloc(nfds * sizeof(grpc_cv_node));
+ grpc_cv_node* fd_cvs = static_cast<grpc_cv_node*>(gpr_malloc(nfds * sizeof(grpc_cv_node)));
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
@@ -1547,7 +1547,7 @@ static int cvfd_poll(struct pollfd* fds, nfds_t nfds, int timeout) {
res = 0;
if (!skip_poll && nsockfds > 0) {
struct pollfd* pollfds =
- (struct pollfd*)gpr_malloc(sizeof(struct pollfd) * nsockfds);
+ static_cast<struct pollfd*>(gpr_malloc(sizeof(struct pollfd) * nsockfds));
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd >= 0) {
@@ -1611,7 +1611,7 @@ static void global_cv_fd_table_init() {
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
g_cvfds.cvfds =
- (grpc_fd_node*)gpr_malloc(sizeof(grpc_fd_node) * CV_DEFAULT_TABLE_SIZE);
+ static_cast<grpc_fd_node*>(gpr_malloc(sizeof(grpc_fd_node) * CV_DEFAULT_TABLE_SIZE));
g_cvfds.free_fds = nullptr;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
@@ -1628,7 +1628,7 @@ static void global_cv_fd_table_init() {
poll_cache.size = 32;
poll_cache.count = 0;
poll_cache.free_pollers = nullptr;
- poll_cache.active_pollers = (poll_args**)gpr_malloc(sizeof(void*) * 32);
+ poll_cache.active_pollers = static_cast<poll_args**>(gpr_malloc(sizeof(void*) * 32));
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = nullptr;
}
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index af5dfe5bf3..4280794428 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -108,11 +108,11 @@ static void add(const char* beg, const char* end, char*** ss, size_t* ns) {
char* s;
size_t len;
GPR_ASSERT(end >= beg);
- len = (size_t)(end - beg);
- s = (char*)gpr_malloc(len + 1);
+ len = static_cast<size_t>(end - beg);
+ s = static_cast<char*>(gpr_malloc(len + 1));
memcpy(s, beg, len);
s[len] = 0;
- *ss = (char**)gpr_realloc(*ss, sizeof(char**) * np);
+ *ss = static_cast<char**>(gpr_realloc(*ss, sizeof(char**) * np));
(*ss)[n] = s;
*ns = np;
}
diff --git a/src/core/lib/iomgr/exec_ctx.cc b/src/core/lib/iomgr/exec_ctx.cc
index 03c833ff07..89c6a873e6 100644
--- a/src/core/lib/iomgr/exec_ctx.cc
+++ b/src/core/lib/iomgr/exec_ctx.cc
@@ -54,20 +54,20 @@ static gpr_timespec g_start_time;
static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
ts = gpr_time_sub(ts, g_start_time);
double x =
- GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS;
+ GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) + static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS;
if (x < 0) return 0;
if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
- return (gpr_atm)x;
+ return static_cast<gpr_atm>(x);
}
static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
ts = gpr_time_sub(ts, g_start_time);
- double x = GPR_MS_PER_SEC * (double)ts.tv_sec +
- (double)ts.tv_nsec / GPR_NS_PER_MS +
- (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC;
+ double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) +
+ static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS +
+ static_cast<double>(GPR_NS_PER_SEC - 1) / static_cast<double>(GPR_NS_PER_SEC);
if (x < 0) return 0;
if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
- return (gpr_atm)x;
+ return static_cast<gpr_atm>(x);
}
gpr_timespec grpc_millis_to_timespec(grpc_millis millis,
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
index 3288ea3de5..901bbdd4df 100644
--- a/src/core/lib/iomgr/executor.cc
+++ b/src/core/lib/iomgr/executor.cc
@@ -95,7 +95,7 @@ void grpc_executor_set_threading(bool threading) {
gpr_atm_no_barrier_store(&g_cur_threads, 1);
gpr_tls_init(&g_this_thread_state);
g_thread_state =
- (thread_state*)gpr_zalloc(sizeof(thread_state) * g_max_threads);
+ static_cast<thread_state*>(gpr_zalloc(sizeof(thread_state) * g_max_threads));
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_init(&g_thread_state[i].mu);
gpr_cv_init(&g_thread_state[i].cv);
@@ -140,7 +140,7 @@ void grpc_executor_init() {
void grpc_executor_shutdown() { grpc_executor_set_threading(false); }
static void executor_thread(void* arg) {
- thread_state* ts = (thread_state*)arg;
+ thread_state* ts = static_cast<thread_state*>(arg);
gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
grpc_core::ExecCtx exec_ctx(0);
@@ -149,7 +149,7 @@ static void executor_thread(void* arg) {
for (;;) {
if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
- (int)(ts - g_thread_state), subtract_depth);
+ static_cast<int>(ts - g_thread_state), subtract_depth);
}
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
@@ -160,7 +160,7 @@ static void executor_thread(void* arg) {
if (ts->shutdown) {
if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
- (int)(ts - g_thread_state));
+ static_cast<int>(ts - g_thread_state));
}
gpr_mu_unlock(&ts->mu);
break;
@@ -170,7 +170,7 @@ static void executor_thread(void* arg) {
ts->elems = GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
if (executor_trace.enabled()) {
- gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", static_cast<int>(ts - g_thread_state));
}
grpc_core::ExecCtx::Get()->InvalidateNow();
@@ -188,7 +188,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
}
do {
retry_push = false;
- size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ size_t cur_thread_count = static_cast<size_t>gpr_atm_no_barrier_load(&g_cur_threads);
if (cur_thread_count == 0) {
if (executor_trace.enabled()) {
#ifndef NDEBUG
@@ -219,7 +219,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
GPR_DEBUG,
"EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
closure, is_short ? "short" : "long", closure->file_created,
- closure->line_created, (int)(ts - g_thread_state));
+ closure->line_created, static_cast<int>(ts - g_thread_state));
#else
gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
closure, is_short ? "short" : "long",
@@ -233,7 +233,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
// guarantee no starvation)
// ... spin through queues and try again
gpr_mu_unlock(&ts->mu);
- size_t idx = (size_t)(ts - g_thread_state);
+ size_t idx = static_cast<size_t>(ts - g_thread_state);
ts = &g_thread_state[(idx + 1) % cur_thread_count];
if (ts == orig_ts) {
retry_push = true;
@@ -255,7 +255,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
break;
}
if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
- cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ cur_thread_count = static_cast<size_t>gpr_atm_no_barrier_load(&g_cur_threads);
if (cur_thread_count < g_max_threads) {
gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
diff --git a/src/core/lib/iomgr/gethostname_host_name_max.cc b/src/core/lib/iomgr/gethostname_host_name_max.cc
index 2487160916..ae95788a1e 100644
--- a/src/core/lib/iomgr/gethostname_host_name_max.cc
+++ b/src/core/lib/iomgr/gethostname_host_name_max.cc
@@ -27,7 +27,7 @@
#include <grpc/support/alloc.h>
char* grpc_gethostname() {
- char* hostname = (char*)gpr_malloc(HOST_NAME_MAX);
+ char* hostname = static_cast<char*>(gpr_malloc(HOST_NAME_MAX));
if (gethostname(hostname, HOST_NAME_MAX) != 0) {
gpr_free(hostname);
return nullptr;
diff --git a/src/core/lib/iomgr/is_epollexclusive_available.cc b/src/core/lib/iomgr/is_epollexclusive_available.cc
index 08f9cf2b69..a58a5775d3 100644
--- a/src/core/lib/iomgr/is_epollexclusive_available.cc
+++ b/src/core/lib/iomgr/is_epollexclusive_available.cc
@@ -61,7 +61,7 @@ bool grpc_is_epollexclusive_available(void) {
/* choose events that should cause an error on
EPOLLEXCLUSIVE enabled kernels - specifically the combination of
EPOLLONESHOT and EPOLLEXCLUSIVE */
- ev.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT);
+ ev.events = static_cast<uint32_t>(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT);
ev.data.ptr = nullptr;
if (epoll_ctl(fd, EPOLL_CTL_ADD, evfd, &ev) != 0) {
if (errno != EINVAL) {
diff --git a/src/core/lib/iomgr/load_file.cc b/src/core/lib/iomgr/load_file.cc
index b6586fbc73..5e637ff8b5 100644
--- a/src/core/lib/iomgr/load_file.cc
+++ b/src/core/lib/iomgr/load_file.cc
@@ -45,10 +45,10 @@ grpc_error* grpc_load_file(const char* filename, int add_null_terminator,
}
fseek(file, 0, SEEK_END);
/* Converting to size_t on the assumption that it will not fail */
- contents_size = (size_t)ftell(file);
+ contents_size = static_cast<size_t>(ftell(file));
fseek(file, 0, SEEK_SET);
contents =
- (unsigned char*)gpr_malloc(contents_size + (add_null_terminator ? 1 : 0));
+ static_cast<unsigned char*>(gpr_malloc(contents_size + (add_null_terminator ? 1 : 0)));
bytes_read = fread(contents, 1, contents_size, file);
if (bytes_read < contents_size) {
error = GRPC_OS_ERROR(errno, "fread");
diff --git a/src/core/lib/iomgr/resolve_address_posix.cc b/src/core/lib/iomgr/resolve_address_posix.cc
index 3eea709ce4..b49b080c12 100644
--- a/src/core/lib/iomgr/resolve_address_posix.cc
+++ b/src/core/lib/iomgr/resolve_address_posix.cc
@@ -115,13 +115,13 @@ static grpc_error* blocking_resolve_address_impl(
/* Success path: set addrs non-NULL, fill it in */
*addresses =
- (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses));
+ static_cast<grpc_resolved_addresses*>(gpr_malloc(sizeof(grpc_resolved_addresses)));
(*addresses)->naddrs = 0;
for (resp = result; resp != nullptr; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
- (*addresses)->addrs = (grpc_resolved_address*)gpr_malloc(
- sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ (*addresses)->addrs = static_cast<grpc_resolved_address*>(gpr_malloc(
+ sizeof(grpc_resolved_address) * (*addresses)->naddrs));
i = 0;
for (resp = result; resp != nullptr; resp = resp->ai_next) {
memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
@@ -155,7 +155,7 @@ typedef struct {
/* Callback to be passed to grpc_executor to asynch-ify
* grpc_blocking_resolve_address */
static void do_request_thread(void* rp, grpc_error* error) {
- request* r = (request*)rp;
+ request* r = static_cast<request*>(rp);
GRPC_CLOSURE_SCHED(r->on_done, grpc_blocking_resolve_address(
r->name, r->default_port, r->addrs_out));
gpr_free(r->name);
@@ -174,7 +174,7 @@ static void resolve_address_impl(const char* name, const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
grpc_resolved_addresses** addrs) {
- request* r = (request*)gpr_malloc(sizeof(request));
+ request* r = static_cast<request*>(gpr_malloc(sizeof(request)));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index e64d772a7d..40dd29b6a4 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -244,7 +244,7 @@ static bool rq_reclaim_from_per_user_free_pool(
static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive);
static void rq_step(void* rq, grpc_error* error) {
- grpc_resource_quota* resource_quota = (grpc_resource_quota*)rq;
+ grpc_resource_quota* resource_quota = static_cast<grpc_resource_quota*>(rq);
resource_quota->step_scheduled = false;
do {
if (rq_alloc(resource_quota)) goto done;
@@ -303,7 +303,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
resource_user->free_pool += aborted_allocations;
GRPC_CLOSURE_LIST_SCHED(&resource_user->on_allocated);
gpr_mu_unlock(&resource_user->mu);
- ru_unref_by(resource_user, (gpr_atm)aborted_allocations);
+ ru_unref_by(resource_user, static_cast<gpr_atm>(aborted_allocations));
continue;
}
if (resource_user->free_pool < 0 &&
@@ -401,12 +401,12 @@ typedef struct {
} ru_slice_refcount;
static void ru_slice_ref(void* p) {
- ru_slice_refcount* rc = (ru_slice_refcount*)p;
+ ru_slice_refcount* rc = static_cast<ru_slice_refcount*>(p);
gpr_ref(&rc->refs);
}
static void ru_slice_unref(void* p) {
- ru_slice_refcount* rc = (ru_slice_refcount*)p;
+ ru_slice_refcount* rc = static_cast<ru_slice_refcount*>(p);
if (gpr_unref(&rc->refs)) {
grpc_resource_user_free(rc->resource_user, rc->size);
gpr_free(rc);
@@ -420,7 +420,7 @@ static const grpc_slice_refcount_vtable ru_slice_vtable = {
static grpc_slice ru_slice_create(grpc_resource_user* resource_user,
size_t size) {
ru_slice_refcount* rc =
- (ru_slice_refcount*)gpr_malloc(sizeof(ru_slice_refcount) + size);
+ static_cast<ru_slice_refcount*>(gpr_malloc(sizeof(ru_slice_refcount) + size));
rc->base.vtable = &ru_slice_vtable;
rc->base.sub_refcount = &rc->base;
gpr_ref_init(&rc->refs, 1);
@@ -428,7 +428,7 @@ static grpc_slice ru_slice_create(grpc_resource_user* resource_user,
rc->size = size;
grpc_slice slice;
slice.refcount = &rc->base;
- slice.data.refcounted.bytes = (uint8_t*)(rc + 1);
+ slice.data.refcounted.bytes = reinterpret_cast<uint8_t*>(rc + 1);
slice.data.refcounted.length = size;
return slice;
}
@@ -439,7 +439,7 @@ static grpc_slice ru_slice_create(grpc_resource_user* resource_user,
*/
static void ru_allocate(void* ru, grpc_error* error) {
- grpc_resource_user* resource_user = (grpc_resource_user*)ru;
+ grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
if (rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION)) {
rq_step_sched(resource_user->resource_quota);
@@ -448,7 +448,7 @@ static void ru_allocate(void* ru, grpc_error* error) {
}
static void ru_add_to_free_pool(void* ru, grpc_error* error) {
- grpc_resource_user* resource_user = (grpc_resource_user*)ru;
+ grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
@@ -473,7 +473,7 @@ static bool ru_post_reclaimer(grpc_resource_user* resource_user,
}
static void ru_post_benign_reclaimer(void* ru, grpc_error* error) {
- grpc_resource_user* resource_user = (grpc_resource_user*)ru;
+ grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
if (!ru_post_reclaimer(resource_user, false)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -487,7 +487,7 @@ static void ru_post_benign_reclaimer(void* ru, grpc_error* error) {
}
static void ru_post_destructive_reclaimer(void* ru, grpc_error* error) {
- grpc_resource_user* resource_user = (grpc_resource_user*)ru;
+ grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
if (!ru_post_reclaimer(resource_user, true)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -506,7 +506,7 @@ static void ru_shutdown(void* ru, grpc_error* error) {
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
}
- grpc_resource_user* resource_user = (grpc_resource_user*)ru;
+ grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
gpr_mu_lock(&resource_user->mu);
GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED);
GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED);
@@ -521,10 +521,10 @@ static void ru_shutdown(void* ru, grpc_error* error) {
}
static void ru_destroy(void* ru, grpc_error* error) {
- grpc_resource_user* resource_user = (grpc_resource_user*)ru;
+ grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
- rulist_remove(resource_user, (grpc_rulist)i);
+ rulist_remove(resource_user, static_cast<grpc_rulist>(i));
}
GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED);
GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED);
@@ -540,7 +540,7 @@ static void ru_destroy(void* ru, grpc_error* error) {
static void ru_allocated_slices(void* arg, grpc_error* error) {
grpc_resource_user_slice_allocator* slice_allocator =
- (grpc_resource_user_slice_allocator*)arg;
+ static_cast<grpc_resource_user_slice_allocator*>(arg);
if (error == GRPC_ERROR_NONE) {
for (size_t i = 0; i < slice_allocator->count; i++) {
grpc_slice_buffer_add_indexed(
@@ -563,7 +563,7 @@ typedef struct {
} rq_resize_args;
static void rq_resize(void* args, grpc_error* error) {
- rq_resize_args* a = (rq_resize_args*)args;
+ rq_resize_args* a = static_cast<rq_resize_args*>(args);
int64_t delta = a->size - a->resource_quota->size;
a->resource_quota->size += delta;
a->resource_quota->free_pool += delta;
@@ -574,7 +574,7 @@ static void rq_resize(void* args, grpc_error* error) {
}
static void rq_reclamation_done(void* rq, grpc_error* error) {
- grpc_resource_quota* resource_quota = (grpc_resource_quota*)rq;
+ grpc_resource_quota* resource_quota = static_cast<grpc_resource_quota*>(rq);
resource_quota->reclaiming = false;
rq_step_sched(resource_quota);
grpc_resource_quota_unref_internal(resource_quota);
@@ -587,7 +587,7 @@ static void rq_reclamation_done(void* rq, grpc_error* error) {
/* Public API */
grpc_resource_quota* grpc_resource_quota_create(const char* name) {
grpc_resource_quota* resource_quota =
- (grpc_resource_quota*)gpr_malloc(sizeof(*resource_quota));
+ static_cast<grpc_resource_quota*>(gpr_malloc(sizeof(*resource_quota)));
gpr_ref_init(&resource_quota->refs, 1);
resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
@@ -640,18 +640,18 @@ void grpc_resource_quota_ref(grpc_resource_quota* resource_quota) {
double grpc_resource_quota_get_memory_pressure(
grpc_resource_quota* resource_quota) {
- return ((double)(gpr_atm_no_barrier_load(
+ return (static_cast<double>(gpr_atm_no_barrier_load(
&resource_quota->memory_usage_estimation))) /
- ((double)MEMORY_USAGE_ESTIMATION_MAX);
+ (static_cast<double>(MEMORY_USAGE_ESTIMATION_MAX));
}
/* Public API */
void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
size_t size) {
grpc_core::ExecCtx exec_ctx;
- rq_resize_args* a = (rq_resize_args*)gpr_malloc(sizeof(*a));
+ rq_resize_args* a = static_cast<rq_resize_args*>(gpr_malloc(sizeof(*a)));
a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
- a->size = (int64_t)size;
+ a->size = static_cast<int64_t>(size);
gpr_atm_no_barrier_store(&resource_quota->last_size,
(gpr_atm)GPR_MIN((size_t)GPR_ATM_MAX, size));
GRPC_CLOSURE_INIT(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
@@ -659,7 +659,7 @@ void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
}
size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota) {
- return (size_t)gpr_atm_no_barrier_load(&resource_quota->last_size);
+ return static_cast<size_t>gpr_atm_no_barrier_load(&resource_quota->last_size);
}
/*******************************************************************************
@@ -672,7 +672,7 @@ grpc_resource_quota* grpc_resource_quota_from_channel_args(
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
return grpc_resource_quota_ref_internal(
- (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
+ static_cast<grpc_resource_quota*>(channel_args->args[i].value.pointer.p));
} else {
gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
}
@@ -682,12 +682,12 @@ grpc_resource_quota* grpc_resource_quota_from_channel_args(
}
static void* rq_copy(void* rq) {
- grpc_resource_quota_ref((grpc_resource_quota*)rq);
+ grpc_resource_quota_ref(static_cast<grpc_resource_quota*>(rq));
return rq;
}
static void rq_destroy(void* rq) {
- grpc_resource_quota_unref_internal((grpc_resource_quota*)rq);
+ grpc_resource_quota_unref_internal(static_cast<grpc_resource_quota*>(rq));
}
static int rq_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
@@ -704,7 +704,7 @@ const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void) {
grpc_resource_user* grpc_resource_user_create(
grpc_resource_quota* resource_quota, const char* name) {
grpc_resource_user* resource_user =
- (grpc_resource_user*)gpr_malloc(sizeof(*resource_user));
+ static_cast<grpc_resource_user*>(gpr_malloc(sizeof(*resource_user)));
resource_user->resource_quota =
grpc_resource_quota_ref_internal(resource_quota);
GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate,
@@ -785,9 +785,9 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
grpc_closure* optional_on_done) {
gpr_mu_lock(&resource_user->mu);
- ru_ref_by(resource_user, (gpr_atm)size);
- resource_user->free_pool -= (int64_t)size;
- resource_user->outstanding_allocations += (int64_t)size;
+ ru_ref_by(resource_user, static_cast<gpr_atm>(size));
+ resource_user->free_pool -= static_cast<int64_t>(size);
+ resource_user->outstanding_allocations += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
@@ -801,7 +801,7 @@ void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
GRPC_CLOSURE_SCHED(&resource_user->allocate_closure, GRPC_ERROR_NONE);
}
} else {
- resource_user->outstanding_allocations -= (int64_t)size;
+ resource_user->outstanding_allocations -= static_cast<int64_t>(size);
GRPC_CLOSURE_SCHED(optional_on_done, GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
@@ -810,7 +810,7 @@ void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
gpr_mu_lock(&resource_user->mu);
bool was_zero_or_negative = resource_user->free_pool <= 0;
- resource_user->free_pool += (int64_t)size;
+ resource_user->free_pool += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
@@ -824,7 +824,7 @@ void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
- ru_unref_by(resource_user, (gpr_atm)size);
+ ru_unref_by(resource_user, static_cast<gpr_atm>(size));
}
void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
diff --git a/src/core/lib/iomgr/sockaddr_utils.cc b/src/core/lib/iomgr/sockaddr_utils.cc
index ac97af4dee..06f080376a 100644
--- a/src/core/lib/iomgr/sockaddr_utils.cc
+++ b/src/core/lib/iomgr/sockaddr_utils.cc
@@ -39,13 +39,13 @@ static const uint8_t kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
int grpc_sockaddr_is_v4mapped(const grpc_resolved_address* resolved_addr,
grpc_resolved_address* resolved_addr4_out) {
GPR_ASSERT(resolved_addr != resolved_addr4_out);
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
struct sockaddr_in* addr4_out =
resolved_addr4_out == nullptr
? nullptr
- : (struct sockaddr_in*)resolved_addr4_out->addr;
+ : reinterpret_cast<struct sockaddr_in*>(resolved_addr4_out->addr);
if (addr->sa_family == AF_INET6) {
- const struct sockaddr_in6* addr6 = (const struct sockaddr_in6*)addr;
+ const struct sockaddr_in6* addr6 = reinterpret_cast<const struct sockaddr_in6*>(addr);
if (memcmp(addr6->sin6_addr.s6_addr, kV4MappedPrefix,
sizeof(kV4MappedPrefix)) == 0) {
if (resolved_addr4_out != nullptr) {
@@ -66,11 +66,11 @@ int grpc_sockaddr_is_v4mapped(const grpc_resolved_address* resolved_addr,
int grpc_sockaddr_to_v4mapped(const grpc_resolved_address* resolved_addr,
grpc_resolved_address* resolved_addr6_out) {
GPR_ASSERT(resolved_addr != resolved_addr6_out);
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
struct sockaddr_in6* addr6_out =
- (struct sockaddr_in6*)resolved_addr6_out->addr;
+ reinterpret_cast<struct sockaddr_in6*>(resolved_addr6_out->addr);
if (addr->sa_family == AF_INET) {
- const struct sockaddr_in* addr4 = (const struct sockaddr_in*)addr;
+ const struct sockaddr_in* addr4 = reinterpret_cast<const struct sockaddr_in*>(addr);
memset(resolved_addr6_out, 0, sizeof(*resolved_addr6_out));
addr6_out->sin6_family = AF_INET6;
memcpy(&addr6_out->sin6_addr.s6_addr[0], kV4MappedPrefix, 12);
@@ -89,10 +89,10 @@ int grpc_sockaddr_is_wildcard(const grpc_resolved_address* resolved_addr,
if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr4_normalized)) {
resolved_addr = &addr4_normalized;
}
- addr = (const struct sockaddr*)resolved_addr->addr;
+ addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
if (addr->sa_family == AF_INET) {
/* Check for 0.0.0.0 */
- const struct sockaddr_in* addr4 = (const struct sockaddr_in*)addr;
+ const struct sockaddr_in* addr4 = reinterpret_cast<const struct sockaddr_in*>(addr);
if (addr4->sin_addr.s_addr != 0) {
return 0;
}
@@ -100,7 +100,7 @@ int grpc_sockaddr_is_wildcard(const grpc_resolved_address* resolved_addr,
return 1;
} else if (addr->sa_family == AF_INET6) {
/* Check for :: */
- const struct sockaddr_in6* addr6 = (const struct sockaddr_in6*)addr;
+ const struct sockaddr_in6* addr6 = reinterpret_cast<const struct sockaddr_in6*>(addr);
int i;
for (i = 0; i < 16; i++) {
if (addr6->sin6_addr.s6_addr[i] != 0) {
@@ -122,21 +122,21 @@ void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address* wild4_out,
void grpc_sockaddr_make_wildcard4(int port,
grpc_resolved_address* resolved_wild_out) {
- struct sockaddr_in* wild_out = (struct sockaddr_in*)resolved_wild_out->addr;
+ struct sockaddr_in* wild_out = reinterpret_cast<struct sockaddr_in*>(resolved_wild_out->addr);
GPR_ASSERT(port >= 0 && port < 65536);
memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin_family = AF_INET;
- wild_out->sin_port = htons((uint16_t)port);
+ wild_out->sin_port = htons(static_cast<uint16_t>(port));
resolved_wild_out->len = sizeof(struct sockaddr_in);
}
void grpc_sockaddr_make_wildcard6(int port,
grpc_resolved_address* resolved_wild_out) {
- struct sockaddr_in6* wild_out = (struct sockaddr_in6*)resolved_wild_out->addr;
+ struct sockaddr_in6* wild_out = reinterpret_cast<struct sockaddr_in6*>(resolved_wild_out->addr);
GPR_ASSERT(port >= 0 && port < 65536);
memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin6_family = AF_INET6;
- wild_out->sin6_port = htons((uint16_t)port);
+ wild_out->sin6_port = htons(static_cast<uint16_t>(port));
resolved_wild_out->len = sizeof(struct sockaddr_in6);
}
@@ -156,13 +156,13 @@ int grpc_sockaddr_to_string(char** out,
if (normalize && grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) {
resolved_addr = &addr_normalized;
}
- addr = (const struct sockaddr*)resolved_addr->addr;
+ addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
if (addr->sa_family == AF_INET) {
- const struct sockaddr_in* addr4 = (const struct sockaddr_in*)addr;
+ const struct sockaddr_in* addr4 = reinterpret_cast<const struct sockaddr_in*>(addr);
ip = &addr4->sin_addr;
port = ntohs(addr4->sin_port);
} else if (addr->sa_family == AF_INET6) {
- const struct sockaddr_in6* addr6 = (const struct sockaddr_in6*)addr;
+ const struct sockaddr_in6* addr6 = reinterpret_cast<const struct sockaddr_in6*>(addr);
ip = &addr6->sin6_addr;
port = ntohs(addr6->sin6_port);
sin6_scope_id = addr6->sin6_scope_id;
@@ -208,7 +208,7 @@ char* grpc_sockaddr_to_uri(const grpc_resolved_address* resolved_addr) {
const char* grpc_sockaddr_get_uri_scheme(
const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
switch (addr->sa_family) {
case AF_INET:
return "ipv4";
@@ -221,12 +221,12 @@ const char* grpc_sockaddr_get_uri_scheme(
}
int grpc_sockaddr_get_family(const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
return addr->sa_family;
}
int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
switch (addr->sa_family) {
case AF_INET:
return ntohs(((struct sockaddr_in*)addr)->sin_port);
@@ -244,15 +244,15 @@ int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) {
int grpc_sockaddr_set_port(const grpc_resolved_address* resolved_addr,
int port) {
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
switch (addr->sa_family) {
case AF_INET:
GPR_ASSERT(port >= 0 && port < 65536);
- ((struct sockaddr_in*)addr)->sin_port = htons((uint16_t)port);
+ ((struct sockaddr_in*)addr)->sin_port = htons(static_cast<uint16_t>(port));
return 1;
case AF_INET6:
GPR_ASSERT(port >= 0 && port < 65536);
- ((struct sockaddr_in6*)addr)->sin6_port = htons((uint16_t)port);
+ ((struct sockaddr_in6*)addr)->sin6_port = htons(static_cast<uint16_t>(port));
return 1;
default:
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port",
diff --git a/src/core/lib/iomgr/socket_factory_posix.cc b/src/core/lib/iomgr/socket_factory_posix.cc
index 1c1b2fb270..3e696c2e10 100644
--- a/src/core/lib/iomgr/socket_factory_posix.cc
+++ b/src/core/lib/iomgr/socket_factory_posix.cc
@@ -69,16 +69,16 @@ void grpc_socket_factory_unref(grpc_socket_factory* factory) {
}
static void* socket_factory_arg_copy(void* p) {
- return grpc_socket_factory_ref((grpc_socket_factory*)p);
+ return grpc_socket_factory_ref(static_cast<grpc_socket_factory*>(p));
}
static void socket_factory_arg_destroy(void* p) {
- grpc_socket_factory_unref((grpc_socket_factory*)p);
+ grpc_socket_factory_unref(static_cast<grpc_socket_factory*>(p));
}
static int socket_factory_cmp(void* a, void* b) {
- return grpc_socket_factory_compare((grpc_socket_factory*)a,
- (grpc_socket_factory*)b);
+ return grpc_socket_factory_compare(static_cast<grpc_socket_factory*>(a),
+ static_cast<grpc_socket_factory*>(b));
}
static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
diff --git a/src/core/lib/iomgr/socket_mutator.cc b/src/core/lib/iomgr/socket_mutator.cc
index 8c8f0e56e1..eb219d7cb3 100644
--- a/src/core/lib/iomgr/socket_mutator.cc
+++ b/src/core/lib/iomgr/socket_mutator.cc
@@ -60,16 +60,16 @@ void grpc_socket_mutator_unref(grpc_socket_mutator* mutator) {
}
static void* socket_mutator_arg_copy(void* p) {
- return grpc_socket_mutator_ref((grpc_socket_mutator*)p);
+ return grpc_socket_mutator_ref(static_cast<grpc_socket_mutator*>(p));
}
static void socket_mutator_arg_destroy(void* p) {
- grpc_socket_mutator_unref((grpc_socket_mutator*)p);
+ grpc_socket_mutator_unref(static_cast<grpc_socket_mutator*>(p));
}
static int socket_mutator_cmp(void* a, void* b) {
- return grpc_socket_mutator_compare((grpc_socket_mutator*)a,
- (grpc_socket_mutator*)b);
+ return grpc_socket_mutator_compare(static_cast<grpc_socket_mutator*>(a),
+ static_cast<grpc_socket_mutator*>(b));
}
static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.cc b/src/core/lib/iomgr/socket_utils_common_posix.cc
index d02d77eb02..2668aa6d10 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.cc
+++ b/src/core/lib/iomgr/socket_utils_common_posix.cc
@@ -218,7 +218,7 @@ static void probe_ipv6_once(void) {
memset(&addr, 0, sizeof(addr));
addr.sin6_family = AF_INET6;
addr.sin6_addr.s6_addr[15] = 1; /* [::1]:0 */
- if (bind(fd, (struct sockaddr*)&addr, sizeof(addr)) == 0) {
+ if (bind(fd, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)) == 0) {
g_ipv6_loopback_available = 1;
} else {
gpr_log(GPR_INFO,
@@ -278,7 +278,7 @@ static int create_socket(grpc_socket_factory* factory, int domain, int type,
grpc_error* grpc_create_dualstack_socket_using_factory(
grpc_socket_factory* factory, const grpc_resolved_address* resolved_addr,
int type, int protocol, grpc_dualstack_mode* dsmode, int* newfd) {
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
int family = addr->sa_family;
if (family == AF_INET6) {
if (grpc_ipv6_loopback_available()) {
@@ -310,7 +310,7 @@ grpc_error* grpc_create_dualstack_socket_using_factory(
const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) {
GPR_ASSERT(size <= (socklen_t)-1);
- return inet_ntop(af, src, dst, (socklen_t)size);
+ return inet_ntop(af, src, dst, static_cast<socklen_t>(size));
}
#endif
diff --git a/src/core/lib/iomgr/socket_utils_linux.cc b/src/core/lib/iomgr/socket_utils_linux.cc
index 12199c5c89..edb5274e0e 100644
--- a/src/core/lib/iomgr/socket_utils_linux.cc
+++ b/src/core/lib/iomgr/socket_utils_linux.cc
@@ -35,8 +35,8 @@ int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock,
GPR_ASSERT(resolved_addr->len <= (socklen_t)-1);
flags |= nonblock ? SOCK_NONBLOCK : 0;
flags |= cloexec ? SOCK_CLOEXEC : 0;
- return accept4(sockfd, (struct sockaddr*)resolved_addr->addr,
- (socklen_t*)&resolved_addr->len, flags);
+ return accept4(sockfd, reinterpret_cast<struct sockaddr*>(resolved_addr->addr),
+ reinterpret_cast<socklen_t*>(&resolved_addr->len), flags);
}
#endif
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 3dff624065..8d08127915 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -80,7 +80,7 @@ static grpc_error* prepare_socket(const grpc_resolved_address* addr, int fd,
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_SOCKET_MUTATOR)) {
GPR_ASSERT(channel_args->args[i].type == GRPC_ARG_POINTER);
grpc_socket_mutator* mutator =
- (grpc_socket_mutator*)channel_args->args[i].value.pointer.p;
+ static_cast<grpc_socket_mutator*>(channel_args->args[i].value.pointer.p);
err = grpc_set_socket_with_mutator(fd, mutator);
if (err != GRPC_ERROR_NONE) goto error;
}
@@ -98,7 +98,7 @@ done:
static void tc_on_alarm(void* acp, grpc_error* error) {
int done;
- async_connect* ac = (async_connect*)acp;
+ async_connect* ac = static_cast<async_connect*>(acp);
if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
@@ -125,7 +125,7 @@ grpc_endpoint* grpc_tcp_client_create_from_fd(
}
static void on_writable(void* acp, grpc_error* error) {
- async_connect* ac = (async_connect*)acp;
+ async_connect* ac = static_cast<async_connect*>(acp);
int so_error = 0;
socklen_t so_error_size;
int err;
@@ -291,7 +291,7 @@ void grpc_tcp_client_create_from_prepared_fd(
async_connect* ac;
do {
GPR_ASSERT(addr->len < ~(socklen_t)0);
- err = connect(fd, (const struct sockaddr*)addr->addr, (socklen_t)addr->len);
+ err = connect(fd, reinterpret_cast<const struct sockaddr*>(addr->addr), static_cast<socklen_t>(addr->len));
} while (err < 0 && errno == EINTR);
if (err >= 0) {
char* addr_str = grpc_sockaddr_to_uri(addr);
@@ -309,7 +309,7 @@ void grpc_tcp_client_create_from_prepared_fd(
grpc_pollset_set_add_fd(interested_parties, fdobj);
- ac = (async_connect*)gpr_malloc(sizeof(async_connect));
+ ac = static_cast<async_connect*>(gpr_malloc(sizeof(async_connect)));
ac->closure = closure;
ac->ep = ep;
ac->fd = fdobj;
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index e78e19761a..71819adaba 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -116,7 +116,7 @@ static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
grpc_error* error);
static void done_poller(void* bp, grpc_error* error_ignored) {
- backup_poller* p = (backup_poller*)bp;
+ backup_poller* p = static_cast<backup_poller*>(bp);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
}
@@ -125,7 +125,7 @@ static void done_poller(void* bp, grpc_error* error_ignored) {
}
static void run_poller(void* bp, grpc_error* error_ignored) {
- backup_poller* p = (backup_poller*)bp;
+ backup_poller* p = static_cast<backup_poller*>(bp);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
}
@@ -165,8 +165,8 @@ static void drop_uncovered(grpc_tcp* tcp) {
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, (int)old_count,
- (int)old_count - 1);
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, static_cast<int>(old_count),
+ static_cast<int>(old_count) - 1);
}
GPR_ASSERT(old_count != 1);
}
@@ -176,12 +176,12 @@ static void cover_self(grpc_tcp* tcp) {
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
if (grpc_tcp_trace.enabled()) {
- gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", (int)old_count,
- 2 + (int)old_count);
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", static_cast<int>(old_count),
+ 2 + static_cast<int>(old_count));
}
if (old_count == 0) {
GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED();
- p = (backup_poller*)gpr_zalloc(sizeof(*p) + grpc_pollset_size());
+ p = static_cast<backup_poller*>(gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
}
@@ -230,12 +230,12 @@ static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
}
- drop_uncovered((grpc_tcp*)arg);
+ drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error);
}
static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
- tcp->bytes_read_this_round += (double)bytes;
+ tcp->bytes_read_this_round += static_cast<double>(bytes);
}
static void finish_estimate(grpc_tcp* tcp) {
@@ -257,10 +257,10 @@ static size_t get_target_read_size(grpc_tcp* tcp) {
double pressure = grpc_resource_quota_get_memory_pressure(rq);
double target =
tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0);
- size_t sz = (((size_t)GPR_CLAMP(target, tcp->min_read_chunk_size,
+ size_t sz = (static_cast<size_t>(GPR_CLAMP(target, tcp->min_read_chunk_size,
tcp->max_read_chunk_size)) +
255) &
- ~(size_t)255;
+ ~static_cast<size_t>(255);
/* don't use more than 1/16th of the overall resource quota for a single read
* alloc */
size_t rqmax = grpc_resource_quota_peek_size(rq);
@@ -285,7 +285,7 @@ static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
static void tcp_shutdown(grpc_endpoint* ep, grpc_error* why) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
grpc_fd_shutdown(tcp->em_fd, why);
grpc_resource_user_shutdown(tcp->resource_user);
}
@@ -339,7 +339,7 @@ static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
static void tcp_destroy(grpc_endpoint* ep) {
grpc_network_status_unregister_endpoint(ep);
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
TCP_UNREF(tcp, "destroy");
}
@@ -385,7 +385,7 @@ static void tcp_do_read(grpc_tcp* tcp) {
msg.msg_name = nullptr;
msg.msg_namelen = 0;
msg.msg_iov = iov;
- msg.msg_iovlen = (msg_iovlen_type)tcp->incoming_buffer->count;
+ msg.msg_iovlen = static_cast<msg_iovlen_type>(tcp->incoming_buffer->count);
msg.msg_control = nullptr;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -421,12 +421,12 @@ static void tcp_do_read(grpc_tcp* tcp) {
TCP_UNREF(tcp, "read");
} else {
GRPC_STATS_INC_TCP_READ_SIZE(read_bytes);
- add_to_estimate(tcp, (size_t)read_bytes);
+ add_to_estimate(tcp, static_cast<size_t>(read_bytes));
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
- if ((size_t)read_bytes < tcp->incoming_buffer->length) {
+ if (static_cast<size_t>(read_bytes) < tcp->incoming_buffer->length) {
grpc_slice_buffer_trim_end(
tcp->incoming_buffer,
- tcp->incoming_buffer->length - (size_t)read_bytes,
+ tcp->incoming_buffer->length - static_cast<size_t>(read_bytes),
&tcp->last_read_buffer);
}
GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
@@ -436,7 +436,7 @@ static void tcp_do_read(grpc_tcp* tcp) {
}
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
- grpc_tcp* tcp = (grpc_tcp*)tcpp;
+ grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_string(error));
@@ -469,7 +469,7 @@ static void tcp_continue_read(grpc_tcp* tcp) {
}
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
- grpc_tcp* tcp = (grpc_tcp*)arg;
+ grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
GPR_ASSERT(!tcp->finished_edge);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
@@ -487,7 +487,7 @@ static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
grpc_closure* cb) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
GPR_ASSERT(tcp->read_cb == nullptr);
tcp->read_cb = cb;
tcp->incoming_buffer = incoming_buffer;
@@ -578,7 +578,7 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
}
GPR_ASSERT(tcp->outgoing_byte_idx == 0);
- trailing = sending_length - (size_t)sent_length;
+ trailing = sending_length - static_cast<size_t>(sent_length);
while (trailing > 0) {
size_t slice_length;
@@ -602,7 +602,7 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
}
static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
- grpc_tcp* tcp = (grpc_tcp*)arg;
+ grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
grpc_closure* cb;
if (error != GRPC_ERROR_NONE) {
@@ -634,7 +634,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
grpc_closure* cb) {
GPR_TIMER_SCOPE("tcp_write", 0);
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
grpc_error* error = GRPC_ERROR_NONE;
if (grpc_tcp_trace.enabled()) {
@@ -678,34 +678,34 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
}
static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
grpc_pollset_add_fd(pollset, tcp->em_fd);
}
static void tcp_add_to_pollset_set(grpc_endpoint* ep,
grpc_pollset_set* pollset_set) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
}
static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
grpc_pollset_set* pollset_set) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
}
static char* tcp_get_peer(grpc_endpoint* ep) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
return gpr_strdup(tcp->peer_string);
}
static int tcp_get_fd(grpc_endpoint* ep) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
return tcp->fd;
}
static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
return tcp->resource_user;
}
@@ -733,19 +733,19 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
- grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+ grpc_integer_options options = {tcp_read_chunk_size, 1,
MAX_CHUNK_SIZE};
tcp_read_chunk_size =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE)) {
- grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+ grpc_integer_options options = {tcp_read_chunk_size, 1,
MAX_CHUNK_SIZE};
tcp_min_read_chunk_size =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE)) {
- grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+ grpc_integer_options options = {tcp_read_chunk_size, 1,
MAX_CHUNK_SIZE};
tcp_max_read_chunk_size =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
@@ -753,7 +753,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
+ static_cast<grpc_resource_quota*>(channel_args->args[i].value.pointer.p));
}
}
}
@@ -764,7 +764,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
tcp_read_chunk_size = GPR_CLAMP(tcp_read_chunk_size, tcp_min_read_chunk_size,
tcp_max_read_chunk_size);
- grpc_tcp* tcp = (grpc_tcp*)gpr_malloc(sizeof(grpc_tcp));
+ grpc_tcp* tcp = static_cast<grpc_tcp*>(gpr_malloc(sizeof(grpc_tcp)));
tcp->base.vtable = &vtable;
tcp->peer_string = gpr_strdup(peer_string);
tcp->fd = grpc_fd_wrapped_fd(em_fd);
@@ -773,7 +773,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
tcp->release_fd_cb = nullptr;
tcp->release_fd = nullptr;
tcp->incoming_buffer = nullptr;
- tcp->target_length = (double)tcp_read_chunk_size;
+ tcp->target_length = static_cast<double>(tcp_read_chunk_size);
tcp->min_read_chunk_size = tcp_min_read_chunk_size;
tcp->max_read_chunk_size = tcp_max_read_chunk_size;
tcp->bytes_read_this_round = 0;
@@ -794,7 +794,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
}
int grpc_tcp_fd(grpc_endpoint* ep) {
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
GPR_ASSERT(ep->vtable == &vtable);
return grpc_fd_wrapped_fd(tcp->em_fd);
}
@@ -802,7 +802,7 @@ int grpc_tcp_fd(grpc_endpoint* ep) {
void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
grpc_closure* done) {
grpc_network_status_unregister_endpoint(ep);
- grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
GPR_ASSERT(ep->vtable == &vtable);
tcp->release_fd = fd;
tcp->release_fd_cb = done;
diff --git a/src/core/lib/iomgr/tcp_server_posix.cc b/src/core/lib/iomgr/tcp_server_posix.cc
index 89d9160e02..f8f94e3fbb 100644
--- a/src/core/lib/iomgr/tcp_server_posix.cc
+++ b/src/core/lib/iomgr/tcp_server_posix.cc
@@ -72,7 +72,7 @@ grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
grpc_tcp_server** server) {
gpr_once_init(&check_init, init);
- grpc_tcp_server* s = (grpc_tcp_server*)gpr_zalloc(sizeof(grpc_tcp_server));
+ grpc_tcp_server* s = static_cast<grpc_tcp_server*>(gpr_zalloc(sizeof(grpc_tcp_server)));
s->so_reuseport = has_so_reuseport;
s->expand_wildcard_addrs = false;
for (size_t i = 0; i < (args == nullptr ? 0 : args->num_args); i++) {
@@ -135,7 +135,7 @@ static void finish_shutdown(grpc_tcp_server* s) {
}
static void destroyed_port(void* server, grpc_error* error) {
- grpc_tcp_server* s = (grpc_tcp_server*)server;
+ grpc_tcp_server* s = static_cast<grpc_tcp_server*>(server);
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -194,15 +194,15 @@ static void tcp_server_destroy(grpc_tcp_server* s) {
/* event manager callback when reads are ready */
static void on_read(void* arg, grpc_error* err) {
- grpc_tcp_listener* sp = (grpc_tcp_listener*)arg;
+ grpc_tcp_listener* sp = static_cast<grpc_tcp_listener*>(arg);
grpc_pollset* read_notifier_pollset;
if (err != GRPC_ERROR_NONE) {
goto error;
}
read_notifier_pollset =
- sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
- &sp->server->next_pollset_to_assign, 1) %
+ sp->server->pollsets[static_cast<size_t>(gpr_atm_no_barrier_fetch_add(
+ &sp->server->next_pollset_to_assign, 1)) %
sp->server->pollset_count];
/* loop until accept4 returns EAGAIN, and then re-arm notification */
@@ -249,7 +249,7 @@ static void on_read(void* arg, grpc_error* err) {
// Create acceptor.
grpc_tcp_server_acceptor* acceptor =
- (grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor));
+ static_cast<grpc_tcp_server_acceptor*>(gpr_malloc(sizeof(*acceptor)));
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = sp->fd_index;
@@ -363,7 +363,7 @@ static grpc_error* clone_port(grpc_tcp_listener* listener, unsigned count) {
listener->server->nports++;
grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
- sp = (grpc_tcp_listener*)gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = static_cast<grpc_tcp_listener*>(gpr_malloc(sizeof(grpc_tcp_listener)));
sp->next = listener->next;
listener->next = sp;
/* sp (the new listener) is a sibling of 'listener' (the original
@@ -410,8 +410,8 @@ grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
if (requested_port == 0) {
for (sp = s->head; sp; sp = sp->next) {
sockname_temp.len = sizeof(struct sockaddr_storage);
- if (0 == getsockname(sp->fd, (struct sockaddr*)&sockname_temp.addr,
- (socklen_t*)&sockname_temp.len)) {
+ if (0 == getsockname(sp->fd, reinterpret_cast<struct sockaddr*>(&sockname_temp.addr),
+ reinterpret_cast<socklen_t*>(&sockname_temp.len))) {
int used_port = grpc_sockaddr_get_port(&sockname_temp);
if (used_port > 0) {
memcpy(&sockname_temp, addr, sizeof(grpc_resolved_address));
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_common.cc b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
index 5139760634..db8ccf02eb 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
@@ -56,7 +56,7 @@ static void init_max_accept_queue_size(void) {
char* end;
long i = strtol(buf, &end, 10);
if (i > 0 && i <= INT_MAX && end && *end == '\n') {
- n = (int)i;
+ n = static_cast<int>(i);
}
}
fclose(fp);
@@ -93,7 +93,7 @@ static grpc_error* add_socket_to_server(grpc_tcp_server* s, int fd,
gpr_mu_lock(&s->mu);
s->nports++;
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = (grpc_tcp_listener*)gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = static_cast<grpc_tcp_listener*>(gpr_malloc(sizeof(grpc_tcp_listener)));
sp->next = nullptr;
if (s->head == nullptr) {
s->head = sp;
@@ -169,7 +169,7 @@ grpc_error* grpc_tcp_server_prepare_socket(int fd,
if (err != GRPC_ERROR_NONE) goto error;
GPR_ASSERT(addr->len < ~(socklen_t)0);
- if (bind(fd, (struct sockaddr*)addr->addr, (socklen_t)addr->len) < 0) {
+ if (bind(fd, reinterpret_cast<struct sockaddr*>(const_cast<char*>(addr->addr)), static_cast<socklen_t>(addr->len)) < 0) {
err = GRPC_OS_ERROR(errno, "bind");
goto error;
}
@@ -181,8 +181,8 @@ grpc_error* grpc_tcp_server_prepare_socket(int fd,
sockname_temp.len = sizeof(struct sockaddr_storage);
- if (getsockname(fd, (struct sockaddr*)sockname_temp.addr,
- (socklen_t*)&sockname_temp.len) < 0) {
+ if (getsockname(fd, reinterpret_cast<struct sockaddr*>(sockname_temp.addr),
+ reinterpret_cast<socklen_t*>(&sockname_temp.len)) < 0) {
err = GRPC_OS_ERROR(errno, "getsockname");
goto error;
}
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
index 227bf94aa7..5470dbcff7 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
@@ -66,12 +66,12 @@ static grpc_error* get_unused_port(int* port) {
if (dsmode == GRPC_DSMODE_IPV4) {
grpc_sockaddr_make_wildcard4(0, &wild);
}
- if (bind(fd, (const struct sockaddr*)wild.addr, (socklen_t)wild.len) != 0) {
+ if (bind(fd, reinterpret_cast<const struct sockaddr*>(wild.addr), static_cast<socklen_t>(wild.len)) != 0) {
err = GRPC_OS_ERROR(errno, "bind");
close(fd);
return err;
}
- if (getsockname(fd, (struct sockaddr*)wild.addr, (socklen_t*)&wild.len) !=
+ if (getsockname(fd, reinterpret_cast<struct sockaddr*>(wild.addr), reinterpret_cast<socklen_t*>(&wild.len)) !=
0) {
err = GRPC_OS_ERROR(errno, "getsockname");
close(fd);
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index ef86d237d3..b47c12b689 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -240,9 +240,9 @@ void grpc_timer_list_init() {
uint32_t i;
g_num_shards = GPR_MIN(1, 2 * gpr_cpu_num_cores());
- g_shards = (timer_shard*)gpr_zalloc(g_num_shards * sizeof(*g_shards));
+ g_shards = static_cast<timer_shard*>(gpr_zalloc(g_num_shards * sizeof(*g_shards)));
g_shard_queue =
- (timer_shard**)gpr_zalloc(g_num_shards * sizeof(*g_shard_queue));
+ static_cast<timer_shard**>(gpr_zalloc(g_num_shards * sizeof(*g_shard_queue)));
g_shared_mutables.initialized = true;
g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
@@ -360,7 +360,7 @@ void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
}
grpc_time_averaged_stats_add_sample(&shard->stats,
- (double)(deadline - now) / 1000.0);
+ static_cast<double>(deadline - now) / 1000.0);
ADD_TO_HASH_TABLE(timer);
@@ -374,7 +374,7 @@ void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
gpr_log(GPR_DEBUG,
" .. add to shard %d with queue_deadline_cap=%" PRIdPTR
" => is_first_timer=%s",
- (int)(shard - g_shards), shard->queue_deadline_cap,
+ static_cast<int>(shard - g_shards), shard->queue_deadline_cap,
is_first_timer ? "true" : "false");
}
gpr_mu_unlock(&shard->mu);
@@ -461,11 +461,11 @@ static int refill_heap(timer_shard* shard, gpr_atm now) {
/* Compute the new cap and put all timers under it into the queue: */
shard->queue_deadline_cap =
saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
- (gpr_atm)(deadline_delta * 1000.0));
+ static_cast<gpr_atm>(deadline_delta * 1000.0));
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
- (int)(shard - g_shards), shard->queue_deadline_cap);
+ static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
}
for (timer = shard->list.next; timer != &shard->list; timer = next) {
next = timer->next;
@@ -490,7 +490,7 @@ static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
for (;;) {
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
- (int)(shard - g_shards),
+ static_cast<int>(shard - g_shards),
grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
}
if (grpc_timer_heap_is_empty(&shard->heap)) {
@@ -530,7 +530,7 @@ static size_t pop_timers(timer_shard* shard, gpr_atm now,
gpr_mu_unlock(&shard->mu);
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d] popped %" PRIdPTR,
- (int)(shard - g_shards), n);
+ static_cast<int>(shard - g_shards), n);
}
return n;
}
@@ -553,7 +553,7 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR,
- (int)(g_shard_queue[0] - g_shards),
+ static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline);
}
@@ -573,7 +573,7 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
" .. result --> %d"
", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR
", now=%" PRIdPTR,
- result, (int)(g_shard_queue[0] - g_shards),
+ result, static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline, new_min_deadline, now);
}
diff --git a/src/core/lib/iomgr/timer_heap.cc b/src/core/lib/iomgr/timer_heap.cc
index 632928fafa..9aedfd3755 100644
--- a/src/core/lib/iomgr/timer_heap.cc
+++ b/src/core/lib/iomgr/timer_heap.cc
@@ -35,7 +35,7 @@
its argument. */
static void adjust_upwards(grpc_timer** first, uint32_t i, grpc_timer* t) {
while (i > 0) {
- uint32_t parent = (uint32_t)(((int)i - 1) / 2);
+ uint32_t parent = static_cast<uint32_t>((static_cast<int>(i) - 1) / 2);
if (first[parent]->deadline <= t->deadline) break;
first[i] = first[parent];
first[i]->heap_index = i;
@@ -74,14 +74,14 @@ static void maybe_shrink(grpc_timer_heap* heap) {
if (heap->timer_count >= 8 &&
heap->timer_count <= heap->timer_capacity / SHRINK_FULLNESS_FACTOR / 2) {
heap->timer_capacity = heap->timer_count * SHRINK_FULLNESS_FACTOR;
- heap->timers = (grpc_timer**)gpr_realloc(
- heap->timers, heap->timer_capacity * sizeof(grpc_timer*));
+ heap->timers = static_cast<grpc_timer**>(gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer*)));
}
}
static void note_changed_priority(grpc_timer_heap* heap, grpc_timer* timer) {
uint32_t i = timer->heap_index;
- uint32_t parent = (uint32_t)(((int)i - 1) / 2);
+ uint32_t parent = static_cast<uint32_t>((static_cast<int>(i) - 1) / 2);
if (heap->timers[parent]->deadline > timer->deadline) {
adjust_upwards(heap->timers, i, timer);
} else {
@@ -99,8 +99,8 @@ int grpc_timer_heap_add(grpc_timer_heap* heap, grpc_timer* timer) {
if (heap->timer_count == heap->timer_capacity) {
heap->timer_capacity =
GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2);
- heap->timers = (grpc_timer**)gpr_realloc(
- heap->timers, heap->timer_capacity * sizeof(grpc_timer*));
+ heap->timers = static_cast<grpc_timer**>(gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer*)));
}
timer->heap_index = heap->timer_count;
adjust_upwards(heap->timers, heap->timer_count, timer);
diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc
index 7fb068f10f..4cefb7fc9f 100644
--- a/src/core/lib/iomgr/timer_manager.cc
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -86,7 +86,7 @@ static void start_timer_thread_and_unlock(void) {
}
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
- completed_thread* ct = (completed_thread*)gpr_malloc(sizeof(*ct));
+ completed_thread* ct = static_cast<completed_thread*>(gpr_malloc(sizeof(*ct)));
// The call to gpr_thd_new() has to be under the same lock used by
// gc_completed_threads(), particularly due to ct->t, which is written here
// (internally by gpr_thd_new) and read there. Otherwise it's possible for ct
@@ -276,7 +276,7 @@ static void timer_thread(void* completed_thread_ptr) {
grpc_core::ExecCtx exec_ctx(0);
timer_main_loop();
- timer_thread_cleanup((completed_thread*)completed_thread_ptr);
+ timer_thread_cleanup(static_cast<completed_thread*>(completed_thread_ptr));
}
static void start_threads(void) {
diff --git a/src/core/lib/iomgr/udp_server.cc b/src/core/lib/iomgr/udp_server.cc
index 27d32c59ae..0d052dc166 100644
--- a/src/core/lib/iomgr/udp_server.cc
+++ b/src/core/lib/iomgr/udp_server.cc
@@ -132,14 +132,14 @@ static grpc_socket_factory* get_socket_factory(const grpc_channel_args* args) {
const grpc_arg* arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY);
if (arg) {
GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
- return (grpc_socket_factory*)arg->value.pointer.p;
+ return static_cast<grpc_socket_factory*>(arg->value.pointer.p);
}
}
return nullptr;
}
grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args) {
- grpc_udp_server* s = (grpc_udp_server*)gpr_malloc(sizeof(grpc_udp_server));
+ grpc_udp_server* s = static_cast<grpc_udp_server*>(gpr_malloc(sizeof(grpc_udp_server)));
gpr_mu_init(&s->mu);
s->socket_factory = get_socket_factory(args);
if (s->socket_factory) {
@@ -156,7 +156,7 @@ grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args) {
}
static void shutdown_fd(void* args, grpc_error* error) {
- struct shutdown_fd_args* shutdown_args = (struct shutdown_fd_args*)args;
+ struct shutdown_fd_args* shutdown_args = static_cast<struct shutdown_fd_args*>(args);
grpc_udp_listener* sp = shutdown_args->sp;
gpr_log(GPR_DEBUG, "shutdown fd %d", sp->fd);
gpr_mu_lock(shutdown_args->server_mu);
@@ -198,7 +198,7 @@ static void finish_shutdown(grpc_udp_server* s) {
}
static void destroyed_port(void* server, grpc_error* error) {
- grpc_udp_server* s = (grpc_udp_server*)server;
+ grpc_udp_server* s = static_cast<grpc_udp_server*>(server);
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -260,7 +260,7 @@ void grpc_udp_server_destroy(grpc_udp_server* s, grpc_closure* on_done) {
for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
struct shutdown_fd_args* args =
- (struct shutdown_fd_args*)gpr_malloc(sizeof(*args));
+ static_cast<struct shutdown_fd_args*>(gpr_malloc(sizeof(*args)));
args->sp = sp;
args->server_mu = &s->mu;
GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
@@ -279,7 +279,7 @@ static int bind_socket(grpc_socket_factory* socket_factory, int sockfd,
const grpc_resolved_address* addr) {
return (socket_factory != nullptr)
? grpc_socket_factory_bind(socket_factory, sockfd, addr)
- : bind(sockfd, (struct sockaddr*)addr->addr, (socklen_t)addr->len);
+ : bind(sockfd, reinterpret_cast<struct sockaddr*>(const_cast<char*>(addr->addr)), static_cast<socklen_t>(addr->len));
}
/* Prepare a recently-created socket for listening. */
@@ -287,7 +287,7 @@ static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
const grpc_resolved_address* addr, int rcv_buf_size,
int snd_buf_size) {
grpc_resolved_address sockname_temp;
- struct sockaddr* addr_ptr = (struct sockaddr*)addr->addr;
+ struct sockaddr* addr_ptr = reinterpret_cast<struct sockaddr*>(const_cast<char*>(addr->addr));
if (fd < 0) {
goto error;
@@ -323,8 +323,8 @@ static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
sockname_temp.len = sizeof(struct sockaddr_storage);
- if (getsockname(fd, (struct sockaddr*)sockname_temp.addr,
- (socklen_t*)&sockname_temp.len) < 0) {
+ if (getsockname(fd, reinterpret_cast<struct sockaddr*>(sockname_temp.addr),
+ reinterpret_cast<socklen_t*>(&sockname_temp.len)) < 0) {
goto error;
}
@@ -379,7 +379,7 @@ static void do_read(void* arg, grpc_error* error) {
/* event manager callback when reads are ready */
static void on_read(void* arg, grpc_error* error) {
- grpc_udp_listener* sp = (grpc_udp_listener*)arg;
+ grpc_udp_listener* sp = static_cast<grpc_udp_listener*>(arg);
gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
@@ -438,7 +438,7 @@ static void do_write(void* arg, grpc_error* error) {
}
static void on_write(void* arg, grpc_error* error) {
- grpc_udp_listener* sp = (grpc_udp_listener*)arg;
+ grpc_udp_listener* sp = static_cast<grpc_udp_listener*>(arg);
gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
@@ -479,7 +479,7 @@ static int add_socket_to_server(grpc_udp_server* s, int fd,
gpr_free(addr_str);
gpr_mu_lock(&s->mu);
s->nports++;
- sp = (grpc_udp_listener*)gpr_malloc(sizeof(grpc_udp_listener));
+ sp = static_cast<grpc_udp_listener*>(gpr_malloc(sizeof(grpc_udp_listener)));
sp->next = nullptr;
if (s->head == nullptr) {
s->head = sp;
@@ -530,12 +530,12 @@ int grpc_udp_server_add_port(grpc_udp_server* s,
if (grpc_sockaddr_get_port(addr) == 0) {
for (sp = s->head; sp; sp = sp->next) {
sockname_temp.len = sizeof(struct sockaddr_storage);
- if (0 == getsockname(sp->fd, (struct sockaddr*)sockname_temp.addr,
- (socklen_t*)&sockname_temp.len)) {
+ if (0 == getsockname(sp->fd, reinterpret_cast<struct sockaddr*>(sockname_temp.addr),
+ reinterpret_cast<socklen_t*>(&sockname_temp.len))) {
port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
allocated_addr =
- (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
+ static_cast<grpc_resolved_address*>(gpr_malloc(sizeof(grpc_resolved_address)));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
diff --git a/src/core/lib/iomgr/unix_sockets_posix.cc b/src/core/lib/iomgr/unix_sockets_posix.cc
index 91b7cb1e59..7365787c71 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.cc
+++ b/src/core/lib/iomgr/unix_sockets_posix.cc
@@ -52,11 +52,11 @@ grpc_error* grpc_resolve_unix_domain_address(const char* name,
return err;
}
*addrs =
- (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses));
+ static_cast<grpc_resolved_addresses*>(gpr_malloc(sizeof(grpc_resolved_addresses)));
(*addrs)->naddrs = 1;
(*addrs)->addrs =
- (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
- un = (struct sockaddr_un*)(*addrs)->addrs->addr;
+ static_cast<grpc_resolved_address*>(gpr_malloc(sizeof(grpc_resolved_address)));
+ un = reinterpret_cast<struct sockaddr_un*>((*addrs)->addrs->addr);
un->sun_family = AF_UNIX;
strncpy(un->sun_path, name, sizeof(un->sun_path));
(*addrs)->addrs->len = strlen(un->sun_path) + sizeof(un->sun_family) + 1;
@@ -64,17 +64,17 @@ grpc_error* grpc_resolve_unix_domain_address(const char* name,
}
int grpc_is_unix_socket(const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
return addr->sa_family == AF_UNIX;
}
void grpc_unlink_if_unix_domain_socket(
const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
if (addr->sa_family != AF_UNIX) {
return;
}
- struct sockaddr_un* un = (struct sockaddr_un*)resolved_addr->addr;
+ struct sockaddr_un* un = reinterpret_cast<struct sockaddr_un*>(const_cast<char*>(resolved_addr->addr));
struct stat st;
if (stat(un->sun_path, &st) == 0 && (st.st_mode & S_IFMT) == S_IFSOCK) {
@@ -84,7 +84,7 @@ void grpc_unlink_if_unix_domain_socket(
char* grpc_sockaddr_to_uri_unix_if_possible(
const grpc_resolved_address* resolved_addr) {
- const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ const struct sockaddr* addr = reinterpret_cast<const struct sockaddr*>(resolved_addr->addr);
if (addr->sa_family != AF_UNIX) {
return nullptr;
}
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.cc b/src/core/lib/iomgr/wakeup_fd_cv.cc
index 4818cafe3d..80f07ccf23 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.cc
+++ b/src/core/lib/iomgr/wakeup_fd_cv.cc
@@ -43,8 +43,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
if (!g_cvfds.free_fds) {
newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
- g_cvfds.cvfds = (grpc_fd_node*)gpr_realloc(g_cvfds.cvfds,
- sizeof(grpc_fd_node) * newsize);
+ g_cvfds.cvfds = static_cast<grpc_fd_node*>(gpr_realloc(g_cvfds.cvfds,
+ sizeof(grpc_fd_node) * newsize));
for (i = g_cvfds.size; i < newsize; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = nullptr;
@@ -54,7 +54,7 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
g_cvfds.size = newsize;
}
- idx = (int)(g_cvfds.free_fds - g_cvfds.cvfds);
+ idx = static_cast<int>(g_cvfds.free_fds - g_cvfds.cvfds);
g_cvfds.free_fds = g_cvfds.free_fds->next_free;
g_cvfds.cvfds[idx].cvs = nullptr;
g_cvfds.cvfds[idx].is_set = 0;