author    | Craig Tiller <ctiller@google.com> | 2017-07-11 08:34:26 -0700
committer | Craig Tiller <ctiller@google.com> | 2017-07-11 08:34:26 -0700
commit    | ed38016565d8c7127211fdde45d6add3230193d0 (patch)
tree      | e494cfd93a76cd4c88693018d633192de5d76e0b
parent    | fd5eb2412206ab3620644f8e800fb1f44a18ccdd (diff)
C++ compatibility fixes
-rw-r--r-- | .clang_complete | 2
-rw-r--r-- | src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c | 6
-rw-r--r-- | src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c | 3
-rw-r--r-- | src/core/ext/filters/deadline/deadline_filter.c | 26
-rw-r--r-- | src/core/ext/filters/max_age/max_age_filter.c | 18
-rw-r--r-- | src/core/ext/filters/message_size/message_size_filter.c | 22
-rw-r--r-- | src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c | 8
-rw-r--r-- | src/core/ext/filters/workarounds/workaround_utils.c | 3
-rw-r--r-- | src/core/lib/support/arena.c | 4
-rw-r--r-- | src/core/lib/support/atm.c | 14
-rw-r--r-- | src/core/lib/support/avl.c | 2
11 files changed, 57 insertions, 51 deletions
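Almost every hunk below is the same fix: C quietly converts `void *` to any object-pointer type, while C++ requires an explicit cast, so the results of `gpr_malloc`/`gpr_zalloc` and the `void *` arguments handed to callbacks now get spelled-out casts. The two flags added to `.clang_complete` (`-Wall`, `-Wc++-compat`) are presumably there so that clang-based editor tooling reports such C-only constructs as you type. A minimal sketch of the pattern, using an invented `widget` type rather than any real gRPC struct:

```c
#include <stdlib.h>

typedef struct widget {
  int refs;
} widget;

/* Fine in C: malloc's void * converts implicitly to widget *.
   A C++ compiler rejects the assignment, and GCC's -Wc++-compat
   warns about it even in C mode. */
static widget *widget_create_c_only(void) {
  widget *w = malloc(sizeof(*w));
  w->refs = 1;
  return w;
}

/* The shape used throughout this commit: still plain C, but with the
   explicit cast that C++ insists on. */
static widget *widget_create_portable(void) {
  widget *w = (widget *)malloc(sizeof(*w));
  w->refs = 1;
  return w;
}

int main(void) {
  widget *a = widget_create_c_only();
  widget *b = widget_create_portable();
  free(a);
  free(b);
  return 0;
}
```

The casts are purely a compile-time matter, so a 57-insertion/51-deletion diff like this one should be behaviour-neutral.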
diff --git a/.clang_complete b/.clang_complete
index e35f74198f..1818679705 100644
--- a/.clang_complete
+++ b/.clang_complete
@@ -1,3 +1,5 @@
+-Wall
+-Wc++-compat
 -Ithird_party/googletest/include
 -Ithird_party/googletest
 -Iinclude
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c
index af3391a731..5ea75f0554 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c
@@ -132,7 +132,7 @@ static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
 
 static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
-  dns_resolver *r = arg;
+  dns_resolver *r = (dns_resolver *)arg;
 
   r->have_retry_timer = false;
   if (error == GRPC_ERROR_NONE) {
@@ -146,7 +146,7 @@ static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
 
 static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                    grpc_error *error) {
-  dns_resolver *r = arg;
+  dns_resolver *r = (dns_resolver *)arg;
   grpc_channel_args *result = NULL;
   GPR_ASSERT(r->resolving);
   r->resolving = false;
@@ -241,7 +241,7 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
   char *path = args->uri->path;
   if (path[0] == '/') ++path;
   // Create resolver.
-  dns_resolver *r = gpr_zalloc(sizeof(dns_resolver));
+  dns_resolver *r = (dns_resolver *)gpr_zalloc(sizeof(dns_resolver));
   grpc_resolver_init(&r->base, &dns_resolver_vtable, args->combiner);
   r->name_to_resolve = gpr_strdup(path);
   r->default_port = gpr_strdup(default_port);
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
index 7b4fe38272..7ceb8f40a1 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
@@ -177,7 +177,8 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
     return NULL;
   }
   /* Instantiate resolver. */
-  sockaddr_resolver *r = gpr_zalloc(sizeof(sockaddr_resolver));
+  sockaddr_resolver *r =
+      (sockaddr_resolver *)gpr_zalloc(sizeof(sockaddr_resolver));
   r->addresses = addresses;
   r->channel_args = grpc_channel_args_copy(args->args);
   grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);
diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.c
index ced025e2e2..6789903c95 100644
--- a/src/core/ext/filters/deadline/deadline_filter.c
+++ b/src/core/ext/filters/deadline/deadline_filter.c
@@ -37,8 +37,8 @@
 // Timer callback.
 static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
                            grpc_error* error) {
-  grpc_call_element* elem = arg;
-  grpc_deadline_state* deadline_state = elem->call_data;
+  grpc_call_element* elem = (grpc_call_element*)arg;
+  grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
   if (error != GRPC_ERROR_CANCELLED) {
     grpc_call_element_signal_error(
         exec_ctx, elem,
@@ -57,7 +57,7 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
   if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
     return;
   }
-  grpc_deadline_state* deadline_state = elem->call_data;
+  grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
   grpc_deadline_timer_state cur_state;
   grpc_closure* closure = NULL;
 retry:
@@ -112,7 +112,7 @@ static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
 
 // Callback run when the call is complete.
 static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
-  grpc_deadline_state* deadline_state = arg;
+  grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
   cancel_timer_if_needed(exec_ctx, deadline_state);
   // Invoke the next callback.
   GRPC_CLOSURE_RUN(exec_ctx, deadline_state->next_on_complete,
@@ -145,7 +145,7 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
 void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                               grpc_call_stack* call_stack,
                               gpr_timespec deadline) {
-  grpc_deadline_state* deadline_state = elem->call_data;
+  grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
   deadline_state->call_stack = call_stack;
   // Deadline will always be infinite on servers, so the timer will only be
   // set on clients with a finite deadline.
@@ -169,13 +169,13 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
 
 void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem) {
-  grpc_deadline_state* deadline_state = elem->call_data;
+  grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
   cancel_timer_if_needed(exec_ctx, deadline_state);
 }
 
 void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                                gpr_timespec new_deadline) {
-  grpc_deadline_state* deadline_state = elem->call_data;
+  grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
   cancel_timer_if_needed(exec_ctx, deadline_state);
   start_timer_if_needed(exec_ctx, elem, new_deadline);
 }
@@ -183,7 +183,7 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
 void grpc_deadline_state_client_start_transport_stream_op_batch(
     grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
     grpc_transport_stream_op_batch* op) {
-  grpc_deadline_state* deadline_state = elem->call_data;
+  grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
   if (op->cancel_stream) {
     cancel_timer_if_needed(exec_ctx, deadline_state);
   } else {
@@ -256,8 +256,8 @@ static void client_start_transport_stream_op_batch(
 // Callback for receiving initial metadata on the server.
 static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
                                         grpc_error* error) {
-  grpc_call_element* elem = arg;
-  server_call_data* calld = elem->call_data;
+  grpc_call_element* elem = (grpc_call_element*)arg;
+  server_call_data* calld = (server_call_data*)elem->call_data;
   // Get deadline from metadata and start the timer if needed.
   start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
   // Invoke the next callback.
@@ -269,7 +269,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
 static void server_start_transport_stream_op_batch(
     grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
     grpc_transport_stream_op_batch* op) {
-  server_call_data* calld = elem->call_data;
+  server_call_data* calld = (server_call_data*)elem->call_data;
   if (op->cancel_stream) {
     cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
   } else {
@@ -341,8 +341,8 @@ static bool maybe_add_deadline_filter(grpc_exec_ctx* exec_ctx,
                                       void* arg) {
   return grpc_deadline_checking_enabled(
              grpc_channel_stack_builder_get_channel_arguments(builder))
-             ? grpc_channel_stack_builder_prepend_filter(builder, arg, NULL,
-                                                         NULL)
+             ? grpc_channel_stack_builder_prepend_filter(
+                   builder, (const grpc_channel_filter*)arg, NULL, NULL)
             : true;
 }
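The deadline filter above shows the other place the C-only conversion was relied on: closure callbacks receive their environment as `void *arg`, and per-call state comes back out of `elem->call_data` as `void *`, both of which C assigns straight into a typed pointer. A schematic version of the fix, with invented names (`my_call_state`, `on_timer`) rather than the real `grpc_closure` signature:

```c
#include <stdbool.h>

typedef struct my_call_state {
  bool cancelled;
} my_call_state;

/* A callback in the style of the filter code: the scheduler only knows
   about void *, so the callee casts back to its concrete state type.
   In C the cast could be omitted; compiling the file as C++ (or with
   -Wc++-compat) requires it to be written out. */
static void on_timer(void *arg, int error_code) {
  my_call_state *state = (my_call_state *)arg; /* was: = arg; */
  if (error_code != 0) {
    state->cancelled = true;
  }
}

int main(void) {
  my_call_state state = {false};
  on_timer(&state, 1); /* hand the state through a void *, as a closure would */
  return state.cancelled ? 0 : 1;
}
```

In the real filters the same cast is applied wherever `elem->call_data` or `elem->channel_data` is read back into its concrete `call_data`/`channel_data` type.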
diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c
index 35304f8150..7d748b9c32 100644
--- a/src/core/ext/filters/max_age/max_age_filter.c
+++ b/src/core/ext/filters/max_age/max_age_filter.c
@@ -108,7 +108,7 @@ static void decrease_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
 
 static void start_max_idle_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
                                             grpc_error* error) {
-  channel_data* chand = arg;
+  channel_data* chand = (channel_data*)arg;
   /* Decrease call_count. If there are no active calls at this time,
      max_idle_timer will start here. If the number of active calls is not 0,
      max_idle_timer will start after all the active calls end. */
@@ -119,7 +119,7 @@ static void start_max_idle_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
 
 static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
                                            grpc_error* error) {
-  channel_data* chand = arg;
+  channel_data* chand = (channel_data*)arg;
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_timer_pending = true;
   GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer");
@@ -140,7 +140,7 @@ static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
 static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx,
                                                       void* arg,
                                                       grpc_error* error) {
-  channel_data* chand = arg;
+  channel_data* chand = (channel_data*)arg;
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_grace_timer_pending = true;
   GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
@@ -156,7 +156,7 @@ static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx,
 
 static void close_max_idle_channel(grpc_exec_ctx* exec_ctx, void* arg,
                                    grpc_error* error) {
-  channel_data* chand = arg;
+  channel_data* chand = (channel_data*)arg;
   if (error == GRPC_ERROR_NONE) {
     /* Prevent the max idle timer from being set again */
     gpr_atm_no_barrier_fetch_add(&chand->call_count, 1);
@@ -176,7 +176,7 @@ static void close_max_idle_channel(grpc_exec_ctx* exec_ctx, void* arg,
 
 static void close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
                                   grpc_error* error) {
-  channel_data* chand = arg;
+  channel_data* chand = (channel_data*)arg;
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_timer_pending = false;
   gpr_mu_unlock(&chand->max_age_timer_mu);
@@ -200,7 +200,7 @@ static void close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
 
 static void force_close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
                                         grpc_error* error) {
-  channel_data* chand = arg;
+  channel_data* chand = (channel_data*)arg;
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_grace_timer_pending = false;
   gpr_mu_unlock(&chand->max_age_timer_mu);
@@ -220,7 +220,7 @@ static void force_close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
 
 static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg,
                                          grpc_error* error) {
-  channel_data* chand = arg;
+  channel_data* chand = (channel_data*)arg;
   if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
     grpc_transport_op* op = grpc_make_transport_op(NULL);
     op->on_connectivity_state_change = &chand->channel_connectivity_changed,
@@ -264,7 +264,7 @@ static int add_random_max_connection_age_jitter(int value) {
 static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                   grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  channel_data* chand = elem->channel_data;
+  channel_data* chand = (channel_data*)elem->channel_data;
   increase_call_count(exec_ctx, chand);
   return GRPC_ERROR_NONE;
 }
@@ -281,7 +281,7 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
 static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
                                      grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
-  channel_data* chand = elem->channel_data;
+  channel_data* chand = (channel_data*)elem->channel_data;
   gpr_mu_init(&chand->max_age_timer_mu);
   chand->max_age_timer_pending = false;
   chand->max_age_grace_timer_pending = false;
diff --git a/src/core/ext/filters/message_size/message_size_filter.c b/src/core/ext/filters/message_size/message_size_filter.c
index 9bb565ed6d..846c7df69a 100644
--- a/src/core/ext/filters/message_size/message_size_filter.c
+++ b/src/core/ext/filters/message_size/message_size_filter.c
@@ -60,7 +60,8 @@ static void* message_size_limits_create_from_json(const grpc_json* json) {
       if (max_response_message_bytes == -1) return NULL;
     }
   }
-  message_size_limits* value = gpr_malloc(sizeof(message_size_limits));
+  message_size_limits* value =
+      (message_size_limits*)gpr_malloc(sizeof(message_size_limits));
   value->max_send_size = max_request_message_bytes;
   value->max_recv_size = max_response_message_bytes;
   return value;
@@ -88,8 +89,8 @@ typedef struct channel_data {
 // receive message size.
 static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
                                grpc_error* error) {
-  grpc_call_element* elem = user_data;
-  call_data* calld = elem->call_data;
+  grpc_call_element* elem = (grpc_call_element*)user_data;
+  call_data* calld = (call_data*)elem->call_data;
   if (*calld->recv_message != NULL && calld->limits.max_recv_size >= 0 &&
       (*calld->recv_message)->length > (size_t)calld->limits.max_recv_size) {
     char* message_string;
@@ -117,7 +118,7 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
 static void start_transport_stream_op_batch(
     grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
     grpc_transport_stream_op_batch* op) {
-  call_data* calld = elem->call_data;
+  call_data* calld = (call_data*)elem->call_data;
   // Check max send message size.
   if (op->send_message && calld->limits.max_send_size >= 0 &&
       op->payload->send_message.send_message->length >
@@ -149,8 +150,8 @@ static void start_transport_stream_op_batch(
 static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                   grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  channel_data* chand = elem->channel_data;
-  call_data* calld = elem->call_data;
+  channel_data* chand = (channel_data*)elem->channel_data;
+  call_data* calld = (call_data*)elem->call_data;
   calld->next_recv_message_ready = NULL;
   GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
                     grpc_schedule_on_exec_ctx);
@@ -160,8 +161,9 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
   // size to the receive limit.
   calld->limits = chand->limits;
   if (chand->method_limit_table != NULL) {
-    message_size_limits* limits = grpc_method_config_table_get(
-        exec_ctx, chand->method_limit_table, args->path);
+    message_size_limits* limits =
+        (message_size_limits*)grpc_method_config_table_get(
+            exec_ctx, chand->method_limit_table, args->path);
     if (limits != NULL) {
       if (limits->max_send_size >= 0 &&
           (limits->max_send_size < calld->limits.max_send_size ||
@@ -220,7 +222,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
                                      grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   GPR_ASSERT(!args->is_last);
-  channel_data* chand = elem->channel_data;
+  channel_data* chand = (channel_data*)elem->channel_data;
   chand->limits = get_message_size_limits(args->channel_args);
   // Get method config table from channel args.
   const grpc_arg* channel_arg =
@@ -243,7 +245,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
 
 // Destructor for channel_data.
 static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
                                  grpc_channel_element* elem) {
-  channel_data* chand = elem->channel_data;
+  channel_data* chand = (channel_data*)elem->channel_data;
   grpc_slice_hash_table_unref(exec_ctx, chand->method_limit_table);
 }
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
index 8b3fff5fa3..b4d2cb4b8c 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
@@ -52,8 +52,8 @@ static bool get_user_agent_mdelem(const grpc_metadata_batch* batch,
 // Callback invoked when we receive an initial metadata.
 static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
                                         void* user_data, grpc_error* error) {
-  grpc_call_element* elem = user_data;
-  call_data* calld = elem->call_data;
+  grpc_call_element* elem = (grpc_call_element*)user_data;
+  call_data* calld = (call_data*)elem->call_data;
 
   if (GRPC_ERROR_NONE == error) {
     grpc_mdelem md;
@@ -75,7 +75,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
 static void start_transport_stream_op_batch(
     grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
     grpc_transport_stream_op_batch* op) {
-  call_data* calld = elem->call_data;
+  call_data* calld = (call_data*)elem->call_data;
 
   // Inject callback for receiving initial metadata
   if (op->recv_initial_metadata) {
@@ -103,7 +103,7 @@ static void start_transport_stream_op_batch(
 static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                   grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  call_data* calld = elem->call_data;
+  call_data* calld = (call_data*)elem->call_data;
   calld->next_recv_initial_metadata_ready = NULL;
   calld->workaround_active = false;
   GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
diff --git a/src/core/ext/filters/workarounds/workaround_utils.c b/src/core/ext/filters/workarounds/workaround_utils.c
index bc76753a8a..e600fbee67 100644
--- a/src/core/ext/filters/workarounds/workaround_utils.c
+++ b/src/core/ext/filters/workarounds/workaround_utils.c
@@ -33,7 +33,8 @@ grpc_workaround_user_agent_md *grpc_parse_user_agent(grpc_mdelem md) {
   if (NULL != user_agent_md) {
     return user_agent_md;
   }
-  user_agent_md = gpr_malloc(sizeof(grpc_workaround_user_agent_md));
+  user_agent_md = (grpc_workaround_user_agent_md *)gpr_malloc(
+      sizeof(grpc_workaround_user_agent_md));
   for (int i = 0; i < GRPC_MAX_WORKAROUND_ID; i++) {
     if (ua_parser[i]) {
       user_agent_md->workaround_active[i] = ua_parser[i](md);
diff --git a/src/core/lib/support/arena.c b/src/core/lib/support/arena.c
index b433c61b4c..9e0f73ae3d 100644
--- a/src/core/lib/support/arena.c
+++ b/src/core/lib/support/arena.c
@@ -38,7 +38,7 @@ struct gpr_arena {
 
 gpr_arena *gpr_arena_create(size_t initial_size) {
   initial_size = ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
-  gpr_arena *a = gpr_zalloc(sizeof(gpr_arena) + initial_size);
+  gpr_arena *a = (gpr_arena *)gpr_zalloc(sizeof(gpr_arena) + initial_size);
   a->initial_zone.size_end = initial_size;
   return a;
 }
@@ -64,7 +64,7 @@ void *gpr_arena_alloc(gpr_arena *arena, size_t size) {
     zone *next_z = (zone *)gpr_atm_acq_load(&z->next_atm);
     if (next_z == NULL) {
       size_t next_z_size = (size_t)gpr_atm_no_barrier_load(&arena->size_so_far);
-      next_z = gpr_zalloc(sizeof(zone) + next_z_size);
+      next_z = (zone *)gpr_zalloc(sizeof(zone) + next_z_size);
       next_z->size_begin = z->size_end;
       next_z->size_end = z->size_end + next_z_size;
       if (!gpr_atm_rel_cas(&z->next_atm, (gpr_atm)NULL, (gpr_atm)next_z)) {
diff --git a/src/core/lib/support/atm.c b/src/core/lib/support/atm.c
index caa0bafe33..2f37d62f76 100644
--- a/src/core/lib/support/atm.c
+++ b/src/core/lib/support/atm.c
@@ -21,12 +21,12 @@
 
 gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm *value, gpr_atm delta,
                                        gpr_atm min, gpr_atm max) {
-  gpr_atm current;
-  gpr_atm new;
+  gpr_atm current_value;
+  gpr_atm new_value;
   do {
-    current = gpr_atm_no_barrier_load(value);
-    new = GPR_CLAMP(current + delta, min, max);
-    if (new == current) break;
-  } while (!gpr_atm_no_barrier_cas(value, current, new));
-  return new;
+    current_value = gpr_atm_no_barrier_load(value);
+    new_value = GPR_CLAMP(current_value + delta, min, max);
+    if (new_value == current_value) break;
+  } while (!gpr_atm_no_barrier_cas(value, current_value, new_value));
+  return new_value;
 }
diff --git a/src/core/lib/support/avl.c b/src/core/lib/support/avl.c
index aa0f665272..a6178fdbce 100644
--- a/src/core/lib/support/avl.c
+++ b/src/core/lib/support/avl.c
@@ -76,7 +76,7 @@ static gpr_avl_node *assert_invariants(gpr_avl_node *n) { return n; }
 
 gpr_avl_node *new_node(void *key, void *value, gpr_avl_node *left,
                        gpr_avl_node *right) {
-  gpr_avl_node *node = gpr_malloc(sizeof(*node));
+  gpr_avl_node *node = (gpr_avl_node *)gpr_malloc(sizeof(*node));
   gpr_ref_init(&node->refs, 1);
   node->key = key;
   node->value = value;
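The one change that is not a cast is in src/core/lib/support/atm.c: `new` is a reserved keyword in C++, so the locals `current`/`new` become `current_value`/`new_value` (renaming `current` as well keeps the pair symmetric). Below is a self-contained sketch of the same clamped-add loop, written against C11 atomics with the C++-safe names; the real code uses the `gpr_atm_*` wrappers rather than `<stdatomic.h>`:

```c
#include <stdatomic.h>
#include <stdio.h>

#define CLAMP(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

/* Atomically add delta to *value, clamping the result to [min, max].
   The identifier 'new' would be rejected by any C++ compiler and is
   flagged by -Wc++-compat, hence the *_value names. */
static long clamped_add(atomic_long *value, long delta, long min, long max) {
  long current_value; /* was: current */
  long new_value;     /* was: new */
  do {
    current_value = atomic_load_explicit(value, memory_order_relaxed);
    new_value = CLAMP(current_value + delta, min, max);
    if (new_value == current_value) break;
  } while (!atomic_compare_exchange_weak_explicit(value, &current_value,
                                                  new_value,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed));
  return new_value;
}

int main(void) {
  atomic_long v = 10;
  printf("%ld\n", clamped_add(&v, 100, 0, 50)); /* prints 50 */
  return 0;
}
```

With `-Wall -Wc++-compat` now in `.clang_complete`, the intent is presumably that clang-driven editor tooling surfaces this kind of incompatibility — implicit `void *` conversions and keyword clashes — before the code ever meets a C++ compiler.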