author    Craig Tiller <ctiller@google.com>  2017-09-11 12:09:03 -0700
committer Craig Tiller <ctiller@google.com>  2017-09-11 12:09:03 -0700
commit    4aac9a664a56b313bab725b7a6bf5da571e0c747 (patch)
tree      615c37fa234acb13a02cea1f826bebc1fef21c32 /src
parent    4f5acf73c9c2246eb03e66c880eb44f37d91b9cd (diff)
parent    55c4b31389d5557b88d39bde6d783d68aa747de7 (diff)
Merge github.com:grpc/grpc into pollset_kick_stats
Diffstat (limited to 'src')
-rw-r--r--  src/c-ares/CMakeLists.txt | 34
-rw-r--r--  src/core/ext/filters/client_channel/channel_connectivity.c | 7
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.c | 133
-rw-r--r--  src/core/ext/filters/client_channel/http_connect_handshaker.c | 12
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy.c | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c | 10
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c | 42
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c | 12
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c | 33
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c | 12
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c | 13
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy_factory.c | 5
-rw-r--r--  src/core/ext/filters/client_channel/proxy_mapper_registry.c | 2
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c | 7
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c | 8
-rw-r--r--  src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c | 20
-rw-r--r--  src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c | 5
-rw-r--r--  src/core/ext/filters/client_channel/retry_throttle.c | 6
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.c | 28
-rw-r--r--  src/core/ext/filters/client_channel/subchannel_index.c | 12
-rw-r--r--  src/core/ext/filters/client_channel/uri_parser.c | 7
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.c | 15
-rw-r--r--  src/core/ext/filters/http/client/http_client_filter.c | 18
-rw-r--r--  src/core/ext/filters/http/http_filters_plugin.c | 2
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.c | 16
-rw-r--r--  src/core/ext/filters/http/server/http_server_filter.c | 22
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_filter.c | 16
-rw-r--r--  src/core/ext/filters/load_reporting/server_load_reporting_plugin.c | 2
-rw-r--r--  src/core/ext/transport/chttp2/client/chttp2_connector.c | 8
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.c | 17
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 269
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_goaway.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_ping.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_rst_stream.c | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_settings.c | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_window_update.c | 8
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_encoder.c | 5
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.c | 9
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_table.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 29
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c | 12
-rw-r--r--  src/core/ext/transport/chttp2/transport/stream_map.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.c | 68
-rw-r--r--  src/core/ext/transport/inproc/inproc_transport.c | 15
-rw-r--r--  src/core/lib/channel/channel_args.c | 18
-rw-r--r--  src/core/lib/channel/channel_stack_builder.c | 8
-rw-r--r--  src/core/lib/channel/connected_channel.c | 25
-rw-r--r--  src/core/lib/channel/handshaker.c | 14
-rw-r--r--  src/core/lib/channel/handshaker_registry.c | 2
-rw-r--r--  src/core/lib/debug/stats.c | 2
-rw-r--r--  src/core/lib/debug/stats_data.c | 177
-rw-r--r--  src/core/lib/debug/stats_data.h | 73
-rw-r--r--  src/core/lib/debug/stats_data.yaml | 41
-rw-r--r--  src/core/lib/debug/trace.c | 6
-rw-r--r--  src/core/lib/http/format_request.c | 2
-rw-r--r--  src/core/lib/http/httpcli.c | 13
-rw-r--r--  src/core/lib/http/parser.c | 7
-rw-r--r--  src/core/lib/iomgr/closure.c | 4
-rw-r--r--  src/core/lib/iomgr/combiner.c | 7
-rw-r--r--  src/core/lib/iomgr/error.c | 14
-rw-r--r--  src/core/lib/iomgr/ev_epoll1_linux.c | 6
-rw-r--r--  src/core/lib/iomgr/ev_epollex_linux.c | 14
-rw-r--r--  src/core/lib/iomgr/ev_epollsig_linux.c | 15
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c | 64
-rw-r--r--  src/core/lib/iomgr/ev_posix.c | 4
-rw-r--r--  src/core/lib/iomgr/executor.c | 178
-rw-r--r--  src/core/lib/iomgr/executor.h | 7
-rw-r--r--  src/core/lib/iomgr/load_file.c | 3
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.c | 13
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.c | 2
-rw-r--r--  src/core/lib/iomgr/resource_quota.c | 36
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.c | 9
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c | 179
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.c | 11
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix_common.c | 2
-rw-r--r--  src/core/lib/iomgr/timer_heap.c | 8
-rw-r--r--  src/core/lib/iomgr/timer_manager.c | 2
-rw-r--r--  src/core/lib/iomgr/udp_server.c | 16
-rw-r--r--  src/core/lib/iomgr/unix_sockets_posix.c | 6
-rw-r--r--  src/core/lib/iomgr/wakeup_fd_cv.c | 3
-rw-r--r--  src/core/lib/json/json.c | 2
-rw-r--r--  src/core/lib/json/json_string.c | 26
-rw-r--r--  src/core/lib/security/transport/security_handshaker.c | 36
-rw-r--r--  src/core/lib/slice/b64.c | 4
-rw-r--r--  src/core/lib/slice/slice.c | 26
-rw-r--r--  src/core/lib/slice/slice_buffer.c | 9
-rw-r--r--  src/core/lib/slice/slice_hash_table.c | 5
-rw-r--r--  src/core/lib/slice/slice_intern.c | 13
-rw-r--r--  src/core/lib/surface/alarm.c | 6
-rw-r--r--  src/core/lib/surface/byte_buffer.c | 6
-rw-r--r--  src/core/lib/surface/call.c | 35
-rw-r--r--  src/core/lib/surface/channel.c | 6
-rw-r--r--  src/core/lib/surface/channel_init.c | 12
-rw-r--r--  src/core/lib/surface/channel_ping.c | 4
-rw-r--r--  src/core/lib/surface/completion_queue.c | 25
-rw-r--r--  src/core/lib/surface/server.c | 111
-rw-r--r--  src/core/lib/transport/connectivity_state.c | 3
-rw-r--r--  src/core/lib/transport/metadata.c | 11
-rw-r--r--  src/core/lib/transport/service_config.c | 8
-rw-r--r--  src/core/lib/transport/transport.c | 12
100 files changed, 1496 insertions, 816 deletions
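
Note: aside from the removal of src/c-ares/CMakeLists.txt and the stats changes, most hunks merged in below follow one pattern: void* values returned by gpr_malloc/gpr_zalloc/gpr_realloc or handed to closure callbacks now get an explicit cast to their concrete type, so the C sources also compile cleanly as C++. The sketch below only illustrates that pattern and is not gRPC code: it borrows the state_watcher/finished_completion names from the channel_connectivity.c hunk but substitutes the standard allocator for gpr_malloc and a plain function pointer for grpc_closure.

#include <stdio.h>
#include <stdlib.h>

typedef struct state_watcher {
  int phase;
} state_watcher;

/* Closure-style callback: state arrives as void*, as in the grpc closures below. */
typedef void (*watch_cb)(void *arg);

static void finished_completion(void *arg) {
  /* Explicit cast from void*: a no-op in C, but required when built as C++. */
  state_watcher *w = (state_watcher *)arg;
  printf("watcher phase = %d\n", w->phase);
}

int main(void) {
  /* malloc stands in for gpr_malloc here; the cast on its result is the point. */
  state_watcher *w = (state_watcher *)malloc(sizeof(*w));
  if (w == NULL) return 1;
  w->phase = 0;
  watch_cb cb = finished_completion;
  cb(w);
  free(w);
  return 0;
}
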
diff --git a/src/c-ares/CMakeLists.txt b/src/c-ares/CMakeLists.txt
deleted file mode 100644
index 15bd5a2add..0000000000
--- a/src/c-ares/CMakeLists.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-# c-ares cmake file for gRPC
-#
-# This is currently very experimental, and unsupported.
-#
-# Copyright 2016 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-string(TOLOWER ${CMAKE_SYSTEM_NAME} cares_system_name)
-
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares)
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares/cares)
-
-if(${cares_system_name} MATCHES windows)
- add_definitions(-DCARES_STATICLIB=1)
- add_definitions(-DWIN32_LEAN_AND_MEAN=1)
-else()
- include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares/config_${cares_system_name})
- add_definitions(-DHAVE_CONFIG_H=1)
- add_definitions(-D_GNU_SOURCE=1)
-endif()
-
-file(GLOB lib_sources ../../third_party/cares/cares/*.c)
-add_library(cares ${lib_sources})
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.c b/src/core/ext/filters/client_channel/channel_connectivity.c
index 0a9e90d12e..e5f6fa76ae 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.c
+++ b/src/core/ext/filters/client_channel/channel_connectivity.c
@@ -87,7 +87,7 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
grpc_cq_completion *ignored) {
int delete = 0;
- state_watcher *w = pw;
+ state_watcher *w = (state_watcher *)pw;
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
@@ -203,7 +203,7 @@ void grpc_channel_watch_connectivity_state(
grpc_channel_element *client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- state_watcher *w = gpr_malloc(sizeof(*w));
+ state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w));
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
@@ -228,7 +228,8 @@ void grpc_channel_watch_connectivity_state(
w->channel = channel;
w->error = NULL;
- watcher_timer_init_arg *wa = gpr_malloc(sizeof(watcher_timer_init_arg));
+ watcher_timer_init_arg *wa =
+ (watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg));
wa->w = w;
wa->deadline = deadline;
GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index c24e52ec1d..dad5c4fce5 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -148,7 +148,8 @@ static void *method_parameters_create_from_json(const grpc_json *json) {
if (!parse_timeout(field, &timeout)) return NULL;
}
}
- method_parameters *value = gpr_malloc(sizeof(method_parameters));
+ method_parameters *value =
+ (method_parameters *)gpr_malloc(sizeof(method_parameters));
gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
@@ -254,7 +255,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- lb_policy_connectivity_watcher *w = arg;
+ lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
@@ -281,7 +282,8 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
- lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
+ lb_policy_connectivity_watcher *w =
+ (lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -310,7 +312,8 @@ typedef struct {
} service_config_parsing_state;
static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
- service_config_parsing_state *parsing_state = arg;
+ service_config_parsing_state *parsing_state =
+ (service_config_parsing_state *)arg;
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
@@ -365,7 +368,7 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- channel_data *chand = arg;
+ channel_data *chand = (channel_data *)arg;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
@@ -391,7 +394,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
- grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
+ grpc_lb_addresses *addresses =
+ (grpc_lb_addresses *)channel_arg->value.pointer.p;
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
@@ -586,9 +590,10 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
- grpc_transport_op *op = arg;
- grpc_channel_element *elem = op->handler_private.extra_arg;
- channel_data *chand = elem->channel_data;
+ grpc_transport_op *op = (grpc_transport_op *)arg;
+ grpc_channel_element *elem =
+ (grpc_channel_element *)op->handler_private.extra_arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
@@ -642,7 +647,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != NULL) {
@@ -662,7 +667,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *info) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != NULL) {
*info->lb_policy_name = chand->info_lb_policy_name == NULL
@@ -682,7 +687,7 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@@ -713,7 +718,8 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
"client channel factory arg must be a pointer");
}
grpc_client_channel_factory_ref(arg->value.pointer.p);
- chand->client_channel_factory = arg->value.pointer.p;
+ chand->client_channel_factory =
+ (grpc_client_channel_factory *)arg->value.pointer.p;
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == NULL) {
@@ -745,7 +751,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_resolver *resolver = arg;
+ grpc_resolver *resolver = (grpc_resolver *)arg;
grpc_resolver_shutdown_locked(exec_ctx, resolver);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
}
@@ -753,7 +759,7 @@ static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
@@ -848,7 +854,7 @@ typedef struct client_channel_call_data {
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
grpc_call_element *elem) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
return calld->subchannel_call;
}
@@ -868,7 +874,7 @@ static void waiting_for_pick_batches_add(
// This is called via the call combiner, so access to calld is synchronized.
static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- call_data *calld = arg;
+ call_data *calld = (call_data *)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_transport_stream_op_batch_finish_with_failure(
@@ -882,7 +888,7 @@ static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s",
@@ -912,7 +918,7 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
// This is called via the call combiner, so access to calld is synchronized.
static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *ignored) {
- call_data *calld = arg;
+ call_data *calld = (call_data *)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_subchannel_call_process_op(
@@ -924,8 +930,8 @@ static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
" pending batches to subchannel_call=%p",
@@ -950,8 +956,8 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
// that the resolver has returned results to the channel.
static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
@@ -961,7 +967,7 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
if (chand->method_params_table != NULL) {
- calld->method_params = grpc_method_config_table_get(
+ calld->method_params = (method_parameters *)grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
@@ -984,8 +990,8 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
.pollent = calld->pollent,
.path = calld->path,
@@ -1013,8 +1019,8 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
if (calld->connected_subchannel == NULL) {
@@ -1057,15 +1063,16 @@ typedef struct {
static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
- pick_after_resolver_result_args *args = arg;
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)arg;
if (args->finished) {
gpr_free(args);
return;
}
args->finished = true;
grpc_call_element *elem = args->elem;
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
// If we don't yet have a resolver result, then a closure for
// pick_after_resolver_result_done_locked() will have been added to
// chand->waiting_for_resolver_result_closures, and it may not be invoked
@@ -1091,7 +1098,8 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
- pick_after_resolver_result_args *args = arg;
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)arg;
if (args->finished) {
/* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -1102,8 +1110,8 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
}
args->finished = true;
grpc_call_element *elem = args->elem;
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
@@ -1123,8 +1131,8 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
@@ -1148,9 +1156,9 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
// holding the call combiner.
static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *elem = arg;
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE && calld->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
@@ -1167,9 +1175,9 @@ static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Unrefs the LB policy and invokes subchannel_ready_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *elem = arg;
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
@@ -1186,8 +1194,8 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_lb_policy_pick_args *inputs) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
@@ -1222,8 +1230,8 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
GPR_TIMER_BEGIN("pick_subchannel", 0);
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
bool pick_done = false;
if (chand->lb_policy != NULL) {
apply_service_config_to_call_locked(exec_ctx, elem);
@@ -1295,8 +1303,8 @@ static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_call_element *elem = arg;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)elem->call_data;
if (calld->retry_throttle_data != NULL) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
@@ -1317,8 +1325,8 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void cc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch);
@@ -1411,8 +1419,8 @@ done:
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
@@ -1432,8 +1440,8 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_destroy(exec_ctx, elem);
}
@@ -1467,7 +1475,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
calld->pollent = pollent;
}
@@ -1491,7 +1499,7 @@ const grpc_channel_filter grpc_client_channel_filter = {
static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
- channel_data *chand = arg;
+ channel_data *chand = (channel_data *)arg;
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
@@ -1505,7 +1513,7 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
@@ -1576,7 +1584,7 @@ static void external_connectivity_watcher_list_remove(
int grpc_client_channel_num_external_connectivity_watchers(
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
int count = 0;
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1593,7 +1601,7 @@ int grpc_client_channel_num_external_connectivity_watchers(
static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- external_connectivity_watcher *w = arg;
+ external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
grpc_closure *follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
w->chand->interested_parties);
@@ -1606,7 +1614,7 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
- external_connectivity_watcher *w = arg;
+ external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
external_connectivity_watcher *found = NULL;
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
@@ -1635,8 +1643,9 @@ void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_polling_entity pollent, grpc_connectivity_state *state,
grpc_closure *closure, grpc_closure *watcher_timer_init) {
- channel_data *chand = elem->channel_data;
- external_connectivity_watcher *w = gpr_zalloc(sizeof(*w));
+ channel_data *chand = (channel_data *)elem->channel_data;
+ external_connectivity_watcher *w =
+ (external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
w->chand = chand;
w->pollent = pollent;
w->on_complete = closure;
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.c b/src/core/ext/filters/client_channel/http_connect_handshaker.c
index 0952dc6d4e..418bb41ef6 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.c
@@ -124,7 +124,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
// Callback invoked when finished writing HTTP CONNECT request.
static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- http_connect_handshaker* handshaker = arg;
+ http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the write failed or we're shutting down, clean up and invoke the
@@ -145,7 +145,7 @@ static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked for reading HTTP CONNECT response.
static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- http_connect_handshaker* handshaker = arg;
+ http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the read failed or we're shutting down, clean up and invoke the
@@ -281,7 +281,8 @@ static void http_connect_handshaker_do_handshake(
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
gpr_string_split(arg->value.string, "\n", &header_strings,
&num_header_strings);
- headers = gpr_malloc(sizeof(grpc_http_header) * num_header_strings);
+ headers = (grpc_http_header*)gpr_malloc(sizeof(grpc_http_header) *
+ num_header_strings);
for (size_t i = 0; i < num_header_strings; ++i) {
char* sep = strchr(header_strings[i], ':');
if (sep == NULL) {
@@ -308,7 +309,7 @@ static void http_connect_handshaker_do_handshake(
grpc_httpcli_request request;
memset(&request, 0, sizeof(request));
request.host = server_name;
- request.http.method = "CONNECT";
+ request.http.method = (char*)"CONNECT";
request.http.path = server_name;
request.http.hdrs = headers;
request.http.hdr_count = num_headers;
@@ -333,7 +334,8 @@ static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
http_connect_handshaker_do_handshake};
static grpc_handshaker* grpc_http_connect_handshaker_create() {
- http_connect_handshaker* handshaker = gpr_malloc(sizeof(*handshaker));
+ http_connect_handshaker* handshaker =
+ (http_connect_handshaker*)gpr_malloc(sizeof(*handshaker));
memset(handshaker, 0, sizeof(*handshaker));
grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
gpr_mu_init(&handshaker->mu);
diff --git a/src/core/ext/filters/client_channel/lb_policy.c b/src/core/ext/filters/client_channel/lb_policy.c
index dd95a135cf..8e6673d737 100644
--- a/src/core/ext/filters/client_channel/lb_policy.c
+++ b/src/core/ext/filters/client_channel/lb_policy.c
@@ -67,7 +67,7 @@ void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_lb_policy *policy = arg;
+ grpc_lb_policy *policy = (grpc_lb_policy *)arg;
policy->vtable->shutdown_locked(exec_ctx, policy);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
index 299f26b4de..bd290464c8 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
@@ -49,7 +49,7 @@ typedef struct {
static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- call_data *calld = arg;
+ call_data *calld = (call_data *)arg;
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
@@ -59,7 +59,7 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- call_data *calld = arg;
+ call_data *calld = (call_data *)arg;
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
@@ -70,7 +70,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
@@ -84,7 +84,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
// Record call finished, optionally setting client_failed_to_send and
// received.
grpc_grpclb_client_stats_add_call_finished(
@@ -98,7 +98,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
index 087b4076e2..5aafed1374 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -181,7 +181,7 @@ typedef struct wrapped_rr_closure_arg {
* order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- wrapped_rr_closure_arg *wc_arg = arg;
+ wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
@@ -245,7 +245,7 @@ static void add_pending_pick(pending_pick **root,
grpc_connected_subchannel **target,
grpc_call_context_element *context,
grpc_closure *on_complete) {
- pending_pick *pp = gpr_zalloc(sizeof(*pp));
+ pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
pp->next = *root;
pp->pick_args = *pick_args;
pp->target = target;
@@ -271,7 +271,7 @@ typedef struct pending_ping {
} pending_ping;
static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
- pending_ping *pping = gpr_zalloc(sizeof(*pping));
+ pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
@@ -671,7 +671,7 @@ static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
grpc_lb_addresses *addresses =
process_serverlist_locked(exec_ctx, glb_policy->serverlist);
GPR_ASSERT(addresses != NULL);
- grpc_lb_policy_args *args = gpr_zalloc(sizeof(*args));
+ grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
// Replace the LB addresses in the channel args that we pass down to
@@ -798,7 +798,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- rr_connectivity_data *rr_connectivity = arg;
+ rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
if (glb_policy->shutting_down) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
@@ -841,8 +841,8 @@ static grpc_slice_hash_table_entry targets_info_entry_create(
}
static int balancer_name_cmp_fn(void *a, void *b) {
- const char *a_str = a;
- const char *b_str = b;
+ const char *a_str = (const char *)a;
+ const char *b_str = (const char *)b;
return strcmp(a_str, b_str);
}
@@ -929,14 +929,14 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
return NULL;
}
- grpc_lb_addresses *addresses = arg->value.pointer.p;
+ grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
if (num_grpclb_addrs == 0) return NULL;
- glb_lb_policy *glb_policy = gpr_zalloc(sizeof(*glb_policy));
+ glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
@@ -1190,7 +1190,8 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- wrapped_rr_closure_arg *wc_arg = gpr_zalloc(sizeof(wrapped_rr_closure_arg));
+ wrapped_rr_closure_arg *wc_arg =
+ (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
@@ -1273,7 +1274,7 @@ static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
glb_policy->client_load_report_payload = NULL;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
@@ -1313,7 +1314,7 @@ static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
@@ -1520,7 +1521,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->initial_request_sent = true;
// If we attempted to send a client load report before the initial
// request was sent, send the load report now.
@@ -1533,7 +1534,7 @@ static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
@@ -1652,7 +1653,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->retry_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
@@ -1667,7 +1668,7 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
GPR_ASSERT(glb_policy->lb_call != NULL);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
char *status_details =
@@ -1730,8 +1731,8 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
}
- glb_policy->pending_update_args =
- gpr_zalloc(sizeof(*glb_policy->pending_update_args));
+ glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
+ sizeof(*glb_policy->pending_update_args));
glb_policy->pending_update_args->client_channel_factory =
args->client_channel_factory;
glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
@@ -1759,7 +1760,8 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
(void *)glb_policy);
}
}
- const grpc_lb_addresses *addresses = arg->value.pointer.p;
+ const grpc_lb_addresses *addresses =
+ (const grpc_lb_addresses *)arg->value.pointer.p;
GPR_ASSERT(glb_policy->lb_channel != NULL);
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
@@ -1792,7 +1794,7 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
if (glb_policy->shutting_down) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
index 5b62623145..903120ca7d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
@@ -42,7 +42,8 @@ struct grpc_grpclb_client_stats {
};
grpc_grpclb_client_stats* grpc_grpclb_client_stats_create() {
- grpc_grpclb_client_stats* client_stats = gpr_zalloc(sizeof(*client_stats));
+ grpc_grpclb_client_stats* client_stats =
+ (grpc_grpclb_client_stats*)gpr_zalloc(sizeof(*client_stats));
gpr_ref_init(&client_stats->refs, 1);
return client_stats;
}
@@ -88,7 +89,8 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
// Record the drop.
if (client_stats->drop_token_counts == NULL) {
client_stats->drop_token_counts =
- gpr_zalloc(sizeof(grpc_grpclb_dropped_call_counts));
+ (grpc_grpclb_dropped_call_counts*)gpr_zalloc(
+ sizeof(grpc_grpclb_dropped_call_counts));
}
grpc_grpclb_dropped_call_counts* drop_token_counts =
client_stats->drop_token_counts;
@@ -103,9 +105,9 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
while (new_num_entries < drop_token_counts->num_entries + 1) {
new_num_entries *= 2;
}
- drop_token_counts->token_counts =
- gpr_realloc(drop_token_counts->token_counts,
- new_num_entries * sizeof(grpc_grpclb_drop_token_count));
+ drop_token_counts->token_counts = (grpc_grpclb_drop_token_count*)gpr_realloc(
+ drop_token_counts->token_counts,
+ new_num_entries * sizeof(grpc_grpclb_drop_token_count));
grpc_grpclb_drop_token_count* new_entry =
&drop_token_counts->token_counts[drop_token_counts->num_entries++];
new_entry->token = gpr_strdup(token);
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
index 6fa29f326e..407bd18adb 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
@@ -25,7 +25,7 @@
/* invoked once for every Server in ServerList */
static bool count_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
- grpc_grpclb_serverlist *sl = *arg;
+ grpc_grpclb_serverlist *sl = (grpc_grpclb_serverlist *)*arg;
grpc_grpclb_server server;
if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -46,9 +46,10 @@ typedef struct decode_serverlist_arg {
/* invoked once for every Server in ServerList */
static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
- decode_serverlist_arg *dec_arg = *arg;
+ decode_serverlist_arg *dec_arg = (decode_serverlist_arg *)*arg;
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
- grpc_grpclb_server *server = gpr_zalloc(sizeof(grpc_grpclb_server));
+ grpc_grpclb_server *server =
+ (grpc_grpclb_server *)gpr_zalloc(sizeof(grpc_grpclb_server));
if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
gpr_free(server);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -59,7 +60,8 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
}
grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
- grpc_grpclb_request *req = gpr_malloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request *req =
+ (grpc_grpclb_request *)gpr_malloc(sizeof(grpc_grpclb_request));
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
@@ -78,14 +80,15 @@ static void populate_timestamp(gpr_timespec timestamp,
static bool encode_string(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
- char *str = *arg;
+ char *str = (char *)*arg;
if (!pb_encode_tag_for_field(stream, field)) return false;
return pb_encode_string(stream, (uint8_t *)str, strlen(str));
}
static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
- grpc_grpclb_dropped_call_counts *drop_entries = *arg;
+ grpc_grpclb_dropped_call_counts *drop_entries =
+ (grpc_grpclb_dropped_call_counts *)*arg;
if (drop_entries == NULL) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
@@ -104,7 +107,8 @@ static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats *client_stats) {
- grpc_grpclb_request *req = gpr_zalloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request *req =
+ (grpc_grpclb_request *)gpr_zalloc(sizeof(grpc_grpclb_request));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
@@ -179,7 +183,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
- grpc_grpclb_serverlist *sl = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist *sl =
+ (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
// First pass: count number of servers.
@@ -193,7 +198,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
- sl->servers = gpr_zalloc(sizeof(grpc_grpclb_server *) * sl->num_servers);
+ sl->servers = (grpc_grpclb_server **)gpr_zalloc(
+ sizeof(grpc_grpclb_server *) * sl->num_servers);
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@@ -226,13 +232,16 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist *sl) {
- grpc_grpclb_serverlist *copy = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist *copy =
+ (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
- copy->servers = gpr_malloc(sizeof(grpc_grpclb_server *) * sl->num_servers);
+ copy->servers = (grpc_grpclb_server **)gpr_malloc(
+ sizeof(grpc_grpclb_server *) * sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
- copy->servers[i] = gpr_malloc(sizeof(grpc_grpclb_server));
+ copy->servers[i] =
+ (grpc_grpclb_server *)gpr_malloc(sizeof(grpc_grpclb_server));
memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server));
}
return copy;
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
index a50ba09bf5..fab3073eb9 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
@@ -217,7 +217,7 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pp = gpr_malloc(sizeof(*pp));
+ pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -314,7 +314,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
return;
}
- const grpc_lb_addresses *addresses = arg->value.pointer.p;
+ const grpc_lb_addresses *addresses =
+ (const grpc_lb_addresses *)arg->value.pointer.p;
if (addresses->num_addresses == 0) {
// Empty update. Unsubscribe from all current subchannels and put the
// channel in TRANSIENT_FAILURE.
@@ -392,7 +393,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
gpr_free(p->pending_update_args);
}
- p->pending_update_args = gpr_zalloc(sizeof(*p->pending_update_args));
+ p->pending_update_args =
+ (grpc_lb_policy_args *)gpr_zalloc(sizeof(*p->pending_update_args));
p->pending_update_args->client_channel_factory =
args->client_channel_factory;
p->pending_update_args->args = grpc_channel_args_copy(args->args);
@@ -456,7 +458,7 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- pick_first_lb_policy *p = arg;
+ pick_first_lb_policy *p = (pick_first_lb_policy *)arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
@@ -678,7 +680,7 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- pick_first_lb_policy *p = gpr_zalloc(sizeof(*p));
+ pick_first_lb_policy *p = (pick_first_lb_policy *)gpr_zalloc(sizeof(*p));
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
index 866fb9a1eb..be91d3d651 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
@@ -144,10 +144,11 @@ struct rr_subchannel_list {
static rr_subchannel_list *rr_subchannel_list_create(round_robin_lb_policy *p,
size_t num_subchannels) {
- rr_subchannel_list *subchannel_list = gpr_zalloc(sizeof(*subchannel_list));
+ rr_subchannel_list *subchannel_list =
+ (rr_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
subchannel_list->policy = p;
subchannel_list->subchannels =
- gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
+ (subchannel_data *)gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
subchannel_list->num_subchannels = num_subchannels;
gpr_ref_init(&subchannel_list->refcount, 1);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
@@ -452,7 +453,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pending_pick *pp = gpr_malloc(sizeof(*pp));
+ pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
@@ -553,7 +554,7 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- subchannel_data *sd = arg;
+ subchannel_data *sd = (subchannel_data *)arg;
round_robin_lb_policy *p = sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
@@ -754,7 +755,7 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
return;
}
- grpc_lb_addresses *addresses = arg->value.pointer.p;
+ grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
rr_subchannel_list *subchannel_list =
rr_subchannel_list_create(p, addresses->num_addresses);
if (addresses->num_addresses == 0) {
@@ -887,7 +888,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- round_robin_lb_policy *p = gpr_zalloc(sizeof(*p));
+ round_robin_lb_policy *p = (round_robin_lb_policy *)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.c b/src/core/ext/filters/client_channel/lb_policy_factory.c
index 538d8d65ed..cdcaf17544 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.c
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.c
@@ -28,11 +28,12 @@
grpc_lb_addresses* grpc_lb_addresses_create(
size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable) {
- grpc_lb_addresses* addresses = gpr_zalloc(sizeof(grpc_lb_addresses));
+ grpc_lb_addresses* addresses =
+ (grpc_lb_addresses*)gpr_zalloc(sizeof(grpc_lb_addresses));
addresses->num_addresses = num_addresses;
addresses->user_data_vtable = user_data_vtable;
const size_t addresses_size = sizeof(grpc_lb_address) * num_addresses;
- addresses->addresses = gpr_zalloc(addresses_size);
+ addresses->addresses = (grpc_lb_address*)gpr_zalloc(addresses_size);
return addresses;
}
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.c b/src/core/ext/filters/client_channel/proxy_mapper_registry.c
index 5f43a0596a..09967eea3c 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.c
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.c
@@ -34,7 +34,7 @@ typedef struct {
static void grpc_proxy_mapper_list_register(grpc_proxy_mapper_list* list,
bool at_start,
grpc_proxy_mapper* mapper) {
- list->list = gpr_realloc(
+ list->list = (grpc_proxy_mapper**)gpr_realloc(
list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*));
if (at_start) {
memmove(list->list + 1, list->list,
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
index f1480bb1ae..b87a3b7082 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
@@ -144,7 +144,7 @@ static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- ares_dns_resolver *r = arg;
+ ares_dns_resolver *r = (ares_dns_resolver *)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
@@ -227,7 +227,7 @@ static char *choose_service_config(char *service_config_choice_json) {
static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- ares_dns_resolver *r = arg;
+ ares_dns_resolver *r = (ares_dns_resolver *)arg;
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
@@ -363,7 +363,8 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
const char *path = args->uri->path;
if (path[0] == '/') ++path;
/* Create resolver. */
- ares_dns_resolver *r = gpr_zalloc(sizeof(ares_dns_resolver));
+ ares_dns_resolver *r =
+ (ares_dns_resolver *)gpr_zalloc(sizeof(ares_dns_resolver));
grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
if (0 != strcmp(args->uri->authority, "")) {
r->dns_server = gpr_strdup(args->uri->authority);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
index b696344eab..9747d39a16 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
@@ -111,7 +111,7 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
grpc_pollset_set *pollset_set) {
- *ev_driver = gpr_malloc(sizeof(grpc_ares_ev_driver));
+ *ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver));
int status = ares_init(&(*ev_driver)->channel);
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
if (status != ARES_SUCCESS) {
@@ -178,7 +178,7 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- fd_node *fdn = arg;
+ fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
fdn->readable_registered = false;
@@ -205,7 +205,7 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- fd_node *fdn = arg;
+ fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
fdn->writable_registered = false;
@@ -251,7 +251,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (fdn == NULL) {
char *fd_name;
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
- fdn = gpr_malloc(sizeof(fd_node));
+ fdn = (fd_node *)gpr_malloc(sizeof(fd_node));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
fdn->grpc_fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
index e65723a63b..0d71f3560e 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
@@ -158,9 +158,9 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
}
(*lb_addresses)->num_addresses += i;
- (*lb_addresses)->addresses =
- gpr_realloc((*lb_addresses)->addresses,
- sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
+ (*lb_addresses)->addresses = (grpc_lb_address *)gpr_realloc(
+ (*lb_addresses)->addresses,
+ sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
switch (hostent->h_addrtype) {
case AF_INET6: {
@@ -293,12 +293,12 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
// Found a service config record.
if (result != NULL) {
size_t service_config_len = result->length - prefix_len;
- *r->service_config_json_out = gpr_malloc(service_config_len + 1);
+ *r->service_config_json_out = (char *)gpr_malloc(service_config_len + 1);
memcpy(*r->service_config_json_out, result->txt + prefix_len,
service_config_len);
for (result = result->next; result != NULL && !result->record_start;
result = result->next) {
- *r->service_config_json_out = gpr_realloc(
+ *r->service_config_json_out = (char *)gpr_realloc(
*r->service_config_json_out, service_config_len + result->length + 1);
memcpy(*r->service_config_json_out + service_config_len, result->txt,
result->length);
@@ -360,7 +360,8 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
- grpc_ares_request *r = gpr_zalloc(sizeof(grpc_ares_request));
+ grpc_ares_request *r =
+ (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@@ -502,10 +503,11 @@ static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
if (r->lb_addrs == NULL || r->lb_addrs->num_addresses == 0) {
*resolved_addresses = NULL;
} else {
- *resolved_addresses = gpr_zalloc(sizeof(grpc_resolved_addresses));
+ *resolved_addresses =
+ (grpc_resolved_addresses *)gpr_zalloc(sizeof(grpc_resolved_addresses));
(*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
- (*resolved_addresses)->addrs = gpr_zalloc(sizeof(grpc_resolved_address) *
- (*resolved_addresses)->naddrs);
+ (*resolved_addresses)->addrs = (grpc_resolved_address *)gpr_zalloc(
+ sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs);
for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
memcpy(&(*resolved_addresses)->addrs[i],
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
index 3ff081a514..c4676e0299 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
@@ -159,7 +159,7 @@ typedef struct set_response_closure_arg {
static void set_response_closure_fn(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- set_response_closure_arg* closure_arg = arg;
+ set_response_closure_arg* closure_arg = (set_response_closure_arg*)arg;
grpc_fake_resolver_response_generator* generator = closure_arg->generator;
fake_resolver* r = generator->resolver;
if (r->next_results != NULL) {
@@ -178,7 +178,8 @@ void grpc_fake_resolver_response_generator_set_response(
grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator,
grpc_channel_args* next_response) {
GPR_ASSERT(generator->resolver != NULL);
- set_response_closure_arg* closure_arg = gpr_zalloc(sizeof(*closure_arg));
+ set_response_closure_arg* closure_arg =
+ (set_response_closure_arg*)gpr_zalloc(sizeof(*closure_arg));
closure_arg->generator = generator;
closure_arg->next_response = grpc_channel_args_copy(next_response);
GRPC_CLOSURE_SCHED(exec_ctx,
diff --git a/src/core/ext/filters/client_channel/retry_throttle.c b/src/core/ext/filters/client_channel/retry_throttle.c
index 0c7a3ae651..6cd6654b6f 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.c
+++ b/src/core/ext/filters/client_channel/retry_throttle.c
@@ -139,12 +139,14 @@ static long compare_server_name(void* key1, void* key2, void* unused) {
}
static void destroy_server_retry_throttle_data(void* value, void* unused) {
- grpc_server_retry_throttle_data* throttle_data = value;
+ grpc_server_retry_throttle_data* throttle_data =
+ (grpc_server_retry_throttle_data*)value;
grpc_server_retry_throttle_data_unref(throttle_data);
}
static void* copy_server_retry_throttle_data(void* value, void* unused) {
- grpc_server_retry_throttle_data* throttle_data = value;
+ grpc_server_retry_throttle_data* throttle_data =
+ (grpc_server_retry_throttle_data*)value;
return grpc_server_retry_throttle_data_ref(throttle_data);
}
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c
index 6b5b383efd..05c55aaa89 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.c
@@ -157,7 +157,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_connected_subchannel *c = arg;
+ grpc_connected_subchannel *c = (grpc_connected_subchannel *)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
gpr_free(c);
}
@@ -181,7 +181,7 @@ void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_subchannel *c = arg;
+ grpc_subchannel *c = (grpc_subchannel *)arg;
gpr_free((void *)c->filters);
grpc_channel_args_destroy(exec_ctx, c->args);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
@@ -290,21 +290,23 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
return c;
}
- c = gpr_zalloc(sizeof(*c));
+ c = (grpc_subchannel *)gpr_zalloc(sizeof(*c));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
if (c->num_filters > 0) {
- c->filters = gpr_malloc(sizeof(grpc_channel_filter *) * c->num_filters);
+ c->filters = (const grpc_channel_filter **)gpr_malloc(
+ sizeof(grpc_channel_filter *) * c->num_filters);
memcpy((void *)c->filters, args->filters,
sizeof(grpc_channel_filter *) * c->num_filters);
} else {
c->filters = NULL;
}
c->pollset_set = grpc_pollset_set_create();
- grpc_resolved_address *addr = gpr_malloc(sizeof(*addr));
+ grpc_resolved_address *addr =
+ (grpc_resolved_address *)gpr_malloc(sizeof(*addr));
grpc_get_subchannel_address_arg(exec_ctx, args->args, addr);
grpc_resolved_address *new_address = NULL;
grpc_channel_args *new_args = NULL;
@@ -400,7 +402,7 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- external_state_watcher *w = arg;
+ external_state_watcher *w = (external_state_watcher *)arg;
grpc_closure *follow_up = w->notify;
if (w->pollset_set != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set,
@@ -416,7 +418,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_subchannel *c = arg;
+ grpc_subchannel *c = (grpc_subchannel *)arg;
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
@@ -501,7 +503,7 @@ void grpc_subchannel_notify_on_state_change(
}
gpr_mu_unlock(&c->mu);
} else {
- w = gpr_malloc(sizeof(*w));
+ w = (external_state_watcher *)gpr_malloc(sizeof(*w));
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
@@ -533,7 +535,7 @@ void grpc_connected_subchannel_process_transport_op(
static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
grpc_error *error) {
- state_watcher *sw = p;
+ state_watcher *sw = (state_watcher *)p;
grpc_subchannel *c = sw->subchannel;
gpr_mu *mu = &c->mu;
@@ -623,7 +625,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
- sw_subchannel = gpr_malloc(sizeof(*sw_subchannel));
+ sw_subchannel = (state_watcher *)gpr_malloc(sizeof(*sw_subchannel));
sw_subchannel->subchannel = c;
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed,
@@ -660,7 +662,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_subchannel *c = arg;
+ grpc_subchannel *c = (grpc_subchannel *)arg;
grpc_channel_args *delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "connected");
@@ -696,7 +698,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
- grpc_subchannel_call *c = call;
+ grpc_subchannel_call *c = (grpc_subchannel_call *)call;
GPR_ASSERT(c->schedule_closure_after_destroy != NULL);
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_connected_subchannel *connection = c->connection;
@@ -750,7 +752,7 @@ grpc_error *grpc_connected_subchannel_create_call(
const grpc_connected_subchannel_call_args *args,
grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
- *call = gpr_arena_alloc(
+ *call = (grpc_subchannel_call *)gpr_arena_alloc(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
diff --git a/src/core/ext/filters/client_channel/subchannel_index.c b/src/core/ext/filters/client_channel/subchannel_index.c
index ababd05d84..f57b631c41 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.c
+++ b/src/core/ext/filters/client_channel/subchannel_index.c
@@ -43,11 +43,11 @@ static bool g_force_creation = false;
static grpc_subchannel_key *create_key(
const grpc_subchannel_args *args,
grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
- grpc_subchannel_key *k = gpr_malloc(sizeof(*k));
+ grpc_subchannel_key *k = (grpc_subchannel_key *)gpr_malloc(sizeof(*k));
k->args.filter_count = args->filter_count;
if (k->args.filter_count > 0) {
- k->args.filters =
- gpr_malloc(sizeof(*k->args.filters) * k->args.filter_count);
+ k->args.filters = (const grpc_channel_filter **)gpr_malloc(
+ sizeof(*k->args.filters) * k->args.filter_count);
memcpy((grpc_channel_filter *)k->args.filters, args->filters,
sizeof(*k->args.filters) * k->args.filter_count);
} else {
@@ -137,7 +137,7 @@ grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
grpc_subchannel *c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
- gpr_avl_get(index, key, exec_ctx), "index_find");
+ (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx), "index_find");
gpr_avl_unref(index, exec_ctx);
return c;
@@ -159,7 +159,7 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
- c = gpr_avl_get(index, key, exec_ctx);
+ c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
if (c != NULL) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@@ -207,7 +207,7 @@ void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
// Check to see if this key still refers to the previously
// registered subchannel
- grpc_subchannel *c = gpr_avl_get(index, key, exec_ctx);
+ grpc_subchannel *c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
if (c != constructed) {
gpr_avl_unref(index, exec_ctx);
break;
diff --git a/src/core/ext/filters/client_channel/uri_parser.c b/src/core/ext/filters/client_channel/uri_parser.c
index e841928760..fb4fb8e694 100644
--- a/src/core/ext/filters/client_channel/uri_parser.c
+++ b/src/core/ext/filters/client_channel/uri_parser.c
@@ -45,7 +45,7 @@ static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
gpr_free(line_prefix);
- line_prefix = gpr_malloc(pfx_len + 1);
+ line_prefix = (char *)gpr_malloc(pfx_len + 1);
memset(line_prefix, ' ', pfx_len);
line_prefix[pfx_len] = 0;
gpr_log(GPR_ERROR, "%s^ here", line_prefix);
@@ -156,7 +156,8 @@ static void parse_query_parts(grpc_uri *uri) {
gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
&uri->num_query_parts);
- uri->query_parts_values = gpr_malloc(uri->num_query_parts * sizeof(char **));
+ uri->query_parts_values =
+ (char **)gpr_malloc(uri->num_query_parts * sizeof(char **));
for (size_t i = 0; i < uri->num_query_parts; i++) {
char **query_param_parts;
size_t num_query_param_parts;
@@ -269,7 +270,7 @@ grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
fragment_end = i;
}
- uri = gpr_zalloc(sizeof(*uri));
+ uri = (grpc_uri *)gpr_zalloc(sizeof(*uri));
uri->scheme =
decode_and_copy_component(exec_ctx, uri_text, scheme_begin, scheme_end);
uri->authority = decode_and_copy_component(exec_ctx, uri_text,
diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.c
index 565b0679dc..1aed488077 100644
--- a/src/core/ext/filters/deadline/deadline_filter.c
+++ b/src/core/ext/filters/deadline/deadline_filter.c
@@ -38,7 +38,7 @@
// filter stack. Yields the call combiner when the batch returns.
static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* ignored) {
- grpc_deadline_state* deadline_state = arg;
+ grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
"got on_complete from cancel_stream batch");
GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
@@ -48,8 +48,8 @@ static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
// synchronized.
static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- grpc_call_element* elem = arg;
- grpc_deadline_state* deadline_state = elem->call_data;
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
deadline_state, grpc_schedule_on_exec_ctx));
@@ -160,8 +160,10 @@ struct start_timer_after_init_state {
};
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- struct start_timer_after_init_state* state = arg;
- grpc_deadline_state* deadline_state = state->elem->call_data;
+ struct start_timer_after_init_state* state =
+ (struct start_timer_after_init_state*)arg;
+ grpc_deadline_state* deadline_state =
+ (grpc_deadline_state*)state->elem->call_data;
if (!state->in_call_combiner) {
// We are initially called without holding the call combiner, so we
// need to bounce ourselves into it.
@@ -195,7 +197,8 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
// call stack initialization is finished. To avoid that problem, we
// create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done.
- struct start_timer_after_init_state* state = gpr_zalloc(sizeof(*state));
+ struct start_timer_after_init_state* state =
+ (struct start_timer_after_init_state*)gpr_zalloc(sizeof(*state));
state->elem = elem;
state->deadline = deadline;
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.c
index 2d7429c41e..6208089f2e 100644
--- a/src/core/ext/filters/http/client/http_client_filter.c
+++ b/src/core/ext/filters/http/client/http_client_filter.c
@@ -139,8 +139,8 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *user_data, grpc_error *error) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -154,8 +154,8 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void recv_trailing_metadata_on_complete(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_error *error) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_trailing_metadata);
@@ -234,7 +234,7 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
}
static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) {
- char *payload_bytes = gpr_malloc(slice_buffer->length + 1);
+ char *payload_bytes = (char *)gpr_malloc(slice_buffer->length + 1);
size_t offset = 0;
for (size_t i = 0; i < slice_buffer->count; ++i) {
memcpy(payload_bytes + offset,
@@ -300,8 +300,8 @@ static void remove_if_present(grpc_exec_ctx *exec_ctx,
static void hc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
if (batch->recv_initial_metadata) {
@@ -536,7 +536,7 @@ static grpc_slice user_agent_from_args(const grpc_channel_args *args,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != NULL);
chand->static_scheme = scheme_from_args(args->channel_args);
@@ -552,7 +552,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GRPC_MDELEM_UNREF(exec_ctx, chand->user_agent);
}
diff --git a/src/core/ext/filters/http/http_filters_plugin.c b/src/core/ext/filters/http/http_filters_plugin.c
index a5c1b92054..88bd2250f9 100644
--- a/src/core/ext/filters/http/http_filters_plugin.c
+++ b/src/core/ext/filters/http/http_filters_plugin.c
@@ -44,7 +44,7 @@ static bool maybe_add_optional_filter(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder *builder,
void *arg) {
if (!is_building_http_like_transport(builder)) return true;
- optional_filter *filtarg = arg;
+ optional_filter *filtarg = (optional_filter *)arg;
const grpc_channel_args *channel_args =
grpc_channel_stack_builder_get_channel_arguments(builder);
bool enable = grpc_channel_arg_get_bool(
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.c
index 98a503cafc..0145dffab0 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.c
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.c
@@ -82,8 +82,8 @@ typedef struct channel_data {
static bool skip_compression(grpc_call_element *elem, uint32_t flags,
bool has_compression_algorithm) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
return true;
@@ -106,8 +106,8 @@ static grpc_error *process_send_initial_metadata(
static grpc_error *process_send_initial_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata, bool *has_compression_algorithm) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
*has_compression_algorithm = false;
grpc_stream_compression_algorithm stream_compression_algorithm =
GRPC_STREAM_COMPRESS_NONE;
@@ -285,7 +285,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
- call_data *calld = arg;
+ call_data *calld = (call_data *)arg;
if (calld->send_message_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
@@ -374,7 +374,7 @@ static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
static void compress_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
// Handle cancel_stream.
if (batch->cancel_stream) {
@@ -473,7 +473,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
GRPC_ERROR_UNREF(calld->cancel_error);
}
@@ -482,7 +482,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *channeld = elem->channel_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
/* Configuration for message compression */
channeld->enabled_algorithms_bitset =
diff --git a/src/core/ext/filters/http/server/http_server_filter.c b/src/core/ext/filters/http/server/http_server_filter.c
index a10e69ba59..554a7f530d 100644
--- a/src/core/ext/filters/http/server/http_server_filter.c
+++ b/src/core/ext/filters/http/server/http_server_filter.c
@@ -94,7 +94,7 @@ static void add_error(const char *error_name, grpc_error **cumulative,
static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_metadata_batch *b) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_error *error = GRPC_ERROR_NONE;
static const char *error_name = "Failed processing incoming headers";
@@ -263,8 +263,8 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (err == GRPC_ERROR_NONE) {
err = server_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -276,8 +276,8 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
/* Call recv_message_ready if we got the payload via the path field */
if (calld->seen_path_with_query && calld->recv_message_ready != NULL) {
*calld->pp_recv_message = calld->payload_bin_delivered
@@ -296,8 +296,8 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (calld->seen_path_with_query) {
// Do nothing. This is probably a GET request, and payload will be
// returned in hs_on_complete callback.
@@ -314,7 +314,7 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
/* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->send_initial_metadata) {
grpc_error *error = GRPC_ERROR_NONE;
@@ -376,7 +376,7 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
static void hs_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
grpc_error *error = hs_mutate_op(exec_ctx, elem, op);
if (error != GRPC_ERROR_NONE) {
@@ -393,7 +393,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
/* initialize members */
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
@@ -410,7 +410,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer);
}
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.c b/src/core/ext/filters/load_reporting/server_load_reporting_filter.c
index 7b8cf986f7..ca8a3b2a13 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.c
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.c
@@ -56,8 +56,8 @@ typedef struct channel_data {
static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (err == GRPC_ERROR_NONE) {
if (calld->recv_initial_metadata->idx.named.path != NULL) {
@@ -88,7 +88,7 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
calld->id = (intptr_t)args->call_stack;
GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -111,7 +111,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
@@ -141,7 +141,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element_args *args) {
GPR_ASSERT(!args->is_last);
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
chand->id = (intptr_t)args->channel_stack;
/* TODO(dgq): do something with the data
@@ -176,8 +176,8 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_mdelem md) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) {
calld->trailing_md_string = GRPC_MDVALUE(md);
return GRPC_FILTERED_REMOVE();
@@ -189,7 +189,7 @@ static void lr_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
GPR_TIMER_BEGIN("lr_start_transport_stream_op_batch", 0);
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c
index 199cb883b3..f56afd27d2 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c
@@ -41,7 +41,7 @@ static bool maybe_add_server_load_reporting_filter(
grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
const grpc_channel_args *args =
grpc_channel_stack_builder_get_channel_arguments(builder);
- const grpc_channel_filter *filter = arg;
+ const grpc_channel_filter *filter = (const grpc_channel_filter *)arg;
grpc_channel_stack_builder_iterator *it =
grpc_channel_stack_builder_iterator_find(builder, filter->name);
const bool already_has_load_reporting_filter =
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
index 983691bbad..0ec9353c04 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -93,8 +93,8 @@ static void chttp2_connector_shutdown(grpc_exec_ctx *exec_ctx,
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_handshaker_args *args = arg;
- chttp2_connector *c = args->user_data;
+ grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
+ chttp2_connector *c = (chttp2_connector *)args->user_data;
gpr_mu_lock(&c->mu);
if (error != GRPC_ERROR_NONE || c->shutdown) {
if (error == GRPC_ERROR_NONE) {
@@ -143,7 +143,7 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
}
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- chttp2_connector *c = arg;
+ chttp2_connector *c = (chttp2_connector *)arg;
gpr_mu_lock(&c->mu);
GPR_ASSERT(c->connecting);
c->connecting = false;
@@ -198,7 +198,7 @@ static const grpc_connector_vtable chttp2_connector_vtable = {
chttp2_connector_connect};
grpc_connector *grpc_chttp2_connector_create() {
- chttp2_connector *c = gpr_zalloc(sizeof(*c));
+ chttp2_connector *c = (chttp2_connector *)gpr_zalloc(sizeof(*c));
c->base.vtable = &chttp2_connector_vtable;
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index f207155900..d7add0538b 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -60,8 +60,9 @@ typedef struct {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_handshaker_args *args = arg;
- server_connection_state *connection_state = args->user_data;
+ grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
+ server_connection_state *connection_state =
+ (server_connection_state *)args->user_data;
gpr_mu_lock(&connection_state->server_state->mu);
if (error != GRPC_ERROR_NONE || connection_state->server_state->shutdown) {
const char *error_str = grpc_error_string(error);
@@ -108,7 +109,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_pollset *accepting_pollset,
grpc_tcp_server_acceptor *acceptor) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
if (state->shutdown) {
gpr_mu_unlock(&state->mu);
@@ -143,7 +144,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
void *arg, grpc_pollset **pollsets,
size_t pollset_count) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
state->shutdown = false;
gpr_mu_unlock(&state->mu);
@@ -153,7 +154,7 @@ static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
/* ensure all threads have unlocked */
gpr_mu_lock(&state->mu);
grpc_closure *destroy_done = state->server_destroy_listener_done;
@@ -178,7 +179,7 @@ static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
grpc_server *server, void *arg,
grpc_closure *destroy_done) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
state->shutdown = true;
state->server_destroy_listener_done = destroy_done;
@@ -208,7 +209,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
if (err != GRPC_ERROR_NONE) {
goto error;
}
- state = gpr_zalloc(sizeof(*state));
+ state = (server_state *)gpr_zalloc(sizeof(*state));
GRPC_CLOSURE_INIT(&state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, state,
grpc_schedule_on_exec_ctx);
@@ -225,7 +226,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
gpr_mu_init(&state->mu);
const size_t naddrs = resolved->naddrs;
- errors = gpr_malloc(sizeof(*errors) * naddrs);
+ errors = (grpc_error **)gpr_malloc(sizeof(*errors) * naddrs);
for (i = 0; i < naddrs; i++) {
errors[i] =
grpc_tcp_server_add_port(tcp_server, &resolved->addrs[i], &port_temp);
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 7ff77b7cd9..3fd701fe2f 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -84,8 +84,6 @@ grpc_tracer_flag grpc_trace_chttp2_refcount =
GRPC_TRACER_INITIALIZER(false, "chttp2_refcount");
#endif
-static const grpc_transport_vtable vtable;
-
/* forward declarations of various callbacks that we'll build closures around */
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
@@ -248,6 +246,8 @@ void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
#endif
+static const grpc_transport_vtable *get_vtable(void);
+
static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const grpc_channel_args *channel_args,
grpc_endpoint *ep, bool is_client) {
@@ -257,7 +257,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
- t->base.vtable = &vtable;
+ t->base.vtable = get_vtable();
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
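
[Editor's note] The chttp2_transport.c hunks above also drop the forward declaration `static const grpc_transport_vtable vtable;` in favour of a `get_vtable()` accessor. A plausible reason, not stated in this diff, is that C allows a tentative declaration of a file-scope const object ahead of its initializer while C++ does not, so routing the early use through a function keeps a single definition. An illustrative standalone sketch with hypothetical names:

#include <stdio.h>

typedef struct transport_vtable { const char *name; } transport_vtable;

/* only the accessor is forward-declared, not the const object itself */
static const transport_vtable *get_vtable(void);

static void init_transport(const transport_vtable **slot) {
  *slot = get_vtable(); /* used before the vtable's definition appears below */
}

static const transport_vtable g_vtable = {"demo_transport"}; /* single definition */
static const transport_vtable *get_vtable(void) { return &g_vtable; }

int main(void) {
  const transport_vtable *v = NULL;
  init_transport(&v);
  printf("vtable: %s\n", v->name);
  return 0;
}
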
@@ -557,11 +557,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
- GRPC_CLOSURE_INIT(&t->write_action, write_action, t,
- t->opt_target == GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT
- ? grpc_executor_scheduler
- : grpc_schedule_on_exec_ctx);
-
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
t->ping_state.is_delayed_ping_timer_set = false;
@@ -589,7 +584,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->destroying = 1;
close_transport_locked(
exec_ctx, t,
@@ -715,7 +710,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
- grpc_chttp2_stream *s = sp;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
grpc_chttp2_transport *t = s->t;
GPR_TIMER_BEGIN("destroy_stream", 0);
@@ -799,7 +794,7 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
uint32_t id) {
- return grpc_chttp2_stream_map_find(&t->stream_map, id);
+ return (grpc_chttp2_stream *)grpc_chttp2_stream_map_find(&t->stream_map, id);
}
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
@@ -858,6 +853,7 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
switch (t->write_state) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
+ t->is_first_write_in_batch = true;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_SCHED(
exec_ctx,
@@ -876,52 +872,100 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
}
-void grpc_chttp2_become_writable(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- grpc_chttp2_stream_write_type stream_write_type, const char *reason) {
+void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ bool also_initiate_write, const char *reason) {
if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
}
- switch (stream_write_type) {
- case GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK:
- break;
- case GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED:
- grpc_chttp2_initiate_write(exec_ctx, t, reason);
- break;
- case GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED:
- grpc_chttp2_initiate_write(exec_ctx, t, reason);
- break;
+ if (also_initiate_write) {
+ grpc_chttp2_initiate_write(exec_ctx, t, reason);
+ }
+}
+
+static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t,
+ bool early_results_scheduled,
+ bool partial_write) {
+ /* if it's not the first write in a batch, always offload to the executor:
+ we'll probably end up queuing against the kernel anyway, so we'll likely
+ get better latency overall if we switch writing work elsewhere and continue
+ with application work above */
+ if (!t->is_first_write_in_batch) {
+ return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
+ }
+ /* equivalently, if it's a partial write, we *know* we're going to be taking a
+ thread jump to write it because of the above, may as well do so
+ immediately */
+ if (partial_write) {
+ return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
+ switch (t->opt_target) {
+ case GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT:
+ /* executor gives us the largest probability of being able to batch a
+ * write with others on this transport */
+ return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
+ case GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY:
+ return grpc_schedule_on_exec_ctx;
+ }
+ GPR_UNREACHABLE_CODE(return NULL);
+}
+
+#define WRITE_STATE_TUPLE_TO_INT(p, i) (2 * (int)(p) + (int)(i))
+static const char *begin_writing_desc(bool partial, bool inlined) {
+ switch (WRITE_STATE_TUPLE_TO_INT(partial, inlined)) {
+ case WRITE_STATE_TUPLE_TO_INT(false, false):
+ return "begin write in background";
+ case WRITE_STATE_TUPLE_TO_INT(false, true):
+ return "begin write in current thread";
+ case WRITE_STATE_TUPLE_TO_INT(true, false):
+ return "begin partial write in background";
+ case WRITE_STATE_TUPLE_TO_INT(true, true):
+ return "begin partial write in current thread";
+ }
+ GPR_UNREACHABLE_CODE(return "bad state tuple");
}
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("write_action_begin_locked", 0);
- grpc_chttp2_transport *t = gt;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
- switch (t->closed ? GRPC_CHTTP2_NOTHING_TO_WRITE
- : grpc_chttp2_begin_write(exec_ctx, t)) {
- case GRPC_CHTTP2_NOTHING_TO_WRITE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
- "begin writing nothing");
- GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
- break;
- case GRPC_CHTTP2_PARTIAL_WRITE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
- "begin writing partial");
- GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
- break;
- case GRPC_CHTTP2_FULL_WRITE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
- "begin writing");
- GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
- break;
+ grpc_chttp2_begin_write_result r;
+ if (t->closed) {
+ r.writing = false;
+ } else {
+ r = grpc_chttp2_begin_write(exec_ctx, t);
+ }
+ if (r.writing) {
+ if (r.partial) {
+ GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx);
+ }
+ if (!t->is_first_write_in_batch) {
+ GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx);
+ }
+ grpc_closure_scheduler *scheduler =
+ write_scheduler(t, r.early_results_scheduled, r.partial);
+ if (scheduler != grpc_schedule_on_exec_ctx) {
+ GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx);
+ }
+ set_write_state(
+ exec_ctx, t, r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
+ : GRPC_CHTTP2_WRITE_STATE_WRITING,
+ begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx));
+ GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_INIT(&t->write_action,
+ write_action, t, scheduler),
+ GRPC_ERROR_NONE);
+ } else {
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
+ "begin writing nothing");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
}
GPR_TIMER_END("write_action_begin_locked", 0);
}
static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
- grpc_chttp2_transport *t = gt;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_TIMER_BEGIN("write_action", 0);
grpc_endpoint_write(
exec_ctx, t->ep, &t->outbuf,
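
[Editor's note] The hunk above replaces the fixed write_action scheduler with a per-write choice: write_scheduler() offloads to the executor whenever the write is not the first in a batch, is a partial write, or the transport is tuned for throughput; only the first, complete, latency-tuned write stays on the current thread, and begin_writing_desc names the four resulting states. A hypothetical, self-contained restatement of that decision (toy names, not the gRPC API):

#include <stdbool.h>
#include <stdio.h>

typedef enum { OPT_FOR_LATENCY, OPT_FOR_THROUGHPUT } opt_target;

/* Returns true when the write should be offloaded to an executor thread,
   false when it can run inline on the current thread. */
static bool offload_write(bool first_write_in_batch, bool partial_write,
                          opt_target target) {
  if (!first_write_in_batch) return true; /* continuing a batch: offload */
  if (partial_write) return true;         /* more writing follows anyway */
  return target == OPT_FOR_THROUGHPUT;    /* latency-tuned writes stay inline */
}

int main(void) {
  /* first, full, latency-tuned write: the only inline case */
  printf("%s\n", offload_write(true, false, OPT_FOR_LATENCY) ? "background"
                                                             : "current thread");
  /* continued write of the same batch: always offloaded */
  printf("%s\n", offload_write(false, false, OPT_FOR_LATENCY) ? "background"
                                                              : "current thread");
  return 0;
}
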
@@ -933,7 +977,7 @@ static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (error != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
@@ -958,7 +1002,8 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
GPR_TIMER_MARK("state=writing_stale_no_poller", 0);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
- "continue writing [!covered]");
+ "continue writing");
+ t->is_first_write_in_batch = false;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_RUN(
exec_ctx,
@@ -1060,9 +1105,7 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
post_destructive_reclaimer(exec_ctx, t);
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "new_stream");
+ grpc_chttp2_become_writable(exec_ctx, t, s, true, "new_stream");
}
/* cancel out streams that will never be started */
while (t->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@@ -1111,12 +1154,14 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACER_ON(grpc_http_trace)) {
const char *errstr = grpc_error_string(error);
- gpr_log(GPR_DEBUG,
- "complete_closure_step: %p refs=%d flags=0x%04x desc=%s err=%s",
- closure,
- (int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
- (int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT),
- desc, errstr);
+ gpr_log(
+ GPR_DEBUG,
+ "complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
+ "write_state=%s",
+ t, closure,
+ (int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
+ (int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT), desc,
+ errstr, write_state_name(t->write_state));
}
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
@@ -1157,9 +1202,7 @@ static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s) {
if (s->id != 0 && (!s->write_buffering ||
s->flow_controlled_buffer.length > t->write_buffer_size)) {
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "op.send_message");
+ grpc_chttp2_become_writable(exec_ctx, t, s, true, "op.send_message");
}
}
@@ -1191,15 +1234,19 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
} else {
grpc_chttp2_write_cb *cb = t->write_cb_pool;
if (cb == NULL) {
- cb = gpr_malloc(sizeof(*cb));
+ cb = (grpc_chttp2_write_cb *)gpr_malloc(sizeof(*cb));
} else {
t->write_cb_pool = cb->next;
}
cb->call_at_byte = notify_offset;
cb->closure = s->fetching_send_message_finished;
s->fetching_send_message_finished = NULL;
- cb->next = s->on_write_finished_cbs;
- s->on_write_finished_cbs = cb;
+ grpc_chttp2_write_cb **list =
+ s->fetching_send_message->flags & GRPC_WRITE_THROUGH
+ ? &s->on_write_finished_cbs
+ : &s->on_flow_controlled_cbs;
+ cb->next = *list;
+ *list = cb;
}
s->fetching_send_message = NULL;
return; /* early out */
@@ -1219,7 +1266,7 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
grpc_error *error) {
- grpc_chttp2_stream *s = gs;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
grpc_chttp2_transport *t = s->t;
if (error == GRPC_ERROR_NONE) {
error = grpc_byte_stream_pull(exec_ctx, s->fetching_send_message,
@@ -1254,8 +1301,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
- grpc_transport_stream_op_batch *op = stream_op;
- grpc_chttp2_stream *s = op->handler_private.extra_arg;
+ grpc_transport_stream_op_batch *op =
+ (grpc_transport_stream_op_batch *)stream_op;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)op->handler_private.extra_arg;
grpc_transport_stream_op_batch_payload *op_payload = op->payload;
grpc_chttp2_transport *t = s->t;
@@ -1308,7 +1356,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if ((s->stream_compression_send_enabled =
(op_payload->send_initial_metadata.send_initial_metadata->idx.named
.content_encoding != NULL)) == true) {
- s->compressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer));
+ s->compressed_data_buffer =
+ (grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(s->compressed_data_buffer);
}
@@ -1355,14 +1404,13 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
} else {
GPR_ASSERT(s->id != 0);
- grpc_chttp2_stream_write_type write_type =
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED;
+ bool initiate_write = true;
if (op->send_message &&
(op->payload->send_message.send_message->flags &
GRPC_WRITE_BUFFER_HINT)) {
- write_type = GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK;
+ initiate_write = false;
}
- grpc_chttp2_become_writable(exec_ctx, t, s, write_type,
+ grpc_chttp2_become_writable(exec_ctx, t, s, initiate_write,
"op.send_initial_metadata");
}
} else {
@@ -1471,8 +1519,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
} else if (s->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
bytes before going writable */
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
+ grpc_chttp2_become_writable(exec_ctx, t, s, true,
"op.send_trailing_metadata");
}
}
@@ -1599,7 +1646,7 @@ static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->ping_state.is_delayed_ping_timer_set = false;
grpc_chttp2_initiate_write(exec_ctx, t, "retry_send_ping");
}
@@ -1651,8 +1698,9 @@ void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
void *stream_op,
grpc_error *error_ignored) {
- grpc_transport_op *op = stream_op;
- grpc_chttp2_transport *t = op->handler_private.extra_arg;
+ grpc_transport_op *op = (grpc_transport_op *)stream_op;
+ grpc_chttp2_transport *t =
+ (grpc_chttp2_transport *)op->handler_private.extra_arg;
grpc_error *close_transport = op->disconnect_with_error;
if (op->goaway_error) {
@@ -1864,7 +1912,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t id, grpc_error *error) {
- grpc_chttp2_stream *s = grpc_chttp2_stream_map_delete(&t->stream_map, id);
+ grpc_chttp2_stream *s =
+ (grpc_chttp2_stream *)grpc_chttp2_stream_map_delete(&t->stream_map, id);
GPR_ASSERT(s);
if (t->incoming_stream == s) {
t->incoming_stream = NULL;
@@ -1995,6 +2044,21 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
return error;
}
+static void flush_write_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_chttp2_write_cb **list,
+ grpc_error *error) {
+ while (*list) {
+ grpc_chttp2_write_cb *cb = *list;
+ *list = cb->next;
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
+ GRPC_ERROR_REF(error),
+ "on_write_finished_cb");
+ cb->next = t->write_cb_pool;
+ t->write_cb_pool = cb;
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
@@ -2014,16 +2078,9 @@ void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
"fetching_send_message_finished");
- while (s->on_write_finished_cbs) {
- grpc_chttp2_write_cb *cb = s->on_write_finished_cbs;
- s->on_write_finished_cbs = cb->next;
- grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
- GRPC_ERROR_REF(error),
- "on_write_finished_cb");
- cb->next = t->write_cb_pool;
- t->write_cb_pool = cb;
- }
- GRPC_ERROR_UNREF(error);
+ flush_write_list(exec_ctx, t, s, &s->on_write_finished_cbs,
+ GRPC_ERROR_REF(error));
+ flush_write_list(exec_ctx, t, s, &s->on_flow_controlled_cbs, error);
}
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
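
[Editor's note] grpc_chttp2_fail_pending_writes now drains two callback lists through the new flush_write_list helper: on_write_finished_cbs and the new on_flow_controlled_cbs, which continue_fetching_send_locked fills for messages sent without GRPC_WRITE_THROUGH. flush_write_list takes ownership of the error it is given (it unrefs it after the loop), which is why the first call above passes GRPC_ERROR_REF(error) and the second passes error itself. A hypothetical standalone sketch of that ownership convention, using a toy refcounted type rather than gRPC's grpc_error:

#include <stdlib.h>

typedef struct err { int refs; } err;

static err *err_new(void) { err *e = (err *)malloc(sizeof(*e)); e->refs = 1; return e; }
static err *err_ref(err *e) { e->refs++; return e; }
static void err_unref(err *e) { if (--e->refs == 0) free(e); }

/* Takes ownership of 'e': uses it, then drops the reference it was handed. */
static void flush_one_list(err *e) {
  /* ... report 'e' to each queued callback ... */
  err_unref(e);
}

static void fail_pending(err *e) {
  flush_one_list(err_ref(e)); /* keep a reference for the second list */
  flush_one_list(e);          /* last call consumes the caller's reference */
}

int main(void) {
  fail_pending(err_new()); /* one allocation, exactly one free: no leak, no double free */
  return 0;
}
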
@@ -2242,8 +2299,8 @@ typedef struct {
} cancel_stream_cb_args;
static void cancel_stream_cb(void *user_data, uint32_t key, void *stream) {
- cancel_stream_cb_args *args = user_data;
- grpc_chttp2_stream *s = stream;
+ cancel_stream_cb_args *args = (cancel_stream_cb_args *)user_data;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)stream;
grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s,
GRPC_ERROR_REF(args->error));
}
@@ -2267,13 +2324,11 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
break;
case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
+ grpc_chttp2_become_writable(exec_ctx, t, s, true,
"immediate stream flowctl");
break;
case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK,
+ grpc_chttp2_become_writable(exec_ctx, t, s, false,
"queue stream flowctl");
break;
}
@@ -2345,7 +2400,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("reading_action_locked", 0);
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
GRPC_ERROR_REF(error);
@@ -2386,9 +2441,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (t->flow_control.initial_window_update > 0) {
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
- grpc_chttp2_become_writable(
- exec_ctx, t, s, GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
- "unstalled");
+ grpc_chttp2_become_writable(exec_ctx, t, s, true, "unstalled");
}
}
t->flow_control.initial_window_update = 0;
@@ -2430,7 +2483,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Start BDP ping", t->peer_string);
}
@@ -2443,7 +2496,7 @@ static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping", t->peer_string);
}
@@ -2492,7 +2545,7 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
if (t->destroying || t->closed) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@@ -2524,7 +2577,7 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
grpc_timer_init(
exec_ctx, &t->keepalive_watchdog_timer,
@@ -2534,7 +2587,7 @@ static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
@@ -2551,7 +2604,7 @@ static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@@ -2632,7 +2685,8 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
void *argp,
grpc_error *error_ignored) {
- grpc_chttp2_incoming_byte_stream *bs = argp;
+ grpc_chttp2_incoming_byte_stream *bs =
+ (grpc_chttp2_incoming_byte_stream *)argp;
grpc_chttp2_transport *t = bs->transport;
grpc_chttp2_stream *s = bs->stream;
@@ -2842,7 +2896,8 @@ static const grpc_byte_stream_vtable grpc_chttp2_incoming_byte_stream_vtable = {
static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
void *byte_stream,
grpc_error *error_ignored) {
- grpc_chttp2_incoming_byte_stream *bs = byte_stream;
+ grpc_chttp2_incoming_byte_stream *bs =
+ (grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_chttp2_stream *s = bs->stream;
grpc_chttp2_transport *t = s->t;
@@ -2898,7 +2953,7 @@ static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (error == GRPC_ERROR_NONE &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
/* Channel with no active streams: send a goaway to try and make it
@@ -2928,11 +2983,12 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
t->destructive_reclaimer_registered = false;
if (error == GRPC_ERROR_NONE && n > 0) {
- grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
+ grpc_chttp2_stream *s =
+ (grpc_chttp2_stream *)grpc_chttp2_stream_map_rand(&t->stream_map);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id);
@@ -2976,10 +3032,13 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
destroy_transport,
chttp2_get_endpoint};
+static const grpc_transport_vtable *get_vtable(void) { return &vtable; }
+
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
grpc_endpoint *ep, int is_client) {
- grpc_chttp2_transport *t = gpr_zalloc(sizeof(grpc_chttp2_transport));
+ grpc_chttp2_transport *t =
+ (grpc_chttp2_transport *)gpr_zalloc(sizeof(grpc_chttp2_transport));
init_transport(exec_ctx, t, channel_args, ep, is_client != 0);
return &t->base;
}
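Most of the purely mechanical changes in this file, and in the frame_*, hpack_*, parsing, and channel files below, add explicit casts on gpr_malloc/gpr_zalloc results and on void * closure arguments. C converts void * to any object pointer implicitly, while C++ rejects that conversion, so the casts appear to be groundwork for building these sources with a C++ compiler; they do not change behavior under a C compiler. A small illustration of the difference, using a hypothetical widget type:

#include <stdlib.h>

typedef struct widget { int id; } widget;

int main(void) {
  /* Valid C but rejected by C++: implicit conversion from void *.      */
  /* widget *a = malloc(sizeof(*a));                                    */

  /* Explicit cast compiles as both C and C++, matching the diff style. */
  widget *b = (widget *)malloc(sizeof(*b));
  b->id = 42;
  free(b);
  return 0;
}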
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.c b/src/core/ext/transport/chttp2/transport/frame_goaway.c
index 4bce84f21c..78ec08e177 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.c
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.c
@@ -46,7 +46,7 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
gpr_free(p->debug_data);
p->debug_length = length - 8;
- p->debug_data = gpr_malloc(p->debug_length);
+ p->debug_data = (char *)gpr_malloc(p->debug_length);
p->debug_pos = 0;
p->state = GRPC_CHTTP2_GOAWAY_LSI0;
return GRPC_ERROR_NONE;
@@ -60,7 +60,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_goaway_parser *p = parser;
+ grpc_chttp2_goaway_parser *p = (grpc_chttp2_goaway_parser *)parser;
switch (p->state) {
case GRPC_CHTTP2_GOAWAY_LSI0:
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index 3d7c6fbfad..582fd7bfaa 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -75,7 +75,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_ping_parser *p = parser;
+ grpc_chttp2_ping_parser *p = (grpc_chttp2_ping_parser *)parser;
while (p->byte != 8 && cur != end) {
p->opaque_8bytes |= (((uint64_t)*cur) << (56 - 8 * p->byte));
@@ -113,7 +113,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
if (!g_disable_ping_ack) {
if (t->ping_ack_count == t->ping_ack_capacity) {
t->ping_ack_capacity = GPR_MAX(t->ping_ack_capacity * 3 / 2, 3);
- t->ping_acks = gpr_realloc(
+ t->ping_acks = (uint64_t *)gpr_realloc(
t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks));
}
t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index 689dc8935c..0133b6efa2 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -77,7 +77,7 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_rst_stream_parser *p = parser;
+ grpc_chttp2_rst_stream_parser *p = (grpc_chttp2_rst_stream_parser *)parser;
while (p->byte != 4 && cur != end) {
p->reason_bytes[p->byte] = *cur;
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.c b/src/core/ext/transport/chttp2/transport/frame_settings.c
index 057d3d9ed3..806100adaa 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.c
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.c
@@ -111,7 +111,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last) {
- grpc_chttp2_settings_parser *parser = p;
+ grpc_chttp2_settings_parser *parser = (grpc_chttp2_settings_parser *)p;
const uint8_t *cur = GRPC_SLICE_START_PTR(slice);
const uint8_t *end = GRPC_SLICE_END_PTR(slice);
char *msg;
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.c b/src/core/ext/transport/chttp2/transport/frame_window_update.c
index 65f3b01d77..c94f7725bf 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.c
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.c
@@ -70,7 +70,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_window_update_parser *p = parser;
+ grpc_chttp2_window_update_parser *p =
+ (grpc_chttp2_window_update_parser *)parser;
while (p->byte != 4 && cur != end) {
p->amount |= ((uint32_t)*cur) << (8 * (3 - p->byte));
@@ -98,9 +99,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_chttp2_flowctl_recv_stream_update(
&t->flow_control, &s->flow_control, received_update);
if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) {
- grpc_chttp2_become_writable(
- exec_ctx, t, s, GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
- "stream.read_flow_control");
+ grpc_chttp2_become_writable(exec_ctx, t, s, true,
+ "stream.read_flow_control");
}
}
} else {
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
index a0e748e7b1..3cd1a7ee5c 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
@@ -536,7 +536,7 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) {
c->max_table_elems = c->cap_table_elems;
c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
c->table_elem_size =
- gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
+ (uint16_t *)gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
memset(c->table_elem_size, 0,
sizeof(*c->table_elem_size) * c->cap_table_elems);
for (size_t i = 0; i < GPR_ARRAY_SIZE(c->entries_keys); i++) {
@@ -564,7 +564,8 @@ void grpc_chttp2_hpack_compressor_set_max_usable_size(
}
static void rebuild_elems(grpc_chttp2_hpack_compressor *c, uint32_t new_cap) {
- uint16_t *table_elem_size = gpr_malloc(sizeof(*table_elem_size) * new_cap);
+ uint16_t *table_elem_size =
+ (uint16_t *)gpr_malloc(sizeof(*table_elem_size) * new_cap);
uint32_t i;
memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap);
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index c21d76ba71..82ff2c8e2c 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -1284,7 +1284,7 @@ static void append_bytes(grpc_chttp2_hpack_parser_string *str,
GPR_ASSERT(str->data.copied.length + length <= UINT32_MAX);
str->data.copied.capacity = (uint32_t)(str->data.copied.length + length);
str->data.copied.str =
- gpr_realloc(str->data.copied.str, str->data.copied.capacity);
+ (char *)gpr_realloc(str->data.copied.str, str->data.copied.capacity);
}
memcpy(str->data.copied.str + str->data.copied.length, data, length);
GPR_ASSERT(length <= UINT32_MAX - str->data.copied.length);
@@ -1643,7 +1643,7 @@ static const maybe_complete_func_type maybe_complete_funcs[] = {
static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
- grpc_chttp2_stream *s = sp;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
grpc_chttp2_transport *t = s->t;
if (!s->write_closed) {
grpc_slice_buffer_add(
@@ -1665,7 +1665,8 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx,
if (!grpc_slice_eq(content_encoding, GRPC_MDSTR_IDENTITY)) {
if (grpc_slice_eq(content_encoding, GRPC_MDSTR_GZIP)) {
s->stream_compression_recv_enabled = true;
- s->decompressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer));
+ s->decompressed_data_buffer =
+ (grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(s->decompressed_data_buffer);
}
}
@@ -1677,7 +1678,7 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last) {
- grpc_chttp2_hpack_parser *parser = hpack_parser;
+ grpc_chttp2_hpack_parser *parser = (grpc_chttp2_hpack_parser *)hpack_parser;
GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
if (s != NULL) {
s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.c b/src/core/ext/transport/chttp2/transport/hpack_table.c
index 944d778011..bbd135a318 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.c
@@ -173,7 +173,7 @@ void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE;
tbl->max_entries = tbl->cap_entries =
entries_for_bytes(tbl->current_table_bytes);
- tbl->ents = gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
+ tbl->ents = (grpc_mdelem *)gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries);
for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
tbl->static_ents[i - 1] = grpc_mdelem_from_slices(
@@ -228,7 +228,7 @@ static void evict1(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
}
static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) {
- grpc_mdelem *ents = gpr_malloc(sizeof(*ents) * new_cap);
+ grpc_mdelem *ents = (grpc_mdelem *)gpr_malloc(sizeof(*ents) * new_cap);
uint32_t i;
for (i = 0; i < tbl->num_ents; i++) {
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 9fff30d54f..0fbedd1e56 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -262,6 +262,10 @@ struct grpc_chttp2_transport {
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
+ /** is this the first write in a series of writes?
+ set when we initiate writing from idle, cleared when we
+ initiate writing from writing+more */
+ bool is_first_write_in_batch;
/** is the transport destroying itself? */
uint8_t destroying;
@@ -483,6 +487,7 @@ struct grpc_chttp2_stream {
grpc_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
+ int64_t flow_controlled_bytes_flowed;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
@@ -555,6 +560,7 @@ struct grpc_chttp2_stream {
grpc_slice_buffer flow_controlled_buffer;
+ grpc_chttp2_write_cb *on_flow_controlled_cbs;
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
@@ -595,10 +601,13 @@ struct grpc_chttp2_stream {
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, const char *reason);
-typedef enum {
- GRPC_CHTTP2_NOTHING_TO_WRITE,
- GRPC_CHTTP2_PARTIAL_WRITE,
- GRPC_CHTTP2_FULL_WRITE,
+typedef struct {
+ /** are we writing? */
+ bool writing;
+ /** if writing: true if only a partial flush was performed, false if complete */
+ bool partial;
+ /** did we queue any completions as part of beginning the write */
+ bool early_results_scheduled;
} grpc_chttp2_begin_write_result;
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
@@ -840,22 +849,12 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
-typedef enum {
- /* don't initiate a transport write, but piggyback on the next one */
- GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK,
- /* initiate a covered write */
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- /* initiate an uncovered write */
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED
-} grpc_chttp2_stream_write_type;
-
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
- grpc_chttp2_stream_write_type type,
- const char *reason);
+ bool also_initiate_write, const char *reason);
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
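The internal.h hunks above simplify two write-path types: the three-valued grpc_chttp2_stream_write_type collapses into a single also_initiate_write bool on grpc_chttp2_become_writable, and grpc_chttp2_begin_write_result becomes a struct of three independent flags instead of a three-state enum. A hedged sketch of how a caller might branch on the struct form; the names and logic below are illustrative only, not the actual transport code:

#include <stdbool.h>

typedef struct {
  bool writing;                 /* anything queued for the endpoint?      */
  bool partial;                 /* did we stop early at the size target?  */
  bool early_results_scheduled; /* were completions queued while packing? */
} begin_write_result;           /* mirrors grpc_chttp2_begin_write_result */

/* Illustrative decision helper for the three flags. */
static const char *describe(begin_write_result r) {
  if (!r.writing) return "nothing to write";
  if (r.partial) return "partial write: more data remains, write again soon";
  return "full write: everything currently writable was flushed";
}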
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 19bd86fd0c..6c12c91365 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -106,7 +106,8 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
return err;
}
++cur;
- ++t->deframe_state;
+ t->deframe_state =
+ (grpc_chttp2_deframe_transport_state)(1 + (int)t->deframe_state);
}
if (cur == end) {
return GRPC_ERROR_NONE;
@@ -402,7 +403,7 @@ static void free_timeout(void *p) { gpr_free(p); }
static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdelem md) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_initial_header", 0);
@@ -426,11 +427,12 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
}
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) {
- gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
+ gpr_timespec *cached_timeout =
+ (gpr_timespec *)grpc_mdelem_get_user_data(md, free_timeout);
gpr_timespec timeout;
if (cached_timeout == NULL) {
/* not already parsed: parse it now, and store the result away */
- cached_timeout = gpr_malloc(sizeof(gpr_timespec));
+ cached_timeout = (gpr_timespec *)gpr_malloc(sizeof(gpr_timespec));
if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
@@ -482,7 +484,7 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdelem md) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_trailing_header", 0);
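The timeout handling above now casts the value returned by grpc_mdelem_get_user_data; the underlying pattern is parse-once memoization: the first time a given grpc-timeout header value is seen it is decoded and the result is attached to the interned metadata element, so later uses skip the parse. A generic sketch of that caching shape in plain C (not the gRPC metadata API; header_value and timeout_ms are made-up names):

#include <stdlib.h>

/* Hypothetical interned header value with a lazily filled parse cache. */
typedef struct header_value {
  const char *raw;   /* e.g. "250"             */
  long *cached_ms;   /* NULL until first parse */
} header_value;

static long timeout_ms(header_value *v) {
  if (v->cached_ms == NULL) {
    v->cached_ms = (long *)malloc(sizeof(long));
    *v->cached_ms = strtol(v->raw, NULL, 10);  /* parse once, reuse after */
  }
  return *v->cached_ms;
}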
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.c b/src/core/ext/transport/chttp2/transport/stream_map.c
index e2f10bc208..650090d8f0 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.c
+++ b/src/core/ext/transport/chttp2/transport/stream_map.c
@@ -27,8 +27,8 @@
void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map *map,
size_t initial_capacity) {
GPR_ASSERT(initial_capacity > 1);
- map->keys = gpr_malloc(sizeof(uint32_t) * initial_capacity);
- map->values = gpr_malloc(sizeof(void *) * initial_capacity);
+ map->keys = (uint32_t *)gpr_malloc(sizeof(uint32_t) * initial_capacity);
+ map->values = (void **)gpr_malloc(sizeof(void *) * initial_capacity);
map->count = 0;
map->free = 0;
map->capacity = initial_capacity;
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index 5e9d97d485..fa224a49a4 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -123,15 +123,18 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
(t->ping_state.pings_before_data_required != 0);
}
-static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int64_t send_bytes,
- grpc_chttp2_write_cb **list, grpc_error *error) {
+ grpc_chttp2_write_cb **list, int64_t *ctr,
+ grpc_error *error) {
+ bool sched_any = false;
grpc_chttp2_write_cb *cb = *list;
*list = NULL;
- s->flow_controlled_bytes_written += send_bytes;
+ *ctr += send_bytes;
while (cb) {
grpc_chttp2_write_cb *next = cb->next;
- if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
+ if (cb->call_at_byte <= *ctr) {
+ sched_any = true;
finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
} else {
add_to_write_list(list, cb);
@@ -139,6 +142,7 @@ static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
cb = next;
}
GRPC_ERROR_UNREF(error);
+ return sched_any;
}
static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
@@ -164,6 +168,13 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
+ /* stats histogram counters: we increment these throughout this function,
+ and at the end publish to the central stats histograms */
+ int flow_control_writes = 0;
+ int initial_metadata_writes = 0;
+ int trailing_metadata_writes = 0;
+ int message_writes = 0;
+
GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx);
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
@@ -177,6 +188,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
t->force_send_settings = 0;
t->dirtied_local_settings = 0;
t->sent_local_settings = 1;
+ GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
}
/* simple writes are queued to qbuf, and flushed here */
@@ -196,13 +208,13 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
}
}
- bool partial_write = false;
+ grpc_chttp2_begin_write_result result = {false, false, false};
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (true) {
if (t->outbuf.length > target_write_size(t)) {
- partial_write = true;
+ result.partial = true;
break;
}
@@ -246,7 +258,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
.stats = &s->stats.outgoing};
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
s->send_initial_metadata, &hopt, &t->outbuf);
- now_writing = true;
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
if (!t->is_client) {
@@ -254,6 +265,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
}
+ initial_metadata_writes++;
} else {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
@@ -269,10 +281,15 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
[num_extra_headers_for_trailing_metadata++] =
&s->send_initial_metadata->idx.named.content_type->md;
}
+ trailing_metadata_writes++;
}
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
+ result.early_results_scheduled = true;
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_NONE,
+ "send_initial_metadata_finished");
}
/* send any window updates */
uint32_t stream_announce = grpc_chttp2_flowctl_maybe_send_stream_update(
@@ -288,6 +305,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
gpr_inf_past(GPR_CLOCK_MONOTONIC);
t->ping_recv_state.ping_strikes = 0;
}
+ flow_control_writes++;
}
if (sent_initial_metadata) {
/* send any body bytes, if allowed by flow control */
@@ -306,6 +324,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (max_outgoing > 0) {
bool is_last_data_frame = false;
bool is_last_frame = false;
+ size_t sending_bytes_before = s->sending_bytes;
if (s->stream_compression_send_enabled) {
while ((s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer->length > 0) &&
@@ -373,6 +392,11 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
&s->stats.outgoing));
}
}
+ result.early_results_scheduled |=
+ update_list(exec_ctx, t, s,
+ (int64_t)(s->sending_bytes - sending_bytes_before),
+ &s->on_flow_controlled_cbs,
+ &s->flow_controlled_bytes_flowed, GRPC_ERROR_NONE);
now_writing = true;
if (s->flow_controlled_buffer.length > 0 ||
(s->stream_compression_send_enabled &&
@@ -380,6 +404,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
+ message_writes++;
} else if (t->flow_control.remote_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
@@ -415,6 +440,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
num_extra_headers_for_trailing_metadata,
s->send_trailing_metadata, &hopt,
&t->outbuf);
+ trailing_metadata_writes++;
}
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
@@ -424,10 +450,22 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing));
}
now_writing = true;
+ result.early_results_scheduled = true;
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->send_trailing_metadata_finished,
+ GRPC_ERROR_NONE, "send_trailing_metadata_finished");
}
}
if (now_writing) {
+ GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
+ exec_ctx, initial_metadata_writes);
+ GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes);
+ GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(
+ exec_ctx, trailing_metadata_writes);
+ GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx,
+ flow_control_writes);
+
if (!grpc_chttp2_list_add_writing_stream(t, s)) {
/* already in writing list: drop ref */
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
@@ -465,9 +503,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
- return t->outbuf.count > 0 ? (partial_write ? GRPC_CHTTP2_PARTIAL_WRITE
- : GRPC_CHTTP2_FULL_WRITE)
- : GRPC_CHTTP2_NOTHING_TO_WRITE;
+ result.writing = t->outbuf.count > 0;
+ return result;
}
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -476,20 +513,13 @@ void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
- if (s->sent_initial_metadata) {
- grpc_chttp2_complete_closure_step(
- exec_ctx, t, s, &s->send_initial_metadata_finished,
- GRPC_ERROR_REF(error), "send_initial_metadata_finished");
- }
if (s->sending_bytes != 0) {
update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
- &s->on_write_finished_cbs, GRPC_ERROR_REF(error));
+ &s->on_write_finished_cbs, &s->flow_controlled_bytes_written,
+ GRPC_ERROR_REF(error));
s->sending_bytes = 0;
}
if (s->sent_trailing_metadata) {
- grpc_chttp2_complete_closure_step(
- exec_ctx, t, s, &s->send_trailing_metadata_finished,
- GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_REF(error));
}
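update_list now advances a caller-supplied byte counter and reports whether any queued callback crossed its call_at_byte threshold: grpc_chttp2_begin_write feeds the new flow_controlled_bytes_flowed counter for on_flow_controlled_cbs, while grpc_chttp2_end_write keeps flow_controlled_bytes_written for on_write_finished_cbs. A self-contained sketch of the threshold idea, with stand-in names rather than the gRPC types:

#include <stdbool.h>
#include <stdint.h>

typedef struct wcb {
  struct wcb *next;
  int64_t call_at_byte;   /* fire once this many bytes have flowed */
  void (*fire)(void);
} wcb;

/* Add `sent` bytes to *ctr, fire every callback whose threshold is now met,
   keep the rest queued, and report whether anything fired (like update_list). */
static bool advance(wcb **list, int64_t *ctr, int64_t sent) {
  bool fired_any = false;
  wcb *still_pending = NULL;
  *ctr += sent;
  while (*list) {
    wcb *cb = *list;
    *list = cb->next;
    if (cb->call_at_byte <= *ctr) {
      cb->fire();
      fired_any = true;
    } else {
      cb->next = still_pending;   /* threshold not reached: re-queue */
      still_pending = cb;
    }
  }
  *list = still_pending;
  return fired_any;
}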
diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.c
index b2d6f2d0c9..036853a53b 100644
--- a/src/core/ext/transport/inproc/inproc_transport.c
+++ b/src/core/ext/transport/inproc/inproc_transport.c
@@ -120,7 +120,7 @@ static void slice_buffer_list_append_entry(slice_buffer_list *l,
}
static grpc_slice_buffer *slice_buffer_list_append(slice_buffer_list *l) {
- sb_list_entry *next = gpr_malloc(sizeof(*next));
+ sb_list_entry *next = (sb_list_entry *)gpr_malloc(sizeof(*next));
grpc_slice_buffer_init(&next->sb);
slice_buffer_list_append_entry(l, next);
return &next->sb;
@@ -327,7 +327,8 @@ static grpc_error *fill_in_metadata(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_error *error = GRPC_ERROR_NONE;
for (grpc_linked_mdelem *elem = metadata->list.head;
(elem != NULL) && (error == GRPC_ERROR_NONE); elem = elem->next) {
- grpc_linked_mdelem *nelem = gpr_arena_alloc(s->arena, sizeof(*nelem));
+ grpc_linked_mdelem *nelem =
+ (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*nelem));
nelem->md = grpc_mdelem_from_slices(
exec_ctx, grpc_slice_intern(GRPC_MDKEY(elem->md)),
grpc_slice_intern(GRPC_MDVALUE(elem->md)));
@@ -531,12 +532,14 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
// since it expects that as well as no error yet
grpc_metadata_batch fake_md;
grpc_metadata_batch_init(&fake_md);
- grpc_linked_mdelem *path_md = gpr_arena_alloc(s->arena, sizeof(*path_md));
+ grpc_linked_mdelem *path_md =
+ (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*path_md));
path_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_path_key, g_fake_path_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, path_md) ==
GRPC_ERROR_NONE);
- grpc_linked_mdelem *auth_md = gpr_arena_alloc(s->arena, sizeof(*auth_md));
+ grpc_linked_mdelem *auth_md =
+ (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*auth_md));
auth_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_auth_key, g_fake_auth_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, auth_md) ==
@@ -1172,8 +1175,8 @@ static void inproc_transports_create(grpc_exec_ctx *exec_ctx,
grpc_transport **client_transport,
const grpc_channel_args *client_args) {
INPROC_LOG(GPR_DEBUG, "inproc_transports_create");
- inproc_transport *st = gpr_zalloc(sizeof(*st));
- inproc_transport *ct = gpr_zalloc(sizeof(*ct));
+ inproc_transport *st = (inproc_transport *)gpr_zalloc(sizeof(*st));
+ inproc_transport *ct = (inproc_transport *)gpr_zalloc(sizeof(*ct));
// Share one lock between both sides since both sides get affected
st->mu = ct->mu = gpr_malloc(sizeof(*st->mu));
gpr_mu_init(&st->mu->mu);
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index 02db798b5c..16d0737b40 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -86,13 +86,14 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
}
}
// Create result.
- grpc_channel_args *dst = gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args *dst =
+ (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
dst->num_args = num_args_to_copy + num_to_add;
if (dst->num_args == 0) {
dst->args = NULL;
return dst;
}
- dst->args = gpr_malloc(sizeof(grpc_arg) * dst->num_args);
+ dst->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * dst->num_args);
// Copy args from src that are not being removed.
size_t dst_idx = 0;
if (src != NULL) {
@@ -117,7 +118,7 @@ grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src) {
grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a,
const grpc_channel_args *b) {
const size_t max_out = (a->num_args + b->num_args);
- grpc_arg *uniques = gpr_malloc(sizeof(*uniques) * max_out);
+ grpc_arg *uniques = (grpc_arg *)gpr_malloc(sizeof(*uniques) * max_out);
for (size_t i = 0; i < a->num_args; ++i) uniques[i] = a->args[i];
size_t uniques_idx = a->num_args;
@@ -160,24 +161,25 @@ static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
/* stabilizing comparison function: since channel_args ordering matters for
* keys with the same name, we need to preserve that ordering */
static int cmp_key_stable(const void *ap, const void *bp) {
- const grpc_arg *const *a = ap;
- const grpc_arg *const *b = bp;
+ const grpc_arg *const *a = (const grpc_arg *const *)ap;
+ const grpc_arg *const *b = (const grpc_arg *const *)bp;
int c = strcmp((*a)->key, (*b)->key);
if (c == 0) c = GPR_ICMP(*a, *b);
return c;
}
grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
- grpc_arg **args = gpr_malloc(sizeof(grpc_arg *) * a->num_args);
+ grpc_arg **args = (grpc_arg **)gpr_malloc(sizeof(grpc_arg *) * a->num_args);
for (size_t i = 0; i < a->num_args; i++) {
args[i] = &a->args[i];
}
if (a->num_args > 1)
qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable);
- grpc_channel_args *b = gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args *b =
+ (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
b->num_args = a->num_args;
- b->args = gpr_malloc(sizeof(grpc_arg) * b->num_args);
+ b->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * b->num_args);
for (size_t i = 0; i < a->num_args; i++) {
b->args[i] = copy_arg(args[i]);
}
diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c
index 7f2b8e07ce..df45f13c81 100644
--- a/src/core/lib/channel/channel_stack_builder.c
+++ b/src/core/lib/channel/channel_stack_builder.c
@@ -51,7 +51,8 @@ struct grpc_channel_stack_builder_iterator {
};
grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
- grpc_channel_stack_builder *b = gpr_zalloc(sizeof(*b));
+ grpc_channel_stack_builder *b =
+ (grpc_channel_stack_builder *)gpr_zalloc(sizeof(*b));
b->begin.filter = NULL;
b->end.filter = NULL;
@@ -76,7 +77,8 @@ const char *grpc_channel_stack_builder_get_target(
static grpc_channel_stack_builder_iterator *create_iterator_at_filter_node(
grpc_channel_stack_builder *builder, filter_node *node) {
- grpc_channel_stack_builder_iterator *it = gpr_malloc(sizeof(*it));
+ grpc_channel_stack_builder_iterator *it =
+ (grpc_channel_stack_builder_iterator *)gpr_malloc(sizeof(*it));
it->builder = builder;
it->node = node;
return it;
@@ -212,7 +214,7 @@ bool grpc_channel_stack_builder_prepend_filter(
static void add_after(filter_node *before, const grpc_channel_filter *filter,
grpc_post_filter_create_init_func post_init_func,
void *user_data) {
- filter_node *new = gpr_malloc(sizeof(*new));
+ filter_node *new = (filter_node *)gpr_malloc(sizeof(*new));
new->next = before->next;
new->prev = before;
new->next->prev = new->prev->next = new;
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index 8285226fc4..4f37908958 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -100,8 +100,8 @@ static callback_state *get_state_for_batch(
static void con_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (batch->recv_initial_metadata) {
callback_state *state = &calld->recv_initial_metadata_ready;
intercept_callback(
@@ -136,7 +136,7 @@ static void con_start_transport_stream_op_batch(
static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_perform_op(exec_ctx, chand->transport, op);
}
@@ -144,8 +144,8 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
@@ -158,8 +158,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_set_pops(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
}
@@ -168,8 +168,8 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
then_schedule_closure);
@@ -218,7 +218,7 @@ static void bind_transport(grpc_channel_stack *channel_stack,
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_filter);
GPR_ASSERT(cd->transport == NULL);
- cd->transport = t;
+ cd->transport = (grpc_transport *)t;
/* HACK(ctiller): increase call stack size for the channel to make space
for channel data. We need a cleaner (but performant) way to do this,
@@ -226,7 +226,8 @@ static void bind_transport(grpc_channel_stack *channel_stack,
This is only "safe" because call stacks place no additional data after
the last call element, and the last call element MUST be the connected
channel. */
- channel_stack->call_stack_size += grpc_transport_stream_size(t);
+ channel_stack->call_stack_size +=
+ grpc_transport_stream_size((grpc_transport *)t);
}
bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
@@ -240,6 +241,6 @@ bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
}
grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 2cb83f4114..1753da5721 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -84,7 +84,8 @@ struct grpc_handshake_manager {
};
grpc_handshake_manager* grpc_handshake_manager_create() {
- grpc_handshake_manager* mgr = gpr_zalloc(sizeof(grpc_handshake_manager));
+ grpc_handshake_manager* mgr =
+ (grpc_handshake_manager*)gpr_zalloc(sizeof(grpc_handshake_manager));
gpr_mu_init(&mgr->mu);
gpr_ref_init(&mgr->refs, 1);
return mgr;
@@ -137,8 +138,8 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
realloc_count = mgr->count * 2;
}
if (realloc_count > 0) {
- mgr->handshakers =
- gpr_realloc(mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
+ mgr->handshakers = (grpc_handshaker**)gpr_realloc(
+ mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
}
mgr->handshakers[mgr->count++] = handshaker;
gpr_mu_unlock(&mgr->mu);
@@ -205,7 +206,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
// handshakers together.
static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- grpc_handshake_manager* mgr = arg;
+ grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
gpr_mu_lock(&mgr->mu);
bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error));
gpr_mu_unlock(&mgr->mu);
@@ -219,7 +220,7 @@ static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked when deadline is exceeded.
static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
- grpc_handshake_manager* mgr = arg;
+ grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled.
grpc_handshake_manager_shutdown(
exec_ctx, mgr,
@@ -241,7 +242,8 @@ void grpc_handshake_manager_do_handshake(
mgr->args.endpoint = endpoint;
mgr->args.args = grpc_channel_args_copy(channel_args);
mgr->args.user_data = user_data;
- mgr->args.read_buffer = gpr_malloc(sizeof(*mgr->args.read_buffer));
+ mgr->args.read_buffer =
+ (grpc_slice_buffer*)gpr_malloc(sizeof(*mgr->args.read_buffer));
grpc_slice_buffer_init(mgr->args.read_buffer);
// Initialize state needed for calling handshakers.
mgr->acceptor = acceptor;
diff --git a/src/core/lib/channel/handshaker_registry.c b/src/core/lib/channel/handshaker_registry.c
index 8c4bc3aa00..c6bc87d704 100644
--- a/src/core/lib/channel/handshaker_registry.c
+++ b/src/core/lib/channel/handshaker_registry.c
@@ -34,7 +34,7 @@ typedef struct {
static void grpc_handshaker_factory_list_register(
grpc_handshaker_factory_list* list, bool at_start,
grpc_handshaker_factory* factory) {
- list->list = gpr_realloc(
+ list->list = (grpc_handshaker_factory**)gpr_realloc(
list->list, (list->num_factories + 1) * sizeof(grpc_handshaker_factory*));
if (at_start) {
memmove(list->list + 1, list->list,
diff --git a/src/core/lib/debug/stats.c b/src/core/lib/debug/stats.c
index 91ca0aa76e..4096384dd9 100644
--- a/src/core/lib/debug/stats.c
+++ b/src/core/lib/debug/stats.c
@@ -33,7 +33,7 @@ static size_t g_num_cores;
void grpc_stats_init(void) {
g_num_cores = GPR_MAX(1, gpr_cpu_num_cores());
grpc_stats_per_cpu_storage =
- gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
+ (grpc_stats_data *)gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
}
void grpc_stats_shutdown(void) { gpr_free(grpc_stats_per_cpu_storage); }
diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c
index 969cac9d89..73e9dd3d5f 100644
--- a/src/core/lib/debug/stats_data.c
+++ b/src/core/lib/debug/stats_data.c
@@ -36,6 +36,8 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"histogram_slow_lookups",
"syscall_write",
"syscall_read",
+ "tcp_backup_pollers_created",
+ "tcp_backup_poller_polls",
"http2_op_batches",
"http2_op_cancel",
"http2_op_send_initial_metadata",
@@ -44,16 +46,22 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"http2_op_recv_initial_metadata",
"http2_op_recv_message",
"http2_op_recv_trailing_metadata",
+ "http2_settings_writes",
"http2_pings_sent",
"http2_writes_begun",
+ "http2_writes_offloaded",
+ "http2_writes_continued",
+ "http2_partial_writes",
"combiner_locks_initiated",
"combiner_locks_scheduled_items",
"combiner_locks_scheduled_final_items",
"combiner_locks_offloaded",
- "executor_scheduled_items",
+ "executor_scheduled_short_items",
+ "executor_scheduled_long_items",
"executor_scheduled_to_self",
"executor_wakeup_initiated",
"executor_queue_drained",
+ "executor_push_retries",
};
const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client side calls created by this process",
@@ -75,6 +83,8 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of write syscalls (or equivalent - eg sendmsg) made by this "
"process",
"Number of read syscalls (or equivalent - eg recvmsg) made by this process",
+ "Number of times a backup poller has been created (this can be expensive)",
+ "Number of polls performed on the backup poller",
"Number of batches received by HTTP2 transport",
"Number of cancelations received by HTTP2 transport",
"Number of batches containing send initial metadata",
@@ -83,21 +93,41 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of batches containing receive initial metadata",
"Number of batches containing receive message",
"Number of batches containing receive trailing metadata",
- "Number of HTTP2 pings sent by process", "Number of HTTP2 writes initiated",
+ "Number of settings frames sent", "Number of HTTP2 pings sent by process",
+ "Number of HTTP2 writes initiated",
+ "Number of HTTP2 writes offloaded to the executor from application threads",
+ "Number of HTTP2 writes that finished seeing more data needed to be "
+ "written",
+ "Number of HTTP2 writes that were made knowing there was still more data "
+ "to be written (we cap maximum write size to syscall_write)",
"Number of combiner lock entries by process (first items queued to a "
"combiner)",
"Number of items scheduled against combiner locks",
"Number of final items scheduled against combiner locks",
"Number of combiner locks offloaded to different threads",
- "Number of closures scheduled against the executor (gRPC thread pool)",
+ "Number of finite runtime closures scheduled against the executor (gRPC "
+ "thread pool)",
+ "Number of potentially infinite runtime closures scheduled against the "
+ "executor (gRPC thread pool)",
"Number of closures scheduled by the executor to the executor",
"Number of thread wakeups initiated within the executor",
"Number of times an executor queue was drained",
+ "Number of times we raced and were forced to retry pushing a closure to "
+ "the executor",
};
const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
- "call_initial_size", "poll_events_returned", "tcp_write_size",
- "tcp_write_iov_size", "tcp_read_size", "tcp_read_offer",
- "tcp_read_offer_iov_size", "http2_send_message_size",
+ "call_initial_size",
+ "poll_events_returned",
+ "tcp_write_size",
+ "tcp_write_iov_size",
+ "tcp_read_size",
+ "tcp_read_offer",
+ "tcp_read_offer_iov_size",
+ "http2_send_message_size",
+ "http2_send_initial_metadata_per_write",
+ "http2_send_message_per_write",
+ "http2_send_trailing_metadata_per_write",
+ "http2_send_flowctl_per_write",
};
const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Initial size of the grpc_call arena created at call start",
@@ -108,6 +138,10 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of bytes offered to each syscall_read",
"Number of byte segments offered to each syscall_read",
"Size of messages received by HTTP2 transport",
+ "Number of streams initiated written per TCP write",
+ "Number of streams whose payload was written per TCP write",
+ "Number of streams terminated per TCP write",
+ "Number of flow control updates written per TCP write",
};
const int grpc_stats_table_0[65] = {
0, 1, 2, 3, 4, 5, 7, 9, 11, 14,
@@ -379,13 +413,128 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_4, 64));
}
-const int grpc_stats_histo_buckets[8] = {64, 128, 64, 64, 64, 64, 64, 64};
-const int grpc_stats_histo_start[8] = {0, 64, 192, 256, 320, 384, 448, 512};
-const int *const grpc_stats_histo_bucket_boundaries[8] = {
+void grpc_stats_inc_http2_send_initial_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
+ 64));
+}
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+ int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
+ 64));
+}
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
+ int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+const int grpc_stats_histo_buckets[12] = {64, 128, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64};
+const int grpc_stats_histo_start[12] = {0, 64, 192, 256, 320, 384,
+ 448, 512, 576, 640, 704, 768};
+const int *const grpc_stats_histo_bucket_boundaries[12] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
- grpc_stats_table_6, grpc_stats_table_4};
-void (*const grpc_stats_inc_histogram[8])(grpc_exec_ctx *exec_ctx, int x) = {
+ grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
+ grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6};
+void (*const grpc_stats_inc_histogram[12])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_call_initial_size,
grpc_stats_inc_poll_events_returned,
grpc_stats_inc_tcp_write_size,
@@ -393,4 +542,8 @@ void (*const grpc_stats_inc_histogram[8])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_tcp_read_size,
grpc_stats_inc_tcp_read_offer,
grpc_stats_inc_tcp_read_offer_iov_size,
- grpc_stats_inc_http2_send_message_size};
+ grpc_stats_inc_http2_send_message_size,
+ grpc_stats_inc_http2_send_initial_metadata_per_write,
+ grpc_stats_inc_http2_send_message_per_write,
+ grpc_stats_inc_http2_send_trailing_metadata_per_write,
+ grpc_stats_inc_http2_send_flowctl_per_write};
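Each generated grpc_stats_inc_*_per_write function above uses the same bucketing scheme: values below 13 get one bucket apiece, and larger values are bucketed on a log-like scale by reinterpreting the value as a double and using its exponent bits to index a precomputed table, with one compare against the bucket boundary to fix rounding. A simplified, self-contained sketch of the exponent trick; it uses no gRPC tables, and linear_limit plus the offset arithmetic are illustrative, not the generated constants:

#include <stdint.h>
#include <stdio.h>

/* Map value -> bucket: one bucket per value below linear_limit, then roughly
   one bucket per power of two, read straight from the double's exponent. */
static int histogram_bucket(int value, int linear_limit) {
  if (value < linear_limit) return value;
  union { double dbl; uint64_t bits; } v;
  v.dbl = (double)value;
  int exponent = (int)((v.bits >> 52) & 0x7ff) - 1023; /* floor(log2(value)) */
  return linear_limit + (exponent - 3); /* 3 ~= floor(log2(linear_limit)) */
}

int main(void) {
  for (int v = 0; v <= 1024; v = v ? v * 2 : 1) {
    printf("value %4d -> bucket %d\n", v, histogram_bucket(v, 13));
  }
  return 0;
}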
diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h
index 27f0aa8ba2..080234716c 100644
--- a/src/core/lib/debug/stats_data.h
+++ b/src/core/lib/debug/stats_data.h
@@ -38,6 +38,8 @@ typedef enum {
GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
GRPC_STATS_COUNTER_SYSCALL_WRITE,
GRPC_STATS_COUNTER_SYSCALL_READ,
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED,
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS,
GRPC_STATS_COUNTER_HTTP2_OP_BATCHES,
GRPC_STATS_COUNTER_HTTP2_OP_CANCEL,
GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA,
@@ -46,16 +48,22 @@ typedef enum {
GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES,
GRPC_STATS_COUNTER_HTTP2_PINGS_SENT,
GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED,
+ GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES,
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED,
- GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF,
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
+ GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
GRPC_STATS_COUNTER_COUNT
} grpc_stats_counters;
extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
@@ -69,6 +77,10 @@ typedef enum {
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
@@ -90,7 +102,15 @@ typedef enum {
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 512,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
- GRPC_STATS_HISTOGRAM_BUCKETS = 576
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 576,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 640,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 704,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_BUCKETS = 832
} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
@@ -119,6 +139,11 @@ typedef enum {
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \
@@ -139,10 +164,18 @@ typedef enum {
#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
+#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
+#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
+#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
@@ -155,9 +188,12 @@ typedef enum {
#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
-#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx) \
- GRPC_STATS_INC_COUNTER((exec_ctx), \
- GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
@@ -166,6 +202,8 @@ typedef enum {
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
+#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \
grpc_stats_inc_call_initial_size((exec_ctx), (int)(value))
void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int x);
@@ -190,10 +228,27 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \
grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x);
-extern const int grpc_stats_histo_buckets[8];
-extern const int grpc_stats_histo_start[8];
-extern const int *const grpc_stats_histo_bucket_boundaries[8];
-extern void (*const grpc_stats_inc_histogram[8])(grpc_exec_ctx *exec_ctx,
+#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_initial_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+ int x);
+#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx), \
+ (int)(value))
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int x);
+extern const int grpc_stats_histo_buckets[12];
+extern const int grpc_stats_histo_start[12];
+extern const int *const grpc_stats_histo_bucket_boundaries[12];
+extern void (*const grpc_stats_inc_histogram[12])(grpc_exec_ctx *exec_ctx,
+ int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml
index ddf1294a78..31f4618f48 100644
--- a/src/core/lib/debug/stats_data.yaml
+++ b/src/core/lib/debug/stats_data.yaml
@@ -78,6 +78,10 @@
max: 1024
buckets: 64
doc: Number of byte segments offered to each syscall_read
+- counter: tcp_backup_pollers_created
+ doc: Number of times a backup poller has been created (this can be expensive)
+- counter: tcp_backup_poller_polls
+ doc: Number of polls performed on the backup poller
# chttp2
- counter: http2_op_batches
doc: Number of batches received by HTTP2 transport
@@ -99,10 +103,36 @@
max: 16777216
buckets: 64
doc: Size of messages received by HTTP2 transport
+- histogram: http2_send_initial_metadata_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams whose initial metadata was written per TCP write
+- histogram: http2_send_message_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams whose payload was written per TCP write
+- histogram: http2_send_trailing_metadata_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams terminated per TCP write
+- histogram: http2_send_flowctl_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of flow control updates written per TCP write
+- counter: http2_settings_writes
+ doc: Number of settings frames sent
- counter: http2_pings_sent
doc: Number of HTTP2 pings sent by process
- counter: http2_writes_begun
doc: Number of HTTP2 writes initiated
+- counter: http2_writes_offloaded
+ doc: Number of HTTP2 writes offloaded to the executor from application threads
+- counter: http2_writes_continued
+ doc: Number of HTTP2 writes that, on finishing, found more data still
+ needing to be written
+- counter: http2_partial_writes
+ doc: Number of HTTP2 writes that were made knowing there was still more data
+ to be written (we cap maximum write size to syscall_write)
# combiner locks
- counter: combiner_locks_initiated
doc: Number of combiner lock entries by process
@@ -114,11 +144,18 @@
- counter: combiner_locks_offloaded
doc: Number of combiner locks offloaded to different threads
# executor
-- counter: executor_scheduled_items
- doc: Number of closures scheduled against the executor (gRPC thread pool)
+- counter: executor_scheduled_short_items
+ doc: Number of finite runtime closures scheduled against the executor
+ (gRPC thread pool)
+- counter: executor_scheduled_long_items
+ doc: Number of potentially infinite runtime closures scheduled against the
+ executor (gRPC thread pool)
- counter: executor_scheduled_to_self
doc: Number of closures scheduled by the executor to the executor
- counter: executor_wakeup_initiated
doc: Number of thread wakeups initiated within the executor
- counter: executor_queue_drained
doc: Number of times an executor queue was drained
+- counter: executor_push_retries
+ doc: Number of times we raced and were forced to retry pushing a closure to
+ the executor
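Each counter and histogram declared in stats_data.yaml surfaces as one of the generated GRPC_STATS_INC_* macros shown in stats_data.h above. A minimal sketch of a call site, assuming the surrounding gRPC tree and a caller-provided grpc_exec_ctx; the helper name and the flowctl_updates argument are illustrative only:

    /* Sketch only: exercising the generated macros. exec_ctx is the caller's
       grpc_exec_ctx; flowctl_updates is a count gathered by a (hypothetical)
       surrounding write path. */
    #include "src/core/lib/debug/stats.h"

    static void record_settings_write(grpc_exec_ctx *exec_ctx,
                                      int flowctl_updates) {
      /* plain counter: one settings frame went out in this write */
      GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
      /* histogram: flow control updates carried by this TCP write */
      GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, flowctl_updates);
    }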
diff --git a/src/core/lib/debug/trace.c b/src/core/lib/debug/trace.c
index c6c1853e20..7cb2789a19 100644
--- a/src/core/lib/debug/trace.c
+++ b/src/core/lib/debug/trace.c
@@ -39,7 +39,7 @@ static tracer *tracers;
#endif
void grpc_register_tracer(grpc_tracer_flag *flag) {
- tracer *t = gpr_malloc(sizeof(*t));
+ tracer *t = (tracer *)gpr_malloc(sizeof(*t));
t->flag = flag;
t->next = tracers;
TRACER_SET(*flag, false);
@@ -53,10 +53,10 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
- s = gpr_malloc(len + 1);
+ s = (char *)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
- *ss = gpr_realloc(*ss, sizeof(char **) * np);
+ *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}
diff --git a/src/core/lib/http/format_request.c b/src/core/lib/http/format_request.c
index f887726eea..88fb0ab0b6 100644
--- a/src/core/lib/http/format_request.c
+++ b/src/core/lib/http/format_request.c
@@ -98,7 +98,7 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
gpr_strvec_destroy(&out);
if (body_bytes) {
- tmp = gpr_realloc(tmp, out_len + body_size);
+ tmp = (char *)gpr_realloc(tmp, out_len + body_size);
memcpy(tmp + out_len, body_bytes, body_size);
out_len += body_size;
}
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 77af7b7c08..84cc39604c 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -130,7 +130,7 @@ static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
- internal_request *req = user_data;
+ internal_request *req = (internal_request *)user_data;
size_t i;
for (i = 0; i < req->incoming.count; i++) {
@@ -159,7 +159,7 @@ static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
}
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (error == GRPC_ERROR_NONE) {
on_written(exec_ctx, req);
} else {
@@ -175,7 +175,7 @@ static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *ep) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (!ep) {
next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -189,7 +189,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (!req->ep) {
next_address(exec_ctx, req, GRPC_ERROR_REF(error));
@@ -226,7 +226,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (error != GRPC_ERROR_NONE) {
finish(exec_ctx, req, GRPC_ERROR_REF(error));
return;
@@ -243,7 +243,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
const char *name, grpc_slice request_text) {
- internal_request *req = gpr_malloc(sizeof(internal_request));
+ internal_request *req =
+ (internal_request *)gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
req->request_text = request_text;
grpc_http_parser_init(&req->parser, GRPC_HTTP_RESPONSE, response);
diff --git a/src/core/lib/http/parser.c b/src/core/lib/http/parser.c
index 9c5e93f4e5..0950bd655e 100644
--- a/src/core/lib/http/parser.c
+++ b/src/core/lib/http/parser.c
@@ -28,7 +28,7 @@
grpc_tracer_flag grpc_http1_trace = GRPC_TRACER_INITIALIZER(false, "http1");
static char *buf2str(void *buffer, size_t length) {
- char *out = gpr_malloc(length + 1);
+ char *out = (char *)gpr_malloc(length + 1);
memcpy(out, buffer, length);
out[length] = 0;
return out;
@@ -197,7 +197,8 @@ static grpc_error *add_header(grpc_http_parser *parser) {
if (*hdr_count == parser->hdr_capacity) {
parser->hdr_capacity =
GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2);
- *hdrs = gpr_realloc(*hdrs, parser->hdr_capacity * sizeof(**hdrs));
+ *hdrs = (grpc_http_header *)gpr_realloc(
+ *hdrs, parser->hdr_capacity * sizeof(**hdrs));
}
(*hdrs)[(*hdr_count)++] = hdr;
@@ -255,7 +256,7 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
if (*body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
- *body = gpr_realloc((void *)*body, parser->body_capacity);
+ *body = (char *)gpr_realloc((void *)*body, parser->body_capacity);
}
(*body)[*body_length] = (char)byte;
(*body_length)++;
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index 26f9cbe0fa..7236e23cf7 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -109,7 +109,7 @@ typedef struct {
static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- wrapped_closure *wc = arg;
+ wrapped_closure *wc = (wrapped_closure *)arg;
grpc_iomgr_cb_func cb = wc->cb;
void *cb_arg = wc->cb_arg;
gpr_free(wc);
@@ -124,7 +124,7 @@ grpc_closure *grpc_closure_create(const char *file, int line,
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler) {
#endif
- wrapped_closure *wc = gpr_malloc(sizeof(*wc));
+ wrapped_closure *wc = (wrapped_closure *)gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
#ifndef NDEBUG
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 4c1503bddb..360967f3ba 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -74,14 +74,15 @@ static const grpc_closure_scheduler_vtable finally_scheduler = {
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
grpc_combiner *grpc_combiner_create(void) {
- grpc_combiner *lock = gpr_zalloc(sizeof(*lock));
+ grpc_combiner *lock = (grpc_combiner *)gpr_zalloc(sizeof(*lock));
gpr_ref_init(&lock->refs, 1);
lock->scheduler.vtable = &scheduler;
lock->finally_scheduler.vtable = &finally_scheduler;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
- GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler);
+ GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -193,7 +194,7 @@ static void move_next(grpc_exec_ctx *exec_ctx) {
}
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_combiner *lock = arg;
+ grpc_combiner *lock = (grpc_combiner *)arg;
push_last_on_exec_ctx(exec_ctx, lock);
}
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 3759dda992..dcd175a2e1 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -211,7 +211,7 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
#ifndef NDEBUG
grpc_error *orig = *err;
#endif
- *err = gpr_realloc(
+ *err = (grpc_error *)gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
@@ -406,7 +406,8 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) {
new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
}
- out = gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t));
+ out = (grpc_error *)gpr_malloc(sizeof(*in) +
+ new_arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
@@ -530,7 +531,7 @@ typedef struct {
static void append_chr(char c, char **s, size_t *sz, size_t *cap) {
if (*sz == *cap) {
*cap = GPR_MAX(8, 3 * *cap / 2);
- *s = gpr_realloc(*s, *cap);
+ *s = (char *)gpr_realloc(*s, *cap);
}
(*s)[(*sz)++] = c;
}
@@ -582,7 +583,8 @@ static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
static void append_kv(kv_pairs *kvs, char *key, char *value) {
if (kvs->num_kvs == kvs->cap_kvs) {
kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4);
- kvs->kvs = gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
+ kvs->kvs =
+ (kv_pair *)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
}
kvs->kvs[kvs->num_kvs].key = key;
kvs->kvs[kvs->num_kvs].value = value;
@@ -695,8 +697,8 @@ static char *errs_string(grpc_error *err) {
}
static int cmp_kvs(const void *a, const void *b) {
- const kv_pair *ka = a;
- const kv_pair *kb = b;
+ const kv_pair *ka = (const kv_pair *)a;
+ const kv_pair *kb = (const kv_pair *)b;
return strcmp(ka->key, kb->key);
}
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c
index ef5dfd4429..322fa160d0 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.c
+++ b/src/core/lib/iomgr/ev_epoll1_linux.c
@@ -260,7 +260,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
new_fd->fd = fd;
@@ -442,8 +442,8 @@ static grpc_error *pollset_global_init(void) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
g_num_neighbourhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBOURHOODS);
- g_neighbourhoods =
- gpr_zalloc(sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
+ g_neighbourhoods = (pollset_neighbourhood *)gpr_zalloc(
+ sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
for (size_t i = 0; i < g_num_neighbourhoods; i++) {
gpr_mu_init(&g_neighbourhoods[i].mu);
}
diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c
index 165a562bd7..ea810d6429 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.c
+++ b/src/core/lib/iomgr/ev_epollex_linux.c
@@ -279,7 +279,7 @@ static void ref_by(grpc_fd *fd, int n) {
}
static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_fd *fd = arg;
+ grpc_fd *fd = (grpc_fd *)arg;
/* Add the fd to the freelist */
grpc_iomgr_unregister_object(&fd->iomgr_object);
pollable_destroy(&fd->pollable);
@@ -340,7 +340,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
pollable_init(&new_fd->pollable, PO_FD);
@@ -556,7 +556,7 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_unused) {
grpc_error *error = GRPC_ERROR_NONE;
- grpc_pollset *pollset = arg;
+ grpc_pollset *pollset = (grpc_pollset *)arg;
gpr_mu_lock(&pollset->pollable.po.mu);
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
@@ -1015,7 +1015,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
static void unref_fd_no_longer_poller(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_fd *fd = arg;
+ grpc_fd *fd = (grpc_fd *)arg;
UNREF_BY(exec_ctx, fd, 2, "pollset_pollable");
}
@@ -1084,7 +1084,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = gpr_zalloc(sizeof(*pss));
+ grpc_pollset_set *pss = (grpc_pollset_set *)gpr_zalloc(sizeof(*pss));
po_init(&pss->po, PO_POLLSET_SET);
return pss;
}
@@ -1246,7 +1246,7 @@ static void pg_broadcast(grpc_exec_ctx *exec_ctx, polling_group *from,
static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po,
size_t initial_po_count) {
/* assumes all polling objects in initial_po are locked */
- polling_group *pg = gpr_malloc(sizeof(*pg));
+ polling_group *pg = (polling_group *)gpr_malloc(sizeof(*pg));
po_init(&pg->po, PO_POLLING_GROUP);
gpr_ref_init(&pg->refs, (int)initial_po_count);
for (size_t i = 0; i < initial_po_count; i++) {
@@ -1356,7 +1356,7 @@ static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a,
gpr_mu_lock(&po->mu);
if (unref_count == unref_cap) {
unref_cap = GPR_MAX(8, 3 * unref_cap / 2);
- unref = gpr_realloc(unref, unref_cap * sizeof(*unref));
+ unref = (polling_group **)gpr_realloc(unref, unref_cap * sizeof(*unref));
}
unref[unref_count++] = po->group;
po->group = pg_ref(a);
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.c
index 6cc9645526..f5d00cbc8c 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.c
+++ b/src/core/lib/iomgr/ev_epollsig_linux.c
@@ -363,7 +363,8 @@ static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
if (pi->fd_cnt == pi->fd_capacity) {
pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
- pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
+ pi->fds =
+ (grpc_fd **)gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
}
pi->fds[pi->fd_cnt++] = fds[i];
@@ -466,7 +467,7 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
- pi = gpr_malloc(sizeof(*pi));
+ pi = (polling_island *)gpr_malloc(sizeof(*pi));
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
@@ -810,7 +811,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
gpr_mu_init(&new_fd->po.mu);
}
@@ -1274,7 +1275,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
to the function pollset_work_and_unlock() will pick up the correct
epoll_fd */
} else {
- grpc_fd *fd = data_ptr;
+ grpc_fd *fd = (grpc_fd *)data_ptr;
int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
@@ -1570,7 +1571,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
+ grpc_pollset_set *pss = (grpc_pollset_set *)gpr_malloc(sizeof(*pss));
gpr_mu_init(&pss->po.mu);
pss->po.pi = NULL;
#ifndef NDEBUG
@@ -1648,8 +1649,8 @@ void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
}
bool grpc_are_polling_islands_equal(void *p, void *q) {
- polling_island *p1 = p;
- polling_island *p2 = q;
+ polling_island *p1 = (polling_island *)p;
+ polling_island *p2 = (polling_island *)q;
/* Note: polling_island_lock_pair() may change p1 and p2 to point to the
latest polling islands in their respective linked lists */
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 78817144af..15968d23ba 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -327,7 +327,7 @@ static void unref_by(grpc_fd *fd, int n) {
}
static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *r = gpr_malloc(sizeof(*r));
+ grpc_fd *r = (grpc_fd *)gpr_malloc(sizeof(*r));
gpr_mu_init(&r->mu);
gpr_atm_rel_store(&r->refst, 1);
r->shutdown = 0;
@@ -846,8 +846,8 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (pollset->fd_count == pollset->fd_capacity) {
pollset->fd_capacity =
GPR_MAX(pollset->fd_capacity + 8, pollset->fd_count * 3 / 2);
- pollset->fds =
- gpr_realloc(pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity);
+ pollset->fds = (grpc_fd **)gpr_realloc(
+ pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity);
}
pollset->fds[pollset->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
@@ -899,7 +899,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
worker.wakeup_fd = pollset->local_wakeup_cache;
pollset->local_wakeup_cache = worker.wakeup_fd->next;
} else {
- worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
+ worker.wakeup_fd =
+ (grpc_cached_wakeup_fd *)gpr_malloc(sizeof(*worker.wakeup_fd));
error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
@@ -954,8 +955,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
const size_t pfd_size = sizeof(*pfds) * (pollset->fd_count + 2);
const size_t watch_size = sizeof(*watchers) * (pollset->fd_count + 2);
void *buf = gpr_malloc(pfd_size + watch_size);
- pfds = buf;
- watchers = (void *)((char *)buf + pfd_size);
+ pfds = (struct pollfd *)buf;
+ watchers = (grpc_fd_watcher *)(void *)((char *)buf + pfd_size);
}
fd_count = 0;
@@ -992,6 +993,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
+ }
+
if (r < 0) {
if (errno != EINTR) {
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
@@ -1012,6 +1017,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset);
+ }
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}
@@ -1019,6 +1027,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
+ pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
+ (pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
+ }
fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
@@ -1135,7 +1148,8 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pollset_set = gpr_zalloc(sizeof(*pollset_set));
+ grpc_pollset_set *pollset_set =
+ (grpc_pollset_set *)gpr_zalloc(sizeof(*pollset_set));
gpr_mu_init(&pollset_set->mu);
return pollset_set;
}
@@ -1178,9 +1192,9 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
pollset_set->pollset_capacity =
GPR_MAX(8, 2 * pollset_set->pollset_capacity);
- pollset_set->pollsets =
- gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
- sizeof(*pollset_set->pollsets));
+ pollset_set->pollsets = (grpc_pollset **)gpr_realloc(
+ pollset_set->pollsets,
+ pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets));
}
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
@@ -1229,9 +1243,9 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&bag->mu);
if (bag->pollset_set_count == bag->pollset_set_capacity) {
bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
- bag->pollset_sets =
- gpr_realloc(bag->pollset_sets,
- bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
+ bag->pollset_sets = (grpc_pollset_set **)gpr_realloc(
+ bag->pollset_sets,
+ bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
}
bag->pollset_sets[bag->pollset_set_count++] = item;
for (i = 0, j = 0; i < bag->fd_count; i++) {
@@ -1268,7 +1282,7 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
- pollset_set->fds = gpr_realloc(
+ pollset_set->fds = (grpc_fd **)gpr_realloc(
pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
}
GRPC_FD_REF(fd, "pollset_set");
@@ -1322,11 +1336,12 @@ static void cache_insert_locked(poll_args *args) {
}
static void init_result(poll_args *pargs) {
- pargs->result = gpr_malloc(sizeof(poll_result));
+ pargs->result = (poll_result *)gpr_malloc(sizeof(poll_result));
gpr_ref_init(&pargs->result->refcount, 1);
pargs->result->watchers = NULL;
pargs->result->watchcount = 0;
- pargs->result->fds = gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
+ pargs->result->fds =
+ (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
memcpy(pargs->result->fds, pargs->fds, sizeof(struct pollfd) * pargs->nfds);
pargs->result->nfds = pargs->nfds;
pargs->result->retval = 0;
@@ -1365,7 +1380,7 @@ static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) {
return pargs;
}
- poll_args *pargs = gpr_malloc(sizeof(struct poll_args));
+ poll_args *pargs = (poll_args *)gpr_malloc(sizeof(struct poll_args));
gpr_cv_init(&pargs->trigger);
pargs->fds = fds;
pargs->nfds = count;
@@ -1412,7 +1427,8 @@ static void cache_poller_locked(poll_args *args) {
poll_args **old_active_pollers = poll_cache.active_pollers;
poll_cache.size = poll_cache.size * 2;
poll_cache.count = 0;
- poll_cache.active_pollers = gpr_malloc(sizeof(void *) * poll_cache.size);
+ poll_cache.active_pollers =
+ (poll_args **)gpr_malloc(sizeof(void *) * poll_cache.size);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}
@@ -1517,12 +1533,12 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
nfds_t nsockfds = 0;
poll_result *result = NULL;
gpr_mu_lock(&g_cvfds.mu);
- pollcv = gpr_malloc(sizeof(cv_node));
+ pollcv = (cv_node *)gpr_malloc(sizeof(cv_node));
pollcv->next = NULL;
gpr_cv pollcv_cv;
gpr_cv_init(&pollcv_cv);
pollcv->cv = &pollcv_cv;
- cv_node *fd_cvs = gpr_malloc(nfds * sizeof(cv_node));
+ cv_node *fd_cvs = (cv_node *)gpr_malloc(nfds * sizeof(cv_node));
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
@@ -1554,7 +1570,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
res = 0;
if (!skip_poll && nsockfds > 0) {
- struct pollfd *pollfds = gpr_malloc(sizeof(struct pollfd) * nsockfds);
+ struct pollfd *pollfds =
+ (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * nsockfds);
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd >= 0) {
@@ -1617,7 +1634,8 @@ static void global_cv_fd_table_init() {
gpr_cv_init(&g_cvfds.shutdown_cv);
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
- g_cvfds.cvfds = gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
+ g_cvfds.cvfds =
+ (fd_node *)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.free_fds = NULL;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
@@ -1634,7 +1652,7 @@ static void global_cv_fd_table_init() {
poll_cache.size = 32;
poll_cache.count = 0;
poll_cache.free_pollers = NULL;
- poll_cache.active_pollers = gpr_malloc(sizeof(void *) * 32);
+ poll_cache.active_pollers = (poll_args **)gpr_malloc(sizeof(void *) * 32);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}
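The added gpr_log calls above are gated on grpc_polling_trace, so they stay silent unless that tracer is switched on. A sketch of enabling the relevant tracers from application code, assuming the public grpc_tracer_set_enabled API in <grpc/grpc.h>; the "polling" name is assumed from grpc_polling_trace's registration elsewhere in the tree, while "executor" is the name registered in the executor change below:

    /* Sketch: enabling these tracers programmatically. Setting
       GRPC_TRACE=polling,executor in the environment should be equivalent.
       The gpr_log calls are emitted at GPR_DEBUG, so GRPC_VERBOSITY=debug is
       typically also required to see them. */
    #include <grpc/grpc.h>

    int main(void) {
      grpc_init(); /* tracers are registered during init */
      grpc_tracer_set_enabled("polling", 1);
      grpc_tracer_set_enabled("executor", 1);
      /* ... run the workload being debugged ... */
      grpc_shutdown();
      return 0;
    }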
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 2b0d01f8cd..4d3ae2228e 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -76,10 +76,10 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
- s = gpr_malloc(len + 1);
+ s = (char *)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
- *ss = gpr_realloc(*ss, sizeof(char **) * np);
+ *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index dd5cb2a64e..892385d7d7 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -40,6 +40,7 @@ typedef struct {
grpc_closure_list elems;
size_t depth;
bool shutdown;
+ bool queued_long_job;
gpr_thd_id id;
} thread_state;
@@ -50,6 +51,9 @@ static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
GPR_TLS_DECL(g_this_thread_state);
+static grpc_tracer_flag executor_trace =
+ GRPC_TRACER_INITIALIZER(false, "executor");
+
static void executor_thread(void *arg);
static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
@@ -59,6 +63,14 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
+ c->file_created, c->line_created);
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
+#endif
+ }
#ifndef NDEBUG
c->scheduled = false;
#endif
@@ -66,6 +78,7 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
GRPC_ERROR_UNREF(error);
c = next;
n++;
+ grpc_exec_ctx_flush(exec_ctx);
}
return n;
@@ -82,7 +95,8 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
gpr_atm_no_barrier_store(&g_cur_threads, 1);
gpr_tls_init(&g_this_thread_state);
- g_thread_state = gpr_zalloc(sizeof(thread_state) * g_max_threads);
+ g_thread_state =
+ (thread_state *)gpr_zalloc(sizeof(thread_state) * g_max_threads);
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_init(&g_thread_state[i].mu);
gpr_cv_init(&g_thread_state[i].cv);
@@ -120,6 +134,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
}
void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
+ grpc_register_tracer(&executor_trace);
gpr_atm_no_barrier_store(&g_cur_threads, 0);
grpc_executor_set_threading(exec_ctx, true);
}
@@ -129,7 +144,7 @@ void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
}
static void executor_thread(void *arg) {
- thread_state *ts = arg;
+ thread_state *ts = (thread_state *)arg;
gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
grpc_exec_ctx exec_ctx =
@@ -137,12 +152,21 @@ static void executor_thread(void *arg) {
size_t subtract_depth = 0;
for (;;) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
+ (int)(ts - g_thread_state), subtract_depth);
+ }
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
+ ts->queued_long_job = false;
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
if (ts->shutdown) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
+ (int)(ts - g_thread_state));
+ }
gpr_mu_unlock(&ts->mu);
break;
}
@@ -150,52 +174,128 @@ static void executor_thread(void *arg) {
grpc_closure_list exec = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
+ }
subtract_depth = run_closures(&exec_ctx, exec);
- grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
}
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
- GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx);
- if (cur_thread_count == 0) {
- grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
- return;
- }
- thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
- if (ts == NULL) {
- ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
+ grpc_error *error, bool is_short) {
+ bool retry_push;
+ if (is_short) {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
} else {
- GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
- }
- gpr_mu_lock(&ts->mu);
- if (grpc_closure_list_empty(ts->elems)) {
- GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
- gpr_cv_signal(&ts->cv);
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
}
- grpc_closure_list_append(&ts->elems, closure, error);
- ts->depth++;
- bool try_new_thread = ts->depth > MAX_DEPTH &&
- cur_thread_count < g_max_threads && !ts->shutdown;
- gpr_mu_unlock(&ts->mu);
- if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
- cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
- if (cur_thread_count < g_max_threads) {
- gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
-
- gpr_thd_options opt = gpr_thd_options_default();
- gpr_thd_options_set_joinable(&opt);
- gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
- &g_thread_state[cur_thread_count], &opt);
+ do {
+ retry_push = false;
+ size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count == 0) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
+ closure, closure->file_created, closure->line_created);
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
+#endif
+ }
+ grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+ return;
}
- gpr_spinlock_unlock(&g_adding_thread_lock);
- }
+ thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
+ if (ts == NULL) {
+ ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
+ } else {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
+ }
+ thread_state *orig_ts = ts;
+
+ bool try_new_thread;
+ for (;;) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(
+ GPR_DEBUG,
+ "EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
+ closure, is_short ? "short" : "long", closure->file_created,
+ closure->line_created, (int)(ts - g_thread_state));
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
+ closure, is_short ? "short" : "long",
+ (int)(ts - g_thread_state));
+#endif
+ }
+ gpr_mu_lock(&ts->mu);
+ if (ts->queued_long_job) {
+ // if there's a long job queued, we never queue anything else to this
+ // queue (since long jobs can take 'infinite' time and we need to
+ // guarantee no starvation)
+ // ... spin through queues and try again
+ gpr_mu_unlock(&ts->mu);
+ size_t idx = (size_t)(ts - g_thread_state);
+ ts = &g_thread_state[(idx + 1) % cur_thread_count];
+ if (ts == orig_ts) {
+ retry_push = true;
+ try_new_thread = true;
+ break;
+ }
+ continue;
+ }
+ if (grpc_closure_list_empty(ts->elems)) {
+ GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
+ gpr_cv_signal(&ts->cv);
+ }
+ grpc_closure_list_append(&ts->elems, closure, error);
+ ts->depth++;
+ try_new_thread = ts->depth > MAX_DEPTH &&
+ cur_thread_count < g_max_threads && !ts->shutdown;
+ if (!is_short) ts->queued_long_job = true;
+ gpr_mu_unlock(&ts->mu);
+ break;
+ }
+ if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
+ cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count < g_max_threads) {
+ gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
+
+ gpr_thd_options opt = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&opt);
+ gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
+ &g_thread_state[cur_thread_count], &opt);
+ }
+ gpr_spinlock_unlock(&g_adding_thread_lock);
+ }
+ if (retry_push) {
+ GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
+ }
+ } while (retry_push);
}
-static const grpc_closure_scheduler_vtable executor_vtable = {
- executor_push, executor_push, "executor"};
-static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
-grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
+static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ executor_push(exec_ctx, closure, error, true);
+}
+
+static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ executor_push(exec_ctx, closure, error, false);
+}
+
+static const grpc_closure_scheduler_vtable executor_vtable_short = {
+ executor_push_short, executor_push_short, "executor"};
+static grpc_closure_scheduler executor_scheduler_short = {
+ &executor_vtable_short};
+
+static const grpc_closure_scheduler_vtable executor_vtable_long = {
+ executor_push_long, executor_push_long, "executor"};
+static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};
+
+grpc_closure_scheduler *grpc_executor_scheduler(
+ grpc_executor_job_length length) {
+ return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
+ : &executor_scheduler_long;
+}
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index c3382a0a12..0412c02790 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -21,6 +21,11 @@
#include "src/core/lib/iomgr/closure.h"
+typedef enum {
+ GRPC_EXECUTOR_SHORT,
+ GRPC_EXECUTOR_LONG
+} grpc_executor_job_length;
+
/** Initialize the global executor.
*
* This mechanism is meant to outsource work (grpc_closure instances) to a
@@ -28,7 +33,7 @@
* non-blocking solution available. */
void grpc_executor_init(grpc_exec_ctx *exec_ctx);
-extern grpc_closure_scheduler *grpc_executor_scheduler;
+grpc_closure_scheduler *grpc_executor_scheduler(grpc_executor_job_length);
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx);
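With grpc_executor_scheduler now taking a job length, call sites pick GRPC_EXECUTOR_SHORT for closures with bounded runtime (as the combiner and DNS resolver changes in this diff do) and GRPC_EXECUTOR_LONG for closures that may run indefinitely (as the TCP backup poller below does), so long jobs cannot starve short work. A sketch of such a call site; my_cb and schedule_bounded_work are placeholders, not part of this patch:

    /* Sketch of a call site under the new API. */
    #include "src/core/lib/iomgr/executor.h"

    static void my_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
      /* bounded amount of work: safe to run on a SHORT executor thread */
    }

    static void schedule_bounded_work(grpc_exec_ctx *exec_ctx, grpc_closure *c,
                                      void *arg) {
      GRPC_CLOSURE_INIT(c, my_cb, arg,
                        grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
      GRPC_CLOSURE_SCHED(exec_ctx, c, GRPC_ERROR_NONE);
    }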
diff --git a/src/core/lib/iomgr/load_file.c b/src/core/lib/iomgr/load_file.c
index ba77a52afc..0b4d41ea4b 100644
--- a/src/core/lib/iomgr/load_file.c
+++ b/src/core/lib/iomgr/load_file.c
@@ -47,7 +47,8 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
/* Converting to size_t on the assumption that it will not fail */
contents_size = (size_t)ftell(file);
fseek(file, 0, SEEK_SET);
- contents = gpr_malloc(contents_size + (add_null_terminator ? 1 : 0));
+ contents = (unsigned char *)gpr_malloc(contents_size +
+ (add_null_terminator ? 1 : 0));
bytes_read = fread(contents, 1, contents_size, file);
if (bytes_read < contents_size) {
error = GRPC_OS_ERROR(errno, "fread");
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index 35dedc23de..082e3b7947 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -112,13 +112,14 @@ static grpc_error *blocking_resolve_address_impl(
}
/* Success path: set addrs non-NULL, fill it in */
- *addresses = gpr_malloc(sizeof(grpc_resolved_addresses));
+ *addresses =
+ (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addresses)->naddrs = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
- (*addresses)->addrs =
- gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ (*addresses)->addrs = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address) * (*addresses)->naddrs);
i = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
@@ -153,7 +154,7 @@ typedef struct {
* grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
grpc_error *error) {
- request *r = rp;
+ request *r = (request *)rp;
GRPC_CLOSURE_SCHED(
exec_ctx, r->on_done,
grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
@@ -174,9 +175,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
- request *r = gpr_malloc(sizeof(request));
+ request *r = (request *)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler);
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index 45cfd7248d..0cb0029f4e 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -159,7 +159,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_resolved_addresses **addresses) {
request *r = gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler);
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index a31d9eef93..6c58986b53 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -241,7 +241,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota, bool destructive);
static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) {
- grpc_resource_quota *resource_quota = rq;
+ grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
resource_quota->step_scheduled = false;
do {
if (rq_alloc(exec_ctx, resource_quota)) goto done;
@@ -380,12 +380,12 @@ typedef struct {
} ru_slice_refcount;
static void ru_slice_ref(void *p) {
- ru_slice_refcount *rc = p;
+ ru_slice_refcount *rc = (ru_slice_refcount *)p;
gpr_ref(&rc->refs);
}
static void ru_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- ru_slice_refcount *rc = p;
+ ru_slice_refcount *rc = (ru_slice_refcount *)p;
if (gpr_unref(&rc->refs)) {
grpc_resource_user_free(exec_ctx, rc->resource_user, rc->size);
gpr_free(rc);
@@ -398,7 +398,8 @@ static const grpc_slice_refcount_vtable ru_slice_vtable = {
static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
size_t size) {
- ru_slice_refcount *rc = gpr_malloc(sizeof(ru_slice_refcount) + size);
+ ru_slice_refcount *rc =
+ (ru_slice_refcount *)gpr_malloc(sizeof(ru_slice_refcount) + size);
rc->base.vtable = &ru_slice_vtable;
rc->base.sub_refcount = &rc->base;
gpr_ref_init(&rc->refs, 1);
@@ -417,7 +418,7 @@ static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
*/
static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION)) {
rq_step_sched(exec_ctx, resource_user->resource_quota);
@@ -427,7 +428,7 @@ static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
@@ -454,7 +455,7 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -469,7 +470,7 @@ static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -485,7 +486,7 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
}
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
GRPC_ERROR_CANCELLED);
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
@@ -497,7 +498,7 @@ static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
}
static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, (grpc_rulist)i);
@@ -518,7 +519,8 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_resource_user_slice_allocator *slice_allocator = arg;
+ grpc_resource_user_slice_allocator *slice_allocator =
+ (grpc_resource_user_slice_allocator *)arg;
if (error == GRPC_ERROR_NONE) {
for (size_t i = 0; i < slice_allocator->count; i++) {
grpc_slice_buffer_add_indexed(
@@ -541,7 +543,7 @@ typedef struct {
} rq_resize_args;
static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
- rq_resize_args *a = args;
+ rq_resize_args *a = (rq_resize_args *)args;
int64_t delta = a->size - a->resource_quota->size;
a->resource_quota->size += delta;
a->resource_quota->free_pool += delta;
@@ -553,7 +555,7 @@ static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
grpc_error *error) {
- grpc_resource_quota *resource_quota = rq;
+ grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
resource_quota->reclaiming = false;
rq_step_sched(exec_ctx, resource_quota);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@@ -565,7 +567,8 @@ static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
/* Public API */
grpc_resource_quota *grpc_resource_quota_create(const char *name) {
- grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
+ grpc_resource_quota *resource_quota =
+ (grpc_resource_quota *)gpr_malloc(sizeof(*resource_quota));
gpr_ref_init(&resource_quota->refs, 1);
resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
@@ -629,7 +632,7 @@ double grpc_resource_quota_get_memory_pressure(
void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
size_t size) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- rq_resize_args *a = gpr_malloc(sizeof(*a));
+ rq_resize_args *a = (rq_resize_args *)gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
a->size = (int64_t)size;
gpr_atm_no_barrier_store(&resource_quota->last_size,
@@ -684,7 +687,8 @@ const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
grpc_resource_user *grpc_resource_user_create(
grpc_resource_quota *resource_quota, const char *name) {
- grpc_resource_user *resource_user = gpr_malloc(sizeof(*resource_user));
+ grpc_resource_user *resource_user =
+ (grpc_resource_user *)gpr_malloc(sizeof(*resource_user));
resource_user->resource_quota =
grpc_resource_quota_ref_internal(resource_quota);
GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate,
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index a25fba4527..39dbb506e2 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -80,7 +80,8 @@ static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd,
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_SOCKET_MUTATOR)) {
GPR_ASSERT(channel_args->args[i].type == GRPC_ARG_POINTER);
- grpc_socket_mutator *mutator = channel_args->args[i].value.pointer.p;
+ grpc_socket_mutator *mutator =
+ (grpc_socket_mutator *)channel_args->args[i].value.pointer.p;
err = grpc_set_socket_with_mutator(fd, mutator);
if (err != GRPC_ERROR_NONE) goto error;
}
@@ -98,7 +99,7 @@ done:
static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
int done;
- async_connect *ac = acp;
+ async_connect *ac = (async_connect *)acp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
@@ -126,7 +127,7 @@ grpc_endpoint *grpc_tcp_client_create_from_fd(
}
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
- async_connect *ac = acp;
+ async_connect *ac = (async_connect *)acp;
int so_error = 0;
socklen_t so_error_size;
int err;
@@ -304,7 +305,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
- ac = gpr_malloc(sizeof(async_connect));
+ ac = (async_connect *)gpr_malloc(sizeof(async_connect));
ac->closure = closure;
ac->ep = ep;
ac->fd = fdobj;
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 3372e14eef..7e271294fd 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -43,6 +43,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@@ -90,8 +91,8 @@ typedef struct {
grpc_closure *release_fd_cb;
int *release_fd;
- grpc_closure read_closure;
- grpc_closure write_closure;
+ grpc_closure read_done_closure;
+ grpc_closure write_done_closure;
char *peer_string;
@@ -99,6 +100,148 @@ typedef struct {
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
+typedef struct backup_poller {
+ gpr_mu *pollset_mu;
+ grpc_closure run_poller;
+} backup_poller;
+
+#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset *)((b) + 1))
+
+static gpr_atm g_uncovered_notifications_pending;
+static gpr_atm g_backup_poller; /* backup_poller* */
+
+static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
+ void *arg /* grpc_tcp */,
+ grpc_error *error);
+
+static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
+ grpc_error *error_ignored) {
+ backup_poller *p = (backup_poller *)bp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
+ }
+ grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p));
+ gpr_free(p);
+}
+
+static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
+ grpc_error *error_ignored) {
+ backup_poller *p = (backup_poller *)bp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
+ }
+ gpr_mu_lock(p->pollset_mu);
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec deadline =
+ gpr_time_add(now, gpr_time_from_seconds(10, GPR_TIMESPAN));
+ GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
+ GRPC_LOG_IF_ERROR("backup_poller:pollset_work",
+ grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL,
+ now, deadline));
+ gpr_mu_unlock(p->pollset_mu);
+ /* last "uncovered" notification is the ref that keeps us polling, if we get
+ * there try a cas to release it */
+ if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
+ gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
+ gpr_mu_lock(p->pollset_mu);
+ bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
+ }
+ gpr_mu_unlock(p->pollset_mu);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
+ }
+ grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p),
+ GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
+ grpc_schedule_on_exec_ctx));
+ } else {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE);
+ }
+}
+
+static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller);
+ gpr_atm old_count =
+ gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, (int)old_count,
+ (int)old_count - 1);
+ }
+ GPR_ASSERT(old_count != 1);
+}
+
+static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ backup_poller *p;
+ gpr_atm old_count =
+ gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", (int)old_count,
+ 2 + (int)old_count);
+ }
+ if (old_count == 0) {
+ GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
+ p = (backup_poller *)gpr_malloc(sizeof(*p) + grpc_pollset_size());
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
+ }
+ grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
+ gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
+ grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
+ GRPC_ERROR_NONE);
+ } else {
+ while ((p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller)) == NULL) {
+ // spin waiting for backup poller
+ }
+ }
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
+ }
+ grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd);
+ if (old_count != 0) {
+ drop_uncovered(exec_ctx, tcp);
+ }
+}
+
+static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
+ }
+ GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
+ grpc_schedule_on_exec_ctx);
+ grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
+}
+
+static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
+ }
+ cover_self(exec_ctx, tcp);
+ GRPC_CLOSURE_INIT(&tcp->write_done_closure,
+ tcp_drop_uncovered_then_handle_write, tcp,
+ grpc_schedule_on_exec_ctx);
+ grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure);
+}
+
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
+ }
+ drop_uncovered(exec_ctx, (grpc_tcp *)arg);
+ tcp_handle_write(exec_ctx, arg, error);
+}
+
static void add_to_estimate(grpc_tcp *tcp, size_t bytes) {
tcp->bytes_read_this_round += (double)bytes;
}
@@ -214,6 +357,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
grpc_closure *cb = tcp->read_cb;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
@@ -271,7 +415,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (errno == EAGAIN) {
finish_estimate(tcp);
/* We've consumed the edge, request a new one */
- grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
+ notify_on_read(exec_ctx, tcp);
} else {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
tcp->incoming_buffer);
@@ -307,7 +451,11 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
grpc_error *error) {
- grpc_tcp *tcp = tcpp;
+ grpc_tcp *tcp = (grpc_tcp *)tcpp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
+ grpc_error_string(error));
+ }
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
@@ -323,9 +471,15 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
+ }
grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
target_read_size, 1, tcp->incoming_buffer);
} else {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
+ }
tcp_do_read(exec_ctx, tcp);
}
}
@@ -334,6 +488,9 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
+ }
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
@@ -357,9 +514,9 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = false;
- grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
+ notify_on_read(exec_ctx, tcp);
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE);
}
}
@@ -472,7 +629,7 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ notify_on_write(exec_ctx, tcp);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
@@ -525,7 +682,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ notify_on_write(exec_ctx, tcp);
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
@@ -602,7 +759,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
}
}
}
@@ -631,10 +788,6 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
- GRPC_CLOSURE_INIT(&tcp->read_closure, tcp_handle_read, tcp,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&tcp->write_closure, tcp_handle_write, tcp,
- grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(
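The tcp_posix.c hunks above add a lazily created, process-wide backup poller: the thread that creates it publishes the pointer with gpr_atm_rel_store, and racing threads spin on gpr_atm_acq_load until the pointer appears. A minimal standalone sketch of the same publish/spin pattern using C11 atomics instead of the gpr_atm wrappers; the names here are illustrative, and the real code decides which thread creates the poller from its covered-fd count rather than a flag.

#include <stdatomic.h>
#include <stdlib.h>

typedef struct poller { int placeholder; } poller;

static _Atomic(poller *) g_poller;                 /* NULL until published */
static atomic_flag g_creating = ATOMIC_FLAG_INIT;  /* first caller creates */

static poller *get_or_create_poller(void) {
  if (!atomic_flag_test_and_set(&g_creating)) {
    /* Winner: build the poller, then publish it with release ordering so
       its initialization is visible to readers that acquire-load it. */
    poller *p = (poller *)malloc(sizeof(*p));
    if (p == NULL) abort();
    p->placeholder = 0;
    atomic_store_explicit(&g_poller, p, memory_order_release);
    return p;
  }
  /* Losers: spin until the winner's store becomes visible. */
  poller *p;
  while ((p = atomic_load_explicit(&g_poller, memory_order_acquire)) == NULL) {
  }
  return p;
}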
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 0fc5c0fd86..c3ec3e447a 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -74,7 +74,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
grpc_tcp_server **server) {
gpr_once_init(&check_init, init);
- grpc_tcp_server *s = gpr_zalloc(sizeof(grpc_tcp_server));
+ grpc_tcp_server *s = (grpc_tcp_server *)gpr_zalloc(sizeof(grpc_tcp_server));
s->so_reuseport = has_so_reuseport;
s->expand_wildcard_addrs = false;
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
@@ -138,7 +138,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
grpc_error *error) {
- grpc_tcp_server *s = server;
+ grpc_tcp_server *s = (grpc_tcp_server *)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -197,7 +197,7 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
- grpc_tcp_listener *sp = arg;
+ grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
if (err != GRPC_ERROR_NONE) {
goto error;
@@ -251,7 +251,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);
// Create acceptor.
- grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
+ grpc_tcp_server_acceptor *acceptor =
+ (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor));
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = sp->fd_index;
@@ -365,7 +366,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
listener->server->nports++;
grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
- sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = listener->next;
listener->next = sp;
/* sp (the new listener) is a sibling of 'listener' (the original
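Nearly every other hunk in this section is the same mechanical change: results of gpr_malloc/gpr_zalloc/gpr_realloc and other void * values gain an explicit cast to the destination pointer type. Under a C compiler the cast changes nothing, but it lets these files also compile as C++, where void * does not convert implicitly. A standalone before/after illustration with plain malloc (not grpc code):

#include <stdlib.h>

typedef struct node { struct node *next; int value; } node;

node *node_create(int value) {
  /* In C, "node *n = malloc(sizeof(*n));" also compiles; the explicit cast
     below is what additionally satisfies a C++ compiler. */
  node *n = (node *)malloc(sizeof(*n));
  if (n != NULL) {
    n->next = NULL;
    n->value = value;
  }
  return n;
}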
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_common.c b/src/core/lib/iomgr/tcp_server_utils_posix_common.c
index ad535bc43e..a828bee074 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_common.c
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_common.c
@@ -93,7 +93,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
gpr_mu_lock(&s->mu);
s->nports++;
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
diff --git a/src/core/lib/iomgr/timer_heap.c b/src/core/lib/iomgr/timer_heap.c
index a70e3942b2..2648d5da5d 100644
--- a/src/core/lib/iomgr/timer_heap.c
+++ b/src/core/lib/iomgr/timer_heap.c
@@ -74,8 +74,8 @@ static void maybe_shrink(grpc_timer_heap *heap) {
if (heap->timer_count >= 8 &&
heap->timer_count <= heap->timer_capacity / SHRINK_FULLNESS_FACTOR / 2) {
heap->timer_capacity = heap->timer_count * SHRINK_FULLNESS_FACTOR;
- heap->timers =
- gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+ heap->timers = (grpc_timer **)gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
}
@@ -99,8 +99,8 @@ int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) {
if (heap->timer_count == heap->timer_capacity) {
heap->timer_capacity =
GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2);
- heap->timers =
- gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+ heap->timers = (grpc_timer **)gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
timer->heap_index = heap->timer_count;
adjust_upwards(heap->timers, heap->timer_count, timer);
diff --git a/src/core/lib/iomgr/timer_manager.c b/src/core/lib/iomgr/timer_manager.c
index 631f7935d9..ae2c0bf0ae 100644
--- a/src/core/lib/iomgr/timer_manager.c
+++ b/src/core/lib/iomgr/timer_manager.c
@@ -83,7 +83,7 @@ static void start_timer_thread_and_unlock(void) {
}
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
- completed_thread *ct = gpr_malloc(sizeof(*ct));
+ completed_thread *ct = (completed_thread *)gpr_malloc(sizeof(*ct));
// The call to gpr_thd_new() has to be under the same lock used by
// gc_completed_threads(), particularly due to ct->t, which is written here
// (internally by gpr_thd_new) and read there. Otherwise it's possible for ct
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 88fa34cb7a..9a02c1d1bb 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -125,7 +125,7 @@ static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) {
}
grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) {
- grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
+ grpc_udp_server *s = (grpc_udp_server *)gpr_malloc(sizeof(grpc_udp_server));
gpr_mu_init(&s->mu);
s->socket_factory = get_socket_factory(args);
if (s->socket_factory) {
@@ -176,7 +176,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
grpc_error *error) {
- grpc_udp_server *s = server;
+ grpc_udp_server *s = (grpc_udp_server *)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -237,7 +237,8 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
if (s->active_ports) {
for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
- struct shutdown_fd_args *args = gpr_malloc(sizeof(*args));
+ struct shutdown_fd_args *args =
+ (struct shutdown_fd_args *)gpr_malloc(sizeof(*args));
args->fd = sp->emfd;
args->server_mu = &s->mu;
GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
@@ -331,7 +332,7 @@ error:
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_udp_listener *sp = arg;
+ grpc_udp_listener *sp = (grpc_udp_listener *)arg;
gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
@@ -354,7 +355,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_udp_listener *sp = arg;
+ grpc_udp_listener *sp = (grpc_udp_listener *)arg;
gpr_mu_lock(&(sp->server->mu));
if (error != GRPC_ERROR_NONE) {
@@ -393,7 +394,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
gpr_free(addr_str);
gpr_mu_lock(&s->mu);
s->nports++;
- sp = gpr_malloc(sizeof(grpc_udp_listener));
+ sp = (grpc_udp_listener *)gpr_malloc(sizeof(grpc_udp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@@ -444,7 +445,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
(socklen_t *)&sockname_temp.len)) {
port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
- allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ allocated_addr = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
diff --git a/src/core/lib/iomgr/unix_sockets_posix.c b/src/core/lib/iomgr/unix_sockets_posix.c
index 0c8627c8c6..35f898f13a 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.c
+++ b/src/core/lib/iomgr/unix_sockets_posix.c
@@ -49,9 +49,11 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name,
gpr_free(err_msg);
return err;
}
- *addrs = gpr_malloc(sizeof(grpc_resolved_addresses));
+ *addrs =
+ (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addrs)->naddrs = 1;
- (*addrs)->addrs = gpr_malloc(sizeof(grpc_resolved_address));
+ (*addrs)->addrs =
+ (grpc_resolved_address *)gpr_malloc(sizeof(grpc_resolved_address));
un = (struct sockaddr_un *)(*addrs)->addrs->addr;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, name);
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.c b/src/core/lib/iomgr/wakeup_fd_cv.c
index 075a0b6426..5e0b1d1704 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.c
+++ b/src/core/lib/iomgr/wakeup_fd_cv.c
@@ -42,7 +42,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
if (!g_cvfds.free_fds) {
newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
- g_cvfds.cvfds = gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
+ g_cvfds.cvfds =
+ (fd_node*)gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
for (i = g_cvfds.size; i < newsize; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = NULL;
diff --git a/src/core/lib/json/json.c b/src/core/lib/json/json.c
index 25eee05532..4ad51f662a 100644
--- a/src/core/lib/json/json.c
+++ b/src/core/lib/json/json.c
@@ -23,7 +23,7 @@
#include "src/core/lib/json/json.h"
grpc_json* grpc_json_create(grpc_json_type type) {
- grpc_json* json = gpr_zalloc(sizeof(*json));
+ grpc_json* json = (grpc_json*)gpr_zalloc(sizeof(*json));
json->type = type;
return json;
diff --git a/src/core/lib/json/json_string.c b/src/core/lib/json/json_string.c
index 65b5f0f482..3178d2d2b4 100644
--- a/src/core/lib/json/json_string.c
+++ b/src/core/lib/json/json_string.c
@@ -63,19 +63,19 @@ typedef struct {
* bytes at a time (or multiples thereof).
*/
static void json_writer_output_check(void *userdata, size_t needed) {
- json_writer_userdata *state = userdata;
+ json_writer_userdata *state = (json_writer_userdata *)userdata;
if (state->free_space >= needed) return;
needed -= state->free_space;
/* Round up by 256 bytes. */
needed = (needed + 0xff) & ~0xffU;
- state->output = gpr_realloc(state->output, state->allocated + needed);
+ state->output = (char *)gpr_realloc(state->output, state->allocated + needed);
state->free_space += needed;
state->allocated += needed;
}
/* These are needed by the writer's implementation. */
static void json_writer_output_char(void *userdata, char c) {
- json_writer_userdata *state = userdata;
+ json_writer_userdata *state = (json_writer_userdata *)userdata;
json_writer_output_check(userdata, 1);
state->output[state->string_len++] = c;
state->free_space--;
@@ -83,7 +83,7 @@ static void json_writer_output_char(void *userdata, char c) {
static void json_writer_output_string_with_len(void *userdata, const char *str,
size_t len) {
- json_writer_userdata *state = userdata;
+ json_writer_userdata *state = (json_writer_userdata *)userdata;
json_writer_output_check(userdata, len);
memcpy(state->output + state->string_len, str, len);
state->string_len += len;
@@ -99,7 +99,7 @@ static void json_writer_output_string(void *userdata, const char *str) {
* the end of the current string, and advance our output pointer.
*/
static void json_reader_string_clear(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
if (state->string) {
GPR_ASSERT(state->string_ptr < state->input);
*state->string_ptr++ = 0;
@@ -108,7 +108,7 @@ static void json_reader_string_clear(void *userdata) {
}
static void json_reader_string_add_char(void *userdata, uint32_t c) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
GPR_ASSERT(state->string_ptr < state->input);
GPR_ASSERT(c <= 0xff);
*state->string_ptr++ = (uint8_t)c;
@@ -149,7 +149,7 @@ static void json_reader_string_add_utf32(void *userdata, uint32_t c) {
*/
static uint32_t json_reader_read_char(void *userdata) {
uint32_t r;
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
@@ -168,7 +168,7 @@ static uint32_t json_reader_read_char(void *userdata) {
* our tree-in-progress inside our opaque structure.
*/
static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = grpc_json_create(type);
json->parent = state->current_container;
@@ -194,7 +194,7 @@ static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
}
static void json_reader_container_begins(void *userdata, grpc_json_type type) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *container;
GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT);
@@ -215,7 +215,7 @@ static void json_reader_container_begins(void *userdata, grpc_json_type type) {
*/
static grpc_json_type json_reader_container_ends(void *userdata) {
grpc_json_type container_type = GRPC_JSON_TOP_LEVEL;
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
GPR_ASSERT(state->current_container);
@@ -236,18 +236,18 @@ static grpc_json_type json_reader_container_ends(void *userdata) {
* We'll keep it as a string, and leave it to the caller to evaluate it.
*/
static void json_reader_set_key(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
state->key = state->string;
}
static void json_reader_set_string(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = json_create_and_link(userdata, GRPC_JSON_STRING);
json->value = (char *)state->string;
}
static int json_reader_set_number(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
json->value = (char *)state->string;
return 1;
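The json reader/writer helpers above all receive their state through an opaque void *userdata and now cast it back to the concrete struct explicitly, the same C/C++ compatibility point applied to callback state. A small standalone sketch of that shape; the writer_state type and names are illustrative, not the grpc ones.

#include <stddef.h>
#include <stdio.h>

typedef struct writer_state {
  size_t chars_written;
} writer_state;

/* Callback with opaque user data, mirroring json_writer_output_char above. */
static void output_char(void *userdata, char c) {
  writer_state *state = (writer_state *)userdata;  /* explicit cast for C++ */
  fputc(c, stdout);
  state->chars_written++;
}

int main(void) {
  writer_state state = {0};
  output_char(&state, 'a');
  output_char(&state, '\n');
  return state.chars_written == 2 ? 0 : 1;
}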
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index ea9608f444..975d599523 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -128,13 +128,11 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
}
-static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- security_handshaker *h = arg;
- gpr_mu_lock(&h->mu);
+static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
+ security_handshaker *h, grpc_error *error) {
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
- goto done;
+ return;
}
// Create zero-copy frame protector, if implemented.
tsi_zero_copy_grpc_protector *zero_copy_protector = NULL;
@@ -146,7 +144,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
"Zero-copy frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
- goto done;
+ return;
}
// Create frame protector if zero-copy frame protector is NULL.
tsi_frame_protector *protector = NULL;
@@ -158,7 +156,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
"Frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
- goto done;
+ return;
}
}
// Get unused bytes.
@@ -192,7 +190,13 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
h->shutdown = true;
-done:
+}
+
+static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ security_handshaker *h = (security_handshaker *)arg;
+ gpr_mu_lock(&h->mu);
+ on_peer_checked_inner(exec_ctx, h, error);
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
}
@@ -254,7 +258,7 @@ static grpc_error *on_handshake_next_done_locked(
static void on_handshake_next_done_grpc_wrapper(
tsi_result result, void *user_data, const unsigned char *bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) {
- security_handshaker *h = user_data;
+ security_handshaker *h = (security_handshaker *)user_data;
// This callback will be invoked by TSI in a non-grpc thread, so it's
// safe to create our own exec_ctx here.
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -296,7 +300,7 @@ static grpc_error *do_handshaker_next_locked(
static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- security_handshaker *h = arg;
+ security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@@ -313,7 +317,8 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
bytes_received_size += GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]);
}
if (bytes_received_size > h->handshake_buffer_size) {
- h->handshake_buffer = gpr_realloc(h->handshake_buffer, bytes_received_size);
+ h->handshake_buffer =
+ (uint8_t *)gpr_realloc(h->handshake_buffer, bytes_received_size);
h->handshake_buffer_size = bytes_received_size;
}
size_t offset = 0;
@@ -338,7 +343,7 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- security_handshaker *h = arg;
+ security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@@ -415,14 +420,15 @@ static const grpc_handshaker_vtable security_handshaker_vtable = {
static grpc_handshaker *security_handshaker_create(
grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
grpc_security_connector *connector) {
- security_handshaker *h = gpr_zalloc(sizeof(security_handshaker));
+ security_handshaker *h =
+ (security_handshaker *)gpr_zalloc(sizeof(security_handshaker));
grpc_handshaker_init(&security_handshaker_vtable, &h->base);
h->handshaker = handshaker;
h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
gpr_mu_init(&h->mu);
gpr_ref_init(&h->refs, 1);
h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
- h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
+ h->handshake_buffer = (uint8_t *)gpr_malloc(h->handshake_buffer_size);
GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
on_handshake_data_sent_to_peer, h,
grpc_schedule_on_exec_ctx);
@@ -465,7 +471,7 @@ static const grpc_handshaker_vtable fail_handshaker_vtable = {
fail_handshaker_do_handshake};
static grpc_handshaker *fail_handshaker_create() {
- grpc_handshaker *h = gpr_malloc(sizeof(*h));
+ grpc_handshaker *h = (grpc_handshaker *)gpr_malloc(sizeof(*h));
grpc_handshaker_init(&fail_handshaker_vtable, h);
return h;
}
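The security_handshaker.c hunks are the one structural change in this stretch: the body of on_peer_checked() moves into on_peer_checked_inner(), so each failure path becomes a plain return, and the mutex unlock plus unref happen exactly once in the thin outer wrapper instead of behind a goto done label. A generic standalone sketch of that wrapper shape with pthreads (illustrative names, not the grpc types):

#include <pthread.h>
#include <stdbool.h>

typedef struct handshake_state {
  pthread_mutex_t mu;
  bool shutdown;
} handshake_state;

/* Runs with h->mu held; early exits are plain returns (no goto). */
static void on_checked_inner(handshake_state *h, int error) {
  if (error != 0 || h->shutdown) {
    return;
  }
  /* ... success path ... */
  h->shutdown = true;
}

/* Thin wrapper: take the lock, delegate, release it exactly once. */
static void on_checked(handshake_state *h, int error) {
  pthread_mutex_lock(&h->mu);
  on_checked_inner(h, error);
  pthread_mutex_unlock(&h->mu);
}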
diff --git a/src/core/lib/slice/b64.c b/src/core/lib/slice/b64.c
index d02f303bdb..50264719a4 100644
--- a/src/core/lib/slice/b64.c
+++ b/src/core/lib/slice/b64.c
@@ -58,7 +58,7 @@ char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
int multiline) {
size_t result_projected_size =
grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
- char *result = gpr_malloc(result_projected_size);
+ char *result = (char *)gpr_malloc(result_projected_size);
grpc_base64_encode_core(result, vdata, data_size, url_safe, multiline);
return result;
}
@@ -75,7 +75,7 @@ size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,
void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size,
int url_safe, int multiline) {
- const unsigned char *data = vdata;
+ const unsigned char *data = (const unsigned char *)vdata;
const char *base64_chars =
url_safe ? base64_url_safe_chars : base64_url_unsafe_chars;
const size_t result_projected_size =
diff --git a/src/core/lib/slice/slice.c b/src/core/lib/slice/slice.c
index 8a8087805c..321a21a10b 100644
--- a/src/core/lib/slice/slice.c
+++ b/src/core/lib/slice/slice.c
@@ -27,7 +27,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
char *grpc_slice_to_c_string(grpc_slice slice) {
- char *out = gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
+ char *out = (char *)gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
memcpy(out, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));
out[GRPC_SLICE_LENGTH(slice)] = 0;
return out;
@@ -105,12 +105,12 @@ typedef struct new_slice_refcount {
} new_slice_refcount;
static void new_slice_ref(void *p) {
- new_slice_refcount *r = p;
+ new_slice_refcount *r = (new_slice_refcount *)p;
gpr_ref(&r->refs);
}
static void new_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- new_slice_refcount *r = p;
+ new_slice_refcount *r = (new_slice_refcount *)p;
if (gpr_unref(&r->refs)) {
r->user_destroy(r->user_data);
gpr_free(r);
@@ -125,7 +125,8 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
void (*destroy)(void *),
void *user_data) {
grpc_slice slice;
- new_slice_refcount *rc = gpr_malloc(sizeof(new_slice_refcount));
+ new_slice_refcount *rc =
+ (new_slice_refcount *)gpr_malloc(sizeof(new_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.vtable = &new_slice_vtable;
rc->rc.sub_refcount = &rc->rc;
@@ -133,7 +134,7 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
rc->user_data = user_data;
slice.refcount = &rc->rc;
- slice.data.refcounted.bytes = p;
+ slice.data.refcounted.bytes = (uint8_t *)p;
slice.data.refcounted.length = len;
return slice;
}
@@ -154,12 +155,12 @@ typedef struct new_with_len_slice_refcount {
} new_with_len_slice_refcount;
static void new_with_len_ref(void *p) {
- new_with_len_slice_refcount *r = p;
+ new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
gpr_ref(&r->refs);
}
static void new_with_len_unref(grpc_exec_ctx *exec_ctx, void *p) {
- new_with_len_slice_refcount *r = p;
+ new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
if (gpr_unref(&r->refs)) {
r->user_destroy(r->user_data, r->user_length);
gpr_free(r);
@@ -183,7 +184,7 @@ grpc_slice grpc_slice_new_with_len(void *p, size_t len,
rc->user_length = len;
slice.refcount = &rc->rc;
- slice.data.refcounted.bytes = p;
+ slice.data.refcounted.bytes = (uint8_t *)p;
slice.data.refcounted.length = len;
return slice;
}
@@ -205,12 +206,12 @@ typedef struct {
} malloc_refcount;
static void malloc_ref(void *p) {
- malloc_refcount *r = p;
+ malloc_refcount *r = (malloc_refcount *)p;
gpr_ref(&r->refs);
}
static void malloc_unref(grpc_exec_ctx *exec_ctx, void *p) {
- malloc_refcount *r = p;
+ malloc_refcount *r = (malloc_refcount *)p;
if (gpr_unref(&r->refs)) {
gpr_free(r);
}
@@ -232,7 +233,8 @@ grpc_slice grpc_slice_malloc_large(size_t length) {
refcount is a malloc_refcount
bytes is an array of bytes of the requested length
Both parts are placed in the same allocation returned from gpr_malloc */
- malloc_refcount *rc = gpr_malloc(sizeof(malloc_refcount) + length);
+ malloc_refcount *rc =
+ (malloc_refcount *)gpr_malloc(sizeof(malloc_refcount) + length);
/* Initial refcount on rc is 1 - and it's up to the caller to release
this reference. */
@@ -451,7 +453,7 @@ int grpc_slice_rchr(grpc_slice s, char c) {
int grpc_slice_chr(grpc_slice s, char c) {
const char *b = (const char *)GRPC_SLICE_START_PTR(s);
- const char *p = memchr(b, c, GRPC_SLICE_LENGTH(s));
+ const char *p = (const char *)memchr(b, c, GRPC_SLICE_LENGTH(s));
return p == NULL ? -1 : (int)(p - b);
}
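grpc_slice_malloc_large() above keeps its refcount header and the requested bytes in a single gpr_malloc(sizeof(malloc_refcount) + length) allocation, with the payload starting immediately after the header. A standalone sketch of that layout with plain malloc (names are illustrative):

#include <stdlib.h>
#include <string.h>

typedef struct refcounted_buf {
  int refs;  /* header; 'length' payload bytes follow this struct in memory */
} refcounted_buf;

static refcounted_buf *buf_create(size_t length) {
  /* One allocation holds both the header and the payload. */
  refcounted_buf *rc =
      (refcounted_buf *)malloc(sizeof(refcounted_buf) + length);
  if (rc != NULL) {
    rc->refs = 1;
    memset(rc + 1, 0, length);  /* payload begins just past the header */
  }
  return rc;
}

static unsigned char *buf_bytes(refcounted_buf *rc) {
  return (unsigned char *)(rc + 1);  /* pointer arithmetic past the header */
}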
diff --git a/src/core/lib/slice/slice_buffer.c b/src/core/lib/slice/slice_buffer.c
index a54a997a0d..63ffc0b00d 100644
--- a/src/core/lib/slice/slice_buffer.c
+++ b/src/core/lib/slice/slice_buffer.c
@@ -45,11 +45,12 @@ static void maybe_embiggen(grpc_slice_buffer *sb) {
sb->capacity = GROW(sb->capacity);
GPR_ASSERT(sb->capacity > slice_count);
if (sb->base_slices == sb->inlined) {
- sb->base_slices = gpr_malloc(sb->capacity * sizeof(grpc_slice));
+ sb->base_slices =
+ (grpc_slice *)gpr_malloc(sb->capacity * sizeof(grpc_slice));
memcpy(sb->base_slices, sb->inlined, slice_count * sizeof(grpc_slice));
} else {
- sb->base_slices =
- gpr_realloc(sb->base_slices, sb->capacity * sizeof(grpc_slice));
+ sb->base_slices = (grpc_slice *)gpr_realloc(
+ sb->base_slices, sb->capacity * sizeof(grpc_slice));
}
sb->slices = sb->base_slices + slice_offset;
@@ -291,7 +292,7 @@ void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer *src, size_t n,
void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer *src, size_t n,
void *dst) {
- char *dstp = dst;
+ char *dstp = (char *)dst;
GPR_ASSERT(src->length >= n);
while (n > 0) {
diff --git a/src/core/lib/slice/slice_hash_table.c b/src/core/lib/slice/slice_hash_table.c
index 1866ed25ac..6c2c9c201c 100644
--- a/src/core/lib/slice/slice_hash_table.c
+++ b/src/core/lib/slice/slice_hash_table.c
@@ -60,14 +60,15 @@ grpc_slice_hash_table* grpc_slice_hash_table_create(
size_t num_entries, grpc_slice_hash_table_entry* entries,
void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value),
int (*value_cmp)(void* a, void* b)) {
- grpc_slice_hash_table* table = gpr_zalloc(sizeof(*table));
+ grpc_slice_hash_table* table =
+ (grpc_slice_hash_table*)gpr_zalloc(sizeof(*table));
gpr_ref_init(&table->refs, 1);
table->destroy_value = destroy_value;
table->value_cmp = value_cmp;
// Keep load factor low to improve performance of lookups.
table->size = num_entries * 2;
const size_t entry_size = sizeof(grpc_slice_hash_table_entry) * table->size;
- table->entries = gpr_zalloc(entry_size);
+ table->entries = (grpc_slice_hash_table_entry*)gpr_zalloc(entry_size);
for (size_t i = 0; i < num_entries; ++i) {
grpc_slice_hash_table_entry* entry = &entries[i];
grpc_slice_hash_table_add(table, entry->key, entry->value);
diff --git a/src/core/lib/slice/slice_intern.c b/src/core/lib/slice/slice_intern.c
index a6d22c1e1f..ec71b3ca1d 100644
--- a/src/core/lib/slice/slice_intern.c
+++ b/src/core/lib/slice/slice_intern.c
@@ -69,7 +69,7 @@ static uint32_t max_static_metadata_hash_probe;
static uint32_t static_metadata_hash_values[GRPC_STATIC_MDSTR_COUNT];
static void interned_slice_ref(void *p) {
- interned_slice_refcount *s = p;
+ interned_slice_refcount *s = (interned_slice_refcount *)p;
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) > 0);
}
@@ -90,7 +90,7 @@ static void interned_slice_destroy(interned_slice_refcount *s) {
}
static void interned_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- interned_slice_refcount *s = p;
+ interned_slice_refcount *s = (interned_slice_refcount *)p;
if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
interned_slice_destroy(s);
}
@@ -129,7 +129,8 @@ static void grow_shard(slice_shard *shard) {
GPR_TIMER_BEGIN("grow_strtab", 0);
- strtab = gpr_zalloc(sizeof(interned_slice_refcount *) * capacity);
+ strtab = (interned_slice_refcount **)gpr_zalloc(
+ sizeof(interned_slice_refcount *) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (s = shard->strs[i]; s; s = next) {
@@ -242,7 +243,8 @@ grpc_slice grpc_slice_intern(grpc_slice slice) {
/* not found: create a new string */
/* string data goes after the internal_string header */
- s = gpr_malloc(sizeof(*s) + GRPC_SLICE_LENGTH(slice));
+ s = (interned_slice_refcount *)gpr_malloc(sizeof(*s) +
+ GRPC_SLICE_LENGTH(slice));
gpr_atm_rel_store(&s->refcnt, 1);
s->length = GRPC_SLICE_LENGTH(slice);
s->hash = hash;
@@ -280,7 +282,8 @@ void grpc_slice_intern_init(void) {
gpr_mu_init(&shard->mu);
shard->count = 0;
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->strs = gpr_zalloc(sizeof(*shard->strs) * shard->capacity);
+ shard->strs = (interned_slice_refcount **)gpr_zalloc(sizeof(*shard->strs) *
+ shard->capacity);
}
for (size_t i = 0; i < GPR_ARRAY_SIZE(static_metadata_hash); i++) {
static_metadata_hash[i].hash = 0;
diff --git a/src/core/lib/surface/alarm.c b/src/core/lib/surface/alarm.c
index 5dbfaa2d43..7712f560b9 100644
--- a/src/core/lib/surface/alarm.c
+++ b/src/core/lib/surface/alarm.c
@@ -80,12 +80,12 @@ static void alarm_unref_dbg(grpc_alarm *alarm, const char *reason,
static void alarm_end_completion(grpc_exec_ctx *exec_ctx, void *arg,
grpc_cq_completion *c) {
- grpc_alarm *alarm = arg;
+ grpc_alarm *alarm = (grpc_alarm *)arg;
GRPC_ALARM_UNREF(alarm, "dequeue-end-op");
}
static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_alarm *alarm = arg;
+ grpc_alarm *alarm = (grpc_alarm *)arg;
/* We are queuing an op on completion queue. This means, the alarm's structure
cannot be destroyed until the op is dequeued. Adding an extra ref
@@ -96,7 +96,7 @@ static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
grpc_alarm *grpc_alarm_create(void *reserved) {
- grpc_alarm *alarm = gpr_malloc(sizeof(grpc_alarm));
+ grpc_alarm *alarm = (grpc_alarm *)gpr_malloc(sizeof(grpc_alarm));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) {
diff --git a/src/core/lib/surface/byte_buffer.c b/src/core/lib/surface/byte_buffer.c
index 0bc990d487..7ed550ef87 100644
--- a/src/core/lib/surface/byte_buffer.c
+++ b/src/core/lib/surface/byte_buffer.c
@@ -32,7 +32,8 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_slice *slices, size_t nslices,
grpc_compression_algorithm compression) {
size_t i;
- grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
+ grpc_byte_buffer *bb =
+ (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer));
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = compression;
grpc_slice_buffer_init(&bb->data.raw.slice_buffer);
@@ -45,7 +46,8 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
grpc_byte_buffer_reader *reader) {
- grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
+ grpc_byte_buffer *bb =
+ (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer));
grpc_slice slice;
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = GRPC_COMPRESS_NONE;
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 347cb9a303..26d6f0315d 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -307,7 +307,7 @@ void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
static parent_call *get_or_create_parent_call(grpc_call *call) {
parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
if (p == NULL) {
- p = gpr_arena_alloc(call->arena, sizeof(*p));
+ p = (parent_call *)gpr_arena_alloc(call->arena, sizeof(*p));
gpr_mu_init(&p->child_list_mu);
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) {
gpr_mu_destroy(&p->child_list_mu);
@@ -333,8 +333,8 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size);
gpr_arena *arena = gpr_arena_create(initial_size);
- call = gpr_arena_alloc(arena,
- sizeof(grpc_call) + channel_stack->call_stack_size);
+ call = (grpc_call *)gpr_arena_alloc(
+ arena, sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
call->arena = arena;
grpc_call_combiner_init(&call->call_combiner);
@@ -512,7 +512,7 @@ void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
static void release_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
- grpc_call *c = call;
+ grpc_call *c = (grpc_call *)call;
grpc_channel *channel = c->channel;
grpc_call_combiner_destroy(&c->call_combiner);
gpr_free((char *)c->peer_string);
@@ -525,7 +525,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
size_t i;
int ii;
- grpc_call *c = call;
+ grpc_call *c = (grpc_call *)call;
GPR_TIMER_BEGIN("destroy_call", 0);
for (i = 0; i < 2; i++) {
grpc_metadata_batch_destroy(
@@ -625,8 +625,8 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
// the filter stack.
static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *ignored) {
- grpc_transport_stream_op_batch *batch = arg;
- grpc_call *call = batch->handler_private.extra_arg;
+ grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
+ grpc_call *call = (grpc_call *)batch->handler_private.extra_arg;
GPR_TIMER_BEGIN("execute_batch", 0);
grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
@@ -1106,8 +1106,8 @@ static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
if (dest->count + b->list.count > dest->capacity) {
dest->capacity =
GPR_MAX(dest->capacity + b->list.count, dest->capacity * 3 / 2);
- dest->metadata =
- gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
+ dest->metadata = (grpc_metadata *)gpr_realloc(
+ dest->metadata, sizeof(grpc_metadata) * dest->capacity);
}
for (grpc_linked_mdelem *l = b->list.head; l != NULL; l = l->next) {
mdusr = &dest->metadata[dest->count++];
@@ -1158,7 +1158,7 @@ static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args,
grpc_metadata_batch *b) {
- grpc_call *call = args;
+ grpc_call *call = (grpc_call *)args;
if (b->idx.named.grpc_status != NULL) {
uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
grpc_error *error =
@@ -1242,7 +1242,8 @@ static batch_control *allocate_batch_control(grpc_call *call,
int slot = batch_slot_for_op(ops[0].op);
batch_control **pslot = &call->active_batches[slot];
if (*pslot == NULL) {
- *pslot = gpr_arena_alloc(call->arena, sizeof(batch_control));
+ *pslot =
+ (batch_control *)gpr_arena_alloc(call->arena, sizeof(batch_control));
}
batch_control *bctl = *pslot;
if (bctl->call != NULL) {
@@ -1256,7 +1257,7 @@ static batch_control *allocate_batch_control(grpc_call *call,
static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_cq_completion *storage) {
- batch_control *bctl = user_data;
+ batch_control *bctl = (batch_control *)user_data;
grpc_call *call = bctl->call;
bctl->call = NULL;
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
@@ -1399,7 +1400,7 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
grpc_byte_stream *bs = call->receiving_stream;
bool release_error = false;
@@ -1458,7 +1459,7 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx,
static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
if (error != GRPC_ERROR_NONE) {
if (call->receiving_stream != NULL) {
@@ -1484,7 +1485,7 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready");
receiving_stream_ready(exec_ctx, bctlp, error);
@@ -1593,7 +1594,7 @@ static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *bctlp, grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner,
@@ -1651,7 +1652,7 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete");
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 850fbe6a69..34548dac26 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -327,7 +327,7 @@ grpc_call *grpc_channel_create_pollset_set_call(
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
const char *host, void *reserved) {
- registered_call *rc = gpr_malloc(sizeof(registered_call));
+ registered_call *rc = (registered_call *)gpr_malloc(sizeof(registered_call));
GRPC_API_TRACE(
"grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
4, (channel, method, host, reserved));
@@ -354,7 +354,7 @@ grpc_call *grpc_channel_create_registered_call(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *completion_queue, void *registered_call_handle,
gpr_timespec deadline, void *reserved) {
- registered_call *rc = registered_call_handle;
+ registered_call *rc = (registered_call *)registered_call_handle;
GRPC_API_TRACE(
"grpc_channel_create_registered_call("
"channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
@@ -392,7 +392,7 @@ void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_channel *channel = arg;
+ grpc_channel *channel = (grpc_channel *)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
while (channel->registered_calls) {
registered_call *rc = channel->registered_calls;
diff --git a/src/core/lib/surface/channel_init.c b/src/core/lib/surface/channel_init.c
index a1391ffe56..33f444b89e 100644
--- a/src/core/lib/surface/channel_init.c
+++ b/src/core/lib/surface/channel_init.c
@@ -53,9 +53,9 @@ void grpc_channel_init_register_stage(grpc_channel_stack_type type,
GPR_ASSERT(!g_finalized);
if (g_slots[type].cap_slots == g_slots[type].num_slots) {
g_slots[type].cap_slots = GPR_MAX(8, 3 * g_slots[type].cap_slots / 2);
- g_slots[type].slots =
- gpr_realloc(g_slots[type].slots,
- g_slots[type].cap_slots * sizeof(*g_slots[type].slots));
+ g_slots[type].slots = (stage_slot *)gpr_realloc(
+ g_slots[type].slots,
+ g_slots[type].cap_slots * sizeof(*g_slots[type].slots));
}
stage_slot *s = &g_slots[type].slots[g_slots[type].num_slots++];
s->insertion_order = g_slots[type].num_slots;
@@ -65,8 +65,8 @@ void grpc_channel_init_register_stage(grpc_channel_stack_type type,
}
static int compare_slots(const void *a, const void *b) {
- const stage_slot *sa = a;
- const stage_slot *sb = b;
+ const stage_slot *sa = (const stage_slot *)a;
+ const stage_slot *sb = (const stage_slot *)b;
int c = GPR_ICMP(sa->priority, sb->priority);
if (c != 0) return c;
@@ -85,7 +85,7 @@ void grpc_channel_init_finalize(void) {
void grpc_channel_init_shutdown(void) {
for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
gpr_free(g_slots[i].slots);
- g_slots[i].slots = (void *)(uintptr_t)0xdeadbeef;
+ g_slots[i].slots = (stage_slot *)(void *)(uintptr_t)0xdeadbeef;
}
}
diff --git a/src/core/lib/surface/channel_ping.c b/src/core/lib/surface/channel_ping.c
index e85b308850..f45b568958 100644
--- a/src/core/lib/surface/channel_ping.c
+++ b/src/core/lib/surface/channel_ping.c
@@ -39,7 +39,7 @@ static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg,
}
static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- ping_result *pr = arg;
+ ping_result *pr = (ping_result *)arg;
grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy,
pr, &pr->completion_storage);
}
@@ -49,7 +49,7 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
GRPC_API_TRACE("grpc_channel_ping(channel=%p, cq=%p, tag=%p, reserved=%p)", 4,
(channel, cq, tag, reserved));
grpc_transport_op *op = grpc_make_transport_op(NULL);
- ping_result *pr = gpr_malloc(sizeof(*pr));
+ ping_result *pr = (ping_result *)gpr_malloc(sizeof(*pr));
grpc_channel_element *top_elem =
grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index a15b09e55d..bc57c6136b 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -421,8 +421,9 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
const cq_poller_vtable *poller_vtable =
&g_poller_vtable_by_poller_type[polling_type];
- cq = gpr_zalloc(sizeof(grpc_completion_queue) + vtable->data_size +
- poller_vtable->size());
+ cq = (grpc_completion_queue *)gpr_zalloc(sizeof(grpc_completion_queue) +
+ vtable->data_size +
+ poller_vtable->size());
cq->vtable = vtable;
cq->poller_vtable = poller_vtable;
@@ -442,7 +443,7 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
}
static void cq_init_next(void *ptr) {
- cq_next_data *cqd = ptr;
+ cq_next_data *cqd = (cq_next_data *)ptr;
/* Initial count is dropped by grpc_completion_queue_shutdown */
gpr_atm_no_barrier_store(&cqd->pending_events, 1);
cqd->shutdown_called = false;
@@ -451,13 +452,13 @@ static void cq_init_next(void *ptr) {
}
static void cq_destroy_next(void *ptr) {
- cq_next_data *cqd = ptr;
+ cq_next_data *cqd = (cq_next_data *)ptr;
GPR_ASSERT(cq_event_queue_num_items(&cqd->queue) == 0);
cq_event_queue_destroy(&cqd->queue);
}
static void cq_init_pluck(void *ptr) {
- cq_pluck_data *cqd = ptr;
+ cq_pluck_data *cqd = (cq_pluck_data *)ptr;
/* Initial count is dropped by grpc_completion_queue_shutdown */
gpr_atm_no_barrier_store(&cqd->pending_events, 1);
cqd->completed_tail = &cqd->completed_head;
@@ -469,7 +470,7 @@ static void cq_init_pluck(void *ptr) {
}
static void cq_destroy_pluck(void *ptr) {
- cq_pluck_data *cqd = ptr;
+ cq_pluck_data *cqd = (cq_pluck_data *)ptr;
GPR_ASSERT(cqd->completed_head.next == (uintptr_t)&cqd->completed_head);
}
@@ -502,7 +503,7 @@ void grpc_cq_internal_ref(grpc_completion_queue *cq) {
static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_completion_queue *cq = arg;
+ grpc_completion_queue *cq = (grpc_completion_queue *)arg;
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "pollset_destroy");
}
@@ -589,9 +590,9 @@ bool grpc_cq_begin_op(grpc_completion_queue *cq, void *tag) {
gpr_mu_lock(cq->mu);
if (cq->outstanding_tag_count == cq->outstanding_tag_capacity) {
cq->outstanding_tag_capacity = GPR_MAX(4, 2 * cq->outstanding_tag_capacity);
- cq->outstanding_tags =
- gpr_realloc(cq->outstanding_tags, sizeof(*cq->outstanding_tags) *
- cq->outstanding_tag_capacity);
+ cq->outstanding_tags = (void **)gpr_realloc(
+ cq->outstanding_tags,
+ sizeof(*cq->outstanding_tags) * cq->outstanding_tag_capacity);
}
cq->outstanding_tags[cq->outstanding_tag_count++] = tag;
gpr_mu_unlock(cq->mu);
@@ -767,7 +768,7 @@ typedef struct {
} cq_is_finished_arg;
static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
- cq_is_finished_arg *a = arg;
+ cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
cq_next_data *cqd = DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
@@ -1018,7 +1019,7 @@ static void del_plucker(grpc_completion_queue *cq, void *tag,
}
static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
- cq_is_finished_arg *a = arg;
+ cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
cq_pluck_data *cqd = DATA_FROM_CQ(cq);
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 8582d826ca..f95cc10a6a 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -250,7 +250,8 @@ static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
count++;
}
cb->num_channels = count;
- cb->channels = gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
+ cb->channels =
+ (grpc_channel **)gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
count = 0;
for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
cb->channels[count++] = c->channel;
@@ -265,14 +266,15 @@ struct shutdown_cleanup_args {
static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- struct shutdown_cleanup_args *a = arg;
+ struct shutdown_cleanup_args *a = (struct shutdown_cleanup_args *)arg;
grpc_slice_unref_internal(exec_ctx, a->slice);
gpr_free(a);
}
static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
bool send_goaway, grpc_error *send_disconnect) {
- struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc));
+ struct shutdown_cleanup_args *sc =
+ (struct shutdown_cleanup_args *)gpr_malloc(sizeof(*sc));
GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc,
grpc_schedule_on_exec_ctx);
grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
@@ -314,8 +316,8 @@ static void request_matcher_init(request_matcher *rm, size_t entries,
grpc_server *server) {
memset(rm, 0, sizeof(*rm));
rm->server = server;
- rm->requests_per_cq =
- gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count);
+ rm->requests_per_cq = (gpr_stack_lockfree **)gpr_malloc(
+ sizeof(*rm->requests_per_cq) * server->cq_count);
for (size_t i = 0; i < server->cq_count; i++) {
rm->requests_per_cq[i] = gpr_stack_lockfree_create(entries);
}
@@ -426,7 +428,7 @@ static void orphan_channel(channel_data *chand) {
static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
grpc_error *error) {
- channel_data *chand = cd;
+ channel_data *chand = (channel_data *)cd;
grpc_server *server = chand->server;
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
server_unref(exec_ctx, server);
@@ -459,7 +461,7 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
grpc_cq_completion *c) {
- requested_call *rc = req;
+ requested_call *rc = (requested_call *)req;
grpc_server *server = rc->server;
if (rc >= server->requested_calls_per_cq[rc->cq_idx] &&
@@ -505,7 +507,7 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
server_ref(chand->server);
grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE,
done_request_event, rc, &rc->completion);
@@ -513,9 +515,9 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *call_elem = arg;
- call_data *calld = call_elem->call_data;
- channel_data *chand = call_elem->channel_data;
+ grpc_call_element *call_elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)call_elem->call_data;
+ channel_data *chand = (channel_data *)call_elem->channel_data;
request_matcher *rm = calld->request_matcher;
grpc_server *server = rm->server;
@@ -566,7 +568,7 @@ static void finish_start_new_rpc(
grpc_exec_ctx *exec_ctx, grpc_server *server, grpc_call_element *elem,
request_matcher *rm,
grpc_server_register_method_payload_handling payload_handling) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
gpr_mu_lock(&calld->mu_state);
@@ -599,8 +601,8 @@ static void finish_start_new_rpc(
}
static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_server *server = chand->server;
uint32_t i;
uint32_t hash;
@@ -732,8 +734,8 @@ static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
- grpc_call_element *elem = ptr;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)ptr;
+ call_data *calld = (call_data *)elem->call_data;
gpr_timespec op_deadline;
if (error == GRPC_ERROR_NONE) {
@@ -771,7 +773,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags == NULL);
@@ -795,8 +797,8 @@ static void server_start_transport_stream_op_batch(
static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
- grpc_call_element *elem = ptr;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)ptr;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
start_new_rpc(exec_ctx, elem);
} else {
@@ -822,7 +824,7 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
grpc_transport *transport,
const void *transport_server_data) {
- channel_data *chand = cd;
+ channel_data *chand = (channel_data *)cd;
/* create a call */
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
@@ -838,7 +840,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
GRPC_ERROR_UNREF(error);
return;
}
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
@@ -852,7 +854,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
grpc_error *error) {
- channel_data *chand = cd;
+ channel_data *chand = (channel_data *)cd;
grpc_server *server = chand->server;
if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
grpc_transport_op *op = grpc_make_transport_op(NULL);
@@ -873,8 +875,8 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
memset(calld, 0, sizeof(call_data));
calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
calld->call = grpc_call_from_top_element(elem);
@@ -891,8 +893,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
GPR_ASSERT(calld->state != PENDING);
@@ -913,7 +915,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(args->is_first);
GPR_ASSERT(!args->is_last);
chand->server = NULL;
@@ -930,7 +932,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
size_t i;
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->registered_methods) {
for (i = 0; i < chand->registered_method_slots; i++) {
grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].method);
@@ -976,8 +978,8 @@ static void register_completion_queue(grpc_server *server,
GRPC_CQ_INTERNAL_REF(cq, "server");
n = server->cq_count++;
- server->cqs = gpr_realloc(server->cqs,
- server->cq_count * sizeof(grpc_completion_queue *));
+ server->cqs = (grpc_completion_queue **)gpr_realloc(
+ server->cqs, server->cq_count * sizeof(grpc_completion_queue *));
server->cqs[n] = cq;
}
@@ -1002,7 +1004,7 @@ void grpc_server_register_completion_queue(grpc_server *server,
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
- grpc_server *server = gpr_zalloc(sizeof(grpc_server));
+ grpc_server *server = (grpc_server *)gpr_zalloc(sizeof(grpc_server));
gpr_mu_init(&server->mu_global);
gpr_mu_init(&server->mu_call);
@@ -1053,7 +1055,7 @@ void *grpc_server_register_method(
flags);
return NULL;
}
- m = gpr_zalloc(sizeof(registered_method));
+ m = (registered_method *)gpr_zalloc(sizeof(registered_method));
m->method = gpr_strdup(method);
m->host = gpr_strdup(host);
m->next = server->registered_methods;
@@ -1065,7 +1067,7 @@ void *grpc_server_register_method(
static void start_listeners(grpc_exec_ctx *exec_ctx, void *s,
grpc_error *error) {
- grpc_server *server = s;
+ grpc_server *server = (grpc_server *)s;
for (listener *l = server->listeners; l; l = l->next) {
l->start(exec_ctx, server, l->arg, server->pollsets, server->pollset_count);
}
@@ -1086,11 +1088,12 @@ void grpc_server_start(grpc_server *server) {
server->started = true;
server->pollset_count = 0;
- server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
- server->request_freelist_per_cq =
- gpr_malloc(sizeof(*server->request_freelist_per_cq) * server->cq_count);
- server->requested_calls_per_cq =
- gpr_malloc(sizeof(*server->requested_calls_per_cq) * server->cq_count);
+ server->pollsets =
+ (grpc_pollset **)gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
+ server->request_freelist_per_cq = (gpr_stack_lockfree **)gpr_malloc(
+ sizeof(*server->request_freelist_per_cq) * server->cq_count);
+ server->requested_calls_per_cq = (requested_call **)gpr_malloc(
+ sizeof(*server->requested_calls_per_cq) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
if (grpc_cq_can_listen(server->cqs[i])) {
server->pollsets[server->pollset_count++] =
@@ -1101,9 +1104,9 @@ void grpc_server_start(grpc_server *server) {
for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
}
- server->requested_calls_per_cq[i] =
- gpr_malloc((size_t)server->max_requested_calls_per_cq *
- sizeof(*server->requested_calls_per_cq[i]));
+ server->requested_calls_per_cq[i] = (requested_call *)gpr_malloc(
+ (size_t)server->max_requested_calls_per_cq *
+ sizeof(*server->requested_calls_per_cq[i]));
}
request_matcher_init(&server->unregistered_request_matcher,
(size_t)server->max_requested_calls_per_cq, server);
@@ -1114,9 +1117,11 @@ void grpc_server_start(grpc_server *server) {
server_ref(server);
server->starting = true;
- GRPC_CLOSURE_SCHED(&exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server,
- grpc_executor_scheduler),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ &exec_ctx,
+ GRPC_CLOSURE_CREATE(start_listeners, server,
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)),
+ GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -1171,7 +1176,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
if (num_registered_methods > 0) {
slots = 2 * num_registered_methods;
alloc = sizeof(channel_registered_method) * slots;
- chand->registered_methods = gpr_zalloc(alloc);
+ chand->registered_methods = (channel_registered_method *)gpr_zalloc(alloc);
for (rm = s->registered_methods; rm; rm = rm->next) {
grpc_slice host;
bool has_host;
@@ -1232,7 +1237,7 @@ void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg,
static void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s,
grpc_error *error) {
- grpc_server *server = s;
+ grpc_server *server = (grpc_server *)s;
gpr_mu_lock(&server->mu_global);
server->listeners_destroyed++;
maybe_finish_shutdown(exec_ctx, server);
@@ -1264,9 +1269,9 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
gpr_mu_unlock(&server->mu_global);
goto done;
}
- server->shutdown_tags =
- gpr_realloc(server->shutdown_tags,
- sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
+ server->shutdown_tags = (shutdown_tag *)gpr_realloc(
+ server->shutdown_tags,
+ sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
sdt = &server->shutdown_tags[server->num_shutdown_tags++];
sdt->tag = tag;
sdt->cq = cq;
@@ -1349,7 +1354,7 @@ void grpc_server_add_listener(
grpc_pollset **pollsets, size_t pollset_count),
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
grpc_closure *on_done)) {
- listener *l = gpr_malloc(sizeof(listener));
+ listener *l = (listener *)gpr_malloc(sizeof(listener));
l->arg = arg;
l->start = start;
l->destroy = destroy;
@@ -1426,7 +1431,7 @@ grpc_call_error grpc_server_request_call(
grpc_completion_queue *cq_for_notification, void *tag) {
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- requested_call *rc = gpr_malloc(sizeof(*rc));
+ requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
@@ -1471,8 +1476,8 @@ grpc_call_error grpc_server_request_registered_call(
grpc_completion_queue *cq_for_notification, void *tag) {
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- requested_call *rc = gpr_malloc(sizeof(*rc));
- registered_method *rm = rmp;
+ requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
+ registered_method *rm = (registered_method *)rmp;
GRPC_API_TRACE(
"grpc_server_request_registered_call("
"server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index 73a9178ae2..f328a6cdbb 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -148,7 +148,8 @@ bool grpc_connectivity_state_notify_on_state_change(
GRPC_CLOSURE_SCHED(exec_ctx, notify,
GRPC_ERROR_REF(tracker->current_error));
} else {
- grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
+ grpc_connectivity_state_watcher *w =
+ (grpc_connectivity_state_watcher *)gpr_malloc(sizeof(*w));
w->current = current;
w->notify = notify;
w->next = tracker->watchers;
diff --git a/src/core/lib/transport/metadata.c b/src/core/lib/transport/metadata.c
index 2fea366072..188b485625 100644
--- a/src/core/lib/transport/metadata.c
+++ b/src/core/lib/transport/metadata.c
@@ -117,7 +117,8 @@ void grpc_mdctx_global_init(void) {
shard->count = 0;
gpr_atm_no_barrier_store(&shard->free_estimate, 0);
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->elems = gpr_zalloc(sizeof(*shard->elems) * shard->capacity);
+ shard->elems = (interned_metadata **)gpr_zalloc(sizeof(*shard->elems) *
+ shard->capacity);
}
}
@@ -204,7 +205,8 @@ static void grow_mdtab(mdtab_shard *shard) {
GPR_TIMER_BEGIN("grow_mdtab", 0);
- mdtab = gpr_zalloc(sizeof(interned_metadata *) * capacity);
+ mdtab =
+ (interned_metadata **)gpr_zalloc(sizeof(interned_metadata *) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (md = shard->elems[i]; md; md = next) {
@@ -243,7 +245,8 @@ grpc_mdelem grpc_mdelem_create(
GRPC_MDELEM_STORAGE_EXTERNAL);
}
- allocated_metadata *allocated = gpr_malloc(sizeof(*allocated));
+ allocated_metadata *allocated =
+ (allocated_metadata *)gpr_malloc(sizeof(*allocated));
allocated->key = grpc_slice_ref_internal(key);
allocated->value = grpc_slice_ref_internal(value);
gpr_atm_rel_store(&allocated->refcnt, 1);
@@ -292,7 +295,7 @@ grpc_mdelem grpc_mdelem_create(
}
/* not found: create a new pair */
- md = gpr_malloc(sizeof(interned_metadata));
+ md = (interned_metadata *)gpr_malloc(sizeof(interned_metadata));
gpr_atm_rel_store(&md->refcnt, 1);
md->key = grpc_slice_ref_internal(key);
md->value = grpc_slice_ref_internal(value);
diff --git a/src/core/lib/transport/service_config.c b/src/core/lib/transport/service_config.c
index 0379d0010d..070a13a2b4 100644
--- a/src/core/lib/transport/service_config.c
+++ b/src/core/lib/transport/service_config.c
@@ -59,7 +59,8 @@ struct grpc_service_config {
};
grpc_service_config* grpc_service_config_create(const char* json_string) {
- grpc_service_config* service_config = gpr_malloc(sizeof(*service_config));
+ grpc_service_config* service_config =
+ (grpc_service_config*)gpr_malloc(sizeof(*service_config));
service_config->json_string = gpr_strdup(json_string);
service_config->json_tree =
grpc_json_parse_string(service_config->json_string);
@@ -198,7 +199,8 @@ grpc_slice_hash_table* grpc_service_config_create_method_config_table(
num_entries += count_names_in_method_config_json(method);
}
// Populate method config table entries.
- entries = gpr_malloc(num_entries * sizeof(grpc_slice_hash_table_entry));
+ entries = (grpc_slice_hash_table_entry*)gpr_malloc(
+ num_entries * sizeof(grpc_slice_hash_table_entry));
size_t idx = 0;
for (grpc_json* method = field->child; method != NULL;
method = method->next) {
@@ -230,7 +232,7 @@ void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
char* path_str = grpc_slice_to_c_string(path);
const char* sep = strrchr(path_str, '/') + 1;
const size_t len = (size_t)(sep - path_str);
- char* buf = gpr_malloc(len + 2); // '*' and NUL
+ char* buf = (char*)gpr_malloc(len + 2); // '*' and NUL
memcpy(buf, path_str, len);
buf[len] = '*';
buf[len + 1] = '\0';
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 650b0559aa..caa11a956e 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -72,7 +72,8 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
cope with.
Throw this over to the executor (on a core-owned thread) and process it
there. */
- refcount->destroy.scheduler = grpc_executor_scheduler;
+ refcount->destroy.scheduler =
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
}
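
The other recurring change, seen in this hunk and in the grpc_server_start hunk earlier, is that grpc_executor_scheduler is no longer referenced as a scheduler object but is called with GRPC_EXECUTOR_SHORT, which reads as selecting an executor scheduler per call site by an expected-job-length hint. The sketch below only mirrors that call-site shape under that assumption; closure_scheduler, executor_scheduler, EXEC_SHORT and EXEC_LONG are hypothetical stand-ins, not gRPC's actual definitions.

/* Hedged sketch of the call-site change: a global scheduler reference is
 * replaced by a factory call taking a job-length hint. */
#include <stdio.h>

typedef struct closure_scheduler { const char *name; } closure_scheduler;

typedef enum { EXEC_SHORT, EXEC_LONG } exec_job_length;

static closure_scheduler short_sched = {"executor/short"};
static closure_scheduler long_sched = {"executor/long"};

/* New-style accessor: choose a scheduler based on how long the closure is
 * expected to run. */
static closure_scheduler *executor_scheduler(exec_job_length length) {
  return length == EXEC_SHORT ? &short_sched : &long_sched;
}

int main(void) {
  /* before: scheduler = &global_executor_scheduler;     */
  /* after:  scheduler = executor_scheduler(EXEC_SHORT); */
  closure_scheduler *scheduler = executor_scheduler(EXEC_SHORT);
  printf("scheduling destroy closure on %s\n", scheduler->name);
  return 0;
}
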
@@ -242,13 +243,13 @@ typedef struct {
static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- made_transport_op *op = arg;
+ made_transport_op *op = (made_transport_op *)arg;
GRPC_CLOSURE_SCHED(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
gpr_free(op);
}
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
- made_transport_op *op = gpr_malloc(sizeof(*op));
+ made_transport_op *op = (made_transport_op *)gpr_malloc(sizeof(*op));
GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_op, op,
grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete;
@@ -266,7 +267,7 @@ typedef struct {
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- made_transport_stream_op *op = arg;
+ made_transport_stream_op *op = (made_transport_stream_op *)arg;
grpc_closure *c = op->inner_on_complete;
gpr_free(op);
GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_REF(error));
@@ -274,7 +275,8 @@ static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
grpc_closure *on_complete) {
- made_transport_stream_op *op = gpr_zalloc(sizeof(*op));
+ made_transport_stream_op *op =
+ (made_transport_stream_op *)gpr_zalloc(sizeof(*op));
op->op.payload = &op->payload;
GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_stream_op,
op, grpc_schedule_on_exec_ctx);