Diffstat (limited to 'src/core')
53 files changed, 1731 insertions, 1021 deletions
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c index b80d831557..fc29dbd454 100644 --- a/src/core/ext/census/grpc_filter.c +++ b/src/core/ext/census/grpc_filter.c @@ -138,7 +138,7 @@ static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx, static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *ignored) { + grpc_closure *ignored) { call_data *d = elem->call_data; GPR_ASSERT(d != NULL); /* TODO(hongyu): record rpc client stats and census_rpc_end_op here */ @@ -160,7 +160,7 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx, static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *ignored) { + grpc_closure *ignored) { call_data *d = elem->call_data; GPR_ASSERT(d != NULL); /* TODO(hongyu): record rpc server stats and census_tracing_end_op here */ diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index bf64f84772..960d00e815 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -71,7 +71,8 @@ */ typedef enum { - WAIT_FOR_READY_UNSET, + /* zero so it can be default initialized */ + WAIT_FOR_READY_UNSET = 0, WAIT_FOR_READY_FALSE, WAIT_FOR_READY_TRUE } wait_for_ready_value; @@ -631,7 +632,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx, #define CANCELLED_CALL ((grpc_subchannel_call *)1) typedef enum { - GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING, + /* zero so that it can be default-initialized */ + GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING = 0, GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL } subchannel_creation_phase; @@ -653,13 +655,13 @@ typedef struct client_channel_call_data { gpr_timespec call_start_time; gpr_timespec deadline; method_parameters *method_params; - grpc_closure read_service_config; grpc_error *cancel_error; /** either 0 for no call, 1 for cancelled, or a pointer to a grpc_subchannel_call */ gpr_atm subchannel_call; + gpr_arena *arena; subchannel_creation_phase creation_phase; grpc_connected_subchannel *connected_subchannel; @@ -726,6 +728,47 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) { gpr_free(ops); } +// Sets calld->method_params. +// If the method params specify a timeout, populates +// *per_method_deadline and returns true. 
+static bool set_call_method_params_from_service_config_locked( + grpc_exec_ctx *exec_ctx, grpc_call_element *elem, + gpr_timespec *per_method_deadline) { + channel_data *chand = elem->channel_data; + call_data *calld = elem->call_data; + if (chand->method_params_table != NULL) { + calld->method_params = grpc_method_config_table_get( + exec_ctx, chand->method_params_table, calld->path); + if (calld->method_params != NULL) { + method_parameters_ref(calld->method_params); + if (gpr_time_cmp(calld->method_params->timeout, + gpr_time_0(GPR_TIMESPAN)) != 0) { + *per_method_deadline = + gpr_time_add(calld->call_start_time, calld->method_params->timeout); + return true; + } + } + } + return false; +} + +static void apply_final_configuration_locked(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem) { + /* apply service-config level configuration to the call (now that we're + * certain it exists) */ + call_data *calld = elem->call_data; + gpr_timespec per_method_deadline; + if (set_call_method_params_from_service_config_locked(exec_ctx, elem, + &per_method_deadline)) { + // If the deadline from the service config is shorter than the one + // from the client API, reset the deadline timer. + if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) { + calld->deadline = per_method_deadline; + grpc_deadline_state_reset(exec_ctx, elem, calld->deadline); + } + } +} + static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_call_element *elem = arg; @@ -754,9 +797,14 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg, } else { /* Create call on subchannel. */ grpc_subchannel_call *subchannel_call = NULL; + const grpc_connected_subchannel_call_args call_args = { + .pollent = calld->pollent, + .path = calld->path, + .start_time = calld->call_start_time, + .deadline = calld->deadline, + .arena = calld->arena}; grpc_error *new_error = grpc_connected_subchannel_create_call( - exec_ctx, calld->connected_subchannel, calld->pollent, calld->path, - calld->call_start_time, calld->deadline, &subchannel_call); + exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call); if (new_error != GRPC_ERROR_NONE) { new_error = grpc_error_add_child(new_error, error); subchannel_call = CANCELLED_CALL; @@ -851,6 +899,7 @@ static bool pick_subchannel_locked( } GPR_ASSERT(error == GRPC_ERROR_NONE); if (chand->lb_policy != NULL) { + apply_final_configuration_locked(exec_ctx, elem); grpc_lb_policy *lb_policy = chand->lb_policy; GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel"); // If the application explicitly set wait_for_ready, use that. 
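Editorial note (not part of the patch): the enum changes near the top of this file pin WAIT_FOR_READY_UNSET and GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING to 0 because, with the arena-based call data introduced in this change, per-call state is expected to come back zero-initialized from its allocator rather than being set field by field in cc_init_call_elem (see the constructor hunk further below). A minimal standalone C sketch of why pinning the first enumerator to zero gives that behavior; all names here are placeholders, not the real call_data layout:

#include <stdio.h>
#include <string.h>

/* Same idea as the patch: the first enumerator is explicitly 0, so any
   zero-filled allocation starts in the "unset" state with no extra code. */
typedef enum {
  EXAMPLE_WAIT_FOR_READY_UNSET = 0,
  EXAMPLE_WAIT_FOR_READY_FALSE,
  EXAMPLE_WAIT_FOR_READY_TRUE
} example_wait_for_ready_value;

typedef struct {
  example_wait_for_ready_value wait_for_ready;
  /* ... other per-call fields ... */
} example_call_data;

int main(void) {
  example_call_data calld;
  /* Stands in for whatever zeroing the real allocator performs. */
  memset(&calld, 0, sizeof(calld));
  printf("%s\n", calld.wait_for_ready == EXAMPLE_WAIT_FOR_READY_UNSET
                     ? "default state is UNSET"
                     : "unexpected");
  return 0;
}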
@@ -982,9 +1031,14 @@ static void start_transport_stream_op_locked_inner(grpc_exec_ctx *exec_ctx, if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING && calld->connected_subchannel != NULL) { grpc_subchannel_call *subchannel_call = NULL; + const grpc_connected_subchannel_call_args call_args = { + .pollent = calld->pollent, + .path = calld->path, + .start_time = calld->call_start_time, + .deadline = calld->deadline, + .arena = calld->arena}; grpc_error *error = grpc_connected_subchannel_create_call( - exec_ctx, calld->connected_subchannel, calld->pollent, calld->path, - calld->call_start_time, calld->deadline, &subchannel_call); + exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call); if (error != GRPC_ERROR_NONE) { subchannel_call = CANCELLED_CALL; fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error)); @@ -1060,114 +1114,19 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx, GPR_TIMER_END("cc_start_transport_stream_op", 0); } -// Sets calld->method_params. -// If the method params specify a timeout, populates -// *per_method_deadline and returns true. -static bool set_call_method_params_from_service_config_locked( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - gpr_timespec *per_method_deadline) { - channel_data *chand = elem->channel_data; - call_data *calld = elem->call_data; - if (chand->method_params_table != NULL) { - calld->method_params = grpc_method_config_table_get( - exec_ctx, chand->method_params_table, calld->path); - if (calld->method_params != NULL) { - method_parameters_ref(calld->method_params); - if (gpr_time_cmp(calld->method_params->timeout, - gpr_time_0(GPR_TIMESPAN)) != 0) { - *per_method_deadline = - gpr_time_add(calld->call_start_time, calld->method_params->timeout); - return true; - } - } - } - return false; -} - -// Gets data from the service config. Invoked when the resolver returns -// its initial result. -static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = arg; - call_data *calld = elem->call_data; - // If this is an error, there's no point in looking at the service config. - if (error == GRPC_ERROR_NONE) { - gpr_timespec per_method_deadline; - if (set_call_method_params_from_service_config_locked( - exec_ctx, elem, &per_method_deadline)) { - // If the deadline from the service config is shorter than the one - // from the client API, reset the deadline timer. - if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) { - calld->deadline = per_method_deadline; - grpc_deadline_state_reset(exec_ctx, elem, calld->deadline); - } - } - } - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config"); -} - -static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error_ignored) { - grpc_call_element *elem = arg; - channel_data *chand = elem->channel_data; - call_data *calld = elem->call_data; - // If the resolver has already returned results, then we can access - // the service config parameters immediately. Otherwise, we need to - // defer that work until the resolver returns an initial result. - if (chand->lb_policy != NULL) { - // We already have a resolver result, so check for service config. 
- gpr_timespec per_method_deadline; - if (set_call_method_params_from_service_config_locked( - exec_ctx, elem, &per_method_deadline)) { - calld->deadline = gpr_time_min(calld->deadline, per_method_deadline); - } - } else { - // We don't yet have a resolver result, so register a callback to - // get the service config data once the resolver returns. - // Take a reference to the call stack to be owned by the callback. - GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config"); - grpc_closure_init(&calld->read_service_config, read_service_config_locked, - elem, grpc_combiner_scheduler(chand->combiner, false)); - grpc_closure_list_append(&chand->waiting_for_config_closures, - &calld->read_service_config, GRPC_ERROR_NONE); - } - // Start the deadline timer with the current deadline value. If we - // do not yet have service config data, then the timer may be reset - // later. - grpc_deadline_state_start(exec_ctx, elem, calld->deadline); - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, - "initial_read_service_config"); -} - /* Constructor for call_data */ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_element_args *args) { - channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; // Initialize data members. grpc_deadline_state_init(exec_ctx, elem, args->call_stack); calld->path = grpc_slice_ref_internal(args->path); calld->call_start_time = args->start_time; calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); - calld->method_params = NULL; - calld->cancel_error = GRPC_ERROR_NONE; - gpr_atm_rel_store(&calld->subchannel_call, 0); - calld->connected_subchannel = NULL; - calld->waiting_ops = NULL; - calld->waiting_ops_count = 0; - calld->waiting_ops_capacity = 0; - calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING; calld->owning_call = args->call_stack; - calld->pollent = NULL; - GRPC_CALL_STACK_REF(calld->owning_call, "initial_read_service_config"); - grpc_closure_sched( - exec_ctx, - grpc_closure_init(&calld->read_service_config, - initial_read_service_config_locked, elem, - grpc_combiner_scheduler(chand->combiner, false)), - GRPC_ERROR_NONE); + calld->arena = args->arena; + grpc_deadline_state_start(exec_ctx, elem, calld->deadline); return GRPC_ERROR_NONE; } @@ -1175,7 +1134,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *and_free_memory) { + grpc_closure *then_schedule_closure) { call_data *calld = elem->call_data; grpc_deadline_state_destroy(exec_ctx, elem); grpc_slice_unref_internal(exec_ctx, calld->path); @@ -1185,6 +1144,8 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx, GRPC_ERROR_UNREF(calld->cancel_error); grpc_subchannel_call *call = GET_CALL(calld); if (call != NULL && call != CANCELLED_CALL) { + grpc_subchannel_call_set_cleanup_closure(call, then_schedule_closure); + then_schedule_closure = NULL; GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call"); } GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING); @@ -1194,7 +1155,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx, "picked"); } gpr_free(calld->waiting_ops); - gpr_free(and_free_memory); + grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE); } static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, diff --git a/src/core/ext/client_channel/parse_address.c 
b/src/core/ext/client_channel/parse_address.c index 8ae15fc72b..cd1b2cd80c 100644 --- a/src/core/ext/client_channel/parse_address.c +++ b/src/core/ext/client_channel/parse_address.c @@ -128,6 +128,7 @@ int parse_ipv6(grpc_uri *uri, grpc_resolved_address *resolved_addr) { GPR_ASSERT(host_end >= host); char host_without_scope[INET6_ADDRSTRLEN]; size_t host_without_scope_len = (size_t)(host_end - host); + uint32_t sin6_scope_id = 0; strncpy(host_without_scope, host, host_without_scope_len); host_without_scope[host_without_scope_len] = '\0'; if (inet_pton(AF_INET6, host_without_scope, &in6->sin6_addr) == 0) { @@ -136,10 +137,12 @@ int parse_ipv6(grpc_uri *uri, grpc_resolved_address *resolved_addr) { } if (gpr_parse_bytes_to_uint32(host_end + 1, strlen(host) - host_without_scope_len - 1, - &in6->sin6_scope_id) == 0) { + &sin6_scope_id) == 0) { gpr_log(GPR_ERROR, "invalid ipv6 scope id: '%s'", host_end + 1); goto done; } + // Handle "sin6_scope_id" being type "u_long". See grpc issue ##10027. + in6->sin6_scope_id = sin6_scope_id; } else { if (inet_pton(AF_INET6, host, &in6->sin6_addr) == 0) { gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host); diff --git a/src/core/ext/client_channel/proxy_mapper_registry.c b/src/core/ext/client_channel/proxy_mapper_registry.c index 2c44b9d490..0935ddbdbd 100644 --- a/src/core/ext/client_channel/proxy_mapper_registry.c +++ b/src/core/ext/client_channel/proxy_mapper_registry.c @@ -94,6 +94,14 @@ static void grpc_proxy_mapper_list_destroy(grpc_proxy_mapper_list* list) { grpc_proxy_mapper_destroy(list->list[i]); } gpr_free(list->list); + // Clean up in case we re-initialze later. + // TODO(ctiller): This should ideally live in + // grpc_proxy_mapper_registry_init(). However, if we did this there, + // then we would do it AFTER we start registering proxy mappers from + // third-party plugins, so they'd never show up (and would leak memory). + // We probably need some sort of dependency system for plugins to fix + // this. 
+ memset(list, 0, sizeof(*list)); } // @@ -102,9 +110,7 @@ static void grpc_proxy_mapper_list_destroy(grpc_proxy_mapper_list* list) { static grpc_proxy_mapper_list g_proxy_mapper_list; -void grpc_proxy_mapper_registry_init() { - memset(&g_proxy_mapper_list, 0, sizeof(g_proxy_mapper_list)); -} +void grpc_proxy_mapper_registry_init() {} void grpc_proxy_mapper_registry_shutdown() { grpc_proxy_mapper_list_destroy(&g_proxy_mapper_list); diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c index 5df0a9060d..ed5029ea9a 100644 --- a/src/core/ext/client_channel/subchannel.c +++ b/src/core/ext/client_channel/subchannel.c @@ -148,6 +148,7 @@ struct grpc_subchannel { struct grpc_subchannel_call { grpc_connected_subchannel *connection; + grpc_closure *schedule_closure_after_destroy; }; #define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1)) @@ -340,17 +341,15 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx, GPR_ASSERT(new_address != NULL); gpr_free(addr); addr = new_address; - if (new_args != NULL) c->args = new_args; - } - if (c->args == NULL) { - static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS}; - grpc_arg new_arg = grpc_create_subchannel_address_arg(addr); - c->args = grpc_channel_args_copy_and_add_and_remove( - args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &new_arg, - 1); - gpr_free(new_arg.value.string); } + static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS}; + grpc_arg new_arg = grpc_create_subchannel_address_arg(addr); gpr_free(addr); + c->args = grpc_channel_args_copy_and_add_and_remove( + new_args != NULL ? new_args : args->args, keys_to_remove, + GPR_ARRAY_SIZE(keys_to_remove), &new_arg, 1); + gpr_free(new_arg.value.string); + if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args); c->root_external_state_watcher.next = c->root_external_state_watcher.prev = &c->root_external_state_watcher; grpc_closure_init(&c->connected, subchannel_connected, c, @@ -719,13 +718,22 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg, static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call, grpc_error *error) { grpc_subchannel_call *c = call; + GPR_ASSERT(c->schedule_closure_after_destroy != NULL); GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0); grpc_connected_subchannel *connection = c->connection; - grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL, c); + grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL, + c->schedule_closure_after_destroy); GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, connection, "subchannel_call"); GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0); } +void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call *call, + grpc_closure *closure) { + GPR_ASSERT(call->schedule_closure_after_destroy == NULL); + GPR_ASSERT(closure != NULL); + call->schedule_closure_after_destroy = closure; +} + void grpc_subchannel_call_ref( grpc_subchannel_call *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON); @@ -761,15 +769,22 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel( grpc_error *grpc_connected_subchannel_create_call( grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con, - grpc_polling_entity *pollent, grpc_slice path, gpr_timespec start_time, - gpr_timespec deadline, grpc_subchannel_call **call) { + const grpc_connected_subchannel_call_args *args, + grpc_subchannel_call **call) { 
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con); - *call = gpr_zalloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size); + *call = gpr_arena_alloc( + args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size); grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call); (*call)->connection = con; // Ref is added below. - grpc_error *error = - grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, *call, - NULL, NULL, path, start_time, deadline, callstk); + const grpc_call_element_args call_args = {.call_stack = callstk, + .server_transport_data = NULL, + .context = NULL, + .path = args->path, + .start_time = args->start_time, + .deadline = args->deadline, + .arena = args->arena}; + grpc_error *error = grpc_call_stack_init( + exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args); if (error != GRPC_ERROR_NONE) { const char *error_string = grpc_error_string(error); gpr_log(GPR_ERROR, "error: %s", error_string); @@ -778,7 +793,7 @@ grpc_error *grpc_connected_subchannel_create_call( return error; } GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call"); - grpc_call_stack_set_pollset_or_pollset_set(exec_ctx, callstk, pollent); + grpc_call_stack_set_pollset_or_pollset_set(exec_ctx, callstk, args->pollent); return GRPC_ERROR_NONE; } diff --git a/src/core/ext/client_channel/subchannel.h b/src/core/ext/client_channel/subchannel.h index 6a70a76467..3e64a2507c 100644 --- a/src/core/ext/client_channel/subchannel.h +++ b/src/core/ext/client_channel/subchannel.h @@ -37,6 +37,7 @@ #include "src/core/ext/client_channel/connector.h" #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/iomgr/polling_entity.h" +#include "src/core/lib/support/arena.h" #include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/metadata.h" @@ -112,10 +113,18 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx, GRPC_SUBCHANNEL_REF_EXTRA_ARGS); /** construct a subchannel call */ +typedef struct { + grpc_polling_entity *pollent; + grpc_slice path; + gpr_timespec start_time; + gpr_timespec deadline; + gpr_arena *arena; +} grpc_connected_subchannel_call_args; + grpc_error *grpc_connected_subchannel_create_call( grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel, - grpc_polling_entity *pollent, grpc_slice path, gpr_timespec start_time, - gpr_timespec deadline, grpc_subchannel_call **subchannel_call); + const grpc_connected_subchannel_call_args *args, + grpc_subchannel_call **subchannel_call); /** process a transport level op */ void grpc_connected_subchannel_process_transport_op( @@ -154,6 +163,11 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx, char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx, grpc_subchannel_call *subchannel_call); +/** Must be called once per call. Sets the 'then_schedule_closure' argument for + call stack destruction. 
*/ +void grpc_subchannel_call_set_cleanup_closure( + grpc_subchannel_call *subchannel_call, grpc_closure *closure); + grpc_call_stack *grpc_subchannel_call_get_call_stack( grpc_subchannel_call *subchannel_call); diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c index c2750634a5..4ed832671d 100644 --- a/src/core/ext/load_reporting/load_reporting_filter.c +++ b/src/core/ext/load_reporting/load_reporting_filter.c @@ -123,7 +123,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, /* Destructor for call_data */ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *ignored) { + grpc_closure *ignored) { call_data *calld = elem->call_data; /* TODO(dgq): do something with the data diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index da4c7dc7b2..082078c72f 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -511,6 +511,10 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_error *error) { if (!t->closed) { + if (!grpc_error_has_clear_grpc_status(error)) { + error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_UNAVAILABLE); + } if (t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE) { if (t->close_transport_on_writes_finished == NULL) { t->close_transport_on_writes_finished = @@ -520,10 +524,6 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx, grpc_error_add_child(t->close_transport_on_writes_finished, error); return; } - if (!grpc_error_has_clear_grpc_status(error)) { - error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, - GRPC_STATUS_UNAVAILABLE); - } t->closed = 1; connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error), "close_transport"); @@ -575,7 +575,7 @@ void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s) { static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, grpc_stream *gs, grpc_stream_refcount *refcount, - const void *server_data) { + const void *server_data, gpr_arena *arena) { GPR_TIMER_BEGIN("init_stream", 0); grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs; @@ -588,8 +588,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, gpr_ref_init(&s->active_streams, 1); GRPC_CHTTP2_STREAM_REF(s, "chttp2"); - grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0]); - grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1]); + grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0], arena); + grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena); grpc_chttp2_data_parser_init(&s->data_parser); grpc_slice_buffer_init(&s->flow_controlled_buffer); s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); @@ -665,16 +665,17 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp, GPR_TIMER_END("destroy_stream", 0); - gpr_free(s->destroy_stream_arg); + grpc_closure_sched(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE); } static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, void *and_free_memory) { + grpc_stream *gs, + grpc_closure *then_schedule_closure) { GPR_TIMER_BEGIN("destroy_stream", 0); grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; 
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs; - s->destroy_stream_arg = and_free_memory; + s->destroy_stream_arg = then_schedule_closure; grpc_closure_sched( exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s, grpc_combiner_scheduler(t->combiner, false)), @@ -1629,15 +1630,19 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, s->recv_trailing_metadata_finished != NULL) { char status_string[GPR_LTOA_MIN_BUFSIZE]; gpr_ltoa(status, status_string); - grpc_chttp2_incoming_metadata_buffer_replace_or_add( - exec_ctx, &s->metadata_buffer[1], - grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_STATUS, - grpc_slice_from_copied_string(status_string))); + GRPC_LOG_IF_ERROR("add_status", + grpc_chttp2_incoming_metadata_buffer_replace_or_add( + exec_ctx, &s->metadata_buffer[1], + grpc_mdelem_from_slices( + exec_ctx, GRPC_MDSTR_GRPC_STATUS, + grpc_slice_from_copied_string(status_string)))); if (msg != NULL) { - grpc_chttp2_incoming_metadata_buffer_replace_or_add( - exec_ctx, &s->metadata_buffer[1], - grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_MESSAGE, - grpc_slice_from_copied_string(msg))); + GRPC_LOG_IF_ERROR( + "add_status_message", + grpc_chttp2_incoming_metadata_buffer_replace_or_add( + exec_ctx, &s->metadata_buffer[1], + grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_MESSAGE, + grpc_slice_from_copied_string(msg)))); } s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE; grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s); diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c index f487533c41..9b4b1a7b84 100644 --- a/src/core/ext/transport/chttp2/transport/frame_ping.c +++ b/src/core/ext/transport/chttp2/transport/frame_ping.c @@ -91,7 +91,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_ping_parser *p = parser; while (p->byte != 8 && cur != end) { - p->opaque_8bytes |= (((uint64_t)*cur) << (8 * p->byte)); + p->opaque_8bytes |= (((uint64_t)*cur) << (56 - 8 * p->byte)); cur++; p->byte++; } diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c index 40f5120308..1865b997b7 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_parser.c +++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c @@ -1620,13 +1620,18 @@ void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx, grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, grpc_slice slice) { - /* TODO(ctiller): limit the distance of end from beg, and perform multiple - steps in the event of a large chunk of data to limit - stack space usage when no tail call optimization is - available */ +/* max number of bytes to parse at a time... 
limits call stack depth on + * compilers without TCO */ +#define MAX_PARSE_LENGTH 1024 p->current_slice_refcount = slice.refcount; - grpc_error *error = p->state(exec_ctx, p, GRPC_SLICE_START_PTR(slice), - GRPC_SLICE_END_PTR(slice)); + uint8_t *start = GRPC_SLICE_START_PTR(slice); + uint8_t *end = GRPC_SLICE_END_PTR(slice); + grpc_error *error = GRPC_ERROR_NONE; + while (start != end && error == GRPC_ERROR_NONE) { + uint8_t *target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start); + error = p->state(exec_ctx, p, start, target); + start = target; + } p->current_slice_refcount = NULL; return error; } diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.c b/src/core/ext/transport/chttp2/transport/incoming_metadata.c index c91b019aa0..da0a34d32a 100644 --- a/src/core/ext/transport/chttp2/transport/incoming_metadata.c +++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.c @@ -41,69 +41,48 @@ #include <grpc/support/log.h> void grpc_chttp2_incoming_metadata_buffer_init( - grpc_chttp2_incoming_metadata_buffer *buffer) { - buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena) { + buffer->arena = arena; + grpc_metadata_batch_init(&buffer->batch); + buffer->batch.deadline = gpr_inf_future(GPR_CLOCK_REALTIME); } void grpc_chttp2_incoming_metadata_buffer_destroy( grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer) { - size_t i; - if (!buffer->published) { - for (i = 0; i < buffer->count; i++) { - GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md); - } - } - gpr_free(buffer->elems); + grpc_metadata_batch_destroy(exec_ctx, &buffer->batch); } -void grpc_chttp2_incoming_metadata_buffer_add( - grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem) { - GPR_ASSERT(!buffer->published); - if (buffer->capacity == buffer->count) { - buffer->capacity = GPR_MAX(8, 2 * buffer->capacity); - buffer->elems = - gpr_realloc(buffer->elems, sizeof(*buffer->elems) * buffer->capacity); - } - buffer->elems[buffer->count++].md = elem; +grpc_error *grpc_chttp2_incoming_metadata_buffer_add( + grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, + grpc_mdelem elem) { buffer->size += GRPC_MDELEM_LENGTH(elem); + return grpc_metadata_batch_add_tail( + exec_ctx, &buffer->batch, + gpr_arena_alloc(buffer->arena, sizeof(grpc_linked_mdelem)), elem); } -void grpc_chttp2_incoming_metadata_buffer_replace_or_add( +grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add( grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem) { - for (size_t i = 0; i < buffer->count; i++) { - if (grpc_slice_eq(GRPC_MDKEY(buffer->elems[i].md), GRPC_MDKEY(elem))) { - GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md); - buffer->elems[i].md = elem; - return; + for (grpc_linked_mdelem *l = buffer->batch.list.head; l != NULL; + l = l->next) { + if (grpc_slice_eq(GRPC_MDKEY(l->md), GRPC_MDKEY(elem))) { + GRPC_MDELEM_UNREF(exec_ctx, l->md); + l->md = elem; + return GRPC_ERROR_NONE; } } - grpc_chttp2_incoming_metadata_buffer_add(buffer, elem); + return grpc_chttp2_incoming_metadata_buffer_add(exec_ctx, buffer, elem); } void grpc_chttp2_incoming_metadata_buffer_set_deadline( grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) { - GPR_ASSERT(!buffer->published); - buffer->deadline = deadline; + buffer->batch.deadline = deadline; } void grpc_chttp2_incoming_metadata_buffer_publish( grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch 
*batch) { - GPR_ASSERT(!buffer->published); - buffer->published = 1; - if (buffer->count > 0) { - size_t i; - for (i = 0; i < buffer->count; i++) { - /* TODO(ctiller): do something better here */ - if (!GRPC_LOG_IF_ERROR("grpc_chttp2_incoming_metadata_buffer_publish", - grpc_metadata_batch_link_tail( - exec_ctx, batch, &buffer->elems[i]))) { - GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md); - } - } - } else { - batch->list.head = batch->list.tail = NULL; - } - batch->deadline = buffer->deadline; + *batch = buffer->batch; + grpc_metadata_batch_init(&buffer->batch); } diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h index 1eac6fc150..288c917e65 100644 --- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h +++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h @@ -37,28 +37,26 @@ #include "src/core/lib/transport/transport.h" typedef struct { - grpc_linked_mdelem *elems; - size_t count; - size_t capacity; - gpr_timespec deadline; - int published; + gpr_arena *arena; + grpc_metadata_batch batch; size_t size; // total size of metadata } grpc_chttp2_incoming_metadata_buffer; /** assumes everything initially zeroed */ void grpc_chttp2_incoming_metadata_buffer_init( - grpc_chttp2_incoming_metadata_buffer *buffer); + grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena); void grpc_chttp2_incoming_metadata_buffer_destroy( grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer); void grpc_chttp2_incoming_metadata_buffer_publish( grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch); -void grpc_chttp2_incoming_metadata_buffer_add( - grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem); -void grpc_chttp2_incoming_metadata_buffer_replace_or_add( +grpc_error *grpc_chttp2_incoming_metadata_buffer_add( grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, - grpc_mdelem elem); + grpc_mdelem elem) GRPC_MUST_USE_RESULT; +grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add( + grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, + grpc_mdelem elem) GRPC_MUST_USE_RESULT; void grpc_chttp2_incoming_metadata_buffer_set_deadline( grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline); diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index d26812ad6b..3c56c21599 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -425,7 +425,7 @@ struct grpc_chttp2_stream { grpc_stream_refcount *refcount; grpc_closure destroy_stream; - void *destroy_stream_arg; + grpc_closure *destroy_stream_arg; grpc_chttp2_stream_link links[STREAM_LIST_COUNT]; uint8_t included[STREAM_LIST_COUNT]; diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c index 7ed00522c3..7efc8c63c9 100644 --- a/src/core/ext/transport/chttp2/transport/parsing.c +++ b/src/core/ext/transport/chttp2/transport/parsing.c @@ -381,16 +381,38 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx, s->incoming_window_delta + t->settings[GRPC_ACKED_SETTINGS] [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]) { - char *msg; - gpr_asprintf(&msg, - "frame of size %d overflows incoming window of %" PRId64, - t->incoming_frame_size, - s->incoming_window_delta + - t->settings[GRPC_ACKED_SETTINGS] - 
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]); - grpc_error *err = GRPC_ERROR_CREATE(msg); - gpr_free(msg); - return err; + if (incoming_frame_size <= + s->incoming_window_delta + + t->settings[GRPC_SENT_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]) { + gpr_log( + GPR_ERROR, + "Incoming frame of size %d exceeds incoming window size of %" PRId64 + ".\n" + "The (un-acked, future) window size would be %" PRId64 + " which is not exceeded.\n" + "This would usually cause a disconnection, but allowing it due to " + "broken HTTP2 implementations in the wild.\n" + "See (for example) https://github.com/netty/netty/issues/6520.", + t->incoming_frame_size, + s->incoming_window_delta + + t->settings[GRPC_ACKED_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE], + s->incoming_window_delta + + t->settings[GRPC_SENT_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]); + } else { + char *msg; + gpr_asprintf(&msg, + "frame of size %d overflows incoming window of %" PRId64, + t->incoming_frame_size, + s->incoming_window_delta + + t->settings[GRPC_ACKED_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]); + grpc_error *err = GRPC_ERROR_CREATE(msg); + gpr_free(msg); + return err; + } } GRPC_CHTTP2_FLOW_DEBIT_STREAM_INCOMING_WINDOW_DELTA("parse", t, s, @@ -526,7 +548,14 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp, s->seen_error = true; GRPC_MDELEM_UNREF(exec_ctx, md); } else { - grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[0], md); + grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add( + exec_ctx, &s->metadata_buffer[0], md); + if (error != GRPC_ERROR_NONE) { + grpc_chttp2_cancel_stream(exec_ctx, t, s, error); + grpc_chttp2_parsing_become_skip_parser(exec_ctx, t); + s->seen_error = true; + GRPC_MDELEM_UNREF(exec_ctx, md); + } } } @@ -576,7 +605,14 @@ static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp, s->seen_error = true; GRPC_MDELEM_UNREF(exec_ctx, md); } else { - grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[1], md); + grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add( + exec_ctx, &s->metadata_buffer[1], md); + if (error != GRPC_ERROR_NONE) { + grpc_chttp2_cancel_stream(exec_ctx, t, s, error); + grpc_chttp2_parsing_become_skip_parser(exec_ctx, t); + s->seen_error = true; + GRPC_MDELEM_UNREF(exec_ctx, md); + } } GPR_TIMER_END("on_trailing_header", 0); diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index 01a03533da..450d9ab23a 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -54,6 +54,7 @@ #include "third_party/objective_c/Cronet/bidirectional_stream_c.h" #define GRPC_HEADER_SIZE_IN_BYTES 5 +#define GRPC_FLUSH_READ_SIZE 4096 #define CRONET_LOG(...) 
\ do { \ @@ -151,11 +152,17 @@ struct write_state { struct op_state { bool state_op_done[OP_NUM_OPS]; bool state_callback_received[OP_NUM_OPS]; + /* A non-zero gRPC status code has been seen */ bool fail_state; + /* Transport is discarding all buffered messages */ bool flush_read; bool flush_cronet_when_ready; bool pending_write_for_trailer; - bool unprocessed_send_message; + bool pending_send_message; + /* User requested RECV_TRAILING_METADATA */ + bool pending_recv_trailing_metadata; + /* Cronet has not issued a callback of a bidirectional read */ + bool pending_read_from_cronet; grpc_error *cancel_error; /* data structure for storing data coming from server */ struct read_state rs; @@ -177,6 +184,7 @@ struct op_storage { }; struct stream_obj { + gpr_arena *arena; struct op_and_state *oas; grpc_transport_stream_op *curr_op; grpc_cronet_transport *curr_ct; @@ -248,11 +256,35 @@ static const char *op_id_string(enum e_op_id i) { return "UNKNOWN"; } -static void free_read_buffer(stream_obj *s) { +static void null_and_maybe_free_read_buffer(stream_obj *s) { if (s->state.rs.read_buffer && s->state.rs.read_buffer != s->state.rs.grpc_header_bytes) { gpr_free(s->state.rs.read_buffer); - s->state.rs.read_buffer = NULL; + } + s->state.rs.read_buffer = NULL; +} + +static void maybe_flush_read(stream_obj *s) { + /* To enter flush read state (discarding all the buffered messages in + * transport layer), two conditions must be satisfied: 1) non-zero grpc status + * has been received, and 2) an op requesting the status code + * (RECV_TRAILING_METADATA) is issued by the user. (See + * doc/status_ordering.md) */ + /* Whenever the evaluation of any of the two condition is changed, we check + * whether we should enter the flush read state. */ + if (s->state.pending_recv_trailing_metadata && s->state.fail_state) { + if (!s->state.flush_read && !s->state.rs.read_stream_closed) { + CRONET_LOG(GPR_DEBUG, "%p: Flush read", s); + s->state.flush_read = true; + null_and_maybe_free_read_buffer(s); + s->state.rs.read_buffer = gpr_malloc(GRPC_FLUSH_READ_SIZE); + if (!s->state.pending_read_from_cronet) { + CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs); + bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, + GRPC_FLUSH_READ_SIZE); + s->state.pending_read_from_cronet = true; + } + } } } @@ -279,7 +311,11 @@ static void add_to_storage(struct stream_obj *s, grpc_transport_stream_op *op) { storage->head = new_op; storage->num_pending_ops++; if (op->send_message) { - s->state.unprocessed_send_message = true; + s->state.pending_send_message = true; + } + if (op->recv_trailing_metadata) { + s->state.pending_recv_trailing_metadata = true; + maybe_flush_read(s); } CRONET_LOG(GPR_DEBUG, "adding new op %p. 
%d in the queue.", new_op, storage->num_pending_ops); @@ -367,7 +403,7 @@ static void on_failed(bidirectional_stream *stream, int net_error) { gpr_free(s->state.ws.write_buffer); s->state.ws.write_buffer = NULL; } - free_read_buffer(s); + null_and_maybe_free_read_buffer(s); gpr_mu_unlock(&s->mu); execute_from_storage(s); } @@ -390,7 +426,7 @@ static void on_canceled(bidirectional_stream *stream) { gpr_free(s->state.ws.write_buffer); s->state.ws.write_buffer = NULL; } - free_read_buffer(s); + null_and_maybe_free_read_buffer(s); gpr_mu_unlock(&s->mu); execute_from_storage(s); } @@ -405,7 +441,7 @@ static void on_succeeded(bidirectional_stream *stream) { bidirectional_stream_destroy(s->cbs); s->state.state_callback_received[OP_SUCCEEDED] = true; s->cbs = NULL; - free_read_buffer(s); + null_and_maybe_free_read_buffer(s); gpr_mu_unlock(&s->mu); execute_from_storage(s); } @@ -451,15 +487,18 @@ static void on_response_headers_received( gpr_mu_lock(&s->mu); memset(&s->state.rs.initial_metadata, 0, sizeof(s->state.rs.initial_metadata)); - grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata); + grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata, + s->arena); for (size_t i = 0; i < headers->count; i++) { - grpc_chttp2_incoming_metadata_buffer_add( - &s->state.rs.initial_metadata, - grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string( - headers->headers[i].key)), - grpc_slice_intern( - grpc_slice_from_static_string(headers->headers[i].value)))); + GRPC_LOG_IF_ERROR( + "on_response_headers_received", + grpc_chttp2_incoming_metadata_buffer_add( + &exec_ctx, &s->state.rs.initial_metadata, + grpc_mdelem_from_slices( + &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string( + headers->headers[i].key)), + grpc_slice_intern(grpc_slice_from_static_string( + headers->headers[i].value))))); } s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true; if (!(s->state.state_op_done[OP_CANCEL_ERROR] || @@ -473,6 +512,7 @@ static void on_response_headers_received( CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs); bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, s->state.rs.remaining_bytes); + s->state.pending_read_from_cronet = true; } gpr_mu_unlock(&s->mu); grpc_exec_ctx_finish(&exec_ctx); @@ -504,10 +544,13 @@ static void on_read_completed(bidirectional_stream *stream, char *data, CRONET_LOG(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream, data, count); gpr_mu_lock(&s->mu); + s->state.pending_read_from_cronet = false; s->state.state_callback_received[OP_RECV_MESSAGE] = true; if (count > 0 && s->state.flush_read) { CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs); - bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, 4096); + bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, + GRPC_FLUSH_READ_SIZE); + s->state.pending_read_from_cronet = true; gpr_mu_unlock(&s->mu); } else if (count > 0) { s->state.rs.received_bytes += count; @@ -518,16 +561,14 @@ static void on_read_completed(bidirectional_stream *stream, char *data, bidirectional_stream_read( s->cbs, s->state.rs.read_buffer + s->state.rs.received_bytes, s->state.rs.remaining_bytes); + s->state.pending_read_from_cronet = true; gpr_mu_unlock(&s->mu); } else { gpr_mu_unlock(&s->mu); execute_from_storage(s); } } else { - if (s->state.flush_read) { - gpr_free(s->state.rs.read_buffer); - s->state.rs.read_buffer = NULL; - } + null_and_maybe_free_read_buffer(s); s->state.rs.read_stream_closed = true; gpr_mu_unlock(&s->mu); 
execute_from_storage(s); @@ -549,21 +590,25 @@ static void on_response_trailers_received( memset(&s->state.rs.trailing_metadata, 0, sizeof(s->state.rs.trailing_metadata)); s->state.rs.trailing_metadata_valid = false; - grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.trailing_metadata); + grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.trailing_metadata, + s->arena); for (size_t i = 0; i < trailers->count; i++) { CRONET_LOG(GPR_DEBUG, "trailer key=%s, value=%s", trailers->headers[i].key, trailers->headers[i].value); - grpc_chttp2_incoming_metadata_buffer_add( - &s->state.rs.trailing_metadata, - grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string( - trailers->headers[i].key)), - grpc_slice_intern( - grpc_slice_from_static_string(trailers->headers[i].value)))); + GRPC_LOG_IF_ERROR( + "on_response_trailers_received", + grpc_chttp2_incoming_metadata_buffer_add( + &exec_ctx, &s->state.rs.trailing_metadata, + grpc_mdelem_from_slices( + &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string( + trailers->headers[i].key)), + grpc_slice_intern(grpc_slice_from_static_string( + trailers->headers[i].value))))); s->state.rs.trailing_metadata_valid = true; if (0 == strcmp(trailers->headers[i].key, "grpc-status") && 0 != strcmp(trailers->headers[i].value, "0")) { s->state.fail_state = true; + maybe_flush_read(s); } } s->state.state_callback_received[OP_RECV_TRAILING_METADATA] = true; @@ -778,7 +823,7 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op, else if (!stream_state->state_callback_received[OP_SEND_INITIAL_METADATA]) result = false; /* we haven't sent message yet */ - else if (stream_state->unprocessed_send_message && + else if (stream_state->pending_send_message && !stream_state->state_op_done[OP_SEND_MESSAGE]) result = false; /* we haven't got on_write_completed for the send yet */ @@ -900,7 +945,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, } else if (stream_op->send_message && op_can_be_run(stream_op, s, &oas->state, OP_SEND_MESSAGE)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_MESSAGE", oas); - stream_state->unprocessed_send_message = false; + stream_state->pending_send_message = false; if (stream_state->state_callback_received[OP_FAILED]) { result = NO_ACTION_POSSIBLE; CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed."); @@ -1009,6 +1054,13 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, stream_state->state_op_done[OP_RECV_MESSAGE] = true; oas->state.state_op_done[OP_RECV_MESSAGE] = true; result = ACTION_TAKEN_NO_CALLBACK; + } else if (stream_state->flush_read) { + CRONET_LOG(GPR_DEBUG, "flush read"); + grpc_closure_sched(exec_ctx, stream_op->recv_message_ready, + GRPC_ERROR_NONE); + stream_state->state_op_done[OP_RECV_MESSAGE] = true; + oas->state.state_op_done[OP_RECV_MESSAGE] = true; + result = ACTION_TAKEN_NO_CALLBACK; } else if (stream_state->rs.length_field_received == false) { if (stream_state->rs.received_bytes == GRPC_HEADER_SIZE_IN_BYTES && stream_state->rs.remaining_bytes == 0) { @@ -1029,6 +1081,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, true; /* Indicates that at least one read request has been made */ bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer, stream_state->rs.remaining_bytes); + stream_state->pending_read_from_cronet = true; result = ACTION_TAKEN_WITH_CALLBACK; } else { stream_state->rs.remaining_bytes = 0; @@ -1047,11 +1100,13 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, 
stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes; stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES; stream_state->rs.received_bytes = 0; + stream_state->rs.length_field_received = false; CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs); stream_state->state_op_done[OP_READ_REQ_MADE] = true; /* Indicates that at least one read request has been made */ bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer, stream_state->rs.remaining_bytes); + stream_state->pending_read_from_cronet = true; result = ACTION_TAKEN_NO_CALLBACK; } } else if (stream_state->rs.remaining_bytes == 0) { @@ -1064,6 +1119,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, true; /* Indicates that at least one read request has been made */ bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer, stream_state->rs.remaining_bytes); + stream_state->pending_read_from_cronet = true; result = ACTION_TAKEN_WITH_CALLBACK; } else { result = NO_ACTION_POSSIBLE; @@ -1075,7 +1131,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, uint8_t *dst_p = GRPC_SLICE_START_PTR(read_data_slice); memcpy(dst_p, stream_state->rs.read_buffer, (size_t)stream_state->rs.length_field); - free_read_buffer(s); + null_and_maybe_free_read_buffer(s); grpc_slice_buffer_init(&stream_state->rs.read_slice_buffer); grpc_slice_buffer_add(&stream_state->rs.read_slice_buffer, read_data_slice); @@ -1096,6 +1152,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs); bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer, stream_state->rs.remaining_bytes); + stream_state->pending_read_from_cronet = true; result = ACTION_TAKEN_NO_CALLBACK; } } else if (stream_op->recv_trailing_metadata && @@ -1153,15 +1210,6 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, make a note */ if (stream_op->recv_message) stream_state->state_op_done[OP_RECV_MESSAGE_AND_ON_COMPLETE] = true; - } else if (stream_state->fail_state && !stream_state->flush_read) { - CRONET_LOG(GPR_DEBUG, "running: %p flush read", oas); - if (stream_state->rs.read_buffer && - stream_state->rs.read_buffer != stream_state->rs.grpc_header_bytes) { - gpr_free(stream_state->rs.read_buffer); - stream_state->rs.read_buffer = NULL; - } - stream_state->rs.read_buffer = gpr_malloc(4096); - stream_state->flush_read = true; } else { result = NO_ACTION_POSSIBLE; } @@ -1174,7 +1222,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, grpc_stream *gs, grpc_stream_refcount *refcount, - const void *server_data) { + const void *server_data, gpr_arena *arena) { stream_obj *s = (stream_obj *)gs; memset(&s->storage, 0, sizeof(s->storage)); s->storage.head = NULL; @@ -1190,10 +1238,13 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, s->state.fail_state = s->state.flush_read = false; s->state.cancel_error = NULL; s->state.flush_cronet_when_ready = s->state.pending_write_for_trailer = false; - s->state.unprocessed_send_message = false; + s->state.pending_send_message = false; + s->state.pending_recv_trailing_metadata = false; + s->state.pending_read_from_cronet = false; s->curr_gs = gs; s->curr_ct = (grpc_cronet_transport *)gt; + s->arena = arena; gpr_mu_init(&s->mu); return 0; @@ -1209,38 +1260,33 @@ static void set_pollset_set_do_nothing(grpc_exec_ctx *exec_ctx, static void perform_stream_op(grpc_exec_ctx *exec_ctx, 
grpc_transport *gt, grpc_stream *gs, grpc_transport_stream_op *op) { CRONET_LOG(GPR_DEBUG, "perform_stream_op"); - stream_obj *s = (stream_obj *)gs; - add_to_storage(s, op); if (op->send_initial_metadata && header_has_authority(op->send_initial_metadata->list.head)) { /* Cronet does not support :authority header field. We cancel the call when - this field is present in metadata */ - bidirectional_stream_header_array header_array; - bidirectional_stream_header *header; - bidirectional_stream cbs; - CRONET_LOG(GPR_DEBUG, - ":authority header is provided but not supported;" - " cancel operations"); - /* Notify application that operation is cancelled by forging trailers */ - header_array.count = 1; - header_array.capacity = 1; - header_array.headers = gpr_malloc(sizeof(bidirectional_stream_header)); - header = (bidirectional_stream_header *)header_array.headers; - header->key = "grpc-status"; - header->value = "1"; /* Return status GRPC_STATUS_CANCELLED */ - cbs.annotation = (void *)s; - s->state.state_op_done[OP_CANCEL_ERROR] = true; - on_response_trailers_received(&cbs, &header_array); - gpr_free(header_array.headers); - } else { - execute_from_storage(s); + this field is present in metadata */ + if (op->recv_initial_metadata_ready) { + grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready, + GRPC_ERROR_CANCELLED); + } + if (op->recv_message_ready) { + grpc_closure_sched(exec_ctx, op->recv_message_ready, + GRPC_ERROR_CANCELLED); + } + grpc_closure_sched(exec_ctx, op->on_complete, GRPC_ERROR_CANCELLED); + return; } + stream_obj *s = (stream_obj *)gs; + add_to_storage(s, op); + execute_from_storage(s); } static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, void *and_free_memory) { + grpc_stream *gs, + grpc_closure *then_schedule_closure) { stream_obj *s = (stream_obj *)gs; + null_and_maybe_free_read_buffer(s); GRPC_ERROR_UNREF(s->state.cancel_error); + grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE); } static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {} diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c index 3fb2a60ac7..6d53b0576e 100644 --- a/src/core/lib/channel/channel_stack.c +++ b/src/core/lib/channel/channel_stack.c @@ -166,41 +166,32 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, } } -grpc_error *grpc_call_stack_init( - grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack, - int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg, - grpc_call_context_element *context, const void *transport_server_data, - grpc_slice path, gpr_timespec start_time, gpr_timespec deadline, - grpc_call_stack *call_stack) { +grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx, + grpc_channel_stack *channel_stack, + int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, + const grpc_call_element_args *elem_args) { grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack); size_t count = channel_stack->count; grpc_call_element *call_elems; char *user_data; size_t i; - call_stack->count = count; - GRPC_STREAM_REF_INIT(&call_stack->refcount, initial_refs, destroy, + elem_args->call_stack->count = count; + GRPC_STREAM_REF_INIT(&elem_args->call_stack->refcount, initial_refs, destroy, destroy_arg, "CALL_STACK"); - call_elems = CALL_ELEMS_FROM_STACK(call_stack); + call_elems = CALL_ELEMS_FROM_STACK(elem_args->call_stack); user_data = ((char *)call_elems) + ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element)); /* init 
per-filter data */ grpc_error *first_error = GRPC_ERROR_NONE; - const grpc_call_element_args args = { - .start_time = start_time, - .call_stack = call_stack, - .server_transport_data = transport_server_data, - .context = context, - .path = path, - .deadline = deadline, - }; for (i = 0; i < count; i++) { call_elems[i].filter = channel_elems[i].filter; call_elems[i].channel_data = channel_elems[i].channel_data; call_elems[i].call_data = user_data; - grpc_error *error = - call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i], &args); + grpc_error *error = call_elems[i].filter->init_call_elem( + exec_ctx, &call_elems[i], elem_args); if (error != GRPC_ERROR_NONE) { if (first_error == GRPC_ERROR_NONE) { first_error = error; @@ -241,15 +232,16 @@ void grpc_call_stack_ignore_set_pollset_or_pollset_set( void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack, const grpc_call_final_info *final_info, - void *and_free_memory) { + grpc_closure *then_schedule_closure) { grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack); size_t count = stack->count; size_t i; /* destroy per-filter data */ for (i = 0; i < count; i++) { - elems[i].filter->destroy_call_elem(exec_ctx, &elems[i], final_info, - i == count - 1 ? and_free_memory : NULL); + elems[i].filter->destroy_call_elem( + exec_ctx, &elems[i], final_info, + i == count - 1 ? then_schedule_closure : NULL); } } diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index 6d3340bcbf..80e3603e8d 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -56,6 +56,7 @@ #include "src/core/lib/debug/trace.h" #include "src/core/lib/iomgr/polling_entity.h" +#include "src/core/lib/support/arena.h" #include "src/core/lib/transport/transport.h" #ifdef __cplusplus @@ -84,6 +85,7 @@ typedef struct { grpc_slice path; gpr_timespec start_time; gpr_timespec deadline; + gpr_arena *arena; } grpc_call_element_args; typedef struct { @@ -139,12 +141,12 @@ typedef struct { /* Destroy per call data. The filter does not need to do any chaining. The bottom filter of a stack will be passed a non-NULL pointer to - \a and_free_memory that should be passed to gpr_free when destruction - is complete. \a final_info contains data about the completed call, mainly - for reporting purposes. */ + \a then_schedule_closure that should be passed to grpc_closure_sched when + destruction is complete. \a final_info contains data about the completed + call, mainly for reporting purposes. */ void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *and_free_memory); + grpc_closure *then_schedule_closure); /* sizeof(per channel data) */ size_t sizeof_channel_data; @@ -236,12 +238,11 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, /* Initialize a call stack given a channel stack. transport_server_data is expected to be NULL on a client, or an opaque transport owned pointer on the server. 
*/ -grpc_error *grpc_call_stack_init( - grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack, - int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg, - grpc_call_context_element *context, const void *transport_server_data, - grpc_slice path, gpr_timespec start_time, gpr_timespec deadline, - grpc_call_stack *call_stack); +grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx, + grpc_channel_stack *channel_stack, + int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, + const grpc_call_element_args *elem_args); /* Set a pollset or a pollset_set for a call stack: must occur before the first * op is started */ void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, @@ -271,7 +272,7 @@ void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, /* Destroy a call stack */ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack, const grpc_call_final_info *final_info, - void *and_free_memory); + grpc_closure *then_schedule_closure); /* Ignore set pollset{_set} - used by filters if they don't care about pollsets * at all. Does nothing. */ diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c index aa41014a21..02dc479f3a 100644 --- a/src/core/lib/channel/compress_filter.c +++ b/src/core/lib/channel/compress_filter.c @@ -292,7 +292,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, /* Destructor for call_data */ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *ignored) { + grpc_closure *ignored) { /* grab pointers to our data from the call element */ call_data *calld = elem->call_data; grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices); diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c index 29796f7ca7..42ef7b7806 100644 --- a/src/core/lib/channel/connected_channel.c +++ b/src/core/lib/channel/connected_channel.c @@ -88,7 +88,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, channel_data *chand = elem->channel_data; int r = grpc_transport_init_stream( exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), - &args->call_stack->refcount, args->server_transport_data); + &args->call_stack->refcount, args->server_transport_data, args->arena); return r == 0 ? GRPC_ERROR_NONE : GRPC_ERROR_CREATE("transport stream initialization failed"); } @@ -105,12 +105,12 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, /* Destructor for call_data */ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *and_free_memory) { + grpc_closure *then_schedule_closure) { call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; grpc_transport_destroy_stream(exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), - and_free_memory); + then_schedule_closure); } /* Constructor for channel_data */ diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c index 5a12d62f1d..34114bbebf 100644 --- a/src/core/lib/channel/deadline_filter.c +++ b/src/core/lib/channel/deadline_filter.c @@ -256,7 +256,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, // Destructor for call_data. Used for both client and server filters. 
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, const grpc_call_final_info* final_info, - void* and_free_memory) { + grpc_closure* ignored) { grpc_deadline_state_destroy(exec_ctx, elem); } diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c index c031533dd8..f9d0d689ac 100644 --- a/src/core/lib/channel/http_client_filter.c +++ b/src/core/lib/channel/http_client_filter.c @@ -412,7 +412,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, /* Destructor for call_data */ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *ignored) { + grpc_closure *ignored) { call_data *calld = elem->call_data; grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices); } diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c index fb70de8e96..bebd3af335 100644 --- a/src/core/lib/channel/http_server_filter.c +++ b/src/core/lib/channel/http_server_filter.c @@ -358,7 +358,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, /* Destructor for call_data */ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *ignored) { + grpc_closure *ignored) { call_data *calld = elem->call_data; grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer); } diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c index b424c0d2ac..5ba13fe251 100644 --- a/src/core/lib/channel/message_size_filter.c +++ b/src/core/lib/channel/message_size_filter.c @@ -200,7 +200,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, // Destructor for call_data. static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, const grpc_call_final_info* final_info, - void* ignored) {} + grpc_closure* ignored) {} // Constructor for channel_data. 
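
The signature change running through these filters replaces the old and_free_memory pointer with a closure: per the updated channel_stack.h comment, only the bottom element of a call stack receives a non-NULL then_schedule_closure, and it is to be handed to grpc_closure_sched once destruction completes rather than gpr_free'd. A minimal sketch of a filter destructor under the new contract follows; the filter and its call_data member are hypothetical, not part of this diff.

/* Sketch only: hypothetical filter destructor under the new contract. */
static void example_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem,
                                      const grpc_call_final_info *final_info,
                                      grpc_closure *then_schedule_closure) {
  /* release per-call resources owned by this hypothetical filter */
  call_data *calld = elem->call_data;
  grpc_slice_buffer_destroy_internal(exec_ctx, &calld->buffer);
  /* then_schedule_closure is non-NULL only for the bottom element of the
     stack; scheduling it signals that destruction is complete (previously
     the bottom element called gpr_free on and_free_memory instead) */
  if (then_schedule_closure != NULL) {
    grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
  }
}

In the diff itself, connected_channel forwards the closure to grpc_transport_destroy_stream, while filters that never sit at the bottom of the stack simply ignore the argument.
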
static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c index dbe5b139f9..1127fff756 100644 --- a/src/core/lib/iomgr/error.c +++ b/src/core/lib/iomgr/error.c @@ -35,6 +35,7 @@ #include <string.h> +#include <grpc/slice.h> #include <grpc/status.h> #include <grpc/support/alloc.h> #include <grpc/support/log.h> @@ -47,46 +48,7 @@ #include "src/core/lib/iomgr/error_internal.h" #include "src/core/lib/profiling/timers.h" - -static void destroy_integer(void *key) {} - -static void *copy_integer(void *key) { return key; } - -static long compare_integers(void *key1, void *key2) { - return GPR_ICMP((uintptr_t)key1, (uintptr_t)key2); -} - -static void destroy_string(void *str) { gpr_free(str); } - -static void *copy_string(void *str) { return gpr_strdup(str); } - -static void destroy_err(void *err) { GRPC_ERROR_UNREF(err); } - -static void *copy_err(void *err) { return GRPC_ERROR_REF(err); } - -static void destroy_time(void *tm) { gpr_free(tm); } - -static gpr_timespec *box_time(gpr_timespec tm) { - gpr_timespec *out = gpr_malloc(sizeof(*out)); - *out = tm; - return out; -} - -static void *copy_time(void *tm) { return box_time(*(gpr_timespec *)tm); } - -static const gpr_avl_vtable avl_vtable_ints = {destroy_integer, copy_integer, - compare_integers, - destroy_integer, copy_integer}; - -static const gpr_avl_vtable avl_vtable_strs = {destroy_integer, copy_integer, - compare_integers, destroy_string, - copy_string}; - -static const gpr_avl_vtable avl_vtable_times = { - destroy_integer, copy_integer, compare_integers, destroy_time, copy_time}; - -static const gpr_avl_vtable avl_vtable_errs = { - destroy_integer, copy_integer, compare_integers, destroy_err, copy_err}; +#include "src/core/lib/slice/slice_internal.h" static const char *error_int_name(grpc_error_ints key) { switch (key) { @@ -120,6 +82,8 @@ static const char *error_int_name(grpc_error_ints key) { return "limit"; case GRPC_ERROR_INT_OCCURRED_DURING_WRITE: return "occurred_during_write"; + case GRPC_ERROR_INT_MAX: + GPR_UNREACHABLE_CODE(return "unknown"); } GPR_UNREACHABLE_CODE(return "unknown"); } @@ -150,6 +114,8 @@ static const char *error_str_name(grpc_error_strs key) { return "filename"; case GRPC_ERROR_STR_QUEUED_BUFFERS: return "queued_buffers"; + case GRPC_ERROR_STR_MAX: + GPR_UNREACHABLE_CODE(return "unknown"); } GPR_UNREACHABLE_CODE(return "unknown"); } @@ -158,6 +124,8 @@ static const char *error_time_name(grpc_error_times key) { switch (key) { case GRPC_ERROR_TIME_CREATED: return "created"; + case GRPC_ERROR_TIME_MAX: + GPR_UNREACHABLE_CODE(return "unknown"); } GPR_UNREACHABLE_CODE(return "unknown"); } @@ -172,25 +140,51 @@ grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line, const char *func) { if (grpc_error_is_special(err)) return err; gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err, - err->refs.count, err->refs.count + 1, file, line, func); - gpr_ref(&err->refs); + gpr_atm_no_barrier_load(&err->atomics.refs.count), + gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line, + func); + gpr_ref(&err->atomics.refs); return err; } #else grpc_error *grpc_error_ref(grpc_error *err) { if (grpc_error_is_special(err)) return err; - gpr_ref(&err->refs); + gpr_ref(&err->atomics.refs); return err; } #endif +static void unref_errs(grpc_error *err) { + uint8_t slot = err->first_err; + while (slot != UINT8_MAX) { + grpc_linked_error *lerr = (grpc_linked_error *)(err->arena + slot); + GRPC_ERROR_UNREF(lerr->err); + 
GPR_ASSERT(err->last_err == slot ? lerr->next == UINT8_MAX + : lerr->next != UINT8_MAX); + slot = lerr->next; + } +} + +static void unref_slice(grpc_slice slice) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_slice_unref_internal(&exec_ctx, slice); + grpc_exec_ctx_finish(&exec_ctx); +} + +static void unref_strs(grpc_error *err) { + for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) { + uint8_t slot = err->strs[which]; + if (slot != UINT8_MAX) { + unref_slice(*(grpc_slice *)(err->arena + slot)); + } + } +} + static void error_destroy(grpc_error *err) { GPR_ASSERT(!grpc_error_is_special(err)); - gpr_avl_unref(err->ints); - gpr_avl_unref(err->strs); - gpr_avl_unref(err->errs); - gpr_avl_unref(err->times); - gpr_free((void *)gpr_atm_acq_load(&err->error_string)); + unref_errs(err); + unref_strs(err); + gpr_free((void *)gpr_atm_acq_load(&err->atomics.error_string)); gpr_free(err); } @@ -199,81 +193,209 @@ void grpc_error_unref(grpc_error *err, const char *file, int line, const char *func) { if (grpc_error_is_special(err)) return; gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err, - err->refs.count, err->refs.count - 1, file, line, func); - if (gpr_unref(&err->refs)) { + gpr_atm_no_barrier_load(&err->atomics.refs.count), + gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line, + func); + if (gpr_unref(&err->atomics.refs)) { error_destroy(err); } } #else void grpc_error_unref(grpc_error *err) { if (grpc_error_is_special(err)) return; - if (gpr_unref(&err->refs)) { + if (gpr_unref(&err->atomics.refs)) { error_destroy(err); } } #endif +static uint8_t get_placement(grpc_error **err, size_t size) { + GPR_ASSERT(*err); + uint8_t slots = (uint8_t)(size / sizeof(intptr_t)); + if ((*err)->arena_size + slots > (*err)->arena_capacity) { + (*err)->arena_capacity = (uint8_t)(3 * (*err)->arena_capacity / 2); + *err = gpr_realloc( + *err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t)); + } + uint8_t placement = (*err)->arena_size; + (*err)->arena_size = (uint8_t)((*err)->arena_size + slots); + return placement; +} + +static void internal_set_int(grpc_error **err, grpc_error_ints which, + intptr_t value) { + // GPR_ASSERT((*err)->ints[which] == UINT8_MAX); // TODO, enforce this + uint8_t slot = (*err)->ints[which]; + if (slot == UINT8_MAX) { + slot = get_placement(err, sizeof(value)); + } + (*err)->ints[which] = slot; + (*err)->arena[slot] = value; +} + +static void internal_set_str(grpc_error **err, grpc_error_strs which, + grpc_slice value) { + // GPR_ASSERT((*err)->strs[which] == UINT8_MAX); // TODO, enforce this + uint8_t slot = (*err)->strs[which]; + if (slot == UINT8_MAX) { + slot = get_placement(err, sizeof(value)); + } else { + unref_slice(*(grpc_slice *)((*err)->arena + slot)); + } + (*err)->strs[which] = slot; + memcpy((*err)->arena + slot, &value, sizeof(value)); +} + +static void internal_set_time(grpc_error **err, grpc_error_times which, + gpr_timespec value) { + // GPR_ASSERT((*err)->times[which] == UINT8_MAX); // TODO, enforce this + uint8_t slot = (*err)->times[which]; + if (slot == UINT8_MAX) { + slot = get_placement(err, sizeof(value)); + } + (*err)->times[which] = slot; + memcpy((*err)->arena + slot, &value, sizeof(value)); +} + +static void internal_add_error(grpc_error **err, grpc_error *new) { + grpc_linked_error new_last = {new, UINT8_MAX}; + uint8_t slot = get_placement(err, sizeof(grpc_linked_error)); + if ((*err)->first_err == UINT8_MAX) { + GPR_ASSERT((*err)->last_err == UINT8_MAX); + (*err)->last_err = slot; + 
(*err)->first_err = slot; + } else { + GPR_ASSERT((*err)->last_err != UINT8_MAX); + grpc_linked_error *old_last = + (grpc_linked_error *)((*err)->arena + (*err)->last_err); + old_last->next = slot; + (*err)->last_err = slot; + } + memcpy((*err)->arena + slot, &new_last, sizeof(grpc_linked_error)); +} + +#define SLOTS_PER_INT (sizeof(intptr_t) / sizeof(intptr_t)) +#define SLOTS_PER_STR (sizeof(grpc_slice) / sizeof(intptr_t)) +#define SLOTS_PER_TIME (sizeof(gpr_timespec) / sizeof(intptr_t)) +#define SLOTS_PER_LINKED_ERROR (sizeof(grpc_linked_error) / sizeof(intptr_t)) + +// size of storing one int and two slices and a timespec. For line, desc, file, +// and time created +#define DEFAULT_ERROR_CAPACITY \ + (SLOTS_PER_INT + (SLOTS_PER_STR * 2) + SLOTS_PER_TIME) + +// It is very common to include and extra int and string in an error +#define SURPLUS_CAPACITY (2 * SLOTS_PER_INT + SLOTS_PER_TIME) + grpc_error *grpc_error_create(const char *file, int line, const char *desc, grpc_error **referencing, size_t num_referencing) { GPR_TIMER_BEGIN("grpc_error_create", 0); - grpc_error *err = gpr_malloc(sizeof(*err)); + uint8_t initial_arena_capacity = (uint8_t)( + DEFAULT_ERROR_CAPACITY + + (uint8_t)(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY); + grpc_error *err = + gpr_malloc(sizeof(*err) + initial_arena_capacity * sizeof(intptr_t)); if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL return GRPC_ERROR_OOM; } #ifdef GRPC_ERROR_REFCOUNT_DEBUG gpr_log(GPR_DEBUG, "%p create [%s:%d]", err, file, line); #endif - err->ints = gpr_avl_add(gpr_avl_create(&avl_vtable_ints), - (void *)(uintptr_t)GRPC_ERROR_INT_FILE_LINE, - (void *)(uintptr_t)line); - err->strs = gpr_avl_add( - gpr_avl_add(gpr_avl_create(&avl_vtable_strs), - (void *)(uintptr_t)GRPC_ERROR_STR_FILE, gpr_strdup(file)), - (void *)(uintptr_t)GRPC_ERROR_STR_DESCRIPTION, gpr_strdup(desc)); - err->errs = gpr_avl_create(&avl_vtable_errs); - err->next_err = 0; - for (size_t i = 0; i < num_referencing; i++) { + + err->arena_size = 0; + err->arena_capacity = initial_arena_capacity; + err->first_err = UINT8_MAX; + err->last_err = UINT8_MAX; + + memset(err->ints, UINT8_MAX, GRPC_ERROR_INT_MAX); + memset(err->strs, UINT8_MAX, GRPC_ERROR_STR_MAX); + memset(err->times, UINT8_MAX, GRPC_ERROR_TIME_MAX); + + internal_set_int(&err, GRPC_ERROR_INT_FILE_LINE, line); + internal_set_str(&err, GRPC_ERROR_STR_FILE, + grpc_slice_from_static_string(file)); + internal_set_str( + &err, GRPC_ERROR_STR_DESCRIPTION, + grpc_slice_from_copied_buffer( + desc, + strlen(desc) + + 1)); // TODO, pull this up. // TODO(ncteisen), pull this up. 
+ + for (size_t i = 0; i < num_referencing; ++i) { if (referencing[i] == GRPC_ERROR_NONE) continue; - err->errs = gpr_avl_add(err->errs, (void *)(err->next_err++), - GRPC_ERROR_REF(referencing[i])); - } - err->times = gpr_avl_add(gpr_avl_create(&avl_vtable_times), - (void *)(uintptr_t)GRPC_ERROR_TIME_CREATED, - box_time(gpr_now(GPR_CLOCK_REALTIME))); - gpr_atm_no_barrier_store(&err->error_string, 0); - gpr_ref_init(&err->refs, 1); + internal_add_error( + &err, + GRPC_ERROR_REF( + referencing[i])); // TODO(ncteisen), change ownership semantics + } + + internal_set_time(&err, GRPC_ERROR_TIME_CREATED, gpr_now(GPR_CLOCK_REALTIME)); + + gpr_atm_no_barrier_store(&err->atomics.error_string, 0); + gpr_ref_init(&err->atomics.refs, 1); GPR_TIMER_END("grpc_error_create", 0); return err; } +static void ref_strs(grpc_error *err) { + for (size_t i = 0; i < GRPC_ERROR_STR_MAX; ++i) { + uint8_t slot = err->strs[i]; + if (slot != UINT8_MAX) { + grpc_slice_ref_internal(*(grpc_slice *)(err->arena + slot)); + } + } +} + +static void ref_errs(grpc_error *err) { + uint8_t slot = err->first_err; + while (slot != UINT8_MAX) { + grpc_linked_error *lerr = (grpc_linked_error *)(err->arena + slot); + GRPC_ERROR_REF(lerr->err); + slot = lerr->next; + } +} + static grpc_error *copy_error_and_unref(grpc_error *in) { GPR_TIMER_BEGIN("copy_error_and_unref", 0); grpc_error *out; if (grpc_error_is_special(in)) { - if (in == GRPC_ERROR_NONE) - out = grpc_error_set_int(GRPC_ERROR_CREATE("no error"), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK); - else if (in == GRPC_ERROR_OOM) - out = GRPC_ERROR_CREATE("oom"); - else if (in == GRPC_ERROR_CANCELLED) - out = - grpc_error_set_int(GRPC_ERROR_CREATE("cancelled"), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED); - else - out = GRPC_ERROR_CREATE("unknown"); + out = GRPC_ERROR_CREATE("unknown"); + if (in == GRPC_ERROR_NONE) { + internal_set_str(&out, GRPC_ERROR_STR_DESCRIPTION, + grpc_slice_from_static_string("no error")); + internal_set_int(&out, GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK); + } else if (in == GRPC_ERROR_OOM) { + internal_set_str(&out, GRPC_ERROR_STR_DESCRIPTION, + grpc_slice_from_static_string("oom")); + } else if (in == GRPC_ERROR_CANCELLED) { + internal_set_str(&out, GRPC_ERROR_STR_DESCRIPTION, + grpc_slice_from_static_string("cancelled")); + internal_set_int(&out, GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED); + } + } else if (gpr_ref_is_unique(&in->atomics.refs)) { + out = in; } else { - out = gpr_malloc(sizeof(*out)); + uint8_t new_arena_capacity = in->arena_capacity; + // the returned err will be added to, so we ensure this is room to avoid + // unneeded allocations. + if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) { + new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2); + } + out = gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t)); #ifdef GRPC_ERROR_REFCOUNT_DEBUG gpr_log(GPR_DEBUG, "%p create copying %p", out, in); #endif - out->ints = gpr_avl_ref(in->ints); - out->strs = gpr_avl_ref(in->strs); - out->errs = gpr_avl_ref(in->errs); - out->times = gpr_avl_ref(in->times); - gpr_atm_no_barrier_store(&out->error_string, 0); - out->next_err = in->next_err; - gpr_ref_init(&out->refs, 1); + // bulk memcpy of the rest of the struct. 
+ size_t skip = sizeof(&out->atomics); + memcpy((void *)((uintptr_t)out + skip), (void *)((uintptr_t)in + skip), + sizeof(*in) + (in->arena_size * sizeof(intptr_t)) - skip); + // manually set the atomics and the new capacity + gpr_atm_no_barrier_store(&out->atomics.error_string, 0); + gpr_ref_init(&out->atomics.refs, 1); + out->arena_capacity = new_arena_capacity; + ref_strs(out); + ref_errs(out); GRPC_ERROR_UNREF(in); } GPR_TIMER_END("copy_error_and_unref", 0); @@ -284,7 +406,7 @@ grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which, intptr_t value) { GPR_TIMER_BEGIN("grpc_error_set_int", 0); grpc_error *new = copy_error_and_unref(src); - new->ints = gpr_avl_add(new->ints, (void *)(uintptr_t)which, (void *)value); + internal_set_int(&new, which, value); GPR_TIMER_END("grpc_error_set_int", 0); return new; } @@ -302,7 +424,6 @@ static special_error_status_map error_status_map[] = { bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) { GPR_TIMER_BEGIN("grpc_error_get_int", 0); - void *pp; if (grpc_error_is_special(err)) { if (which == GRPC_ERROR_INT_GRPC_STATUS) { for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); i++) { @@ -316,8 +437,9 @@ bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) { GPR_TIMER_END("grpc_error_get_int", 0); return false; } - if (gpr_avl_maybe_get(err->ints, (void *)(uintptr_t)which, &pp)) { - if (p != NULL) *p = (intptr_t)pp; + uint8_t slot = err->ints[which]; + if (slot != UINT8_MAX) { + if (p != NULL) *p = err->arena[slot]; GPR_TIMER_END("grpc_error_get_int", 0); return true; } @@ -329,8 +451,9 @@ grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which, const char *value) { GPR_TIMER_BEGIN("grpc_error_set_str", 0); grpc_error *new = copy_error_and_unref(src); - new->strs = - gpr_avl_add(new->strs, (void *)(uintptr_t)which, gpr_strdup(value)); + internal_set_str(&new, which, + grpc_slice_from_copied_buffer( + value, strlen(value) + 1)); // TODO, pull this up. 
GPR_TIMER_END("grpc_error_set_str", 0); return new; } @@ -346,13 +469,19 @@ const char *grpc_error_get_str(grpc_error *err, grpc_error_strs which) { } return NULL; } - return gpr_avl_get(err->strs, (void *)(uintptr_t)which); + uint8_t slot = err->strs[which]; + if (slot != UINT8_MAX) { + return (const char *)GRPC_SLICE_START_PTR( + *(grpc_slice *)(err->arena + slot)); + } else { + return NULL; + } } grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) { GPR_TIMER_BEGIN("grpc_error_add_child", 0); grpc_error *new = copy_error_and_unref(src); - new->errs = gpr_avl_add(new->errs, (void *)(new->next_err++), child); + internal_add_error(&new, child); GPR_TIMER_END("grpc_error_add_child", 0); return new; } @@ -372,42 +501,6 @@ typedef struct { size_t cap_kvs; } kv_pairs; -static void append_kv(kv_pairs *kvs, char *key, char *value) { - if (kvs->num_kvs == kvs->cap_kvs) { - kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4); - kvs->kvs = gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs); - } - kvs->kvs[kvs->num_kvs].key = key; - kvs->kvs[kvs->num_kvs].value = value; - kvs->num_kvs++; -} - -static void collect_kvs(gpr_avl_node *node, char *key(void *k), - char *fmt(void *v), kv_pairs *kvs) { - if (node == NULL) return; - append_kv(kvs, key(node->key), fmt(node->value)); - collect_kvs(node->left, key, fmt, kvs); - collect_kvs(node->right, key, fmt, kvs); -} - -static char *key_int(void *p) { - return gpr_strdup(error_int_name((grpc_error_ints)(uintptr_t)p)); -} - -static char *key_str(void *p) { - return gpr_strdup(error_str_name((grpc_error_strs)(uintptr_t)p)); -} - -static char *key_time(void *p) { - return gpr_strdup(error_time_name((grpc_error_times)(uintptr_t)p)); -} - -static char *fmt_int(void *p) { - char *s; - gpr_asprintf(&s, "%" PRIdPTR, (intptr_t)p); - return s; -} - static void append_chr(char c, char **s, size_t *sz, size_t *cap) { if (*sz == *cap) { *cap = GPR_MAX(8, 3 * *cap / 2); @@ -459,6 +552,40 @@ static void append_esc_str(const char *str, char **s, size_t *sz, size_t *cap) { append_chr('"', s, sz, cap); } +static void append_kv(kv_pairs *kvs, char *key, char *value) { + if (kvs->num_kvs == kvs->cap_kvs) { + kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4); + kvs->kvs = gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs); + } + kvs->kvs[kvs->num_kvs].key = key; + kvs->kvs[kvs->num_kvs].value = value; + kvs->num_kvs++; +} + +static char *key_int(grpc_error_ints which) { + return gpr_strdup(error_int_name(which)); +} + +static char *fmt_int(intptr_t p) { + char *s; + gpr_asprintf(&s, "%" PRIdPTR, p); + return s; +} + +static void collect_ints_kvs(grpc_error *err, kv_pairs *kvs) { + for (size_t which = 0; which < GRPC_ERROR_INT_MAX; ++which) { + uint8_t slot = err->ints[which]; + if (slot != UINT8_MAX) { + append_kv(kvs, key_int((grpc_error_ints)which), + fmt_int(err->arena[slot])); + } + } +} + +static char *key_str(grpc_error_strs which) { + return gpr_strdup(error_str_name(which)); +} + static char *fmt_str(void *p) { char *s = NULL; size_t sz = 0; @@ -468,8 +595,22 @@ static char *fmt_str(void *p) { return s; } -static char *fmt_time(void *p) { - gpr_timespec tm = *(gpr_timespec *)p; +static void collect_strs_kvs(grpc_error *err, kv_pairs *kvs) { + for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) { + uint8_t slot = err->strs[which]; + if (slot != UINT8_MAX) { + append_kv( + kvs, key_str((grpc_error_strs)which), + fmt_str(GRPC_SLICE_START_PTR(*(grpc_slice *)(err->arena + slot)))); + } + } +} + +static char *key_time(grpc_error_times which) { + 
return gpr_strdup(error_time_name(which)); +} + +static char *fmt_time(gpr_timespec tm) { char *out; char *pfx = "!!"; switch (tm.clock_type) { @@ -490,24 +631,37 @@ static char *fmt_time(void *p) { return out; } -static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap, - bool *first) { - if (n == NULL) return; - add_errs(n->left, s, sz, cap, first); - if (!*first) append_chr(',', s, sz, cap); - *first = false; - const char *e = grpc_error_string(n->value); - append_str(e, s, sz, cap); - add_errs(n->right, s, sz, cap, first); +static void collect_times_kvs(grpc_error *err, kv_pairs *kvs) { + for (size_t which = 0; which < GRPC_ERROR_TIME_MAX; ++which) { + uint8_t slot = err->times[which]; + if (slot != UINT8_MAX) { + append_kv(kvs, key_time((grpc_error_times)which), + fmt_time(*(gpr_timespec *)(err->arena + slot))); + } + } +} + +static void add_errs(grpc_error *err, char **s, size_t *sz, size_t *cap) { + uint8_t slot = err->first_err; + bool first = true; + while (slot != UINT8_MAX) { + grpc_linked_error *lerr = (grpc_linked_error *)(err->arena + slot); + if (!first) append_chr(',', s, sz, cap); + first = false; + const char *e = grpc_error_string(lerr->err); + append_str(e, s, sz, cap); + GPR_ASSERT(err->last_err == slot ? lerr->next == UINT8_MAX + : lerr->next != UINT8_MAX); + slot = lerr->next; + } } static char *errs_string(grpc_error *err) { char *s = NULL; size_t sz = 0; size_t cap = 0; - bool first = true; append_chr('[', &s, &sz, &cap); - add_errs(err->errs.root, &s, &sz, &cap, &first); + add_errs(err, &s, &sz, &cap); append_chr(']', &s, &sz, &cap); append_chr(0, &s, &sz, &cap); return s; @@ -546,7 +700,7 @@ const char *grpc_error_string(grpc_error *err) { if (err == GRPC_ERROR_OOM) return oom_error_string; if (err == GRPC_ERROR_CANCELLED) return cancelled_error_string; - void *p = (void *)gpr_atm_acq_load(&err->error_string); + void *p = (void *)gpr_atm_acq_load(&err->atomics.error_string); if (p != NULL) { GPR_TIMER_END("grpc_error_string", 0); return p; @@ -555,10 +709,10 @@ const char *grpc_error_string(grpc_error *err) { kv_pairs kvs; memset(&kvs, 0, sizeof(kvs)); - collect_kvs(err->ints.root, key_int, fmt_int, &kvs); - collect_kvs(err->strs.root, key_str, fmt_str, &kvs); - collect_kvs(err->times.root, key_time, fmt_time, &kvs); - if (!gpr_avl_is_empty(err->errs)) { + collect_ints_kvs(err, &kvs); + collect_strs_kvs(err, &kvs); + collect_times_kvs(err, &kvs); + if (err->first_err != UINT8_MAX) { append_kv(&kvs, gpr_strdup("referenced_errors"), errs_string(err)); } @@ -566,9 +720,9 @@ const char *grpc_error_string(grpc_error *err) { char *out = finish_kvs(&kvs); - if (!gpr_atm_rel_cas(&err->error_string, 0, (gpr_atm)out)) { + if (!gpr_atm_rel_cas(&err->atomics.error_string, 0, (gpr_atm)out)) { gpr_free(out); - out = (char *)gpr_atm_no_barrier_load(&err->error_string); + out = (char *)gpr_atm_no_barrier_load(&err->atomics.error_string); } GPR_TIMER_END("grpc_error_string", 0); diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h index 2613512acb..eb953947ae 100644 --- a/src/core/lib/iomgr/error.h +++ b/src/core/lib/iomgr/error.h @@ -102,6 +102,9 @@ typedef enum { GRPC_ERROR_INT_LIMIT, /// chttp2: did the error occur while a write was in progress GRPC_ERROR_INT_OCCURRED_DURING_WRITE, + + /// Must always be last + GRPC_ERROR_INT_MAX, } grpc_error_ints; typedef enum { @@ -129,11 +132,17 @@ typedef enum { GRPC_ERROR_STR_KEY, /// value associated with the error GRPC_ERROR_STR_VALUE, + + /// Must always be last + GRPC_ERROR_STR_MAX, } grpc_error_strs; 
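
For readers following the new layout: grpc_error now keeps all attribute values in one flat intptr_t arena at the end of the struct, and the ints/strs/times tables store one-byte slot indices into that arena, with UINT8_MAX meaning "unset". A minimal sketch of the lookup, mirroring grpc_error_get_int above and assuming the struct definition from error_internal.h in this change:

/* Illustration only: how a slot index is resolved against the arena. */
static bool example_get_int(grpc_error *err, grpc_error_ints which,
                            intptr_t *value) {
  uint8_t slot = err->ints[which];
  if (slot == UINT8_MAX) return false; /* attribute was never set */
  *value = err->arena[slot];           /* an int occupies exactly one slot */
  return true;
}

/* Wider values (grpc_slice, gpr_timespec, grpc_linked_error) span
   sizeof(value) / sizeof(intptr_t) consecutive slots and are copied in and
   out with memcpy, as internal_set_str and internal_set_time do above. */
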
typedef enum { /// timestamp of error creation GRPC_ERROR_TIME_CREATED, + + /// Must always be last + GRPC_ERROR_TIME_MAX, } grpc_error_times; /// The following "special" errors can be propagated without allocating memory. @@ -184,8 +193,6 @@ void grpc_error_unref(grpc_error *err); grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which, intptr_t value) GRPC_MUST_USE_RESULT; bool grpc_error_get_int(grpc_error *error, grpc_error_ints which, intptr_t *p); -grpc_error *grpc_error_set_time(grpc_error *src, grpc_error_times which, - gpr_timespec value) GRPC_MUST_USE_RESULT; grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which, const char *value) GRPC_MUST_USE_RESULT; /// Returns NULL if the specified string is not set. diff --git a/src/core/lib/iomgr/error_internal.h b/src/core/lib/iomgr/error_internal.h index 1c89ead4ed..7f204df1b2 100644 --- a/src/core/lib/iomgr/error_internal.h +++ b/src/core/lib/iomgr/error_internal.h @@ -35,18 +35,39 @@ #define GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H #include <inttypes.h> -#include <stdbool.h> +#include <stdbool.h> // TODO, do we need this? -#include <grpc/support/avl.h> +#include <grpc/support/sync.h> +typedef struct grpc_linked_error grpc_linked_error; + +struct grpc_linked_error { + grpc_error *err; + uint8_t next; +}; + +// c core representation of an error. See error.h for high level description of +// this object. struct grpc_error { - gpr_refcount refs; - gpr_avl ints; - gpr_avl strs; - gpr_avl times; - gpr_avl errs; - uintptr_t next_err; - gpr_atm error_string; + // All atomics in grpc_error must be stored in this nested struct. The rest of + // the object is memcpy-ed in bulk in copy_and_unref. + struct atomics { + gpr_refcount refs; + gpr_atm error_string; + } atomics; + // These arrays index into dynamic arena at the bottom of the struct. + // UINT8_MAX is used as a sentinel value. + uint8_t ints[GRPC_ERROR_INT_MAX]; + uint8_t strs[GRPC_ERROR_STR_MAX]; + uint8_t times[GRPC_ERROR_TIME_MAX]; + // The child errors are stored in the arena, but are effectively a linked list + // structure, since they are contained withing grpc_linked_error objects. + uint8_t first_err; + uint8_t last_err; + // The arena is dynamically reallocated with a grow factor of 1.5. + uint8_t arena_size; + uint8_t arena_capacity; + intptr_t arena[0]; }; bool grpc_error_is_special(grpc_error *err); diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c index af33949c69..a2f81bcd78 100644 --- a/src/core/lib/iomgr/pollset_uv.c +++ b/src/core/lib/iomgr/pollset_uv.c @@ -39,6 +39,7 @@ #include <string.h> +#include <grpc/support/alloc.h> #include <grpc/support/log.h> #include <grpc/support/sync.h> @@ -61,25 +62,30 @@ gpr_mu grpc_polling_mu; immediately in the next loop iteration. 
Note: In the future, if there is a bug that involves missing wakeups in the future, try adding a uv_async_t to kick the loop differently */ -uv_timer_t dummy_uv_handle; +uv_timer_t *dummy_uv_handle; size_t grpc_pollset_size() { return sizeof(grpc_pollset); } void dummy_timer_cb(uv_timer_t *handle) {} +void dummy_handle_close_cb(uv_handle_t *handle) { gpr_free(handle); } + void grpc_pollset_global_init(void) { gpr_mu_init(&grpc_polling_mu); - uv_timer_init(uv_default_loop(), &dummy_uv_handle); + dummy_uv_handle = gpr_malloc(sizeof(uv_timer_t)); + uv_timer_init(uv_default_loop(), dummy_uv_handle); grpc_pollset_work_run_loop = 1; } -static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; } - void grpc_pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); - uv_close((uv_handle_t *)&dummy_uv_handle, timer_close_cb); + uv_close((uv_handle_t *)dummy_uv_handle, dummy_handle_close_cb); } +static void timer_run_cb(uv_timer_t *timer) {} + +static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; } + void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { *mu = &grpc_polling_mu; uv_timer_init(uv_default_loop(), &pollset->timer); @@ -95,7 +101,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, uv_run(uv_default_loop(), UV_RUN_NOWAIT); } else { // kick the loop once - uv_timer_start(&dummy_uv_handle, dummy_timer_cb, 0, 0); + uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0); } grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE); } @@ -111,8 +117,6 @@ void grpc_pollset_destroy(grpc_pollset *pollset) { } } -static void timer_run_cb(uv_timer_t *timer) {} - grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, gpr_timespec now, gpr_timespec deadline) { @@ -145,7 +149,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_error *grpc_pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { - uv_timer_start(&dummy_uv_handle, dummy_timer_cb, 0, 0); + uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0); return GRPC_ERROR_NONE; } diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h index f1897bb91f..94a454c0b7 100644 --- a/src/core/lib/iomgr/port.h +++ b/src/core/lib/iomgr/port.h @@ -39,6 +39,7 @@ #if defined(GRPC_UV) // Do nothing #elif defined(GPR_MANYLINUX1) +#define GRPC_HAVE_IFADDRS 1 #define GRPC_HAVE_IPV6_RECVPKTINFO 1 #define GRPC_HAVE_IP_PKTINFO 1 #define GRPC_HAVE_MSG_NOSIGNAL 1 @@ -65,6 +66,7 @@ #define GRPC_POSIX_WAKEUP_FD 1 #define GRPC_TIMER_USE_GENERIC 1 #elif defined(GPR_LINUX) +#define GRPC_HAVE_IFADDRS 1 #define GRPC_HAVE_IPV6_RECVPKTINFO 1 #define GRPC_HAVE_IP_PKTINFO 1 #define GRPC_HAVE_MSG_NOSIGNAL 1 @@ -90,6 +92,7 @@ #define GRPC_POSIX_SOCKETUTILS #endif #elif defined(GPR_APPLE) +#define GRPC_HAVE_IFADDRS 1 #define GRPC_HAVE_SO_NOSIGPIPE 1 #define GRPC_HAVE_UNIX_SOCKET 1 #define GRPC_MSG_IOVLEN_TYPE int @@ -100,6 +103,7 @@ #define GRPC_POSIX_WAKEUP_FD 1 #define GRPC_TIMER_USE_GENERIC 1 #elif defined(GPR_FREEBSD) +#define GRPC_HAVE_IFADDRS 1 #define GRPC_HAVE_IPV6_RECVPKTINFO 1 #define GRPC_HAVE_SO_NOSIGPIPE 1 #define GRPC_HAVE_UNIX_SOCKET 1 diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c index 79ff910738..4d715be94c 100644 --- a/src/core/lib/iomgr/resolve_address_uv.c +++ b/src/core/lib/iomgr/resolve_address_uv.c @@ -40,6 +40,7 @@ #include <grpc/support/host_port.h> #include <grpc/support/log.h> #include 
<grpc/support/string_util.h> +#include <grpc/support/useful.h> #include "src/core/lib/iomgr/closure.h" #include "src/core/lib/iomgr/error.h" @@ -54,8 +55,36 @@ typedef struct request { grpc_closure *on_done; grpc_resolved_addresses **addresses; struct addrinfo *hints; + char *host; + char *port; } request; +static int retry_named_port_failure(int status, request *r, + uv_getaddrinfo_cb getaddrinfo_cb) { + if (status != 0) { + // This loop is copied from resolve_address_posix.c + char *svc[][2] = {{"http", "80"}, {"https", "443"}}; + for (size_t i = 0; i < GPR_ARRAY_SIZE(svc); i++) { + if (strcmp(r->port, svc[i][0]) == 0) { + int retry_status; + uv_getaddrinfo_t *req = gpr_malloc(sizeof(uv_getaddrinfo_t)); + req->data = r; + retry_status = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_cb, + r->host, svc[i][1], r->hints); + if (retry_status < 0 || getaddrinfo_cb == NULL) { + // The callback will not be called + gpr_free(req); + } + return retry_status; + } + } + } + /* If this function calls uv_getaddrinfo, it will return that function's + return value. That function only returns numbers <=0, so we can safely + return 1 to indicate that we never retried */ + return 1; +} + static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result, grpc_resolved_addresses **addresses) { struct addrinfo *resp; @@ -97,13 +126,21 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status, request *r = (request *)req->data; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_error *error; + int retry_status; + + gpr_free(req); + retry_status = retry_named_port_failure(status, r, getaddrinfo_callback); + if (retry_status == 0) { + // The request is being retried. Nothing should be done here + return; + } + /* Either no retry was attempted, or the retry failed. 
Either way, the + original error probably has more interesting information */ error = handle_addrinfo_result(status, res, r->addresses); grpc_closure_sched(&exec_ctx, r->on_done, error); grpc_exec_ctx_finish(&exec_ctx); - gpr_free(r->hints); gpr_free(r); - gpr_free(req); uv_freeaddrinfo(res); } @@ -143,6 +180,7 @@ static grpc_error *blocking_resolve_address_impl( uv_getaddrinfo_t req; int s; grpc_error *err; + int retry_status; req.addrinfo = NULL; @@ -158,6 +196,12 @@ static grpc_error *blocking_resolve_address_impl( hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */ s = uv_getaddrinfo(uv_default_loop(), &req, NULL, host, port, &hints); + request r = { + .addresses = addresses, .hints = &hints, .host = host, .port = port}; + retry_status = retry_named_port_failure(s, &r, NULL); + if (retry_status <= 0) { + s = retry_status; + } err = handle_addrinfo_result(s, req.addrinfo, addresses); done: @@ -200,6 +244,8 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, r = gpr_malloc(sizeof(request)); r->on_done = on_done; r->addresses = addrs; + r->host = host; + r->port = port; req = gpr_malloc(sizeof(uv_getaddrinfo_t)); req->data = r; @@ -222,6 +268,8 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, gpr_free(r); gpr_free(req); gpr_free(hints); + gpr_free(host); + gpr_free(port); } } diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c index ae66577caf..618483d9cb 100644 --- a/src/core/lib/iomgr/tcp_client_uv.c +++ b/src/core/lib/iomgr/tcp_client_uv.c @@ -76,7 +76,6 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, const char *str = grpc_error_string(error); gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", connect->addr_name, str); - grpc_error_free_string(str); } if (error == GRPC_ERROR_NONE) { /* error == NONE implies that the timer ran out, and wasn't cancelled. If diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c index 5f286a6723..e242631fc0 100644 --- a/src/core/lib/iomgr/tcp_server_posix.c +++ b/src/core/lib/iomgr/tcp_server_posix.c @@ -44,11 +44,8 @@ #include <errno.h> #include <fcntl.h> -#include <ifaddrs.h> -#include <limits.h> #include <netinet/in.h> #include <netinet/tcp.h> -#include <stdio.h> #include <string.h> #include <sys/socket.h> #include <sys/stat.h> @@ -67,82 +64,10 @@ #include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/socket_utils_posix.h" #include "src/core/lib/iomgr/tcp_posix.h" +#include "src/core/lib/iomgr/tcp_server_utils_posix.h" #include "src/core/lib/iomgr/unix_sockets_posix.h" #include "src/core/lib/support/string.h" -#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100 - -static gpr_once s_init_max_accept_queue_size; -static int s_max_accept_queue_size; - -/* one listening port */ -typedef struct grpc_tcp_listener grpc_tcp_listener; -struct grpc_tcp_listener { - int fd; - grpc_fd *emfd; - grpc_tcp_server *server; - grpc_resolved_address addr; - int port; - unsigned port_index; - unsigned fd_index; - grpc_closure read_closure; - grpc_closure destroyed_closure; - struct grpc_tcp_listener *next; - /* sibling is a linked list of all listeners for a given port. add_port and - clone_port place all new listeners in the same sibling list. A member of - the 'sibling' list is also a member of the 'next' list. The head of each - sibling list has is_sibling==0, and subsequent members of sibling lists - have is_sibling==1. 
is_sibling allows separate sibling lists to be - identified while iterating through 'next'. */ - struct grpc_tcp_listener *sibling; - int is_sibling; -}; - -/* the overall server */ -struct grpc_tcp_server { - gpr_refcount refs; - /* Called whenever accept() succeeds on a server port. */ - grpc_tcp_server_cb on_accept_cb; - void *on_accept_cb_arg; - - gpr_mu mu; - - /* active port count: how many ports are actually still listening */ - size_t active_ports; - /* destroyed port count: how many ports are completely destroyed */ - size_t destroyed_ports; - - /* is this server shutting down? */ - bool shutdown; - /* have listeners been shutdown? */ - bool shutdown_listeners; - /* use SO_REUSEPORT */ - bool so_reuseport; - /* expand wildcard addresses to a list of all local addresses */ - bool expand_wildcard_addrs; - - /* linked list of server ports */ - grpc_tcp_listener *head; - grpc_tcp_listener *tail; - unsigned nports; - - /* List of closures passed to shutdown_starting_add(). */ - grpc_closure_list shutdown_starting; - - /* shutdown callback */ - grpc_closure *shutdown_complete; - - /* all pollsets interested in new connections */ - grpc_pollset **pollsets; - /* number of pollsets in the pollsets array */ - size_t pollset_count; - - /* next pollset to assign a channel to */ - gpr_atm next_pollset_to_assign; - - grpc_resource_quota *resource_quota; -}; - static gpr_once check_init = GPR_ONCE_INIT; static bool has_so_reuseport = false; @@ -301,99 +226,6 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { } } -/* get max listen queue size on linux */ -static void init_max_accept_queue_size(void) { - int n = SOMAXCONN; - char buf[64]; - FILE *fp = fopen("/proc/sys/net/core/somaxconn", "r"); - if (fp == NULL) { - /* 2.4 kernel. */ - s_max_accept_queue_size = SOMAXCONN; - return; - } - if (fgets(buf, sizeof buf, fp)) { - char *end; - long i = strtol(buf, &end, 10); - if (i > 0 && i <= INT_MAX && end && *end == 0) { - n = (int)i; - } - } - fclose(fp); - s_max_accept_queue_size = n; - - if (s_max_accept_queue_size < MIN_SAFE_ACCEPT_QUEUE_SIZE) { - gpr_log(GPR_INFO, - "Suspiciously small accept queue (%d) will probably lead to " - "connection drops", - s_max_accept_queue_size); - } -} - -static int get_max_accept_queue_size(void) { - gpr_once_init(&s_init_max_accept_queue_size, init_max_accept_queue_size); - return s_max_accept_queue_size; -} - -/* Prepare a recently-created socket for listening. 
*/ -static grpc_error *prepare_socket(int fd, const grpc_resolved_address *addr, - bool so_reuseport, int *port) { - grpc_resolved_address sockname_temp; - grpc_error *err = GRPC_ERROR_NONE; - - GPR_ASSERT(fd >= 0); - - if (so_reuseport && !grpc_is_unix_socket(addr)) { - err = grpc_set_socket_reuse_port(fd, 1); - if (err != GRPC_ERROR_NONE) goto error; - } - - err = grpc_set_socket_nonblocking(fd, 1); - if (err != GRPC_ERROR_NONE) goto error; - err = grpc_set_socket_cloexec(fd, 1); - if (err != GRPC_ERROR_NONE) goto error; - if (!grpc_is_unix_socket(addr)) { - err = grpc_set_socket_low_latency(fd, 1); - if (err != GRPC_ERROR_NONE) goto error; - err = grpc_set_socket_reuse_addr(fd, 1); - if (err != GRPC_ERROR_NONE) goto error; - } - err = grpc_set_socket_no_sigpipe_if_possible(fd); - if (err != GRPC_ERROR_NONE) goto error; - - GPR_ASSERT(addr->len < ~(socklen_t)0); - if (bind(fd, (struct sockaddr *)addr->addr, (socklen_t)addr->len) < 0) { - err = GRPC_OS_ERROR(errno, "bind"); - goto error; - } - - if (listen(fd, get_max_accept_queue_size()) < 0) { - err = GRPC_OS_ERROR(errno, "listen"); - goto error; - } - - sockname_temp.len = sizeof(struct sockaddr_storage); - - if (getsockname(fd, (struct sockaddr *)sockname_temp.addr, - (socklen_t *)&sockname_temp.len) < 0) { - err = GRPC_OS_ERROR(errno, "getsockname"); - goto error; - } - - *port = grpc_sockaddr_get_port(&sockname_temp); - return GRPC_ERROR_NONE; - -error: - GPR_ASSERT(err != GRPC_ERROR_NONE); - if (fd >= 0) { - close(fd); - } - grpc_error *ret = grpc_error_set_int( - GRPC_ERROR_CREATE_REFERENCING("Unable to configure socket", &err, 1), - GRPC_ERROR_INT_FD, fd); - GRPC_ERROR_UNREF(err); - return ret; -} - /* event manager callback when reads are ready */ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { grpc_tcp_listener *sp = arg; @@ -477,216 +309,6 @@ error: } } -static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd, - const grpc_resolved_address *addr, - unsigned port_index, unsigned fd_index, - grpc_tcp_listener **listener) { - grpc_tcp_listener *sp = NULL; - int port = -1; - char *addr_str; - char *name; - - grpc_error *err = prepare_socket(fd, addr, s->so_reuseport, &port); - if (err == GRPC_ERROR_NONE) { - GPR_ASSERT(port > 0); - grpc_sockaddr_to_string(&addr_str, addr, 1); - gpr_asprintf(&name, "tcp-server-listener:%s", addr_str); - gpr_mu_lock(&s->mu); - s->nports++; - GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server"); - sp = gpr_malloc(sizeof(grpc_tcp_listener)); - sp->next = NULL; - if (s->head == NULL) { - s->head = sp; - } else { - s->tail->next = sp; - } - s->tail = sp; - sp->server = s; - sp->fd = fd; - sp->emfd = grpc_fd_create(fd, name); - memcpy(&sp->addr, addr, sizeof(grpc_resolved_address)); - sp->port = port; - sp->port_index = port_index; - sp->fd_index = fd_index; - sp->is_sibling = 0; - sp->sibling = NULL; - GPR_ASSERT(sp->emfd); - gpr_mu_unlock(&s->mu); - gpr_free(addr_str); - gpr_free(name); - } - - *listener = sp; - return err; -} - -/* If successful, add a listener to s for addr, set *dsmode for the socket, and - return the *listener. 
*/ -static grpc_error *add_addr_to_server(grpc_tcp_server *s, - const grpc_resolved_address *addr, - unsigned port_index, unsigned fd_index, - grpc_dualstack_mode *dsmode, - grpc_tcp_listener **listener) { - grpc_resolved_address addr4_copy; - int fd; - grpc_error *err = - grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, dsmode, &fd); - if (err != GRPC_ERROR_NONE) { - return err; - } - if (*dsmode == GRPC_DSMODE_IPV4 && - grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) { - addr = &addr4_copy; - } - return add_socket_to_server(s, fd, addr, port_index, fd_index, listener); -} - -/* Bind to "::" to get a port number not used by any address. */ -static grpc_error *get_unused_port(int *port) { - grpc_resolved_address wild; - grpc_sockaddr_make_wildcard6(0, &wild); - grpc_dualstack_mode dsmode; - int fd; - grpc_error *err = - grpc_create_dualstack_socket(&wild, SOCK_STREAM, 0, &dsmode, &fd); - if (err != GRPC_ERROR_NONE) { - return err; - } - if (dsmode == GRPC_DSMODE_IPV4) { - grpc_sockaddr_make_wildcard4(0, &wild); - } - if (bind(fd, (const struct sockaddr *)wild.addr, (socklen_t)wild.len) != 0) { - err = GRPC_OS_ERROR(errno, "bind"); - close(fd); - return err; - } - if (getsockname(fd, (struct sockaddr *)wild.addr, (socklen_t *)&wild.len) != - 0) { - err = GRPC_OS_ERROR(errno, "getsockname"); - close(fd); - return err; - } - close(fd); - *port = grpc_sockaddr_get_port(&wild); - return *port <= 0 ? GRPC_ERROR_CREATE("Bad port") : GRPC_ERROR_NONE; -} - -/* Return the listener in s with address addr or NULL. */ -static grpc_tcp_listener *find_listener_with_addr(grpc_tcp_server *s, - grpc_resolved_address *addr) { - grpc_tcp_listener *l; - gpr_mu_lock(&s->mu); - for (l = s->head; l != NULL; l = l->next) { - if (l->addr.len != addr->len) { - continue; - } - if (memcmp(l->addr.addr, addr->addr, addr->len) == 0) { - break; - } - } - gpr_mu_unlock(&s->mu); - return l; -} - -/* Get all addresses assigned to network interfaces on the machine and create a - listener for each. requested_port is the port to use for every listener, or 0 - to select one random port that will be used for every listener. Set *out_port - to the port selected. Return GRPC_ERROR_NONE only if all listeners were - added. */ -static grpc_error *add_all_local_addrs_to_server(grpc_tcp_server *s, - unsigned port_index, - int requested_port, - int *out_port) { - struct ifaddrs *ifa = NULL; - struct ifaddrs *ifa_it; - unsigned fd_index = 0; - grpc_tcp_listener *sp = NULL; - grpc_error *err = GRPC_ERROR_NONE; - if (requested_port == 0) { - /* Note: There could be a race where some local addrs can listen on the - selected port and some can't. The sane way to handle this would be to - retry by recreating the whole grpc_tcp_server. Backing out individual - listeners and orphaning the FDs looks like too much trouble. */ - if ((err = get_unused_port(&requested_port)) != GRPC_ERROR_NONE) { - return err; - } else if (requested_port <= 0) { - return GRPC_ERROR_CREATE("Bad get_unused_port()"); - } - gpr_log(GPR_DEBUG, "Picked unused port %d", requested_port); - } - if (getifaddrs(&ifa) != 0 || ifa == NULL) { - return GRPC_OS_ERROR(errno, "getifaddrs"); - } - for (ifa_it = ifa; ifa_it != NULL; ifa_it = ifa_it->ifa_next) { - grpc_resolved_address addr; - char *addr_str = NULL; - grpc_dualstack_mode dsmode; - grpc_tcp_listener *new_sp = NULL; - const char *ifa_name = (ifa_it->ifa_name ? 
ifa_it->ifa_name : "<unknown>"); - if (ifa_it->ifa_addr == NULL) { - continue; - } else if (ifa_it->ifa_addr->sa_family == AF_INET) { - addr.len = sizeof(struct sockaddr_in); - } else if (ifa_it->ifa_addr->sa_family == AF_INET6) { - addr.len = sizeof(struct sockaddr_in6); - } else { - continue; - } - memcpy(addr.addr, ifa_it->ifa_addr, addr.len); - if (!grpc_sockaddr_set_port(&addr, requested_port)) { - /* Should never happen, because we check sa_family above. */ - err = GRPC_ERROR_CREATE("Failed to set port"); - break; - } - if (grpc_sockaddr_to_string(&addr_str, &addr, 0) < 0) { - addr_str = gpr_strdup("<error>"); - } - gpr_log(GPR_DEBUG, - "Adding local addr from interface %s flags 0x%x to server: %s", - ifa_name, ifa_it->ifa_flags, addr_str); - /* We could have multiple interfaces with the same address (e.g., bonding), - so look for duplicates. */ - if (find_listener_with_addr(s, &addr) != NULL) { - gpr_log(GPR_DEBUG, "Skipping duplicate addr %s on interface %s", addr_str, - ifa_name); - gpr_free(addr_str); - continue; - } - if ((err = add_addr_to_server(s, &addr, port_index, fd_index, &dsmode, - &new_sp)) != GRPC_ERROR_NONE) { - char *err_str = NULL; - grpc_error *root_err; - if (gpr_asprintf(&err_str, "Failed to add listener: %s", addr_str) < 0) { - err_str = gpr_strdup("Failed to add listener"); - } - root_err = GRPC_ERROR_CREATE(err_str); - gpr_free(err_str); - gpr_free(addr_str); - err = grpc_error_add_child(root_err, err); - break; - } else { - GPR_ASSERT(requested_port == new_sp->port); - ++fd_index; - if (sp != NULL) { - new_sp->is_sibling = 1; - sp->sibling = new_sp; - } - sp = new_sp; - } - gpr_free(addr_str); - } - freeifaddrs(ifa); - if (err != GRPC_ERROR_NONE) { - return err; - } else if (sp == NULL) { - return GRPC_ERROR_CREATE("No local addresses"); - } else { - *out_port = sp->port; - return GRPC_ERROR_NONE; - } -} - /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s, unsigned port_index, @@ -701,14 +323,16 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s, grpc_error *v6_err = GRPC_ERROR_NONE; grpc_error *v4_err = GRPC_ERROR_NONE; *out_port = -1; - if (s->expand_wildcard_addrs) { - return add_all_local_addrs_to_server(s, port_index, requested_port, - out_port); + + if (grpc_tcp_server_have_ifaddrs() && s->expand_wildcard_addrs) { + return grpc_tcp_server_add_all_local_addrs(s, port_index, requested_port, + out_port); } + grpc_sockaddr_make_wildcards(requested_port, &wild4, &wild6); /* Try listening on IPv6 first. */ - if ((v6_err = add_addr_to_server(s, &wild6, port_index, fd_index, &dsmode, - &sp)) == GRPC_ERROR_NONE) { + if ((v6_err = grpc_tcp_server_add_addr(s, &wild6, port_index, fd_index, + &dsmode, &sp)) == GRPC_ERROR_NONE) { ++fd_index; requested_port = *out_port = sp->port; if (dsmode == GRPC_DSMODE_DUALSTACK || dsmode == GRPC_DSMODE_IPV4) { @@ -717,8 +341,8 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s, } /* If we got a v6-only socket or nothing, try adding 0.0.0.0. 
*/ grpc_sockaddr_set_port(&wild4, requested_port); - if ((v4_err = add_addr_to_server(s, &wild4, port_index, fd_index, &dsmode, - &sp2)) == GRPC_ERROR_NONE) { + if ((v4_err = grpc_tcp_server_add_addr(s, &wild4, port_index, fd_index, + &dsmode, &sp2)) == GRPC_ERROR_NONE) { *out_port = sp2->port; if (sp != NULL) { sp2->is_sibling = 1; @@ -726,8 +350,20 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s, } } if (*out_port > 0) { - GRPC_LOG_IF_ERROR("Failed to add :: listener", v6_err); - GRPC_LOG_IF_ERROR("Failed to add 0.0.0.0 listener", v4_err); + if (v6_err != GRPC_ERROR_NONE) { + gpr_log(GPR_INFO, + "Failed to add :: listener, " + "the environment may not support IPv6: %s", + grpc_error_string(v6_err)); + GRPC_ERROR_UNREF(v6_err); + } + if (v4_err != GRPC_ERROR_NONE) { + gpr_log(GPR_INFO, + "Failed to add 0.0.0.0 listener, " + "the environment may not support IPv4: %s", + grpc_error_string(v4_err)); + GRPC_ERROR_UNREF(v4_err); + } return GRPC_ERROR_NONE; } else { grpc_error *root_err = @@ -756,7 +392,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) { err = grpc_create_dualstack_socket(&listener->addr, SOCK_STREAM, 0, &dsmode, &fd); if (err != GRPC_ERROR_NONE) return err; - err = prepare_socket(fd, &listener->addr, true, &port); + err = grpc_tcp_server_prepare_socket(fd, &listener->addr, true, &port); if (err != GRPC_ERROR_NONE) return err; listener->server->nports++; grpc_sockaddr_to_string(&addr_str, &listener->addr, 1); @@ -828,7 +464,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) { addr = &addr6_v4mapped; } - if ((err = add_addr_to_server(s, addr, port_index, 0, &dsmode, &sp)) == + if ((err = grpc_tcp_server_add_addr(s, addr, port_index, 0, &dsmode, &sp)) == GRPC_ERROR_NONE) { *out_port = sp->port; } diff --git a/src/core/lib/iomgr/tcp_server_utils_posix.h b/src/core/lib/iomgr/tcp_server_utils_posix.h new file mode 100644 index 0000000000..f5dc8532f9 --- /dev/null +++ b/src/core/lib/iomgr/tcp_server_utils_posix.h @@ -0,0 +1,134 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H +#define GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H + +#include "src/core/lib/iomgr/ev_posix.h" +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/socket_utils_posix.h" +#include "src/core/lib/iomgr/tcp_server.h" + +/* one listening port */ +typedef struct grpc_tcp_listener { + int fd; + grpc_fd *emfd; + grpc_tcp_server *server; + grpc_resolved_address addr; + int port; + unsigned port_index; + unsigned fd_index; + grpc_closure read_closure; + grpc_closure destroyed_closure; + struct grpc_tcp_listener *next; + /* sibling is a linked list of all listeners for a given port. add_port and + clone_port place all new listeners in the same sibling list. A member of + the 'sibling' list is also a member of the 'next' list. The head of each + sibling list has is_sibling==0, and subsequent members of sibling lists + have is_sibling==1. is_sibling allows separate sibling lists to be + identified while iterating through 'next'. */ + struct grpc_tcp_listener *sibling; + int is_sibling; +} grpc_tcp_listener; + +/* the overall server */ +struct grpc_tcp_server { + gpr_refcount refs; + /* Called whenever accept() succeeds on a server port. */ + grpc_tcp_server_cb on_accept_cb; + void *on_accept_cb_arg; + + gpr_mu mu; + + /* active port count: how many ports are actually still listening */ + size_t active_ports; + /* destroyed port count: how many ports are completely destroyed */ + size_t destroyed_ports; + + /* is this server shutting down? */ + bool shutdown; + /* have listeners been shutdown? */ + bool shutdown_listeners; + /* use SO_REUSEPORT */ + bool so_reuseport; + /* expand wildcard addresses to a list of all local addresses */ + bool expand_wildcard_addrs; + + /* linked list of server ports */ + grpc_tcp_listener *head; + grpc_tcp_listener *tail; + unsigned nports; + + /* List of closures passed to shutdown_starting_add(). */ + grpc_closure_list shutdown_starting; + + /* shutdown callback */ + grpc_closure *shutdown_complete; + + /* all pollsets interested in new connections */ + grpc_pollset **pollsets; + /* number of pollsets in the pollsets array */ + size_t pollset_count; + + /* next pollset to assign a channel to */ + gpr_atm next_pollset_to_assign; + + grpc_resource_quota *resource_quota; +}; + +/* If successful, add a listener to \a s for \a addr, set \a dsmode for the + socket, and return the \a listener. */ +grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s, + const grpc_resolved_address *addr, + unsigned port_index, unsigned fd_index, + grpc_dualstack_mode *dsmode, + grpc_tcp_listener **listener); + +/* Get all addresses assigned to network interfaces on the machine and create a + listener for each. requested_port is the port to use for every listener, or 0 + to select one random port that will be used for every listener. Set *out_port + to the port selected. Return GRPC_ERROR_NONE only if all listeners were + added. 
*/ +grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, + unsigned port_index, + int requested_port, + int *out_port); + +/* Prepare a recently-created socket for listening. */ +grpc_error *grpc_tcp_server_prepare_socket(int fd, + const grpc_resolved_address *addr, + bool so_reuseport, int *port); +/* Ruturn true if the platform supports ifaddrs */ +bool grpc_tcp_server_have_ifaddrs(void); + +#endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H */ diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_common.c b/src/core/lib/iomgr/tcp_server_utils_posix_common.c new file mode 100644 index 0000000000..e45e27d5ab --- /dev/null +++ b/src/core/lib/iomgr/tcp_server_utils_posix_common.c @@ -0,0 +1,220 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_HAVE_IFADDRS + +#include "src/core/lib/iomgr/tcp_server_utils_posix.h" + +#include <errno.h> +#include <limits.h> +#include <stdio.h> +#include <string.h> + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/string_util.h> +#include <grpc/support/sync.h> + +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/iomgr/unix_sockets_posix.h" + +#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100 + +static gpr_once s_init_max_accept_queue_size; +static int s_max_accept_queue_size; + +/* get max listen queue size on linux */ +static void init_max_accept_queue_size(void) { + int n = SOMAXCONN; + char buf[64]; + FILE *fp = fopen("/proc/sys/net/core/somaxconn", "r"); + if (fp == NULL) { + /* 2.4 kernel. 
*/ + s_max_accept_queue_size = SOMAXCONN; + return; + } + if (fgets(buf, sizeof buf, fp)) { + char *end; + long i = strtol(buf, &end, 10); + if (i > 0 && i <= INT_MAX && end && *end == 0) { + n = (int)i; + } + } + fclose(fp); + s_max_accept_queue_size = n; + + if (s_max_accept_queue_size < MIN_SAFE_ACCEPT_QUEUE_SIZE) { + gpr_log(GPR_INFO, + "Suspiciously small accept queue (%d) will probably lead to " + "connection drops", + s_max_accept_queue_size); + } +} + +static int get_max_accept_queue_size(void) { + gpr_once_init(&s_init_max_accept_queue_size, init_max_accept_queue_size); + return s_max_accept_queue_size; +} + +static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd, + const grpc_resolved_address *addr, + unsigned port_index, unsigned fd_index, + grpc_tcp_listener **listener) { + grpc_tcp_listener *sp = NULL; + int port = -1; + char *addr_str; + char *name; + + grpc_error *err = + grpc_tcp_server_prepare_socket(fd, addr, s->so_reuseport, &port); + if (err == GRPC_ERROR_NONE) { + GPR_ASSERT(port > 0); + grpc_sockaddr_to_string(&addr_str, addr, 1); + gpr_asprintf(&name, "tcp-server-listener:%s", addr_str); + gpr_mu_lock(&s->mu); + s->nports++; + GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server"); + sp = gpr_malloc(sizeof(grpc_tcp_listener)); + sp->next = NULL; + if (s->head == NULL) { + s->head = sp; + } else { + s->tail->next = sp; + } + s->tail = sp; + sp->server = s; + sp->fd = fd; + sp->emfd = grpc_fd_create(fd, name); + memcpy(&sp->addr, addr, sizeof(grpc_resolved_address)); + sp->port = port; + sp->port_index = port_index; + sp->fd_index = fd_index; + sp->is_sibling = 0; + sp->sibling = NULL; + GPR_ASSERT(sp->emfd); + gpr_mu_unlock(&s->mu); + gpr_free(addr_str); + gpr_free(name); + } + + *listener = sp; + return err; +} + +/* If successful, add a listener to s for addr, set *dsmode for the socket, and + return the *listener. */ +grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s, + const grpc_resolved_address *addr, + unsigned port_index, unsigned fd_index, + grpc_dualstack_mode *dsmode, + grpc_tcp_listener **listener) { + grpc_resolved_address addr4_copy; + int fd; + grpc_error *err = + grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, dsmode, &fd); + if (err != GRPC_ERROR_NONE) { + return err; + } + if (*dsmode == GRPC_DSMODE_IPV4 && + grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) { + addr = &addr4_copy; + } + return add_socket_to_server(s, fd, addr, port_index, fd_index, listener); +} + +/* Prepare a recently-created socket for listening. 
*/ +grpc_error *grpc_tcp_server_prepare_socket(int fd, + const grpc_resolved_address *addr, + bool so_reuseport, int *port) { + grpc_resolved_address sockname_temp; + grpc_error *err = GRPC_ERROR_NONE; + + GPR_ASSERT(fd >= 0); + + if (so_reuseport && !grpc_is_unix_socket(addr)) { + err = grpc_set_socket_reuse_port(fd, 1); + if (err != GRPC_ERROR_NONE) goto error; + } + + err = grpc_set_socket_nonblocking(fd, 1); + if (err != GRPC_ERROR_NONE) goto error; + err = grpc_set_socket_cloexec(fd, 1); + if (err != GRPC_ERROR_NONE) goto error; + if (!grpc_is_unix_socket(addr)) { + err = grpc_set_socket_low_latency(fd, 1); + if (err != GRPC_ERROR_NONE) goto error; + err = grpc_set_socket_reuse_addr(fd, 1); + if (err != GRPC_ERROR_NONE) goto error; + } + err = grpc_set_socket_no_sigpipe_if_possible(fd); + if (err != GRPC_ERROR_NONE) goto error; + + GPR_ASSERT(addr->len < ~(socklen_t)0); + if (bind(fd, (struct sockaddr *)addr->addr, (socklen_t)addr->len) < 0) { + err = GRPC_OS_ERROR(errno, "bind"); + goto error; + } + + if (listen(fd, get_max_accept_queue_size()) < 0) { + err = GRPC_OS_ERROR(errno, "listen"); + goto error; + } + + sockname_temp.len = sizeof(struct sockaddr_storage); + + if (getsockname(fd, (struct sockaddr *)sockname_temp.addr, + (socklen_t *)&sockname_temp.len) < 0) { + err = GRPC_OS_ERROR(errno, "getsockname"); + goto error; + } + + *port = grpc_sockaddr_get_port(&sockname_temp); + return GRPC_ERROR_NONE; + +error: + GPR_ASSERT(err != GRPC_ERROR_NONE); + if (fd >= 0) { + close(fd); + } + grpc_error *ret = grpc_error_set_int( + GRPC_ERROR_CREATE_REFERENCING("Unable to configure socket", &err, 1), + GRPC_ERROR_INT_FD, fd); + GRPC_ERROR_UNREF(err); + return ret; +} + +#endif /* GRPC_HAVE_IFADDRS */ diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c new file mode 100644 index 0000000000..6354a6bdc1 --- /dev/null +++ b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_HAVE_IFADDRS + +#include "src/core/lib/iomgr/tcp_server_utils_posix.h" + +#include <errno.h> +#include <ifaddrs.h> +#include <stddef.h> +#include <string.h> + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> +#include <grpc/support/string_util.h> + +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" + +/* Return the listener in s with address addr or NULL. */ +static grpc_tcp_listener *find_listener_with_addr(grpc_tcp_server *s, + grpc_resolved_address *addr) { + grpc_tcp_listener *l; + gpr_mu_lock(&s->mu); + for (l = s->head; l != NULL; l = l->next) { + if (l->addr.len != addr->len) { + continue; + } + if (memcmp(l->addr.addr, addr->addr, addr->len) == 0) { + break; + } + } + gpr_mu_unlock(&s->mu); + return l; +} + +/* Bind to "::" to get a port number not used by any address. */ +static grpc_error *get_unused_port(int *port) { + grpc_resolved_address wild; + grpc_sockaddr_make_wildcard6(0, &wild); + grpc_dualstack_mode dsmode; + int fd; + grpc_error *err = + grpc_create_dualstack_socket(&wild, SOCK_STREAM, 0, &dsmode, &fd); + if (err != GRPC_ERROR_NONE) { + return err; + } + if (dsmode == GRPC_DSMODE_IPV4) { + grpc_sockaddr_make_wildcard4(0, &wild); + } + if (bind(fd, (const struct sockaddr *)wild.addr, (socklen_t)wild.len) != 0) { + err = GRPC_OS_ERROR(errno, "bind"); + close(fd); + return err; + } + if (getsockname(fd, (struct sockaddr *)wild.addr, (socklen_t *)&wild.len) != + 0) { + err = GRPC_OS_ERROR(errno, "getsockname"); + close(fd); + return err; + } + close(fd); + *port = grpc_sockaddr_get_port(&wild); + return *port <= 0 ? GRPC_ERROR_CREATE("Bad port") : GRPC_ERROR_NONE; +} + +grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, + unsigned port_index, + int requested_port, + int *out_port) { + struct ifaddrs *ifa = NULL; + struct ifaddrs *ifa_it; + unsigned fd_index = 0; + grpc_tcp_listener *sp = NULL; + grpc_error *err = GRPC_ERROR_NONE; + if (requested_port == 0) { + /* Note: There could be a race where some local addrs can listen on the + selected port and some can't. The sane way to handle this would be to + retry by recreating the whole grpc_tcp_server. Backing out individual + listeners and orphaning the FDs looks like too much trouble. */ + if ((err = get_unused_port(&requested_port)) != GRPC_ERROR_NONE) { + return err; + } else if (requested_port <= 0) { + return GRPC_ERROR_CREATE("Bad get_unused_port()"); + } + gpr_log(GPR_DEBUG, "Picked unused port %d", requested_port); + } + if (getifaddrs(&ifa) != 0 || ifa == NULL) { + return GRPC_OS_ERROR(errno, "getifaddrs"); + } + for (ifa_it = ifa; ifa_it != NULL; ifa_it = ifa_it->ifa_next) { + grpc_resolved_address addr; + char *addr_str = NULL; + grpc_dualstack_mode dsmode; + grpc_tcp_listener *new_sp = NULL; + const char *ifa_name = (ifa_it->ifa_name ? 
ifa_it->ifa_name : "<unknown>"); + if (ifa_it->ifa_addr == NULL) { + continue; + } else if (ifa_it->ifa_addr->sa_family == AF_INET) { + addr.len = sizeof(struct sockaddr_in); + } else if (ifa_it->ifa_addr->sa_family == AF_INET6) { + addr.len = sizeof(struct sockaddr_in6); + } else { + continue; + } + memcpy(addr.addr, ifa_it->ifa_addr, addr.len); + if (!grpc_sockaddr_set_port(&addr, requested_port)) { + /* Should never happen, because we check sa_family above. */ + err = GRPC_ERROR_CREATE("Failed to set port"); + break; + } + if (grpc_sockaddr_to_string(&addr_str, &addr, 0) < 0) { + addr_str = gpr_strdup("<error>"); + } + gpr_log(GPR_DEBUG, + "Adding local addr from interface %s flags 0x%x to server: %s", + ifa_name, ifa_it->ifa_flags, addr_str); + /* We could have multiple interfaces with the same address (e.g., bonding), + so look for duplicates. */ + if (find_listener_with_addr(s, &addr) != NULL) { + gpr_log(GPR_DEBUG, "Skipping duplicate addr %s on interface %s", addr_str, + ifa_name); + gpr_free(addr_str); + continue; + } + if ((err = grpc_tcp_server_add_addr(s, &addr, port_index, fd_index, &dsmode, + &new_sp)) != GRPC_ERROR_NONE) { + char *err_str = NULL; + grpc_error *root_err; + if (gpr_asprintf(&err_str, "Failed to add listener: %s", addr_str) < 0) { + err_str = gpr_strdup("Failed to add listener"); + } + root_err = GRPC_ERROR_CREATE(err_str); + gpr_free(err_str); + gpr_free(addr_str); + err = grpc_error_add_child(root_err, err); + break; + } else { + GPR_ASSERT(requested_port == new_sp->port); + ++fd_index; + if (sp != NULL) { + new_sp->is_sibling = 1; + sp->sibling = new_sp; + } + sp = new_sp; + } + gpr_free(addr_str); + } + freeifaddrs(ifa); + if (err != GRPC_ERROR_NONE) { + return err; + } else if (sp == NULL) { + return GRPC_ERROR_CREATE("No local addresses"); + } else { + *out_port = sp->port; + return GRPC_ERROR_NONE; + } +} + +bool grpc_tcp_server_have_ifaddrs(void) { return true; } + +#endif /* GRPC_HAVE_IFADDRS */ diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c b/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c new file mode 100644 index 0000000000..95c3198be6 --- /dev/null +++ b/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c @@ -0,0 +1,49 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "src/core/lib/iomgr/port.h" + +#if defined(GRPC_POSIX_SOCKET) && !defined(GRPC_HAVE_IFADDRS) + +#include "src/core/lib/iomgr/tcp_server_utils_posix.h" + +grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, + unsigned port_index, + int requested_port, + int *out_port) { + return GRPC_ERROR_CREATE("no ifaddrs available"); +} + +bool grpc_tcp_server_have_ifaddrs(void) { return false; } + +#endif /* defined(GRPC_POSIX_SOCKET) && !defined(GRPC_HAVE_IFADDRS) */ diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c index 2a1c8d39fa..71e295770a 100644 --- a/src/core/lib/iomgr/udp_server.c +++ b/src/core/lib/iomgr/udp_server.c @@ -109,8 +109,8 @@ struct grpc_udp_server { grpc_pollset **pollsets; /* number of pollsets in the pollsets array */ size_t pollset_count; - /* The parent grpc server */ - grpc_server *grpc_server; + /* opaque object to pass to callbacks */ + void *user_data; }; grpc_udp_server *grpc_udp_server_create(void) { @@ -178,7 +178,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { /* Call the orphan_cb to signal that the FD is about to be closed and * should no longer be used. */ GPR_ASSERT(sp->orphan_cb); - sp->orphan_cb(exec_ctx, sp->emfd); + sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data); grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL, "udp_listener_shutdown"); @@ -204,7 +204,7 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, if (s->active_ports) { for (sp = s->head; sp; sp = sp->next) { GPR_ASSERT(sp->orphan_cb); - sp->orphan_cb(exec_ctx, sp->emfd); + sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data); grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_CREATE("Server destroyed")); } @@ -299,7 +299,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { /* Tell the registered callback that data is available to read. */ GPR_ASSERT(sp->read_cb); - sp->read_cb(exec_ctx, sp->emfd, sp->server->grpc_server); + sp->read_cb(exec_ctx, sp->emfd, sp->server->user_data); /* Re-arm the notification event so we get another chance to read. */ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); @@ -322,7 +322,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { /* Tell the registered callback that the socket is writeable. */ GPR_ASSERT(sp->write_cb); - sp->write_cb(exec_ctx, sp->emfd); + sp->write_cb(exec_ctx, sp->emfd, sp->server->user_data); /* Re-arm the notification event so we get another chance to write. 
*/ grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure); @@ -464,13 +464,13 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) { void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, grpc_pollset **pollsets, size_t pollset_count, - grpc_server *server) { + void *user_data) { size_t i; gpr_mu_lock(&s->mu); grpc_udp_listener *sp; GPR_ASSERT(s->active_ports == 0); s->pollsets = pollsets; - s->grpc_server = server; + s->user_data = user_data; sp = s->head; while (sp != NULL) { @@ -485,7 +485,11 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, grpc_schedule_on_exec_ctx); grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure); - s->active_ports++; + /* Registered for both read and write callbacks: increment active_ports + * twice to account for this, and delay free-ing of memory until both + * on_read and on_write have fired. */ + s->active_ports += 2; + sp = sp->next; } diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h index ed63fa7d81..90842a47f0 100644 --- a/src/core/lib/iomgr/udp_server.h +++ b/src/core/lib/iomgr/udp_server.h @@ -47,23 +47,23 @@ typedef struct grpc_udp_server grpc_udp_server; /* Called when data is available to read from the socket. */ typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd, - struct grpc_server *server); + void *user_data); /* Called when the socket is writeable. */ -typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx, - grpc_fd *emfd); +typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd, + void *user_data); /* Called when the grpc_fd is about to be orphaned (and the FD closed). */ typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx, - grpc_fd *emfd); + grpc_fd *emfd, void *user_data); /* Create a server, initially not bound to any ports */ grpc_udp_server *grpc_udp_server_create(void); -/* Start listening to bound ports */ +/* Start listening to bound ports. user_data is passed to callbacks. */ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server, grpc_pollset **pollsets, size_t pollset_count, - struct grpc_server *server); + void *user_data); int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index); diff --git a/src/core/lib/iomgr/wakeup_fd_posix.h b/src/core/lib/iomgr/wakeup_fd_posix.h index 71d32d97ba..c8dd242c75 100644 --- a/src/core/lib/iomgr/wakeup_fd_posix.h +++ b/src/core/lib/iomgr/wakeup_fd_posix.h @@ -46,7 +46,7 @@ * * Setup: * 1. Before calling anything, call global_init() at least once. - * 1. Call grpc_wakeup_fd_create() to get a wakeup_fd. + * 1. Call grpc_wakeup_fd_init() to set up a wakeup_fd. * 2. Add the result of GRPC_WAKEUP_FD_FD to the set of monitored file * descriptors for the poll() style API you are using. Monitor the file * descriptor for readability. 
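The udp_server.c/udp_server.h hunks above replace the hard-wired grpc_server pointer with an opaque void *user_data that grpc_udp_server_start() threads through to the read, write, and orphan callbacks (and each listener now counts twice toward active_ports so teardown waits for both the read and write paths). A minimal sketch of a caller under the new signatures follows; the my_* names and my_state type are hypothetical, and registering the ports with these callbacks is assumed to happen through the existing add-port API, which is not part of these hunks.

#include "src/core/lib/iomgr/udp_server.h"

typedef struct my_state { int packets_seen; } my_state;

/* Called when a datagram is readable; user_data is whatever pointer was
   passed to grpc_udp_server_start (previously this parameter was a
   grpc_server *). */
static void my_read_cb(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                       void *user_data) {
  my_state *st = user_data;
  st->packets_seen++;
}

/* Called when the socket becomes writable; same user_data pointer. */
static void my_write_cb(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                        void *user_data) {}

/* Called just before the fd is orphaned; release per-socket state here. */
static void my_orphan_cb(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                         void *user_data) {}

static void my_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *server,
                     grpc_pollset **pollsets, size_t pollset_count,
                     my_state *state) {
  /* The final argument is now arbitrary caller state, not a grpc_server *. */
  grpc_udp_server_start(exec_ctx, server, pollsets, pollset_count, state);
}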
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c index a23082a866..8dea1d98ff 100644 --- a/src/core/lib/security/transport/client_auth_filter.c +++ b/src/core/lib/security/transport/client_auth_filter.c @@ -318,7 +318,7 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, /* Destructor for call_data */ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *ignored) { + grpc_closure *ignored) { call_data *calld = elem->call_data; grpc_call_credentials_unref(exec_ctx, calld->creds); if (calld->have_host) { diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c index 14619d97ca..01cb473177 100644 --- a/src/core/lib/security/transport/server_auth_filter.c +++ b/src/core/lib/security/transport/server_auth_filter.c @@ -227,7 +227,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, /* Destructor for call_data */ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *ignored) {} + grpc_closure *ignored) {} /* Constructor for channel_data */ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/support/arena.c b/src/core/lib/support/arena.c new file mode 100644 index 0000000000..7bcb983f24 --- /dev/null +++ b/src/core/lib/support/arena.c @@ -0,0 +1,98 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include "src/core/lib/support/arena.h" +#include <grpc/support/alloc.h> +#include <grpc/support/atm.h> +#include <grpc/support/log.h> +#include <grpc/support/useful.h> + +#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \ + (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u)) + +typedef struct zone { + size_t size_begin; + size_t size_end; + gpr_atm next_atm; +} zone; + +struct gpr_arena { + gpr_atm size_so_far; + zone initial_zone; +}; + +gpr_arena *gpr_arena_create(size_t initial_size) { + initial_size = ROUND_UP_TO_ALIGNMENT_SIZE(initial_size); + gpr_arena *a = gpr_zalloc(sizeof(gpr_arena) + initial_size); + a->initial_zone.size_end = initial_size; + return a; +} + +size_t gpr_arena_destroy(gpr_arena *arena) { + gpr_atm size = gpr_atm_no_barrier_load(&arena->size_so_far); + zone *z = (zone *)gpr_atm_no_barrier_load(&arena->initial_zone.next_atm); + gpr_free(arena); + while (z) { + zone *next_z = (zone *)gpr_atm_no_barrier_load(&z->next_atm); + gpr_free(z); + z = next_z; + } + return (size_t)size; +} + +void *gpr_arena_alloc(gpr_arena *arena, size_t size) { + size = ROUND_UP_TO_ALIGNMENT_SIZE(size); + size_t start = + (size_t)gpr_atm_no_barrier_fetch_add(&arena->size_so_far, size); + zone *z = &arena->initial_zone; + while (start > z->size_end) { + zone *next_z = (zone *)gpr_atm_acq_load(&z->next_atm); + if (next_z == NULL) { + size_t next_z_size = (size_t)gpr_atm_no_barrier_load(&arena->size_so_far); + next_z = gpr_zalloc(sizeof(zone) + next_z_size); + next_z->size_begin = z->size_end; + next_z->size_end = z->size_end + next_z_size; + if (!gpr_atm_rel_cas(&z->next_atm, (gpr_atm)NULL, (gpr_atm)next_z)) { + gpr_free(next_z); + next_z = (zone *)gpr_atm_acq_load(&z->next_atm); + } + } + z = next_z; + } + if (start + size > z->size_end) { + return gpr_arena_alloc(arena, size); + } + GPR_ASSERT(start >= z->size_begin); + GPR_ASSERT(start + size <= z->size_end); + return ((char *)(z + 1)) + start - z->size_begin; +} diff --git a/src/core/lib/support/arena.h b/src/core/lib/support/arena.h new file mode 100644 index 0000000000..c28033ffc3 --- /dev/null +++ b/src/core/lib/support/arena.h @@ -0,0 +1,54 @@ +/* + * + * Copyright 2017, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// \file Arena based allocator +// Allows very fast allocation of memory, but that memory cannot be freed until +// the arena as a whole is freed +// Tracks the total memory allocated against it, so that future arenas can +// pre-allocate the right amount of memory + +#ifndef GRPC_CORE_LIB_SUPPORT_ARENA_H +#define GRPC_CORE_LIB_SUPPORT_ARENA_H + +#include <stddef.h> + +typedef struct gpr_arena gpr_arena; + +// Create an arena, with \a initial_size bytes in the first allocated buffer +gpr_arena *gpr_arena_create(size_t initial_size); +// Allocate \a size bytes from the arena +void *gpr_arena_alloc(gpr_arena *arena, size_t size); +// Destroy an arena, returning the total number of bytes allocated +size_t gpr_arena_destroy(gpr_arena *arena); + +#endif /* GRPC_CORE_LIB_SUPPORT_ARENA_H */ diff --git a/src/core/lib/support/sync.c b/src/core/lib/support/sync.c index e4a7fce646..b52f004f74 100644 --- a/src/core/lib/support/sync.c +++ b/src/core/lib/support/sync.c @@ -119,6 +119,10 @@ int gpr_unref(gpr_refcount *r) { return prior == 1; } +int gpr_ref_is_unique(gpr_refcount *r) { + return gpr_atm_acq_load(&r->count) == 1; +} + void gpr_stats_init(gpr_stats_counter *c, intptr_t n) { gpr_atm_rel_store(&c->value, n); } diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index c2547c5147..2c5d8c0ff3 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -51,6 +51,7 @@ #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" +#include "src/core/lib/support/arena.h" #include "src/core/lib/support/string.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/call.h" @@ -138,14 +139,15 @@ typedef struct batch_control { } batch_control; struct grpc_call { + gpr_arena *arena; grpc_completion_queue *cq; grpc_polling_entity pollent; grpc_channel *channel; grpc_call *parent; grpc_call *first_child; gpr_timespec start_time; - /* TODO(ctiller): share with cq if possible? 
*/ - gpr_mu mu; + /* protects first_child, and child next/prev links */ + gpr_mu child_list_mu; /* client or server call */ bool is_client; @@ -160,8 +162,8 @@ struct grpc_call { bool received_initial_metadata; bool receiving_message; bool requested_final_op; - bool received_final_op; - bool sent_any_op; + gpr_atm any_ops_sent_atm; + gpr_atm received_final_op_atm; /* have we received initial metadata */ bool has_initial_md_been_received; @@ -212,6 +214,8 @@ struct grpc_call { grpc_closure receiving_initial_metadata_ready; uint32_t test_only_last_message_flags; + grpc_closure release_call; + union { struct { grpc_status_code *status; @@ -273,9 +277,13 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, grpc_channel_get_channel_stack(args->channel); grpc_call *call; GPR_TIMER_BEGIN("grpc_call_create", 0); - call = gpr_zalloc(sizeof(grpc_call) + channel_stack->call_stack_size); + gpr_arena *arena = + gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel)); + call = gpr_arena_alloc(arena, + sizeof(grpc_call) + channel_stack->call_stack_size); + call->arena = arena; *out_call = call; - gpr_mu_init(&call->mu); + gpr_mu_init(&call->child_list_mu); call->channel = args->channel; call->cq = args->cq; call->parent = args->parent_call; @@ -313,7 +321,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, GPR_ASSERT(call->is_client); GPR_ASSERT(!args->parent_call->is_client); - gpr_mu_lock(&args->parent_call->mu); + gpr_mu_lock(&args->parent_call->child_list_mu); if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) { send_deadline = gpr_time_min( @@ -341,6 +349,10 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, } if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) { call->cancellation_is_inherited = 1; + if (gpr_atm_acq_load(&args->parent_call->received_final_op_atm)) { + cancel_with_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE, + GRPC_ERROR_CANCELLED); + } } if (args->parent_call->first_child == NULL) { @@ -353,18 +365,23 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, call; } - gpr_mu_unlock(&args->parent_call->mu); + gpr_mu_unlock(&args->parent_call->child_list_mu); } call->send_deadline = send_deadline; GRPC_CHANNEL_INTERNAL_REF(args->channel, "call"); /* initial refcount dropped by grpc_call_destroy */ + grpc_call_element_args call_args = { + .call_stack = CALL_STACK_FROM_CALL(call), + .server_transport_data = args->server_transport_data, + .context = call->context, + .path = path, + .start_time = call->start_time, + .deadline = send_deadline, + .arena = call->arena}; add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1, - destroy_call, call, call->context, - args->server_transport_data, path, - call->start_time, send_deadline, - CALL_STACK_FROM_CALL(call))); + destroy_call, call, &call_args)); if (error != GRPC_ERROR_NONE) { cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error)); @@ -421,6 +438,14 @@ void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) { GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON); } +static void release_call(grpc_exec_ctx *exec_ctx, void *call, + grpc_error *error) { + grpc_call *c = call; + grpc_channel *channel = c->channel; + grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena)); + GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call"); +} + static void set_status_value_directly(grpc_status_code status, void *dest); static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, grpc_error *error) { @@ -435,7 +460,7 
@@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, if (c->receiving_stream != NULL) { grpc_byte_stream_destroy(exec_ctx, c->receiving_stream); } - gpr_mu_destroy(&c->mu); + gpr_mu_destroy(&c->child_list_mu); for (ii = 0; ii < c->send_extra_metadata_count; ii++) { GRPC_MDELEM_UNREF(exec_ctx, c->send_extra_metadata[ii].md); } @@ -447,7 +472,6 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, if (c->cq) { GRPC_CQ_INTERNAL_UNREF(c->cq, "bind"); } - grpc_channel *channel = c->channel; get_final_status(call, set_status_value_directly, &c->final_info.final_status, NULL); @@ -456,11 +480,12 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, for (i = 0; i < STATUS_SOURCE_COUNT; i++) { GRPC_ERROR_UNREF( - unpack_received_status(gpr_atm_no_barrier_load(&c->status[i])).error); + unpack_received_status(gpr_atm_acq_load(&c->status[i])).error); } - grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c); - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call"); + grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, + grpc_closure_init(&c->release_call, release_call, c, + grpc_schedule_on_exec_ctx)); GPR_TIMER_END("destroy_call", 0); } @@ -473,7 +498,7 @@ void grpc_call_destroy(grpc_call *c) { GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c)); if (parent) { - gpr_mu_lock(&parent->mu); + gpr_mu_lock(&parent->child_list_mu); if (c == parent->first_child) { parent->first_child = c->sibling_next; if (c == parent->first_child) { @@ -482,15 +507,14 @@ void grpc_call_destroy(grpc_call *c) { c->sibling_prev->sibling_next = c->sibling_next; c->sibling_next->sibling_prev = c->sibling_prev; } - gpr_mu_unlock(&parent->mu); + gpr_mu_unlock(&parent->child_list_mu); GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child"); } - gpr_mu_lock(&c->mu); GPR_ASSERT(!c->destroy_called); c->destroy_called = 1; - cancel = c->sent_any_op && !c->received_final_op; - gpr_mu_unlock(&c->mu); + cancel = gpr_atm_acq_load(&c->any_ops_sent_atm) && + !gpr_atm_acq_load(&c->received_final_op_atm); if (cancel) { cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); @@ -555,53 +579,25 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c, "c=%p, status=%d, description=%s, reserved=%p)", 4, (c, (int)status, description, reserved)); GPR_ASSERT(reserved == NULL); - gpr_mu_lock(&c->mu); cancel_with_status(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, status, description); - gpr_mu_unlock(&c->mu); grpc_exec_ctx_finish(&exec_ctx); return GRPC_CALL_OK; } -typedef struct termination_closure { - grpc_closure closure; - grpc_call *call; - grpc_transport_stream_op op; -} termination_closure; - -static void done_termination(grpc_exec_ctx *exec_ctx, void *tcp, +static void done_termination(grpc_exec_ctx *exec_ctx, void *call, grpc_error *error) { - termination_closure *tc = tcp; - GRPC_CALL_INTERNAL_UNREF(exec_ctx, tc->call, "termination"); - gpr_free(tc); -} - -static void send_termination(grpc_exec_ctx *exec_ctx, void *tcp, - grpc_error *error) { - termination_closure *tc = tcp; - memset(&tc->op, 0, sizeof(tc->op)); - tc->op.cancel_error = GRPC_ERROR_REF(error); - /* reuse closure to catch completion */ - tc->op.on_complete = grpc_closure_init(&tc->closure, done_termination, tc, - grpc_schedule_on_exec_ctx); - execute_op(exec_ctx, tc->call, &tc->op); -} - -static void terminate_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c, - grpc_error *error) { - termination_closure *tc = gpr_malloc(sizeof(*tc)); - memset(tc, 0, sizeof(*tc)); - tc->call = 
c; - GRPC_CALL_INTERNAL_REF(tc->call, "termination"); - grpc_closure_sched(exec_ctx, grpc_closure_init(&tc->closure, send_termination, - tc, grpc_schedule_on_exec_ctx), - error); + GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "termination"); } static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c, status_source source, grpc_error *error) { + GRPC_CALL_INTERNAL_REF(c, "termination"); set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error)); - terminate_with_error(exec_ctx, c, error); + grpc_transport_stream_op *op = grpc_make_transport_stream_op( + grpc_closure_create(done_termination, c, grpc_schedule_on_exec_ctx)); + op->cancel_error = error; + execute_op(exec_ctx, c, op); } static grpc_error *error_from_status(grpc_status_code status, @@ -715,9 +711,7 @@ static void set_incoming_compression_algorithm( grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm( grpc_call *call) { grpc_compression_algorithm algorithm; - gpr_mu_lock(&call->mu); algorithm = call->incoming_compression_algorithm; - gpr_mu_unlock(&call->mu); return algorithm; } @@ -729,9 +723,7 @@ static grpc_compression_algorithm compression_algorithm_for_level_locked( uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) { uint32_t flags; - gpr_mu_lock(&call->mu); flags = call->test_only_last_message_flags; - gpr_mu_unlock(&call->mu); return flags; } @@ -785,9 +777,7 @@ static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx, uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) { uint32_t encodings_accepted_by_peer; - gpr_mu_lock(&call->mu); encodings_accepted_by_peer = call->encodings_accepted_by_peer; - gpr_mu_unlock(&call->mu); return encodings_accepted_by_peer; } @@ -1056,7 +1046,7 @@ static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data, } static grpc_error *consolidate_batch_errors(batch_control *bctl) { - size_t n = (size_t)gpr_atm_no_barrier_load(&bctl->num_errors); + size_t n = (size_t)gpr_atm_acq_load(&bctl->num_errors); if (n == 0) { return GRPC_ERROR_NONE; } else if (n == 1) { @@ -1083,8 +1073,6 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, grpc_call *call = bctl->call; grpc_error *error = consolidate_batch_errors(bctl); - gpr_mu_lock(&call->mu); - if (bctl->send_initial_metadata) { grpc_metadata_batch_destroy( exec_ctx, @@ -1103,20 +1091,23 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */]; recv_trailing_filter(exec_ctx, call, md); - call->received_final_op = true; /* propagate cancellation to any interested children */ + gpr_atm_rel_store(&call->received_final_op_atm, 1); + gpr_mu_lock(&call->child_list_mu); child_call = call->first_child; if (child_call != NULL) { do { next_child_call = child_call->sibling_next; if (child_call->cancellation_is_inherited) { GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel"); - grpc_call_cancel(child_call, NULL); + cancel_with_error(exec_ctx, child_call, STATUS_FROM_API_OVERRIDE, + GRPC_ERROR_CANCELLED); GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel"); } child_call = next_child_call; } while (child_call != call->first_child); } + gpr_mu_unlock(&call->child_list_mu); if (call->is_client) { get_final_status(call, set_status_value_directly, @@ -1130,7 +1121,6 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, GRPC_ERROR_UNREF(error); error = GRPC_ERROR_NONE; } - gpr_mu_unlock(&call->mu); if (bctl->is_notify_tag_closure) { /* unrefs bctl->error */ @@ -1221,7 
+1211,6 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp, grpc_error *error) { batch_control *bctl = bctlp; grpc_call *call = bctl->call; - gpr_mu_lock(&bctl->call->mu); if (error != GRPC_ERROR_NONE) { if (call->receiving_stream != NULL) { grpc_byte_stream_destroy(exec_ctx, call->receiving_stream); @@ -1233,11 +1222,9 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp, } if (call->has_initial_md_been_received || error != GRPC_ERROR_NONE || call->receiving_stream == NULL) { - gpr_mu_unlock(&bctl->call->mu); process_data_after_md(exec_ctx, bctlp); } else { call->saved_receiving_stream_ready_bctlp = bctlp; - gpr_mu_unlock(&bctl->call->mu); } } @@ -1296,7 +1283,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl, grpc_error *error, bool has_cancelled) { if (error == GRPC_ERROR_NONE) return; - int idx = (int)gpr_atm_no_barrier_fetch_add(&bctl->num_errors, 1); + int idx = (int)gpr_atm_full_fetch_add(&bctl->num_errors, 1); if (idx == 0 && !has_cancelled) { cancel_with_error(exec_ctx, bctl->call, STATUS_FROM_CORE, GRPC_ERROR_REF(error)); @@ -1309,8 +1296,6 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, batch_control *bctl = bctlp; grpc_call *call = bctl->call; - gpr_mu_lock(&call->mu); - add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false); if (error == GRPC_ERROR_NONE) { grpc_metadata_batch *md = @@ -1336,11 +1321,9 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, receiving_stream_ready, call->saved_receiving_stream_ready_bctlp, grpc_schedule_on_exec_ctx); call->saved_receiving_stream_ready_bctlp = NULL; - grpc_closure_sched(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error)); + grpc_closure_run(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error)); } - gpr_mu_unlock(&call->mu); - finish_batch_step(exec_ctx, bctl); } @@ -1393,7 +1376,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, bctl->notify_tag = notify_tag; bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0); - gpr_mu_lock(&call->mu); grpc_transport_stream_op *stream_op = &bctl->op; memset(stream_op, 0, sizeof(*stream_op)); stream_op->covered_by_poller = true; @@ -1679,8 +1661,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, grpc_closure_init(&bctl->finish_batch, finish_batch, bctl, grpc_schedule_on_exec_ctx); stream_op->on_complete = &bctl->finish_batch; - call->sent_any_op = true; - gpr_mu_unlock(&call->mu); + gpr_atm_rel_store(&call->any_ops_sent_atm, 1); execute_op(exec_ctx, call, stream_op); @@ -1711,7 +1692,6 @@ done_with_error: if (bctl->recv_final_op) { call->requested_final_op = 0; } - gpr_mu_unlock(&call->mu); goto done; } @@ -1760,10 +1740,8 @@ uint8_t grpc_call_is_client(grpc_call *call) { return call->is_client; } grpc_compression_algorithm grpc_call_compression_for_level( grpc_call *call, grpc_compression_level level) { - gpr_mu_lock(&call->mu); grpc_compression_algorithm algo = compression_algorithm_for_level_locked(call, level); - gpr_mu_unlock(&call->mu); return algo; } diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c index d6acd392c1..2b700b2f67 100644 --- a/src/core/lib/surface/channel.c +++ b/src/core/lib/surface/channel.c @@ -68,6 +68,8 @@ struct grpc_channel { grpc_compression_options compression_options; grpc_mdelem default_authority; + gpr_atm call_size_estimate; + gpr_mu registered_call_mu; registered_call *registered_calls; @@ -115,6 
+117,10 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target, gpr_mu_init(&channel->registered_call_mu); channel->registered_calls = NULL; + gpr_atm_no_barrier_store( + &channel->call_size_estimate, + (gpr_atm)CHANNEL_STACK_FROM_CHANNEL(channel)->call_stack_size); + grpc_compression_options_init(&channel->compression_options); for (size_t i = 0; i < args->num_args; i++) { if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) { @@ -177,6 +183,32 @@ done: return channel; } +size_t grpc_channel_get_call_size_estimate(grpc_channel *channel) { +#define ROUND_UP_SIZE 256 + return ((size_t)gpr_atm_no_barrier_load(&channel->call_size_estimate) + + ROUND_UP_SIZE) & + ~(size_t)(ROUND_UP_SIZE - 1); +} + +void grpc_channel_update_call_size_estimate(grpc_channel *channel, + size_t size) { + size_t cur = (size_t)gpr_atm_no_barrier_load(&channel->call_size_estimate); + if (cur < size) { + /* size grew: update estimate */ + gpr_atm_no_barrier_cas(&channel->call_size_estimate, (gpr_atm)cur, + (gpr_atm)size); + /* if we lose: never mind, something else will likely update soon enough */ + } else if (cur == size) { + /* no change: holding pattern */ + } else if (cur > 0) { + /* size shrank: decrease estimate */ + gpr_atm_no_barrier_cas( + &channel->call_size_estimate, (gpr_atm)cur, + (gpr_atm)(GPR_MIN(cur - 1, (255 * cur + size) / 256))); + /* if we lose: never mind, something else will likely update soon enough */ + } +} + char *grpc_channel_get_target(grpc_channel *channel) { GRPC_API_TRACE("grpc_channel_get_target(channel=%p)", 1, (channel)); return gpr_strdup(channel->target); diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h index 3a441d7add..c4aebd8b9b 100644 --- a/src/core/lib/surface/channel.h +++ b/src/core/lib/surface/channel.h @@ -66,6 +66,9 @@ grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx, grpc_channel *channel, int status_code); +size_t grpc_channel_get_call_size_estimate(grpc_channel *channel); +void grpc_channel_update_call_size_estimate(grpc_channel *channel, size_t size); + #ifdef GRPC_STREAM_REFCOUNT_DEBUG void grpc_channel_internal_ref(grpc_channel *channel, const char *reason); void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel, diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c index 49bc4c114b..9ddb88bd11 100644 --- a/src/core/lib/surface/lame_client.c +++ b/src/core/lib/surface/lame_client.c @@ -130,8 +130,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *and_free_memory) { - gpr_free(and_free_memory); + grpc_closure *then_schedule_closure) { + grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE); } static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c index 7ebe130751..acc8b7016a 100644 --- a/src/core/lib/surface/server.c +++ b/src/core/lib/surface/server.c @@ -898,7 +898,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, - void *ignored) { + grpc_closure *ignored) { channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c index 1143a9e044..ba80bd801e 100644 --- 
a/src/core/lib/surface/version.c +++ b/src/core/lib/surface/version.c @@ -38,4 +38,4 @@ const char *grpc_version_string(void) { return "3.0.0-dev"; } -const char *grpc_g_stands_for(void) { return "green"; } +const char *grpc_g_stands_for(void) { return "gentle"; } diff --git a/src/core/lib/transport/error_utils.c b/src/core/lib/transport/error_utils.c index da77828d9c..ef55e561fb 100644 --- a/src/core/lib/transport/error_utils.c +++ b/src/core/lib/transport/error_utils.c @@ -44,12 +44,12 @@ static grpc_error *recursively_find_error_with_field(grpc_error *error, } if (grpc_error_is_special(error)) return NULL; // Otherwise, search through its children. - intptr_t key = 0; - while (true) { - grpc_error *child_error = gpr_avl_get(error->errs, (void *)key++); - if (child_error == NULL) break; - grpc_error *result = recursively_find_error_with_field(child_error, which); - if (result != NULL) return result; + uint8_t slot = error->first_err; + while (slot != UINT8_MAX) { + grpc_linked_error *lerr = (grpc_linked_error *)(error->arena + slot); + grpc_error *result = recursively_find_error_with_field(lerr->err, which); + if (result) return result; + slot = lerr->next; } return NULL; } @@ -112,13 +112,13 @@ bool grpc_error_has_clear_grpc_status(grpc_error *error) { if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) { return true; } - intptr_t key = 0; - while (true) { - grpc_error *child_error = gpr_avl_get(error->errs, (void *)key++); - if (child_error == NULL) break; - if (grpc_error_has_clear_grpc_status(child_error)) { + uint8_t slot = error->first_err; + while (slot != UINT8_MAX) { + grpc_linked_error *lerr = (grpc_linked_error *)(error->arena + slot); + if (grpc_error_has_clear_grpc_status(lerr->err)) { return true; } + slot = lerr->next; } return false; } diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c index 165950e288..d56cb31ee0 100644 --- a/src/core/lib/transport/transport.c +++ b/src/core/lib/transport/transport.c @@ -162,9 +162,9 @@ void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *transport, grpc_stream *stream, grpc_stream_refcount *refcount, - const void *server_data) { + const void *server_data, gpr_arena *arena) { return transport->vtable->init_stream(exec_ctx, transport, stream, refcount, - server_data); + server_data, arena); } void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx, @@ -197,9 +197,10 @@ void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport, void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *transport, - grpc_stream *stream, void *and_free_memory) { + grpc_stream *stream, + grpc_closure *then_schedule_closure) { transport->vtable->destroy_stream(exec_ctx, transport, stream, - and_free_memory); + then_schedule_closure); } char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx, @@ -254,8 +255,9 @@ typedef struct { static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { made_transport_stream_op *op = arg; - grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error)); + grpc_closure *c = op->inner_on_complete; gpr_free(op); + grpc_closure_run(exec_ctx, c, GRPC_ERROR_REF(error)); } grpc_transport_stream_op *grpc_make_transport_stream_op( diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h index cc1c277b35..950b18aeda 100644 --- a/src/core/lib/transport/transport.h +++ 
b/src/core/lib/transport/transport.h @@ -41,6 +41,7 @@ #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/pollset_set.h" +#include "src/core/lib/support/arena.h" #include "src/core/lib/transport/byte_stream.h" #include "src/core/lib/transport/metadata_batch.h" @@ -229,7 +230,7 @@ size_t grpc_transport_stream_size(grpc_transport *transport); int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *transport, grpc_stream *stream, grpc_stream_refcount *refcount, - const void *server_data); + const void *server_data, gpr_arena *arena); void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport, grpc_stream *stream, grpc_polling_entity *pollent); @@ -246,7 +247,8 @@ void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport, caller, but any child memory must be cleaned up) */ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *transport, - grpc_stream *stream, void *and_free_memory); + grpc_stream *stream, + grpc_closure *then_schedule_closure); void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx, grpc_transport_stream_op *op, diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h index 8553148c35..6f688bf8d2 100644 --- a/src/core/lib/transport/transport_impl.h +++ b/src/core/lib/transport/transport_impl.h @@ -47,7 +47,7 @@ typedef struct grpc_transport_vtable { /* implementation of grpc_transport_init_stream */ int (*init_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self, grpc_stream *stream, grpc_stream_refcount *refcount, - const void *server_data); + const void *server_data, gpr_arena *arena); /* implementation of grpc_transport_set_pollset */ void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_transport *self, @@ -67,7 +67,8 @@ typedef struct grpc_transport_vtable { /* implementation of grpc_transport_destroy_stream */ void (*destroy_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, void *and_free_memory); + grpc_stream *stream, + grpc_closure *then_schedule_closure); /* implementation of grpc_transport_destroy */ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self); |
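Taken together, the transport.h and transport_impl.h hunks above change the stream lifecycle contract in two ways: init_stream now receives the per-call gpr_arena, and destroy_stream receives a grpc_closure *then_schedule_closure to run once teardown is complete instead of a void *and_free_memory block to gpr_free(). A minimal sketch of how a transport implementation might adapt is below; my_stream, my_init_stream, and my_destroy_stream are hypothetical names, and the pattern simply mirrors the lame_client.c destroy_call_elem change earlier in this diff.

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/transport_impl.h"

typedef struct my_stream {
  gpr_arena *arena;
  void *scratch;
} my_stream;

static int my_init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *self,
                          grpc_stream *stream, grpc_stream_refcount *refcount,
                          const void *server_data, gpr_arena *arena) {
  my_stream *s = (my_stream *)stream;
  s->arena = arena;
  /* Per-stream scratch can now come from the call's arena; it is released in
     bulk when the call (and therefore the arena) is destroyed. */
  s->scratch = gpr_arena_alloc(arena, 64);
  return 0;
}

static void my_destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *self,
                              grpc_stream *stream,
                              grpc_closure *then_schedule_closure) {
  /* Before this patch the last parameter was void *and_free_memory and the
     transport called gpr_free() on it; now the transport only signals that
     teardown is done and lets the arena own the memory. */
  grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}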