Diffstat (limited to 'src/core')
28 files changed, 1190 insertions, 637 deletions
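The largest change below adds a fallback path to the grpclb policy: if the balancer has not returned a serverlist within a timeout, the policy falls back to the backend addresses supplied by the resolver. The timeout comes from a new channel arg, defaulting to 10000 ms (GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS). A minimal sketch of how a client could tune it, assuming the C-core channel-args API in this tree; the helper name is illustrative:

#include <grpc/grpc.h>

/* Hypothetical helper: create an insecure channel with a 5s grpclb fallback
 * timeout. GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS is the channel arg read by
 * glb_create() in the diff below. */
static grpc_channel *channel_with_fallback(const char *target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS;
  arg.value.integer = 5000; /* fall back after 5s without a serverlist */
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, NULL);
}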
diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c index c507a2750e..a16b44d3dc 100644 --- a/src/core/ext/filters/client_channel/http_proxy.c +++ b/src/core/ext/filters/client_channel/http_proxy.c @@ -91,6 +91,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, char* user_cred = NULL; *name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred); if (*name_to_resolve == NULL) return false; + char* no_proxy_str = NULL; grpc_uri* uri = grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */); if (uri == NULL || uri->path[0] == '\0') { @@ -98,20 +99,14 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, "'http_proxy' environment variable set, but cannot " "parse server URI '%s' -- not using proxy", server_uri); - if (uri != NULL) { - gpr_free(user_cred); - grpc_uri_destroy(uri); - } - return false; + goto no_use_proxy; } if (strcmp(uri->scheme, "unix") == 0) { gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'", server_uri); - gpr_free(user_cred); - grpc_uri_destroy(uri); - return false; + goto no_use_proxy; } - char* no_proxy_str = gpr_getenv("no_proxy"); + no_proxy_str = gpr_getenv("no_proxy"); if (no_proxy_str != NULL) { static const char* NO_PROXY_SEPARATOR = ","; bool use_proxy = true; @@ -147,12 +142,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, gpr_free(no_proxy_hosts); gpr_free(server_host); gpr_free(server_port); - if (!use_proxy) { - grpc_uri_destroy(uri); - gpr_free(*name_to_resolve); - *name_to_resolve = NULL; - return false; - } + if (!use_proxy) goto no_use_proxy; } } grpc_arg args_to_add[2]; @@ -173,9 +163,15 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, } else { *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1); } - gpr_free(user_cred); grpc_uri_destroy(uri); + grpc_uri_destroy(uri); + gpr_free(user_cred); return true; +no_use_proxy: + if (uri != NULL) grpc_uri_destroy(uri); + gpr_free(*name_to_resolve); + *name_to_resolve = NULL; + gpr_free(user_cred); + return false; } static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx, diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c index 0423615105..cb5fdd13ba 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c @@ -123,6 +123,7 @@ #define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6 #define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120 #define GRPC_GRPCLB_RECONNECT_JITTER 0.2 +#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000 grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb"); @@ -299,6 +300,10 @@ typedef struct glb_lb_policy { /** timeout in milliseconds for the LB call. 0 means no deadline. */ int lb_call_timeout_ms; + /** timeout in milliseconds before using fallback backend addresses. + * 0 means not using fallback. */ + int lb_fallback_timeout_ms; + /** for communicating with the LB server */ grpc_channel *lb_channel; @@ -325,6 +330,9 @@ typedef struct glb_lb_policy { * Otherwise, we delegate to the RR policy. */ size_t serverlist_index; + /** stores the backend addresses from the resolver */ + grpc_lb_addresses *fallback_backend_addresses; + /** list of picks that are waiting on RR's policy connectivity */ pending_pick *pending_picks; @@ -345,6 +353,9 @@ typedef struct glb_lb_policy { /** is \a lb_call_retry_timer active? */ bool retry_timer_active; + /** is \a lb_fallback_timer active? 
*/ + bool fallback_timer_active; + /** called upon changes to the LB channel's connectivity. */ grpc_closure lb_channel_on_connectivity_changed; @@ -354,9 +365,6 @@ typedef struct glb_lb_policy { /************************************************************/ /* client data associated with the LB server communication */ /************************************************************/ - /* Finished sending initial request. */ - grpc_closure lb_on_sent_initial_request; - /* Status from the LB server has been received. This signals the end of the LB * call. */ grpc_closure lb_on_server_status_received; @@ -367,6 +375,9 @@ typedef struct glb_lb_policy { /* LB call retry timer callback. */ grpc_closure lb_on_call_retry; + /* LB fallback timer callback. */ + grpc_closure lb_on_fallback; + grpc_call *lb_call; /* streaming call to the LB server, */ grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */ @@ -390,7 +401,9 @@ typedef struct glb_lb_policy { /** LB call retry timer */ grpc_timer lb_call_retry_timer; - bool initial_request_sent; + /** LB fallback timer */ + grpc_timer lb_fallback_timer; + bool seen_initial_response; /* Stats for client-side load reporting. Should be unreffed and @@ -536,6 +549,32 @@ static grpc_lb_addresses *process_serverlist_locked( return lb_addresses; } +/* Returns the backend addresses extracted from the given addresses */ +static grpc_lb_addresses *extract_backend_addresses_locked( + grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) { + /* first pass: count the number of backend addresses */ + size_t num_backends = 0; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (!addresses->addresses[i].is_balancer) { + ++num_backends; + } + } + /* second pass: actually populate the addresses and (empty) LB tokens */ + grpc_lb_addresses *backend_addresses = + grpc_lb_addresses_create(num_backends, &lb_token_vtable); + size_t num_copied = 0; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (addresses->addresses[i].is_balancer) continue; + const grpc_resolved_address *addr = &addresses->addresses[i].address; + grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr, + addr->len, false /* is_balancer */, + NULL /* balancer_name */, + (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload); + ++num_copied; + } + return backend_addresses; +} + static void update_lb_connectivity_status_locked( grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, grpc_connectivity_state rr_state, grpc_error *rr_state_error) { @@ -603,35 +642,38 @@ static bool pick_from_internal_rr_locked( grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, const grpc_lb_policy_pick_args *pick_args, bool force_async, grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) { - // Look at the index into the serverlist to see if we should drop this call. - grpc_grpclb_server *server = - glb_policy->serverlist->servers[glb_policy->serverlist_index++]; - if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) { - glb_policy->serverlist_index = 0; // Wrap-around. - } - if (server->drop) { - // Not using the RR policy, so unref it. - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")", - (intptr_t)wc_arg->rr_policy); + // Check for drops if we are not using fallback backend addresses. + if (glb_policy->serverlist != NULL) { + // Look at the index into the serverlist to see if we should drop this call. 
+ grpc_grpclb_server *server = + glb_policy->serverlist->servers[glb_policy->serverlist_index++]; + if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) { + glb_policy->serverlist_index = 0; // Wrap-around. } - GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync"); - // Update client load reporting stats to indicate the number of - // dropped calls. Note that we have to do this here instead of in - // the client_load_reporting filter, because we do not create a - // subchannel call (and therefore no client_load_reporting filter) - // for dropped calls. - grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token, - wc_arg->client_stats); - grpc_grpclb_client_stats_unref(wc_arg->client_stats); - if (force_async) { - GPR_ASSERT(wc_arg->wrapped_closure != NULL); - GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE); + if (server->drop) { + // Not using the RR policy, so unref it. + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")", + (intptr_t)wc_arg->rr_policy); + } + GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync"); + // Update client load reporting stats to indicate the number of + // dropped calls. Note that we have to do this here instead of in + // the client_load_reporting filter, because we do not create a + // subchannel call (and therefore no client_load_reporting filter) + // for dropped calls. + grpc_grpclb_client_stats_add_call_dropped_locked( + server->load_balance_token, wc_arg->client_stats); + grpc_grpclb_client_stats_unref(wc_arg->client_stats); + if (force_async) { + GPR_ASSERT(wc_arg->wrapped_closure != NULL); + GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE); + gpr_free(wc_arg->free_when_done); + return false; + } gpr_free(wc_arg->free_when_done); - return false; + return true; } - gpr_free(wc_arg->free_when_done); - return true; } // Pick via the RR policy. const bool pick_done = grpc_lb_policy_pick_locked( @@ -669,8 +711,18 @@ static bool pick_from_internal_rr_locked( static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) { - grpc_lb_addresses *addresses = - process_serverlist_locked(exec_ctx, glb_policy->serverlist); + grpc_lb_addresses *addresses; + if (glb_policy->serverlist != NULL) { + GPR_ASSERT(glb_policy->serverlist->num_servers > 0); + addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist); + } else { + // If rr_handover_locked() is invoked when we haven't received any + // serverlist from the balancer, we use the fallback backends returned by + // the resolver. Note that the fallback backend list may be empty, in which + // case the new round_robin policy will keep the requested picks pending. 
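/* Aside: extract_backend_addresses_locked() above uses the classic two-pass
 * count-then-copy idiom so the grpc_lb_addresses result can be sized exactly
 * once. A self-contained sketch of the same pattern, with illustrative types
 * rather than the gRPC ones: */
#include <stdbool.h>
#include <stdlib.h>

typedef struct { bool is_balancer; int address; } example_addr;

static example_addr *copy_backends(const example_addr *in, size_t n,
                                   size_t *out_n) {
  size_t count = 0;
  for (size_t i = 0; i < n; ++i) { /* first pass: count the backends */
    if (!in[i].is_balancer) ++count;
  }
  example_addr *out = (example_addr *)malloc(count * sizeof(*out));
  size_t copied = 0;
  for (size_t i = 0; i < n; ++i) { /* second pass: populate */
    if (in[i].is_balancer) continue;
    out[copied++] = in[i];
  }
  *out_n = count;
  return out;
}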
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL); + addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses); + } GPR_ASSERT(addresses != NULL); grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args)); args->client_channel_factory = glb_policy->cc_factory; @@ -776,8 +828,6 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, /* glb_policy->rr_policy may be NULL (initial handover) */ static void rr_handover_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) { - GPR_ASSERT(glb_policy->serverlist != NULL && - glb_policy->serverlist->num_servers > 0); if (glb_policy->shutting_down) return; grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy); GPR_ASSERT(args != NULL); @@ -926,6 +976,9 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { if (glb_policy->serverlist != NULL) { grpc_grpclb_destroy_serverlist(glb_policy->serverlist); } + if (glb_policy->fallback_backend_addresses != NULL) { + grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses); + } grpc_fake_resolver_response_generator_unref(glb_policy->response_generator); grpc_subchannel_index_unref(); if (glb_policy->pending_update_args != NULL) { @@ -1067,10 +1120,26 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx, GRPC_ERROR_UNREF(error); } +static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error); static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy); static void start_picking_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) { + /* start a timer to fall back */ + if (glb_policy->lb_fallback_timeout_ms > 0 && + glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) { + grpc_millis deadline = + grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms; + GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer"); + GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked, + glb_policy, + grpc_combiner_scheduler(glb_policy->base.combiner)); + glb_policy->fallback_timer_active = true; + grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline, + &glb_policy->lb_on_fallback); + } + glb_policy->started_picking = true; grpc_backoff_reset(&glb_policy->lb_call_backoff_state); query_for_backends_locked(exec_ctx, glb_policy); @@ -1173,6 +1242,56 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx, exec_ctx, &glb_policy->state_tracker, current, notify); } +static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + glb_lb_policy *glb_policy = (glb_lb_policy *)arg; + glb_policy->retry_timer_active = false; + if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) { + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_INFO, "Restarting call to LB server (grpclb %p)", + (void *)glb_policy); + } + GPR_ASSERT(glb_policy->lb_call == NULL); + query_for_backends_locked(exec_ctx, glb_policy); + } + GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer"); +} + +static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx, + glb_lb_policy *glb_policy) { + if (glb_policy->started_picking && glb_policy->updating_lb_call) { + if (glb_policy->retry_timer_active) { + grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer); + } + if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy); + glb_policy->updating_lb_call = false; + } else if 
(!glb_policy->shutting_down) { + /* if we aren't shutting down, restart the LB client call after some time */ + grpc_millis next_try = + grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state); + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...", + (void *)glb_policy); + grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx); + if (timeout > 0) { + gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.", + timeout); + } else { + gpr_log(GPR_DEBUG, "... retry_timer_active immediately."); + } + } + GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer"); + GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry, + lb_call_on_retry_timer_locked, glb_policy, + grpc_combiner_scheduler(glb_policy->base.combiner)); + glb_policy->retry_timer_active = true; + grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try, + &glb_policy->lb_on_call_retry); + } + GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, + "lb_on_server_status_received_locked"); +} + static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error); @@ -1202,21 +1321,6 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg, schedule_next_client_load_report(exec_ctx, glb_policy); } -static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - grpc_op op; - memset(&op, 0, sizeof(op)); - op.op = GRPC_OP_SEND_MESSAGE; - op.data.send_message.send_message = glb_policy->client_load_report_payload; - GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure, - client_load_report_done_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - grpc_call_error call_error = grpc_call_start_batch_and_execute( - exec_ctx, glb_policy->lb_call, &op, 1, - &glb_policy->client_load_report_closure); - GPR_ASSERT(GRPC_CALL_OK == call_error); -} - static bool load_report_counters_are_zero(grpc_grpclb_request *request) { grpc_grpclb_dropped_call_counts *drop_entries = (grpc_grpclb_dropped_call_counts *) @@ -1236,6 +1340,9 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, glb_policy->client_load_report_timer_pending = false; GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "client_load_report"); + if (glb_policy->lb_call == NULL) { + maybe_restart_lb_call(exec_ctx, glb_policy); + } return; } // Construct message payload. @@ -1259,17 +1366,23 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_raw_byte_buffer_create(&request_payload_slice, 1); grpc_slice_unref_internal(exec_ctx, request_payload_slice); grpc_grpclb_request_destroy(request); - // If we've already sent the initial request, then we can go ahead and - // sent the load report. Otherwise, we need to wait until the initial - // request has been sent to send this - // (see lb_on_sent_initial_request_locked() below). - if (glb_policy->initial_request_sent) { - do_send_client_load_report_locked(exec_ctx, glb_policy); + // Send load report message. 
+ grpc_op op; + memset(&op, 0, sizeof(op)); + op.op = GRPC_OP_SEND_MESSAGE; + op.data.send_message.send_message = glb_policy->client_load_report_payload; + GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure, + client_load_report_done_locked, glb_policy, + grpc_combiner_scheduler(glb_policy->base.combiner)); + grpc_call_error call_error = grpc_call_start_batch_and_execute( + exec_ctx, glb_policy->lb_call, &op, 1, + &glb_policy->client_load_report_closure); + if (call_error != GRPC_CALL_OK) { + gpr_log(GPR_ERROR, "call_error=%d", call_error); + GPR_ASSERT(GRPC_CALL_OK == call_error); } } -static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error); static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error); static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, @@ -1312,9 +1425,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx, grpc_slice_unref_internal(exec_ctx, request_payload_slice); grpc_grpclb_request_destroy(request); - GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request, - lb_on_sent_initial_request_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received, lb_on_server_status_received_locked, glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner)); @@ -1329,7 +1439,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx, GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000, GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000); - glb_policy->initial_request_sent = false; glb_policy->seen_initial_response = false; glb_policy->last_client_load_report_counters_were_zero = false; } @@ -1346,7 +1455,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx, grpc_byte_buffer_destroy(glb_policy->lb_request_payload); grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details); - if (!glb_policy->client_load_report_timer_pending) { + if (glb_policy->client_load_report_timer_pending) { grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer); } } @@ -1370,7 +1479,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, GPR_ASSERT(glb_policy->lb_call != NULL); grpc_call_error call_error; - grpc_op ops[4]; + grpc_op ops[3]; memset(ops, 0, sizeof(ops)); grpc_op *op = ops; @@ -1391,13 +1500,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, op->flags = 0; op->reserved = NULL; op++; - /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref - * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */ - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, - "lb_on_sent_initial_request_locked"); - call_error = grpc_call_start_batch_and_execute( - exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), - &glb_policy->lb_on_sent_initial_request); + call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call, + ops, (size_t)(op - ops), NULL); GPR_ASSERT(GRPC_CALL_OK == call_error); op = ops; @@ -1434,19 +1538,6 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, GPR_ASSERT(GRPC_CALL_OK == call_error); } -static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - glb_policy->initial_request_sent = true; - // If we attempted to send a client load report before the initial - // request was sent, send the load report now. 
- if (glb_policy->client_load_report_payload != NULL) { - do_send_client_load_report_locked(exec_ctx, glb_policy); - } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "lb_on_sent_initial_request_locked"); -} - static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { glb_lb_policy *glb_policy = (glb_lb_policy *)arg; @@ -1520,6 +1611,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, if (glb_policy->serverlist != NULL) { /* dispose of the old serverlist */ grpc_grpclb_destroy_serverlist(glb_policy->serverlist); + } else { + /* or dispose of the fallback */ + grpc_lb_addresses_destroy(exec_ctx, + glb_policy->fallback_backend_addresses); + glb_policy->fallback_backend_addresses = NULL; + if (glb_policy->fallback_timer_active) { + grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer); + glb_policy->fallback_timer_active = false; + } } /* and update the copy in the glb_lb_policy instance. This * serverlist instance will be destroyed either upon the next @@ -1530,9 +1630,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, } } else { if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "Received empty server list. Picks will stay pending until " - "a response with > 0 servers is received"); + gpr_log(GPR_INFO, "Received empty server list, ignoring."); } grpc_grpclb_destroy_serverlist(serverlist); } @@ -1567,19 +1665,25 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, } } -static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { +static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - glb_policy->retry_timer_active = false; - if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)", - (void *)glb_policy); + glb_policy->fallback_timer_active = false; + /* If we receive a serverlist after the timer fires but before this callback + * actually runs, don't fall back. */ + if (glb_policy->serverlist == NULL) { + if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) { + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_INFO, + "Falling back to use backends from resolver (grpclb %p)", + (void *)glb_policy); + } + GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL); + rr_handover_locked(exec_ctx, glb_policy); } - GPR_ASSERT(glb_policy->lb_call == NULL); - query_for_backends_locked(exec_ctx, glb_policy); } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer"); + GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, + "grpclb_fallback_timer"); } static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, @@ -1598,65 +1702,30 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, } /* We need to perform cleanups no matter what. 
*/ lb_call_destroy_locked(exec_ctx, glb_policy); - if (glb_policy->started_picking && glb_policy->updating_lb_call) { - if (glb_policy->retry_timer_active) { - grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer); - } - if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy); - glb_policy->updating_lb_call = false; - } else if (!glb_policy->shutting_down) { - /* if we aren't shutting down, restart the LB client call after some time */ - grpc_millis next_try = - grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state); - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...", - (void *)glb_policy); - grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx); - if (timeout > 0) { - gpr_log(GPR_DEBUG, - "... retry_timer_active in %" PRIdPTR " milliseconds.", - timeout); - } else { - gpr_log(GPR_DEBUG, "... retry_timer_active immediately."); - } - } - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer"); - GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry, - lb_call_on_retry_timer_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - glb_policy->retry_timer_active = true; - grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try, - &glb_policy->lb_on_call_retry); + // If the load report timer is still pending, we wait for it to be + // called before restarting the call. Otherwise, we restart the call + // here. + if (!glb_policy->client_load_report_timer_pending) { + maybe_restart_lb_call(exec_ctx, glb_policy); + } +} + +static void fallback_update_locked(grpc_exec_ctx *exec_ctx, + glb_lb_policy *glb_policy, + const grpc_lb_addresses *addresses) { + GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL); + grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses); + glb_policy->fallback_backend_addresses = + extract_backend_addresses_locked(exec_ctx, addresses); + if (glb_policy->lb_fallback_timeout_ms > 0 && + !glb_policy->fallback_timer_active) { + rr_handover_locked(exec_ctx, glb_policy); } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "lb_on_server_status_received_locked"); } static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, const grpc_lb_policy_args *args) { glb_lb_policy *glb_policy = (glb_lb_policy *)policy; - if (glb_policy->updating_lb_channel) { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "Update already in progress for grpclb %p. Deferring update.", - (void *)glb_policy); - } - if (glb_policy->pending_update_args != NULL) { - grpc_channel_args_destroy(exec_ctx, - glb_policy->pending_update_args->args); - gpr_free(glb_policy->pending_update_args); - } - glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc( - sizeof(*glb_policy->pending_update_args)); - glb_policy->pending_update_args->client_channel_factory = - args->client_channel_factory; - glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args); - glb_policy->pending_update_args->combiner = args->combiner; - return; - } - - glb_policy->updating_lb_channel = true; - // Propagate update to lb_channel (pick first). 
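/* Aside: the deferral logic being reshuffled here is a single-slot pending
 * update: while one update is being applied, only the newest deferred request
 * is kept (older ones are destroyed). A standalone sketch of that pattern,
 * with illustrative types rather than the gRPC ones: */
#include <stdbool.h>

typedef struct { int value; } example_cfg;

typedef struct {
  bool updating;
  bool has_pending;
  example_cfg pending; /* only the most recent deferred update survives */
} example_updater;

static void start_async_update(example_cfg c) { (void)c; /* kick off work */ }

static void request_update(example_updater *u, example_cfg c) {
  if (u->updating) { /* defer: overwrite any stale pending update */
    u->pending = c;
    u->has_pending = true;
    return;
  }
  u->updating = true;
  start_async_update(c);
}

static void update_done(example_updater *u) { /* completion callback */
  u->updating = false;
  if (u->has_pending) { /* drain the deferred update, if any */
    u->has_pending = false;
    request_update(u, u->pending);
  }
}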
const grpc_arg *arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); if (arg == NULL || arg->type != GRPC_ARG_POINTER) { @@ -1674,13 +1743,43 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, "ignoring.", (void *)glb_policy); } + return; } const grpc_lb_addresses *addresses = (const grpc_lb_addresses *)arg->value.pointer.p; + + if (glb_policy->serverlist == NULL) { + // If a non-empty serverlist hasn't been received from the balancer, + // propagate the update to fallback_backend_addresses. + fallback_update_locked(exec_ctx, glb_policy, addresses); + } else if (glb_policy->updating_lb_channel) { + // If we have received a serverlist from the balancer, we need to defer the + // update when there is an in-progress one. + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_INFO, + "Update already in progress for grpclb %p. Deferring update.", + (void *)glb_policy); + } + if (glb_policy->pending_update_args != NULL) { + grpc_channel_args_destroy(exec_ctx, + glb_policy->pending_update_args->args); + gpr_free(glb_policy->pending_update_args); + } + glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc( + sizeof(*glb_policy->pending_update_args)); + glb_policy->pending_update_args->client_channel_factory = + args->client_channel_factory; + glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args); + glb_policy->pending_update_args->combiner = args->combiner; + return; + } + + glb_policy->updating_lb_channel = true; GPR_ASSERT(glb_policy->lb_channel != NULL); grpc_channel_args *lb_channel_args = build_lb_channel_args( exec_ctx, addresses, glb_policy->response_generator, args->args); - /* Propagate updates to the LB channel through the fake resolver */ + /* Propagate updates to the LB channel (pick first) through the fake resolver + */ grpc_fake_resolver_response_generator_set_response( exec_ctx, glb_policy->response_generator, lb_channel_args); grpc_channel_args_destroy(exec_ctx, lb_channel_args); @@ -1783,13 +1882,7 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = { static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory, grpc_lb_policy_args *args) { - /* Count the number of gRPC-LB addresses. There must be at least one. - * TODO(roth): For now, we ignore non-balancer addresses, but in the - * future, we may change the behavior such that we fall back to using - * the non-balancer addresses if we cannot reach any balancers. In the - * fallback case, we should use the LB policy indicated by - * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is - * unset, we should default to pick_first). */ + /* Count the number of gRPC-LB addresses. There must be at least one. */ const grpc_arg *arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); if (arg == NULL || arg->type != GRPC_ARG_POINTER) { @@ -1825,6 +1918,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, glb_policy->lb_call_timeout_ms = grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX}); + arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS); + glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer( + arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, + INT_MAX}); + // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args, // since we use this to trigger the client_load_reporting filter. 
grpc_arg new_arg = grpc_channel_arg_string_create( @@ -1833,6 +1931,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, glb_policy->args = grpc_channel_args_copy_and_add_and_remove( args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1); + /* Extract the backend addresses (may be empty) from the resolver for + * fallback. */ + glb_policy->fallback_backend_addresses = + extract_backend_addresses_locked(exec_ctx, addresses); + /* Create a client channel over them to communicate with a LB service */ glb_policy->response_generator = grpc_fake_resolver_response_generator_create(); diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.c b/src/core/ext/filters/client_channel/lb_policy_factory.c index 4d1405454c..05ab43d0b6 100644 --- a/src/core/ext/filters/client_channel/lb_policy_factory.c +++ b/src/core/ext/filters/client_channel/lb_policy_factory.c @@ -56,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) { } void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index, - void* address, size_t address_len, + const void* address, size_t address_len, bool is_balancer, const char* balancer_name, void* user_data) { GPR_ASSERT(index < addresses->num_addresses); diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h index 9d9fb143df..cf0f8cb615 100644 --- a/src/core/ext/filters/client_channel/lb_policy_factory.h +++ b/src/core/ext/filters/client_channel/lb_policy_factory.h @@ -73,7 +73,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses); * \a address is a socket address of length \a address_len. * Takes ownership of \a balancer_name. */ void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index, - void *address, size_t address_len, + const void *address, size_t address_len, bool is_balancer, const char *balancer_name, void *user_data); diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c index 7f1f57259a..c30cc93b6f 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c @@ -20,6 +20,7 @@ #if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET) #include <ares.h> +#include <sys/ioctl.h> #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" @@ -37,8 +38,6 @@ typedef struct fd_node { /** the owner of this fd node */ grpc_ares_ev_driver *ev_driver; - /** the grpc_fd owned by this fd node */ - grpc_fd *fd; /** a closure wrapping on_readable_cb, which should be invoked when the grpc_fd in this node becomes readable. 
*/ grpc_closure read_closure; @@ -50,10 +49,14 @@ typedef struct fd_node { /** mutex guarding the rest of the state */ gpr_mu mu; + /** the grpc_fd owned by this fd node */ + grpc_fd *fd; /** if the readable closure has been registered */ bool readable_registered; /** if the writable closure has been registered */ bool writable_registered; + /** if the fd is being shut down */ + bool shutting_down; } fd_node; struct grpc_ares_ev_driver { @@ -100,7 +103,6 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) { GPR_ASSERT(!fdn->readable_registered); GPR_ASSERT(!fdn->writable_registered); gpr_mu_destroy(&fdn->mu); - grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->fd); /* c-ares library has closed the fd inside grpc_fd. This fd may be picked up immediately by another thread, and should not be closed by the following grpc_fd_orphan. */ @@ -109,6 +111,19 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) { gpr_free(fdn); } +static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) { + gpr_mu_lock(&fdn->mu); + fdn->shutting_down = true; + if (!fdn->readable_registered && !fdn->writable_registered) { + gpr_mu_unlock(&fdn->mu); + fd_node_destroy(exec_ctx, fdn); + } else { + grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "c-ares fd shutdown")); + gpr_mu_unlock(&fdn->mu); + } +} + grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver, grpc_pollset_set *pollset_set) { *ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver)); @@ -175,18 +190,33 @@ static fd_node *pop_fd_node(fd_node **head, int fd) { return NULL; } +/* Check if \a fd is still readable */ +static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver, + int fd) { + size_t bytes_available = 0; + return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0; +} + static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { fd_node *fdn = (fd_node *)arg; grpc_ares_ev_driver *ev_driver = fdn->ev_driver; gpr_mu_lock(&fdn->mu); + const int fd = grpc_fd_wrapped_fd(fdn->fd); fdn->readable_registered = false; + if (fdn->shutting_down && !fdn->writable_registered) { + gpr_mu_unlock(&fdn->mu); + fd_node_destroy(exec_ctx, fdn); + grpc_ares_ev_driver_unref(ev_driver); + return; + } gpr_mu_unlock(&fdn->mu); - gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->fd)); + gpr_log(GPR_DEBUG, "readable on %d", fd); if (error == GRPC_ERROR_NONE) { - ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->fd), - ARES_SOCKET_BAD); + do { + ares_process_fd(ev_driver->channel, fd, ARES_SOCKET_BAD); + } while (grpc_ares_is_fd_still_readable(ev_driver, fd)); } else { // If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or // timed out. 
The pending lookups made on this ev_driver will be cancelled @@ -207,13 +237,19 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg, fd_node *fdn = (fd_node *)arg; grpc_ares_ev_driver *ev_driver = fdn->ev_driver; gpr_mu_lock(&fdn->mu); + const int fd = grpc_fd_wrapped_fd(fdn->fd); fdn->writable_registered = false; + if (fdn->shutting_down && !fdn->readable_registered) { + gpr_mu_unlock(&fdn->mu); + fd_node_destroy(exec_ctx, fdn); + grpc_ares_ev_driver_unref(ev_driver); + return; + } gpr_mu_unlock(&fdn->mu); - gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->fd)); + gpr_log(GPR_DEBUG, "writable on %d", fd); if (error == GRPC_ERROR_NONE) { - ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, - grpc_fd_wrapped_fd(fdn->fd)); + ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, fd); } else { // If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or // timed out. The pending lookups made on this ev_driver will be cancelled @@ -256,6 +292,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, fdn->ev_driver = ev_driver; fdn->readable_registered = false; fdn->writable_registered = false; + fdn->shutting_down = false; gpr_mu_init(&fdn->mu); GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_cb, fdn, grpc_schedule_on_exec_ctx); @@ -296,7 +333,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, while (ev_driver->fds != NULL) { fd_node *cur = ev_driver->fds; ev_driver->fds = ev_driver->fds->next; - fd_node_destroy(exec_ctx, cur); + fd_node_shutdown(exec_ctx, cur); } ev_driver->fds = new_list; // If the ev driver has no working fd, all the tasks are done. diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 4643a867b8..1c575badb8 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -682,7 +682,10 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer); grpc_slice_buffer_init(&s->frame_storage); + grpc_slice_buffer_init(&s->compressed_data_buffer); + grpc_slice_buffer_init(&s->decompressed_data_buffer); s->pending_byte_stream = false; + s->decompressed_header_bytes = 0; GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s, grpc_combiner_scheduler(t->combiner)); @@ -716,14 +719,8 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp, grpc_slice_buffer_destroy_internal(exec_ctx, &s->unprocessed_incoming_frames_buffer); grpc_slice_buffer_destroy_internal(exec_ctx, &s->frame_storage); - if (s->compressed_data_buffer) { - grpc_slice_buffer_destroy_internal(exec_ctx, s->compressed_data_buffer); - gpr_free(s->compressed_data_buffer); - } - if (s->decompressed_data_buffer) { - grpc_slice_buffer_destroy_internal(exec_ctx, s->decompressed_data_buffer); - gpr_free(s->decompressed_data_buffer); - } + grpc_slice_buffer_destroy_internal(exec_ctx, &s->compressed_data_buffer); + grpc_slice_buffer_destroy_internal(exec_ctx, &s->decompressed_data_buffer); grpc_chttp2_list_remove_stalled_by_transport(t, s); grpc_chttp2_list_remove_stalled_by_stream(t, s); @@ -1420,12 +1417,14 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; /* Identify stream compression */ - if ((s->stream_compression_send_enabled = - 
(op_payload->send_initial_metadata.send_initial_metadata->idx.named - .content_encoding != NULL)) == true) { - s->compressed_data_buffer = - (grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer)); - grpc_slice_buffer_init(s->compressed_data_buffer); + if (op_payload->send_initial_metadata.send_initial_metadata->idx.named + .content_encoding == NULL || + grpc_stream_compression_method_parse( + GRPC_MDVALUE( + op_payload->send_initial_metadata.send_initial_metadata->idx + .named.content_encoding->md), + true, &s->stream_compression_method) == 0) { + s->stream_compression_method = GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS; } s->send_initial_metadata_finished = add_closure_barrier(on_complete); @@ -1863,20 +1862,20 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, &s->frame_storage); s->unprocessed_incoming_frames_decompressed = false; } - if (s->stream_compression_recv_enabled && - !s->unprocessed_incoming_frames_decompressed) { - GPR_ASSERT(s->decompressed_data_buffer->length == 0); + if (!s->unprocessed_incoming_frames_decompressed) { + GPR_ASSERT(s->decompressed_data_buffer.length == 0); bool end_of_context; if (!s->stream_decompression_ctx) { s->stream_decompression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + s->stream_decompression_method); } - if (!grpc_stream_decompress(s->stream_decompression_ctx, - &s->unprocessed_incoming_frames_buffer, - s->decompressed_data_buffer, NULL, - GRPC_HEADER_SIZE_IN_BYTES, - &end_of_context)) { + if (!grpc_stream_decompress( + s->stream_decompression_ctx, + &s->unprocessed_incoming_frames_buffer, + &s->decompressed_data_buffer, NULL, + GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes, + &end_of_context)) { grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage); grpc_slice_buffer_reset_and_unref_internal( @@ -1884,9 +1883,13 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Stream decompression error."); } else { + s->decompressed_header_bytes += s->decompressed_data_buffer.length; + if (s->decompressed_header_bytes == GRPC_HEADER_SIZE_IN_BYTES) { + s->decompressed_header_bytes = 0; + } error = grpc_deframe_unprocessed_incoming_frames( - exec_ctx, &s->data_parser, s, s->decompressed_data_buffer, NULL, - s->recv_message); + exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer, + NULL, s->recv_message); if (end_of_context) { grpc_stream_compression_context_destroy( s->stream_decompression_ctx); @@ -1935,15 +1938,14 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, } bool pending_data = s->pending_byte_stream || s->unprocessed_incoming_frames_buffer.length > 0; - if (s->stream_compression_recv_enabled && s->read_closed && - s->frame_storage.length > 0 && !pending_data && !s->seen_error && - s->recv_trailing_metadata_finished != NULL) { + if (s->read_closed && s->frame_storage.length > 0 && !pending_data && + !s->seen_error && s->recv_trailing_metadata_finished != NULL) { /* Maybe some SYNC_FLUSH data is left in frame_storage. Consume them and * maybe decompress the next 5 bytes in the stream. 
*/ bool end_of_context; if (!s->stream_decompression_ctx) { s->stream_decompression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + s->stream_decompression_method); } if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->frame_storage, @@ -1956,6 +1958,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, } else { if (s->unprocessed_incoming_frames_buffer.length > 0) { s->unprocessed_incoming_frames_decompressed = true; + pending_data = true; } if (end_of_context) { grpc_stream_compression_context_destroy(s->stream_decompression_ctx); @@ -2771,7 +2774,7 @@ static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg, GRPC_ERROR_UNREF(s->byte_stream_error); s->byte_stream_error = GRPC_ERROR_NONE; grpc_chttp2_cancel_stream(exec_ctx, s->t, s, GRPC_ERROR_REF(error)); - s->byte_stream_error = error; + s->byte_stream_error = GRPC_ERROR_REF(error); } } @@ -2869,24 +2872,23 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx, grpc_error *error; if (s->unprocessed_incoming_frames_buffer.length > 0) { - if (s->stream_compression_recv_enabled && - !s->unprocessed_incoming_frames_decompressed) { + if (!s->unprocessed_incoming_frames_decompressed) { bool end_of_context; if (!s->stream_decompression_ctx) { s->stream_decompression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + s->stream_decompression_method); } if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->unprocessed_incoming_frames_buffer, - s->decompressed_data_buffer, NULL, MAX_SIZE_T, - &end_of_context)) { + &s->decompressed_data_buffer, NULL, + MAX_SIZE_T, &end_of_context)) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream decompression error."); return error; } GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0); grpc_slice_buffer_swap(&s->unprocessed_incoming_frames_buffer, - s->decompressed_data_buffer); + &s->decompressed_data_buffer); s->unprocessed_incoming_frames_decompressed = true; if (end_of_context) { grpc_stream_compression_context_destroy(s->stream_decompression_ctx); diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.c index 222d2177b2..73aaab1802 100644 --- a/src/core/ext/transport/chttp2/transport/frame_data.c +++ b/src/core/ext/transport/chttp2/transport/frame_data.c @@ -210,7 +210,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames( if (cur != end) { grpc_slice_buffer_undo_take_first( - &s->unprocessed_incoming_frames_buffer, + slices, grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg))); } grpc_slice_unref_internal(exec_ctx, slice); @@ -277,7 +277,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames( p->state = GRPC_CHTTP2_DATA_FH_0; cur += p->frame_size; grpc_slice_buffer_undo_take_first( - &s->unprocessed_incoming_frames_buffer, + slices, grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg))); grpc_slice_unref_internal(exec_ctx, slice); return GRPC_ERROR_NONE; diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c index 7e3ab565f5..3d1df19bc3 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_parser.c +++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c @@ -1683,17 +1683,12 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s, grpc_metadata_batch *initial_metadata) { - if 
(initial_metadata->idx.named.content_encoding != NULL) { - grpc_slice content_encoding = - GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md); - if (!grpc_slice_eq(content_encoding, GRPC_MDSTR_IDENTITY)) { - if (grpc_slice_eq(content_encoding, GRPC_MDSTR_GZIP)) { - s->stream_compression_recv_enabled = true; - s->decompressed_data_buffer = - (grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer)); - grpc_slice_buffer_init(s->decompressed_data_buffer); - } - } + if (initial_metadata->idx.named.content_encoding == NULL || + grpc_stream_compression_method_parse( + GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), false, + &s->stream_decompression_method) == 0) { + s->stream_decompression_method = + GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS; } } diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index 7c890423a6..dfa5ab984a 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -588,25 +588,27 @@ struct grpc_chttp2_stream { grpc_chttp2_write_cb *finish_after_write; size_t sending_bytes; - /** Whether stream compression send is enabled */ - bool stream_compression_recv_enabled; - /** Whether stream compression recv is enabled */ - bool stream_compression_send_enabled; - /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed - */ - bool unprocessed_incoming_frames_decompressed; + /* Stream compression method to be used. */ + grpc_stream_compression_method stream_compression_method; + /* Stream decompression method to be used. */ + grpc_stream_compression_method stream_decompression_method; /** Stream compression decompress context */ grpc_stream_compression_context *stream_decompression_ctx; /** Stream compression compress context */ grpc_stream_compression_context *stream_compression_ctx; /** Buffer storing data that is compressed but not sent */ - grpc_slice_buffer *compressed_data_buffer; + grpc_slice_buffer compressed_data_buffer; /** Amount of uncompressed bytes sent out when compressed_data_buffer is * emptied */ size_t uncompressed_data_size; /** Temporary buffer storing decompressed data */ - grpc_slice_buffer *decompressed_data_buffer; + grpc_slice_buffer decompressed_data_buffer; + /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed + */ + bool unprocessed_incoming_frames_decompressed; + /** gRPC header bytes that are already decompressed */ + size_t decompressed_header_bytes; }; /** Transport writing call flow: diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 2f6228847b..f0890188ca 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -321,8 +321,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( if (sent_initial_metadata) { /* send any body bytes, if allowed by flow control */ if (s->flow_controlled_buffer.length > 0 || - (s->stream_compression_send_enabled && - s->compressed_data_buffer->length > 0)) { + s->compressed_data_buffer.length > 0) { uint32_t stream_remote_window = (uint32_t)GPR_MAX( 0, s->flow_control.remote_window_delta + @@ -336,56 +335,59 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( bool is_last_data_frame = false; bool is_last_frame = false; size_t sending_bytes_before = s->sending_bytes; - if (s->stream_compression_send_enabled) { - while ((s->flow_controlled_buffer.length > 0 || - 
s->compressed_data_buffer->length > 0) && - max_outgoing > 0) { - if (s->compressed_data_buffer->length > 0) { - uint32_t send_bytes = (uint32_t)GPR_MIN( - max_outgoing, s->compressed_data_buffer->length); - is_last_data_frame = - (send_bytes == s->compressed_data_buffer->length && - s->flow_controlled_buffer.length == 0 && - s->fetching_send_message == NULL); - is_last_frame = - is_last_data_frame && s->send_trailing_metadata != NULL && - grpc_metadata_batch_is_empty(s->send_trailing_metadata); - grpc_chttp2_encode_data(s->id, s->compressed_data_buffer, - send_bytes, is_last_frame, - &s->stats.outgoing, &t->outbuf); - grpc_chttp2_flowctl_sent_data(&t->flow_control, - &s->flow_control, send_bytes); - max_outgoing -= send_bytes; - if (s->compressed_data_buffer->length == 0) { - s->sending_bytes += s->uncompressed_data_size; + while ((s->flow_controlled_buffer.length > 0 || + s->compressed_data_buffer.length > 0) && + max_outgoing > 0) { + if (s->compressed_data_buffer.length > 0) { + uint32_t send_bytes = (uint32_t)GPR_MIN( + max_outgoing, s->compressed_data_buffer.length); + is_last_data_frame = + (send_bytes == s->compressed_data_buffer.length && + s->flow_controlled_buffer.length == 0 && + s->fetching_send_message == NULL); + if (is_last_data_frame && s->send_trailing_metadata != NULL && + s->stream_compression_ctx != NULL) { + if (!grpc_stream_compress( + s->stream_compression_ctx, &s->flow_controlled_buffer, + &s->compressed_data_buffer, NULL, MAX_SIZE_T, + GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) { + gpr_log(GPR_ERROR, "Stream compression failed."); } - } else { - if (s->stream_compression_ctx == NULL) { - s->stream_compression_ctx = - grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_COMPRESS); - } - s->uncompressed_data_size = s->flow_controlled_buffer.length; - GPR_ASSERT(grpc_stream_compress( - s->stream_compression_ctx, &s->flow_controlled_buffer, - s->compressed_data_buffer, NULL, MAX_SIZE_T, - GRPC_STREAM_COMPRESSION_FLUSH_SYNC)); + grpc_stream_compression_context_destroy( + s->stream_compression_ctx); + s->stream_compression_ctx = NULL; + /* After finish, bytes in s->compressed_data_buffer may be + * more than max_outgoing. Start another round of the current + * while loop so that send_bytes and is_last_data_frame are + * recalculated. 
*/ + continue; + } + is_last_frame = + is_last_data_frame && s->send_trailing_metadata != NULL && + grpc_metadata_batch_is_empty(s->send_trailing_metadata); + grpc_chttp2_encode_data(s->id, &s->compressed_data_buffer, + send_bytes, is_last_frame, + &s->stats.outgoing, &t->outbuf); + grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control, + send_bytes); + max_outgoing -= send_bytes; + if (s->compressed_data_buffer.length == 0) { + s->sending_bytes += s->uncompressed_data_size; + } + } else { + if (s->stream_compression_ctx == NULL) { + s->stream_compression_ctx = + grpc_stream_compression_context_create( + s->stream_compression_method); + } + s->uncompressed_data_size = s->flow_controlled_buffer.length; + if (!grpc_stream_compress( + s->stream_compression_ctx, &s->flow_controlled_buffer, + &s->compressed_data_buffer, NULL, MAX_SIZE_T, + GRPC_STREAM_COMPRESSION_FLUSH_SYNC)) { + gpr_log(GPR_ERROR, "Stream compression failed."); } } - } else { - uint32_t send_bytes = (uint32_t)GPR_MIN( - max_outgoing, s->flow_controlled_buffer.length); - is_last_data_frame = s->fetching_send_message == NULL && - send_bytes == s->flow_controlled_buffer.length; - is_last_frame = - is_last_data_frame && s->send_trailing_metadata != NULL && - grpc_metadata_batch_is_empty(s->send_trailing_metadata); - grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, - send_bytes, is_last_frame, - &s->stats.outgoing, &t->outbuf); - grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control, - send_bytes); - s->sending_bytes += send_bytes; } if (!t->is_client) { t->ping_recv_state.last_ping_recv_time = 0; @@ -409,8 +411,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( &s->flow_controlled_bytes_flowed, GRPC_ERROR_NONE); now_writing = true; if (s->flow_controlled_buffer.length > 0 || - (s->stream_compression_send_enabled && - s->compressed_data_buffer->length > 0)) { + s->compressed_data_buffer.length > 0) { GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork"); grpc_chttp2_list_add_writable_stream(t, s); } @@ -428,8 +429,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( if (s->send_trailing_metadata != NULL && s->fetching_send_message == NULL && s->flow_controlled_buffer.length == 0 && - (!s->stream_compression_send_enabled || - s->compressed_data_buffer->length == 0)) { + s->compressed_data_buffer.length == 0) { GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata")); if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) { grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true, diff --git a/src/core/lib/compression/stream_compression.c b/src/core/lib/compression/stream_compression.c index ba9302794f..411489f029 100644 --- a/src/core/lib/compression/stream_compression.c +++ b/src/core/lib/compression/stream_compression.c @@ -16,177 +16,62 @@ * */ -#include <grpc/support/alloc.h> #include <grpc/support/log.h> #include "src/core/lib/compression/stream_compression.h" -#include "src/core/lib/iomgr/exec_ctx.h" -#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/compression/stream_compression_gzip.h" -#define OUTPUT_BLOCK_SIZE (1024) - -static bool gzip_flate(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, int flush, - bool *end_of_context) { - GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH); - /* Full flush is not allowed when inflating. 
*/ - GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH))); - - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - int r; - bool eoc = false; - size_t original_max_output_size = max_output_size; - while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) { - size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size - : OUTPUT_BLOCK_SIZE; - grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size); - ctx->zs.avail_out = (uInt)slice_size; - ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out); - while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) { - grpc_slice slice = grpc_slice_buffer_take_first(in); - ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice); - ctx->zs.next_in = GRPC_SLICE_START_PTR(slice); - r = ctx->flate(&ctx->zs, Z_NO_FLUSH); - if (r < 0 && r != Z_BUF_ERROR) { - gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); - return false; - } else if (r == Z_STREAM_END && ctx->flate == inflate) { - eoc = true; - } - if (ctx->zs.avail_in > 0) { - grpc_slice_buffer_undo_take_first( - in, - grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in, - GRPC_SLICE_LENGTH(slice))); - } - grpc_slice_unref_internal(&exec_ctx, slice); - } - if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) { - GPR_ASSERT(in->length == 0); - r = ctx->flate(&ctx->zs, flush); - if (flush == Z_SYNC_FLUSH) { - switch (r) { - case Z_OK: - /* Maybe flush is not complete; just made some partial progress. */ - if (ctx->zs.avail_out > 0) { - flush = 0; - } - break; - case Z_BUF_ERROR: - case Z_STREAM_END: - flush = 0; - break; - default: - gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); - return false; - } - } else if (flush == Z_FINISH) { - switch (r) { - case Z_OK: - case Z_BUF_ERROR: - /* Wait for the next loop to assign additional output space. 
*/ - GPR_ASSERT(ctx->zs.avail_out == 0); - break; - case Z_STREAM_END: - flush = 0; - break; - default: - gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); - return false; - } - } - } - - if (ctx->zs.avail_out == 0) { - grpc_slice_buffer_add(out, slice_out); - } else if (ctx->zs.avail_out < slice_size) { - slice_out.data.refcounted.length -= ctx->zs.avail_out; - grpc_slice_buffer_add(out, slice_out); - } else { - grpc_slice_unref_internal(&exec_ctx, slice_out); - } - max_output_size -= (slice_size - ctx->zs.avail_out); - } - grpc_exec_ctx_finish(&exec_ctx); - if (end_of_context) { - *end_of_context = eoc; - } - if (output_size) { - *output_size = original_max_output_size - max_output_size; - } - return true; -} +extern const grpc_stream_compression_vtable + grpc_stream_compression_identity_vtable; bool grpc_stream_compress(grpc_stream_compression_context *ctx, grpc_slice_buffer *in, grpc_slice_buffer *out, size_t *output_size, size_t max_output_size, grpc_stream_compression_flush flush) { - GPR_ASSERT(ctx->flate == deflate); - int gzip_flush; - switch (flush) { - case GRPC_STREAM_COMPRESSION_FLUSH_NONE: - gzip_flush = 0; - break; - case GRPC_STREAM_COMPRESSION_FLUSH_SYNC: - gzip_flush = Z_SYNC_FLUSH; - break; - case GRPC_STREAM_COMPRESSION_FLUSH_FINISH: - gzip_flush = Z_FINISH; - break; - default: - gzip_flush = 0; - } - return gzip_flate(ctx, in, out, output_size, max_output_size, gzip_flush, - NULL); + return ctx->vtable->compress(ctx, in, out, output_size, max_output_size, + flush); } bool grpc_stream_decompress(grpc_stream_compression_context *ctx, grpc_slice_buffer *in, grpc_slice_buffer *out, size_t *output_size, size_t max_output_size, bool *end_of_context) { - GPR_ASSERT(ctx->flate == inflate); - return gzip_flate(ctx, in, out, output_size, max_output_size, Z_SYNC_FLUSH, - end_of_context); + return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size, + end_of_context); } grpc_stream_compression_context *grpc_stream_compression_context_create( grpc_stream_compression_method method) { - grpc_stream_compression_context *ctx = - (grpc_stream_compression_context *)gpr_zalloc( - sizeof(grpc_stream_compression_context)); - int r; - if (ctx == NULL) { - return NULL; - } - if (method == GRPC_STREAM_COMPRESSION_DECOMPRESS) { - r = inflateInit2(&ctx->zs, 0x1F); - ctx->flate = inflate; - } else { - r = deflateInit2(&ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8, - Z_DEFAULT_STRATEGY); - ctx->flate = deflate; - } - if (r != Z_OK) { - gpr_free(ctx); - return NULL; + switch (method) { + case GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS: + case GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS: + return grpc_stream_compression_identity_vtable.context_create(method); + case GRPC_STREAM_COMPRESSION_GZIP_COMPRESS: + case GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS: + return grpc_stream_compression_gzip_vtable.context_create(method); + default: + gpr_log(GPR_ERROR, "Unknown stream compression method: %d", method); + return NULL; } - - return ctx; } void grpc_stream_compression_context_destroy( grpc_stream_compression_context *ctx) { - if (ctx->flate == inflate) { - inflateEnd(&ctx->zs); + ctx->vtable->context_destroy(ctx); +} + +int grpc_stream_compression_method_parse( + grpc_slice value, bool is_compress, + grpc_stream_compression_method *method) { + if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) { + *method = is_compress ? 
GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS + : GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS; + return 1; + } else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) { + *method = is_compress ? GRPC_STREAM_COMPRESSION_GZIP_COMPRESS + : GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS; + return 1; } else { - deflateEnd(&ctx->zs); + return 0; } - gpr_free(ctx); } diff --git a/src/core/lib/compression/stream_compression.h b/src/core/lib/compression/stream_compression.h index 0daaa9e655..6d073280fa 100644 --- a/src/core/lib/compression/stream_compression.h +++ b/src/core/lib/compression/stream_compression.h @@ -24,15 +24,20 @@ #include <grpc/slice_buffer.h> #include <zlib.h> +#include "src/core/lib/transport/static_metadata.h" + +typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable; + /* Stream compression/decompression context */ typedef struct grpc_stream_compression_context { - z_stream zs; - int (*flate)(z_stream *zs, int flush); + const grpc_stream_compression_vtable *vtable; } grpc_stream_compression_context; typedef enum grpc_stream_compression_method { - GRPC_STREAM_COMPRESSION_COMPRESS = 0, - GRPC_STREAM_COMPRESSION_DECOMPRESS, + GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS = 0, + GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS, + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS, + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS, GRPC_STREAM_COMPRESSION_METHOD_COUNT } grpc_stream_compression_method; @@ -43,6 +48,19 @@ typedef enum grpc_stream_compression_flush { GRPC_STREAM_COMPRESSION_FLUSH_COUNT } grpc_stream_compression_flush; +struct grpc_stream_compression_vtable { + bool (*compress)(grpc_stream_compression_context *ctx, grpc_slice_buffer *in, + grpc_slice_buffer *out, size_t *output_size, + size_t max_output_size, grpc_stream_compression_flush flush); + bool (*decompress)(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, + bool *end_of_context); + grpc_stream_compression_context *(*context_create)( + grpc_stream_compression_method method); + void (*context_destroy)(grpc_stream_compression_context *ctx); +}; + /** * Compress bytes provided in \a in with a given context, with an optional flush * at the end of compression. Emits at most \a max_output_size compressed bytes @@ -87,4 +105,10 @@ grpc_stream_compression_context *grpc_stream_compression_context_create( void grpc_stream_compression_context_destroy( grpc_stream_compression_context *ctx); -#endif /* GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_H */ +/** + * Parse stream compression method based on algorithm name + */ +int grpc_stream_compression_method_parse( + grpc_slice value, bool is_compress, grpc_stream_compression_method *method); + +#endif diff --git a/src/core/lib/compression/stream_compression_gzip.c b/src/core/lib/compression/stream_compression_gzip.c new file mode 100644 index 0000000000..abcbdb3a91 --- /dev/null +++ b/src/core/lib/compression/stream_compression_gzip.c @@ -0,0 +1,228 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> + +#include "src/core/lib/compression/stream_compression_gzip.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/slice/slice_internal.h" + +#define OUTPUT_BLOCK_SIZE (1024) + +typedef struct grpc_stream_compression_context_gzip { + grpc_stream_compression_context base; + + z_stream zs; + int (*flate)(z_stream *zs, int flush); +} grpc_stream_compression_context_gzip; + +static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, int flush, + bool *end_of_context) { + GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH); + /* Full flush is not allowed when inflating. */ + GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH))); + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + int r; + bool eoc = false; + size_t original_max_output_size = max_output_size; + while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) { + size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size + : OUTPUT_BLOCK_SIZE; + grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size); + ctx->zs.avail_out = (uInt)slice_size; + ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out); + while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) { + grpc_slice slice = grpc_slice_buffer_take_first(in); + ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice); + ctx->zs.next_in = GRPC_SLICE_START_PTR(slice); + r = ctx->flate(&ctx->zs, Z_NO_FLUSH); + if (r < 0 && r != Z_BUF_ERROR) { + gpr_log(GPR_ERROR, "zlib error (%d)", r); + grpc_slice_unref_internal(&exec_ctx, slice_out); + grpc_exec_ctx_finish(&exec_ctx); + return false; + } else if (r == Z_STREAM_END && ctx->flate == inflate) { + eoc = true; + } + if (ctx->zs.avail_in > 0) { + grpc_slice_buffer_undo_take_first( + in, + grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in, + GRPC_SLICE_LENGTH(slice))); + } + grpc_slice_unref_internal(&exec_ctx, slice); + } + if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) { + GPR_ASSERT(in->length == 0); + r = ctx->flate(&ctx->zs, flush); + if (flush == Z_SYNC_FLUSH) { + switch (r) { + case Z_OK: + /* Maybe flush is not complete; just made some partial progress. */ + if (ctx->zs.avail_out > 0) { + flush = 0; + } + break; + case Z_BUF_ERROR: + case Z_STREAM_END: + flush = 0; + break; + default: + gpr_log(GPR_ERROR, "zlib error (%d)", r); + grpc_slice_unref_internal(&exec_ctx, slice_out); + grpc_exec_ctx_finish(&exec_ctx); + return false; + } + } else if (flush == Z_FINISH) { + switch (r) { + case Z_OK: + case Z_BUF_ERROR: + /* Wait for the next loop to assign additional output space. 
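Both Z_OK and Z_BUF_ERROR here mean the stream could not be finalized yet; the assert below checks that this is because the output buffer was exhausted, so the outer loop will allocate another output slice.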
*/ + GPR_ASSERT(ctx->zs.avail_out == 0); + break; + case Z_STREAM_END: + flush = 0; + break; + default: + gpr_log(GPR_ERROR, "zlib error (%d)", r); + grpc_slice_unref_internal(&exec_ctx, slice_out); + grpc_exec_ctx_finish(&exec_ctx); + return false; + } + } + } + + if (ctx->zs.avail_out == 0) { + grpc_slice_buffer_add(out, slice_out); + } else if (ctx->zs.avail_out < slice_size) { + slice_out.data.refcounted.length -= ctx->zs.avail_out; + grpc_slice_buffer_add(out, slice_out); + } else { + grpc_slice_unref_internal(&exec_ctx, slice_out); + } + max_output_size -= (slice_size - ctx->zs.avail_out); + } + grpc_exec_ctx_finish(&exec_ctx); + if (end_of_context) { + *end_of_context = eoc; + } + if (output_size) { + *output_size = original_max_output_size - max_output_size; + } + return true; +} + +static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, + grpc_slice_buffer *out, + size_t *output_size, + size_t max_output_size, + grpc_stream_compression_flush flush) { + if (ctx == NULL) { + return false; + } + grpc_stream_compression_context_gzip *gzip_ctx = + (grpc_stream_compression_context_gzip *)ctx; + GPR_ASSERT(gzip_ctx->flate == deflate); + int gzip_flush; + switch (flush) { + case GRPC_STREAM_COMPRESSION_FLUSH_NONE: + gzip_flush = 0; + break; + case GRPC_STREAM_COMPRESSION_FLUSH_SYNC: + gzip_flush = Z_SYNC_FLUSH; + break; + case GRPC_STREAM_COMPRESSION_FLUSH_FINISH: + gzip_flush = Z_FINISH; + break; + default: + gzip_flush = 0; + } + return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, gzip_flush, + NULL); +} + +static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, + grpc_slice_buffer *out, + size_t *output_size, + size_t max_output_size, + bool *end_of_context) { + if (ctx == NULL) { + return false; + } + grpc_stream_compression_context_gzip *gzip_ctx = + (grpc_stream_compression_context_gzip *)ctx; + GPR_ASSERT(gzip_ctx->flate == inflate); + return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, + Z_SYNC_FLUSH, end_of_context); +} + +static grpc_stream_compression_context * +grpc_stream_compression_context_create_gzip( + grpc_stream_compression_method method) { + GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS || + method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); + grpc_stream_compression_context_gzip *gzip_ctx = + (grpc_stream_compression_context_gzip *)gpr_zalloc( + sizeof(grpc_stream_compression_context_gzip)); + int r; + if (gzip_ctx == NULL) { + return NULL; + } + if (method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS) { + r = inflateInit2(&gzip_ctx->zs, 0x1F); + gzip_ctx->flate = inflate; + } else { + r = deflateInit2(&gzip_ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8, + Z_DEFAULT_STRATEGY); + gzip_ctx->flate = deflate; + } + if (r != Z_OK) { + gpr_free(gzip_ctx); + return NULL; + } + + gzip_ctx->base.vtable = &grpc_stream_compression_gzip_vtable; + return (grpc_stream_compression_context *)gzip_ctx; +} + +static void grpc_stream_compression_context_destroy_gzip( + grpc_stream_compression_context *ctx) { + if (ctx == NULL) { + return; + } + grpc_stream_compression_context_gzip *gzip_ctx = + (grpc_stream_compression_context_gzip *)ctx; + if (gzip_ctx->flate == inflate) { + inflateEnd(&gzip_ctx->zs); + } else { + deflateEnd(&gzip_ctx->zs); + } + gpr_free(ctx); +} + +const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable = { + .compress = grpc_stream_compress_gzip, + .decompress = grpc_stream_decompress_gzip, + .context_create = 
grpc_stream_compression_context_create_gzip, + .context_destroy = grpc_stream_compression_context_destroy_gzip}; diff --git a/src/core/lib/compression/stream_compression_gzip.h b/src/core/lib/compression/stream_compression_gzip.h new file mode 100644 index 0000000000..7cf49a0de9 --- /dev/null +++ b/src/core/lib/compression/stream_compression_gzip.h @@ -0,0 +1,26 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H +#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H + +#include "src/core/lib/compression/stream_compression.h" + +extern const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable; + +#endif diff --git a/src/core/lib/compression/stream_compression_identity.c b/src/core/lib/compression/stream_compression_identity.c new file mode 100644 index 0000000000..3dfcf53b85 --- /dev/null +++ b/src/core/lib/compression/stream_compression_identity.c @@ -0,0 +1,94 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include <grpc/support/alloc.h> +#include <grpc/support/log.h> + +#include "src/core/lib/compression/stream_compression_identity.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/slice/slice_internal.h" + +#define OUTPUT_BLOCK_SIZE (1024) + +/* Singleton context used for all identity streams. 
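The identity implementation keeps no per-stream state, so a single statically allocated context is shared by every caller and its destroy hook is a no-op.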
*/ +static grpc_stream_compression_context identity_ctx = { + .vtable = &grpc_stream_compression_identity_vtable}; + +static void grpc_stream_compression_pass_through(grpc_slice_buffer *in, + grpc_slice_buffer *out, + size_t *output_size, + size_t max_output_size) { + if (max_output_size >= in->length) { + if (output_size) { + *output_size = in->length; + } + grpc_slice_buffer_move_into(in, out); + } else { + if (output_size) { + *output_size = max_output_size; + } + grpc_slice_buffer_move_first(in, max_output_size, out); + } +} + +static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, + grpc_slice_buffer *out, + size_t *output_size, + size_t max_output_size, + grpc_stream_compression_flush flush) { + if (ctx == NULL) { + return false; + } + grpc_stream_compression_pass_through(in, out, output_size, max_output_size); + return true; +} + +static bool grpc_stream_decompress_identity( + grpc_stream_compression_context *ctx, grpc_slice_buffer *in, + grpc_slice_buffer *out, size_t *output_size, size_t max_output_size, + bool *end_of_context) { + if (ctx == NULL) { + return false; + } + grpc_stream_compression_pass_through(in, out, output_size, max_output_size); + if (end_of_context) { + *end_of_context = false; + } + return true; +} + +static grpc_stream_compression_context * +grpc_stream_compression_context_create_identity( + grpc_stream_compression_method method) { + GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS || + method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS); + /* No context needed in this case. Use fake context instead. */ + return (grpc_stream_compression_context *)&identity_ctx; +} + +static void grpc_stream_compression_context_destroy_identity( + grpc_stream_compression_context *ctx) { + return; +} + +const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable = { + .compress = grpc_stream_compress_identity, + .decompress = grpc_stream_decompress_identity, + .context_create = grpc_stream_compression_context_create_identity, + .context_destroy = grpc_stream_compression_context_destroy_identity}; diff --git a/src/core/lib/compression/stream_compression_identity.h b/src/core/lib/compression/stream_compression_identity.h new file mode 100644 index 0000000000..41926e949e --- /dev/null +++ b/src/core/lib/compression/stream_compression_identity.h @@ -0,0 +1,27 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H +#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H + +#include "src/core/lib/compression/stream_compression.h" + +extern const grpc_stream_compression_vtable + grpc_stream_compression_identity_vtable; + +#endif diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 498fa18545..5bd7884e28 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -110,8 +110,6 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "executor_wakeup_initiated", "executor_queue_drained", "executor_push_retries", - "executor_threads_created", - "executor_threads_used", "server_requested_calls", "server_slowpath_requests_queued", }; @@ -221,8 +219,6 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = { "Number of times an executor queue was drained", "Number of times we raced and were forced to retry pushing a closure to " "the executor", - "Size of the backing thread pool for overflow gRPC Core work", - "How many executor threads actually got used", "How many calls were requested (not necessarily received) by the server", "How many times was the server slow path taken (indicates too few " "outstanding requests)", @@ -240,7 +236,6 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { "http2_send_message_per_write", "http2_send_trailing_metadata_per_write", "http2_send_flowctl_per_write", - "executor_closures_per_wakeup", "server_cqs_checked", }; const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = { @@ -256,7 +251,6 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = { "Number of streams whose payload was written per TCP write", "Number of streams terminated per TCP write", "Number of flow control updates written per TCP write", - "Number of closures executed each time an executor wakes up", "How many completion queues were checked looking for a CQ that had " "requested the incoming call", }; @@ -328,7 +322,6 @@ const uint8_t grpc_stats_table_7[102] = { const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64}; const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5}; void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 262144); if (value < 6) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, @@ -354,7 +347,6 @@ void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_0, 64)); } void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 29) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), @@ -381,7 +373,6 @@ void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_2, 128)); } void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, @@ -407,7 +398,6 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_4, 64)); } void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated 
by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), @@ -433,7 +423,6 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_6, 64)); } void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, @@ -459,7 +448,6 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_4, 64)); } void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, @@ -486,7 +474,6 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { } void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -514,7 +501,6 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, } void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { GRPC_STATS_INC_HISTOGRAM( @@ -542,7 +528,6 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, } void grpc_stats_inc_http2_send_initial_metadata_per_write( grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -572,7 +557,6 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write( } void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -600,7 +584,6 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, } void grpc_stats_inc_http2_send_trailing_metadata_per_write( grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -630,7 +613,6 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write( } void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -656,36 +638,7 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, grpc_stats_histo_find_bucket_slow( (exec_ctx), value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx, - int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ - value = GPR_CLAMP(value, 0, 1024); - if (value < 13) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, value); - return; - } - union { - double dbl; - uint64_t uint; - } _val, _bkt; - _val.dbl = value; - if (_val.uint < 
4637863191261478912ull) { - int bucket = - grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; - _bkt.dbl = grpc_stats_table_6[bucket]; - bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket); - return; - } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); -} void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 64); if (value < 3) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), @@ -710,17 +663,17 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_histo_find_bucket_slow( (exec_ctx), value, grpc_stats_table_8, 8)); } -const int grpc_stats_histo_buckets[14] = {64, 128, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 8}; -const int grpc_stats_histo_start[14] = {0, 64, 192, 256, 320, 384, 448, - 512, 576, 640, 704, 768, 832, 896}; -const int *const grpc_stats_histo_bucket_boundaries[14] = { +const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 8}; +const int grpc_stats_histo_start[13] = {0, 64, 192, 256, 320, 384, 448, + 512, 576, 640, 704, 768, 832}; +const int *const grpc_stats_histo_bucket_boundaries[13] = { grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6, - grpc_stats_table_6, grpc_stats_table_8}; -void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = { + grpc_stats_table_8}; +void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x) = { grpc_stats_inc_call_initial_size, grpc_stats_inc_poll_events_returned, grpc_stats_inc_tcp_write_size, @@ -733,5 +686,4 @@ void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = { grpc_stats_inc_http2_send_message_per_write, grpc_stats_inc_http2_send_trailing_metadata_per_write, grpc_stats_inc_http2_send_flowctl_per_write, - grpc_stats_inc_executor_closures_per_wakeup, grpc_stats_inc_server_cqs_checked}; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 9c5fb0178c..507e5f760e 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -112,8 +112,6 @@ typedef enum { GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED, GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED, GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES, - GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED, - GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED, GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS, GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED, GRPC_STATS_COUNTER_COUNT @@ -133,7 +131,6 @@ typedef enum { GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, - GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, GRPC_STATS_HISTOGRAM_COUNT } grpc_stats_histograms; @@ -164,11 +161,9 @@ typedef enum { GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64, GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768, GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64, - GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT 
= 832, - GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64, - GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 896, + GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 832, GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8, - GRPC_STATS_HISTOGRAM_BUCKETS = 904 + GRPC_STATS_HISTOGRAM_BUCKETS = 840 } grpc_stats_histogram_constants; #define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) @@ -421,11 +416,6 @@ typedef enum { GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED) #define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES) -#define GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED) -#define GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED) #define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS) #define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \ @@ -472,17 +462,13 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write( grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value)) void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, value) \ - grpc_stats_inc_executor_closures_per_wakeup((exec_ctx), (int)(value)) -void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx, - int x); #define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \ grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value)) void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x); -extern const int grpc_stats_histo_buckets[14]; -extern const int grpc_stats_histo_start[14]; -extern const int *const grpc_stats_histo_bucket_boundaries[14]; -extern void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, +extern const int grpc_stats_histo_buckets[13]; +extern const int grpc_stats_histo_start[13]; +extern const int *const grpc_stats_histo_bucket_boundaries[13]; +extern void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x); #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index 119d321746..5c0ab2262e 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -261,14 +261,6 @@ - counter: executor_push_retries doc: Number of times we raced and were forced to retry pushing a closure to the executor -- counter: executor_threads_created - doc: Size of the backing thread pool for overflow gRPC Core work -- counter: executor_threads_used - doc: How many executor threads actually got used -- histogram: executor_closures_per_wakeup - max: 1024 - buckets: 64 - doc: Number of closures executed each time an executor wakes up # server - counter: server_requested_calls doc: How many calls were requested (not necessarily received) by the server diff --git a/src/core/lib/debug/stats_data_bq_schema.sql b/src/core/lib/debug/stats_data_bq_schema.sql index 3bcf83f9d8..54869977b0 100644 --- a/src/core/lib/debug/stats_data_bq_schema.sql +++ b/src/core/lib/debug/stats_data_bq_schema.sql @@ -85,7 +85,5 @@ executor_scheduled_to_self_per_iteration:FLOAT, executor_wakeup_initiated_per_iteration:FLOAT, 
executor_queue_drained_per_iteration:FLOAT, executor_push_retries_per_iteration:FLOAT, -executor_threads_created_per_iteration:FLOAT, -executor_threads_used_per_iteration:FLOAT, server_requested_calls_per_iteration:FLOAT, server_slowpath_requests_queued_per_iteration:FLOAT diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c index cb4f506aed..58d9daf2dd 100644 --- a/src/core/lib/iomgr/executor.c +++ b/src/core/lib/iomgr/executor.c @@ -32,14 +32,16 @@ #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/support/spinlock.h" +#define MAX_DEPTH 2 + typedef struct { gpr_mu mu; gpr_cv cv; grpc_closure_list elems; + size_t depth; bool shutdown; bool queued_long_job; gpr_thd_id id; - grpc_closure_list local_elems; } thread_state; static thread_state *g_thread_state; @@ -54,35 +56,32 @@ static grpc_tracer_flag executor_trace = static void executor_thread(void *arg); -static void run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) { - int n = 0; // number of closures executed +static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) { + size_t n = 0; - while (!grpc_closure_list_empty(*list)) { - grpc_closure *c = list->head; - grpc_closure_list_init(list); - while (c != NULL) { - grpc_closure *next = c->next_data.next; - grpc_error *error = c->error_data.error; - if (GRPC_TRACER_ON(executor_trace)) { + grpc_closure *c = list.head; + while (c != NULL) { + grpc_closure *next = c->next_data.next; + grpc_error *error = c->error_data.error; + if (GRPC_TRACER_ON(executor_trace)) { #ifndef NDEBUG - gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c, - c->file_created, c->line_created); + gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c, + c->file_created, c->line_created); #else - gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c); + gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c); #endif - } + } #ifndef NDEBUG - c->scheduled = false; + c->scheduled = false; #endif - n++; - c->cb(exec_ctx, c->cb_arg, error); - GRPC_ERROR_UNREF(error); - c = next; - grpc_exec_ctx_flush(exec_ctx); - } + c->cb(exec_ctx, c->cb_arg, error); + GRPC_ERROR_UNREF(error); + c = next; + n++; + grpc_exec_ctx_flush(exec_ctx); } - GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, n); + return n; } bool grpc_executor_is_threaded() { @@ -127,7 +126,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) { for (size_t i = 0; i < g_max_threads; i++) { gpr_mu_destroy(&g_thread_state[i].mu); gpr_cv_destroy(&g_thread_state[i].cv); - run_closures(exec_ctx, &g_thread_state[i].elems); + run_closures(exec_ctx, g_thread_state[i].elems); } gpr_free(g_thread_state); gpr_tls_destroy(&g_this_thread_state); @@ -151,14 +150,14 @@ static void executor_thread(void *arg) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL); - GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(&exec_ctx); - - bool used = false; + size_t subtract_depth = 0; for (;;) { if (GRPC_TRACER_ON(executor_trace)) { - gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step", (int)(ts - g_thread_state)); + gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")", + (int)(ts - g_thread_state), subtract_depth); } gpr_mu_lock(&ts->mu); + ts->depth -= subtract_depth; while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) { ts->queued_long_job = false; gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME)); @@ -171,13 +170,8 @@ static void executor_thread(void *arg) { gpr_mu_unlock(&ts->mu); break; } - if (!used) { - GRPC_STATS_INC_EXECUTOR_THREADS_USED(&exec_ctx); - 
used = true; - } GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx); - GPR_ASSERT(grpc_closure_list_empty(ts->local_elems)); - ts->local_elems = ts->elems; + grpc_closure_list exec = ts->elems; ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; gpr_mu_unlock(&ts->mu); if (GRPC_TRACER_ON(executor_trace)) { @@ -185,7 +179,7 @@ static void executor_thread(void *arg) { } grpc_exec_ctx_invalidate_now(&exec_ctx); - run_closures(&exec_ctx, &ts->local_elems); + subtract_depth = run_closures(&exec_ctx, exec); } grpc_exec_ctx_finish(&exec_ctx); } @@ -218,10 +212,6 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)]; } else { GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx); - if (is_short) { - grpc_closure_list_append(&ts->local_elems, closure, error); - return; - } } thread_state *orig_ts = ts; @@ -261,7 +251,8 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, gpr_cv_signal(&ts->cv); } grpc_closure_list_append(&ts->elems, closure, error); - try_new_thread = ts->elems.head != closure && + ts->depth++; + try_new_thread = ts->depth > MAX_DEPTH && cur_thread_count < g_max_threads && !ts->shutdown; if (!is_short) ts->queued_long_job = true; gpr_mu_unlock(&ts->mu); diff --git a/src/core/lib/iomgr/socket_utils_windows.c b/src/core/lib/iomgr/socket_utils_windows.c index 2732c159aa..6e85e4b61f 100644 --- a/src/core/lib/iomgr/socket_utils_windows.c +++ b/src/core/lib/iomgr/socket_utils_windows.c @@ -26,12 +26,8 @@ #include <grpc/support/log.h> const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) { -#ifdef GPR_WIN_INET_NTOP - return inet_ntop(af, src, dst, size); -#else /* Windows InetNtopA wants a mutable ip pointer */ return InetNtopA(af, (void *)src, dst, size); -#endif /* GPR_WIN_INET_NTOP */ } #endif /* GRPC_WINDOWS_SOCKETUTILS */ diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index 78765818ca..14330bd1dd 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -79,6 +79,125 @@ static timer_shard g_shards[NUM_SHARDS]; * Access to this is protected by g_shared_mutables.mu */ static timer_shard *g_shard_queue[NUM_SHARDS]; +#ifndef NDEBUG + +/* == Hash table for duplicate timer detection == */ + +#define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */ + +static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */ +static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] = {NULL}; + +static void init_timer_ht() { + for (int i = 0; i < NUM_HASH_BUCKETS; i++) { + gpr_mu_init(&g_hash_mu[i]); + } +} + +static bool is_in_ht(grpc_timer *t) { + size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); + + gpr_mu_lock(&g_hash_mu[i]); + grpc_timer *p = g_timer_ht[i]; + while (p != NULL && p != t) { + p = p->hash_table_next; + } + gpr_mu_unlock(&g_hash_mu[i]); + + return (p == t); +} + +static void add_to_ht(grpc_timer *t) { + GPR_ASSERT(!t->hash_table_next); + size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); + + gpr_mu_lock(&g_hash_mu[i]); + grpc_timer *p = g_timer_ht[i]; + while (p != NULL && p != t) { + p = p->hash_table_next; + } + + if (p == t) { + grpc_closure *c = t->closure; + gpr_log(GPR_ERROR, + "** Duplicate timer (%p) being added. Closure: (%p), created at: " + "(%s:%d), scheduled at: (%s:%d) **", + t, c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated); + abort(); + } + + /* Timer not present in the bucket. 
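The walk above guarantees t is not already linked here, so head insertion cannot create a duplicate entry.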
Insert at head of the list */ + t->hash_table_next = g_timer_ht[i]; + g_timer_ht[i] = t; + gpr_mu_unlock(&g_hash_mu[i]); +} + +static void remove_from_ht(grpc_timer *t) { + size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); + bool removed = false; + + gpr_mu_lock(&g_hash_mu[i]); + if (g_timer_ht[i] == t) { + g_timer_ht[i] = g_timer_ht[i]->hash_table_next; + removed = true; + } else if (g_timer_ht[i] != NULL) { + grpc_timer *p = g_timer_ht[i]; + while (p->hash_table_next != NULL && p->hash_table_next != t) { + p = p->hash_table_next; + } + + if (p->hash_table_next == t) { + p->hash_table_next = t->hash_table_next; + removed = true; + } + } + gpr_mu_unlock(&g_hash_mu[i]); + + if (!removed) { + grpc_closure *c = t->closure; + gpr_log(GPR_ERROR, + "** Removing timer (%p) that is not added to the hash table. Closure " + "(%p), created at: (%s:%d), scheduled at: (%s:%d) **", + t, c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated); + abort(); + } + + t->hash_table_next = NULL; +} + +/* If a timer is added to a timer shard (either heap or a list), it cannot + * be pending. A timer is added to the hash table only if it is added to the + * timer shard. + * Therefore, if timer->pending is false, it cannot be in the hash table */ +static void validate_non_pending_timer(grpc_timer *t) { + if (!t->pending && is_in_ht(t)) { + grpc_closure *c = t->closure; + gpr_log(GPR_ERROR, + "** grpc_timer_cancel() called on a non-pending timer (%p) which " + "is in the hash table. Closure: (%p), created at: (%s:%d), " + "scheduled at: (%s:%d) **", + t, c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated); + abort(); + } +} + +#define INIT_TIMER_HASH_TABLE() init_timer_ht() +#define ADD_TO_HASH_TABLE(t) add_to_ht((t)) +#define REMOVE_FROM_HASH_TABLE(t) remove_from_ht((t)) +#define VALIDATE_NON_PENDING_TIMER(t) validate_non_pending_timer((t)) + +#else + +#define INIT_TIMER_HASH_TABLE() +#define ADD_TO_HASH_TABLE(t) +#define REMOVE_FROM_HASH_TABLE(t) +#define VALIDATE_NON_PENDING_TIMER(t) + +#endif + /* Thread local variable that stores the deadline of the next timer the thread * has last-seen. This is an optimization to prevent the thread from checking * shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock, @@ -139,6 +258,8 @@ void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) { shard->min_deadline = compute_min_deadline(shard); g_shard_queue[i] = shard; } + + INIT_TIMER_HASH_TABLE(); } void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { @@ -202,6 +323,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, timer->closure = closure; timer->deadline = deadline; +#ifndef NDEBUG + timer->hash_table_next = NULL; +#endif + if (GRPC_TRACER_ON(grpc_timer_trace)) { gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer, @@ -229,6 +354,9 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, grpc_time_averaged_stats_add_sample(&shard->stats, (double)(deadline - now) / 1000.0); + + ADD_TO_HASH_TABLE(timer); + if (deadline < shard->queue_deadline_cap) { is_first_timer = grpc_timer_heap_add(&shard->heap, timer); } else { @@ -290,7 +418,10 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer, timer->pending ?
"true" : "false"); } + if (timer->pending) { + REMOVE_FROM_HASH_TABLE(timer); + GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); timer->pending = false; if (timer->heap_index == INVALID_HEAP_INDEX) { @@ -298,6 +429,8 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { } else { grpc_timer_heap_remove(&shard->heap, timer); } + } else { + VALIDATE_NON_PENDING_TIMER(timer); } gpr_mu_unlock(&shard->mu); } @@ -382,6 +515,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard, grpc_timer *timer; gpr_mu_lock(&shard->mu); while ((timer = pop_one(shard, now))) { + REMOVE_FROM_HASH_TABLE(timer); GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error)); n++; } diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h index 72a4ac1f10..f0597f6ea0 100644 --- a/src/core/lib/iomgr/timer_generic.h +++ b/src/core/lib/iomgr/timer_generic.h @@ -29,6 +29,9 @@ struct grpc_timer { struct grpc_timer *next; struct grpc_timer *prev; grpc_closure *closure; +#ifndef NDEBUG + struct grpc_timer *hash_table_next; +#endif }; #endif /* GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H */ diff --git a/src/core/lib/security/credentials/composite/composite_credentials.c b/src/core/lib/security/credentials/composite/composite_credentials.c index 09fd60a12c..b67ff48d0f 100644 --- a/src/core/lib/security/credentials/composite/composite_credentials.c +++ b/src/core/lib/security/credentials/composite/composite_credentials.c @@ -87,6 +87,7 @@ static bool composite_call_get_request_metadata( ctx->on_request_metadata = on_request_metadata; GRPC_CLOSURE_INIT(&ctx->internal_on_request_metadata, composite_call_metadata_cb, ctx, grpc_schedule_on_exec_ctx); + bool synchronous = true; while (ctx->creds_index < ctx->composite_creds->inner.num_creds) { grpc_call_credentials *inner_creds = ctx->composite_creds->inner.creds_array[ctx->creds_index++]; @@ -95,19 +96,12 @@ static bool composite_call_get_request_metadata( ctx->md_array, &ctx->internal_on_request_metadata, error)) { if (*error != GRPC_ERROR_NONE) break; } else { + synchronous = false; // Async return. break; } } - // If we got through all creds synchronously or we got a synchronous - // error on one of them, return synchronously. - if (ctx->creds_index == ctx->composite_creds->inner.num_creds || - *error != GRPC_ERROR_NONE) { - gpr_free(ctx); - return true; - } - // At least one inner cred is returning asynchronously, so we'll - // return asynchronously as well. - return false; + if (synchronous) gpr_free(ctx); + return synchronous; } static void composite_call_cancel_get_request_metadata( diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.c index 73e0c23e0f..ee20241e3f 100644 --- a/src/core/lib/security/credentials/plugin/plugin_credentials.c +++ b/src/core/lib/security/credentials/plugin/plugin_credentials.c @@ -31,6 +31,9 @@ #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/validate_metadata.h" +grpc_tracer_flag grpc_plugin_credentials_trace = + GRPC_TRACER_INITIALIZER(false, "plugin_credentials"); + static void plugin_destruct(grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds) { grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds; @@ -53,6 +56,62 @@ static void pending_request_remove_locked( } } +// Checks if the request has been cancelled. +// If not, removes it from the pending list, so that it cannot be +// cancelled out from under us. 
+// When this returns, r->cancelled indicates whether the request was +// cancelled before completion. +static void pending_request_complete( + grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r) { + gpr_mu_lock(&r->creds->mu); + if (!r->cancelled) pending_request_remove_locked(r->creds, r); + gpr_mu_unlock(&r->creds->mu); + // Ref to credentials not needed anymore. + grpc_call_credentials_unref(exec_ctx, &r->creds->base); +} + +static grpc_error *process_plugin_result( + grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r, + const grpc_metadata *md, size_t num_md, grpc_status_code status, + const char *error_details) { + grpc_error *error = GRPC_ERROR_NONE; + if (status != GRPC_STATUS_OK) { + char *msg; + gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s", + error_details); + error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + } else { + bool seen_illegal_header = false; + for (size_t i = 0; i < num_md; ++i) { + if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin", + grpc_validate_header_key_is_legal(md[i].key))) { + seen_illegal_header = true; + break; + } else if (!grpc_is_binary_header(md[i].key) && + !GRPC_LOG_IF_ERROR( + "validate_metadata_from_plugin", + grpc_validate_header_nonbin_value_is_legal(md[i].value))) { + gpr_log(GPR_ERROR, "Plugin added invalid metadata value."); + seen_illegal_header = true; + break; + } + } + if (seen_illegal_header) { + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata"); + } else { + for (size_t i = 0; i < num_md; ++i) { + grpc_mdelem mdelem = grpc_mdelem_from_slices( + exec_ctx, grpc_slice_ref_internal(md[i].key), + grpc_slice_ref_internal(md[i].value)); + grpc_credentials_mdelem_array_add(r->md_array, mdelem); + GRPC_MDELEM_UNREF(exec_ctx, mdelem); + } + } + } + return error; +} + static void plugin_md_request_metadata_ready(void *request, const grpc_metadata *md, size_t num_md, @@ -64,54 +123,24 @@ static void plugin_md_request_metadata_ready(void *request, NULL, NULL); grpc_plugin_credentials_pending_request *r = (grpc_plugin_credentials_pending_request *)request; - // Check if the request has been cancelled. - // If not, remove it from the pending list, so that it cannot be - // cancelled out from under us. - gpr_mu_lock(&r->creds->mu); - if (!r->cancelled) pending_request_remove_locked(r->creds, r); - gpr_mu_unlock(&r->creds->mu); - grpc_call_credentials_unref(&exec_ctx, &r->creds->base); + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p: plugin returned " + "asynchronously", + r->creds, r); + } + // Remove request from pending list if not previously cancelled. + pending_request_complete(&exec_ctx, r); // If it has not been cancelled, process it. 
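+  // At this point r->cancelled can no longer change: the request has either
+  // been removed from the pending list or was already marked cancelled.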
if (!r->cancelled) { - if (status != GRPC_STATUS_OK) { - char *msg; - gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s", - error_details); - GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, - GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg)); - gpr_free(msg); - } else { - bool seen_illegal_header = false; - for (size_t i = 0; i < num_md; ++i) { - if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin", - grpc_validate_header_key_is_legal(md[i].key))) { - seen_illegal_header = true; - break; - } else if (!grpc_is_binary_header(md[i].key) && - !GRPC_LOG_IF_ERROR( - "validate_metadata_from_plugin", - grpc_validate_header_nonbin_value_is_legal( - md[i].value))) { - gpr_log(GPR_ERROR, "Plugin added invalid metadata value."); - seen_illegal_header = true; - break; - } - } - if (seen_illegal_header) { - GRPC_CLOSURE_SCHED( - &exec_ctx, r->on_request_metadata, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata")); - } else { - for (size_t i = 0; i < num_md; ++i) { - grpc_mdelem mdelem = grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_ref_internal(md[i].key), - grpc_slice_ref_internal(md[i].value)); - grpc_credentials_mdelem_array_add(r->md_array, mdelem); - GRPC_MDELEM_UNREF(&exec_ctx, mdelem); - } - GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, GRPC_ERROR_NONE); - } - } + grpc_error *error = + process_plugin_result(&exec_ctx, r, md, num_md, status, error_details); + GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error); + } else if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p: plugin was previously " + "cancelled", + r->creds, r); } gpr_free(r); grpc_exec_ctx_finish(&exec_ctx); @@ -125,6 +154,7 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, grpc_closure *on_request_metadata, grpc_error **error) { grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds; + bool retval = true; // Synchronous return. if (c->plugin.get_metadata != NULL) { // Create pending_request object. grpc_plugin_credentials_pending_request *pending_request = @@ -142,12 +172,60 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, c->pending_requests = pending_request; gpr_mu_unlock(&c->mu); // Invoke the plugin. The callback holds a ref to us. + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: invoking plugin", + c, pending_request); + } grpc_call_credentials_ref(creds); - c->plugin.get_metadata(c->plugin.state, context, - plugin_md_request_metadata_ready, pending_request); - return false; + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX]; + size_t num_creds_md = 0; + grpc_status_code status = GRPC_STATUS_OK; + const char *error_details = NULL; + if (!c->plugin.get_metadata(c->plugin.state, context, + plugin_md_request_metadata_ready, + pending_request, creds_md, &num_creds_md, + &status, &error_details)) { + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p: plugin will return " + "asynchronously", + c, pending_request); + } + return false; // Asynchronous return. + } + // Returned synchronously. + // Remove request from pending list if not previously cancelled. + pending_request_complete(exec_ctx, pending_request); + // If the request was cancelled, the error will have been returned + // asynchronously by plugin_cancel_get_request_metadata(), so return + // false. Otherwise, process the result. 
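+    // This check is needed even on the synchronous path, because the cancel
+    // callback may have fired while the plugin was executing.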
+ if (pending_request->cancelled) { + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p was cancelled, error " + "will be returned asynchronously", + c, pending_request); + } + retval = false; + } else { + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p: plugin returned " + "synchronously", + c, pending_request); + } + *error = process_plugin_result(exec_ctx, pending_request, creds_md, + num_creds_md, status, error_details); + } + // Clean up. + for (size_t i = 0; i < num_creds_md; ++i) { + grpc_slice_unref_internal(exec_ctx, creds_md[i].key); + grpc_slice_unref_internal(exec_ctx, creds_md[i].value); + } + gpr_free((void *)error_details); + gpr_free(pending_request); } - return true; + return retval; } static void plugin_cancel_get_request_metadata( @@ -159,6 +237,10 @@ static void plugin_cancel_get_request_metadata( c->pending_requests; pending_request != NULL; pending_request = pending_request->next) { if (pending_request->md_array == md_array) { + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, "plugin_credentials[%p]: cancelling request %p", c, + pending_request); + } pending_request->cancelled = true; + GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata, GRPC_ERROR_REF(error)); diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.h b/src/core/lib/security/credentials/plugin/plugin_credentials.h index 57266d589a..f56df9eac5 100644 --- a/src/core/lib/security/credentials/plugin/plugin_credentials.h +++ b/src/core/lib/security/credentials/plugin/plugin_credentials.h @@ -21,6 +21,8 @@ #include "src/core/lib/security/credentials/credentials.h" +extern grpc_tracer_flag grpc_plugin_credentials_trace; + struct grpc_plugin_credentials; typedef struct grpc_plugin_credentials_pending_request { diff --git a/src/core/lib/surface/init_secure.c b/src/core/lib/surface/init_secure.c index 2366c24910..8fbde3d1b4 100644 --- a/src/core/lib/surface/init_secure.c +++ b/src/core/lib/surface/init_secure.c @@ -25,6 +25,7 @@ #include "src/core/lib/debug/trace.h" #include "src/core/lib/security/credentials/credentials.h" +#include "src/core/lib/security/credentials/plugin/plugin_credentials.h" #include "src/core/lib/security/transport/auth_filters.h" #include "src/core/lib/security/transport/secure_endpoint.h" #include "src/core/lib/security/transport/security_connector.h" @@ -84,4 +85,7 @@ void grpc_register_security_filters(void) { maybe_prepend_server_auth_filter, NULL); } -void grpc_security_init() { grpc_security_register_handshaker_factories(); } +void grpc_security_init() { + grpc_security_register_handshaker_factories(); + grpc_register_tracer(&grpc_plugin_credentials_trace); +}
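For reference, here is a minimal round-trip sketch of the refactored, vtable-based stream compression API introduced above. It is illustrative only: it assumes the core-internal header is reachable from the include path, and the helper name round_trip_gzip is ours, not part of this patch.

/* Illustrative sketch: round-trips a buffer through the vtable-based API.
 * round_trip_gzip is a hypothetical helper, not part of this change. */
#include <stdbool.h>
#include <grpc/slice_buffer.h>
#include "src/core/lib/compression/stream_compression.h"

static bool round_trip_gzip(grpc_slice_buffer *input,
                            grpc_slice_buffer *output) {
  bool ok = false;
  bool end_of_context = false;
  grpc_slice_buffer compressed;
  grpc_slice_buffer_init(&compressed);
  /* The method enum selects the backend; each context carries its vtable. */
  grpc_stream_compression_context *cctx =
      grpc_stream_compression_context_create(
          GRPC_STREAM_COMPRESSION_GZIP_COMPRESS);
  grpc_stream_compression_context *dctx =
      grpc_stream_compression_context_create(
          GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
  if (cctx != NULL && dctx != NULL) {
    /* FLUSH_FINISH finalizes the gzip stream; ~(size_t)0 effectively disables
     * the output-size cap, and passing NULL for output_size is allowed. */
    ok = grpc_stream_compress(cctx, input, &compressed, NULL, ~(size_t)0,
                              GRPC_STREAM_COMPRESSION_FLUSH_FINISH) &&
         grpc_stream_decompress(dctx, &compressed, output, NULL, ~(size_t)0,
                                &end_of_context);
  }
  if (cctx != NULL) grpc_stream_compression_context_destroy(cctx);
  if (dctx != NULL) grpc_stream_compression_context_destroy(dctx);
  grpc_slice_buffer_destroy(&compressed);
  /* end_of_context reports that inflate observed the end of the stream. */
  return ok && end_of_context;
}

Swapping both enum values for their IDENTITY counterparts turns the same call sequence into a zero-copy pass-through; keeping that choice behind the per-context vtable, rather than behind the old ctx->flate pointer, is the point of this refactoring.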