Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/census/grpc_filter.c | 2
-rw-r--r--  src/core/ext/filters/client_channel/client_channel.c | 556
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c | 1
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.c | 23
-rw-r--r--  src/core/ext/filters/client_channel/subchannel.h | 5
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.c | 112
-rw-r--r--  src/core/ext/filters/deadline/deadline_filter.h | 8
-rw-r--r--  src/core/ext/filters/http/client/http_client_filter.c | 9
-rw-r--r--  src/core/ext/filters/http/message_compress/message_compress_filter.c | 397
-rw-r--r--  src/core/ext/filters/http/server/http_server_filter.c | 58
-rw-r--r--  src/core/ext/filters/load_reporting/load_reporting_filter.c | 1
-rw-r--r--  src/core/ext/filters/max_age/max_age_filter.c | 1
-rw-r--r--  src/core/ext/filters/message_size/message_size_filter.c | 6
-rw-r--r--  src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c | 1
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 45
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.c | 24
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c | 2
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.c | 5
-rw-r--r--  src/core/ext/transport/inproc/inproc_transport.c | 12
-rw-r--r--  src/core/lib/channel/channel_args.c | 98
-rw-r--r--  src/core/lib/channel/channel_args.h | 31
-rw-r--r--  src/core/lib/channel/channel_stack.c | 16
-rw-r--r--  src/core/lib/channel/channel_stack.h | 11
-rw-r--r--  src/core/lib/channel/connected_channel.c | 92
-rw-r--r--  src/core/lib/compression/algorithm_metadata.h | 14
-rw-r--r--  src/core/lib/compression/compression.c | 100
-rw-r--r--  src/core/lib/iomgr/call_combiner.c | 180
-rw-r--r--  src/core/lib/iomgr/call_combiner.h | 104
-rw-r--r--  src/core/lib/profiling/timers.h | 26
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.c | 183
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.c | 89
-rw-r--r--  src/core/lib/support/block_annotate.h | 22
-rw-r--r--  src/core/lib/support/thd_internal.h | 24
-rw-r--r--  src/core/lib/surface/call.c | 376
-rw-r--r--  src/core/lib/surface/call_test_only.h | 12
-rw-r--r--  src/core/lib/surface/channel.c | 29
-rw-r--r--  src/core/lib/surface/completion_queue.c | 2
-rw-r--r--  src/core/lib/surface/init.c | 2
-rw-r--r--  src/core/lib/surface/lame_client.cc | 19
-rw-r--r--  src/core/lib/surface/server.c | 2
-rw-r--r--  src/core/lib/transport/static_metadata.c | 816
-rw-r--r--  src/core/lib/transport/static_metadata.h | 296
-rw-r--r--  src/core/lib/transport/transport.c | 21
-rw-r--r--  src/core/lib/transport/transport.h | 13
-rw-r--r--  src/core/lib/transport/transport_impl.h | 3
-rw-r--r--  src/core/lib/transport/transport_op_string.c | 7
47 files changed, 2433 insertions, 1425 deletions
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 13fe2e6b1c..3e10f61154 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -179,7 +179,6 @@ const grpc_channel_filter grpc_client_census_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"census-client"};
@@ -193,6 +192,5 @@ const grpc_channel_filter grpc_server_census_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"census-server"};
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c
index 58e31d7b45..e6822ce801 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.c
@@ -796,7 +796,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
// send_message
// recv_trailing_metadata
// send_trailing_metadata
-#define MAX_WAITING_BATCHES 6
+// We also add room for a single cancel_stream batch.
+#define MAX_WAITING_BATCHES 7
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
@@ -808,23 +809,25 @@ typedef struct client_channel_call_data {
// The code in deadline_filter.c requires this to be the first field.
// TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
// and this struct both independently store a pointer to the call
- // stack and each has its own mutex. If/when we have time, find a way
- // to avoid this without breaking the grpc_deadline_state abstraction.
+ // combiner. If/when we have time, find a way to avoid this without
+ // breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
grpc_slice path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
+ gpr_arena *arena;
+ grpc_call_combiner *call_combiner;
+
grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params;
- /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest
- bit is 0), or a pointer to an error (if the lowest bit is 1) */
- gpr_atm subchannel_call_or_error;
- gpr_arena *arena;
+ grpc_subchannel_call *subchannel_call;
+ grpc_error *error;
grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending.
grpc_closure lb_pick_closure;
+ grpc_closure cancel_closure;
grpc_connected_subchannel *connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
@@ -832,10 +835,9 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
size_t waiting_for_pick_batches_count;
+ grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
- grpc_transport_stream_op_batch_payload *initial_metadata_payload;
-
- grpc_call_stack *owning_call;
+ grpc_transport_stream_op_batch *initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
@@ -843,55 +845,42 @@ typedef struct client_channel_call_data {
grpc_closure *original_on_complete;
} call_data;
-typedef struct {
- grpc_subchannel_call *subchannel_call;
- grpc_error *error;
-} call_or_error;
-
-static call_or_error get_call_or_error(call_data *p) {
- gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error);
- if (c == 0)
- return (call_or_error){NULL, NULL};
- else if (c & 1)
- return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)};
- else
- return (call_or_error){(grpc_subchannel_call *)c, NULL};
+grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
+ grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ return calld->subchannel_call;
}
-static bool set_call_or_error(call_data *p, call_or_error coe) {
- // this should always be under a lock
- call_or_error existing = get_call_or_error(p);
- if (existing.error != GRPC_ERROR_NONE) {
- GRPC_ERROR_UNREF(coe.error);
- return false;
- }
- GPR_ASSERT(existing.subchannel_call == NULL);
- if (coe.error != GRPC_ERROR_NONE) {
- GPR_ASSERT(coe.subchannel_call == NULL);
- gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_add(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ if (batch->send_initial_metadata) {
+ GPR_ASSERT(calld->initial_metadata_batch == NULL);
+ calld->initial_metadata_batch = batch;
} else {
- GPR_ASSERT(coe.subchannel_call != NULL);
- gpr_atm_rel_store(&p->subchannel_call_or_error,
- (gpr_atm)coe.subchannel_call);
+ GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
+ batch;
}
- return true;
}
-grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
- grpc_call_element *call_elem) {
- return get_call_or_error(call_elem->call_data).subchannel_call;
-}
-
-static void waiting_for_pick_batches_add_locked(
- call_data *calld, grpc_transport_stream_op_batch *batch) {
- GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
- batch;
+// This is called via the call combiner, so access to calld is synchronized.
+static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ call_data *calld = arg;
+ if (calld->waiting_for_pick_batches_count > 0) {
+ --calld->waiting_for_pick_batches_count;
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx,
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
+ GRPC_ERROR_REF(error), calld->call_combiner);
+ }
}
-static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_error *error) {
call_data *calld = elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
@@ -900,34 +889,60 @@ static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
grpc_error_string(error));
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
+ GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+ fail_pending_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ &calld->handle_pending_batch_in_call_combiner[i],
+ GRPC_ERROR_REF(error),
+ "waiting_for_pick_batches_fail");
+ }
+ if (calld->initial_metadata_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
+ exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
+ calld->call_combiner);
+ } else {
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "waiting_for_pick_batches_fail");
}
- calld->waiting_for_pick_batches_count = 0;
GRPC_ERROR_UNREF(error);
}
-static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- if (calld->waiting_for_pick_batches_count == 0) return;
- call_or_error coe = get_call_or_error(calld);
- if (coe.error != GRPC_ERROR_NONE) {
- waiting_for_pick_batches_fail_locked(exec_ctx, elem,
- GRPC_ERROR_REF(coe.error));
- return;
+// This is called via the call combiner, so access to calld is synchronized.
+static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *ignored) {
+ call_data *calld = arg;
+ if (calld->waiting_for_pick_batches_count > 0) {
+ --calld->waiting_for_pick_batches_count;
+ grpc_subchannel_call_process_op(
+ exec_ctx, calld->subchannel_call,
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
}
+}
+
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
" pending batches to subchannel_call=%p",
- elem->channel_data, calld, calld->waiting_for_pick_batches_count,
- coe.subchannel_call);
+ chand, calld, calld->waiting_for_pick_batches_count,
+ calld->subchannel_call);
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
- calld->waiting_for_pick_batches[i]);
- }
- calld->waiting_for_pick_batches_count = 0;
+ GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+ run_pending_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ &calld->handle_pending_batch_in_call_combiner[i],
+ GRPC_ERROR_NONE,
+ "waiting_for_pick_batches_resume");
+ }
+ GPR_ASSERT(calld->initial_metadata_batch != NULL);
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
+ calld->initial_metadata_batch);
}
// Applies service config to the call. Must be invoked once we know
@@ -968,29 +983,28 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
+ channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call = NULL;
const grpc_connected_subchannel_call_args call_args = {
.pollent = calld->pollent,
.path = calld->path,
.start_time = calld->call_start_time,
.deadline = calld->deadline,
.arena = calld->arena,
- .context = calld->subchannel_call_context};
+ .context = calld->subchannel_call_context,
+ .call_combiner = calld->call_combiner};
grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
+ exec_ctx, calld->connected_subchannel, &call_args,
+ &calld->subchannel_call);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
- elem->channel_data, calld, subchannel_call,
- grpc_error_string(new_error));
+ chand, calld, calld->subchannel_call, grpc_error_string(new_error));
}
- GPR_ASSERT(set_call_or_error(
- calld, (call_or_error){.subchannel_call = subchannel_call}));
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error);
+ waiting_for_pick_batches_fail(exec_ctx, elem, new_error);
} else {
- waiting_for_pick_batches_resume_locked(exec_ctx, elem);
+ waiting_for_pick_batches_resume(exec_ctx, elem);
}
GRPC_ERROR_UNREF(error);
}
@@ -1002,60 +1016,27 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand = elem->channel_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
- call_or_error coe = get_call_or_error(calld);
if (calld->connected_subchannel == NULL) {
// Failed to create subchannel.
- grpc_error *failure =
- error == GRPC_ERROR_NONE
- ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy")
- : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Failed to create subchannel", &error, 1);
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = error == GRPC_ERROR_NONE
+ ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy")
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to create subchannel", &error, 1);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failed to create subchannel: error=%s", chand,
- calld, grpc_error_string(failure));
- }
- set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure);
- } else if (coe.error != GRPC_ERROR_NONE) {
- /* already cancelled before subchannel became ready */
- grpc_error *child_errors[] = {error, coe.error};
- grpc_error *cancellation_error =
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Cancelled before creating subchannel", child_errors,
- GPR_ARRAY_SIZE(child_errors));
- /* if due to deadline, attach the deadline exceeded status to the error */
- if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
- cancellation_error =
- grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_DEADLINE_EXCEEDED);
+ calld, grpc_error_string(calld->error));
}
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: cancelled before subchannel became ready: %s",
- chand, calld, grpc_error_string(cancellation_error));
- }
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error);
+ waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error));
} else {
/* Create call on subchannel. */
create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
GRPC_ERROR_UNREF(error);
}
-static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call =
- get_call_or_error(calld).subchannel_call;
- if (subchannel_call == NULL) {
- return NULL;
- } else {
- return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
- }
-}
-
/** Return true if subchannel is available immediately (in which case
subchannel_ready_locked() should not be called), or false otherwise (in
which case subchannel_ready_locked() should be called when the subchannel
@@ -1069,6 +1050,44 @@ typedef struct {
grpc_closure closure;
} pick_after_resolver_result_args;
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error) {
+ grpc_call_element *elem = arg;
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ // If we don't yet have a resolver result, then a closure for
+ // pick_after_resolver_result_done_locked() will have been added to
+ // chand->waiting_for_resolver_result_closures, and it may not be invoked
+ // until after this call has been destroyed. We mark the operation as
+ // cancelled, so that when pick_after_resolver_result_done_locked()
+ // is called, it will be a no-op. We also immediately invoke
+ // subchannel_ready_locked() to propagate the error back to the caller.
+ for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
+ closure != NULL; closure = closure->next_data.next) {
+ pick_after_resolver_result_args *args = closure->cb_arg;
+ if (!args->cancelled && args->elem == elem) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: "
+ "cancelling pick waiting for resolver result",
+ chand, calld);
+ }
+ args->cancelled = true;
+ // Note: Although we are not in the call combiner here, we are
+ // basically stealing the call combiner from the pending pick, so
+ // it's safe to call subchannel_ready_locked() here -- we are
+ // essentially calling it here instead of calling it in
+ // pick_after_resolver_result_done_locked().
+ subchannel_ready_locked(exec_ctx, elem,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
+ }
+ }
+}
+
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
@@ -1079,21 +1098,24 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "call cancelled before resolver result");
}
} else {
- channel_data *chand = args->elem->channel_data;
- call_data *calld = args->elem->call_data;
+ grpc_call_element *elem = args->elem;
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
+ NULL);
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
chand, calld);
}
- subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
+ subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
} else {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld);
}
- if (pick_subchannel_locked(exec_ctx, args->elem)) {
- subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
+ if (pick_subchannel_locked(exec_ctx, elem)) {
+ subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
}
}
@@ -1116,41 +1138,33 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
args, grpc_combiner_scheduler(chand->combiner));
grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
&args->closure, GRPC_ERROR_NONE);
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->cancel_closure,
+ pick_after_resolver_result_cancel_locked, elem,
+ grpc_combiner_scheduler(chand->combiner)));
}
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- // If we don't yet have a resolver result, then a closure for
- // pick_after_resolver_result_done_locked() will have been added to
- // chand->waiting_for_resolver_result_closures, and it may not be invoked
- // until after this call has been destroyed. We mark the operation as
- // cancelled, so that when pick_after_resolver_result_done_locked()
- // is called, it will be a no-op. We also immediately invoke
- // subchannel_ready_locked() to propagate the error back to the caller.
- for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
- closure != NULL; closure = closure->next_data.next) {
- pick_after_resolver_result_args *args = closure->cb_arg;
- if (!args->cancelled && args->elem == elem) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: "
- "cancelling pick waiting for resolver result",
- chand, calld);
- }
- args->cancelled = true;
- subchannel_ready_locked(exec_ctx, elem,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick cancelled", &error, 1));
+ if (calld->lb_policy != NULL) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
+ chand, calld, calld->lb_policy);
}
+ grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
+ &calld->connected_subchannel,
+ GRPC_ERROR_REF(error));
}
- GRPC_ERROR_UNREF(error);
}
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
-// Unrefs the LB policy after invoking subchannel_ready_locked().
+// Unrefs the LB policy and invokes subchannel_ready_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
@@ -1160,6 +1174,7 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
}
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL);
GPR_ASSERT(calld->lb_policy != NULL);
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
@@ -1194,24 +1209,15 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
}
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
+ } else {
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->cancel_closure, pick_callback_cancel_locked,
+ elem, grpc_combiner_scheduler(chand->combiner)));
}
return pick_done;
}
-static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- GPR_ASSERT(calld->lb_policy != NULL);
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
- chand, calld, calld->lb_policy);
- }
- grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
- &calld->connected_subchannel, error);
-}
-
static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
GPR_TIMER_BEGIN("pick_subchannel", 0);
@@ -1224,7 +1230,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
// Otherwise, if the service config specified a value for this
// method, use that.
uint32_t initial_metadata_flags =
- calld->initial_metadata_payload->send_initial_metadata
+ calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata_flags;
const bool wait_for_ready_set_from_api =
initial_metadata_flags &
@@ -1241,7 +1247,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
}
}
const grpc_lb_policy_pick_args inputs = {
- calld->initial_metadata_payload->send_initial_metadata
+ calld->initial_metadata_batch->payload->send_initial_metadata
.send_initial_metadata,
initial_metadata_flags, &calld->lb_token_mdelem};
pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
@@ -1258,91 +1264,33 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
return pick_done;
}
-static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error_ignored) {
- GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
- grpc_transport_stream_op_batch *batch = arg;
- grpc_call_element *elem = batch->handler_private.extra_arg;
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- /* need to recheck that another thread hasn't set the call */
- call_or_error coe = get_call_or_error(calld);
- if (coe.error != GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(coe.error));
- }
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch, GRPC_ERROR_REF(coe.error));
- goto done;
- }
- if (coe.subchannel_call != NULL) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
- calld, coe.subchannel_call);
- }
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
- goto done;
- }
- // Add to waiting-for-pick list. If we succeed in getting a
- // subchannel call below, we'll handle this batch (along with any
- // other waiting batches) in waiting_for_pick_batches_resume_locked().
- waiting_for_pick_batches_add_locked(calld, batch);
- // If this is a cancellation, cancel the pending pick (if any) and
- // fail any pending batches.
- if (batch->cancel_stream) {
- grpc_error *error = batch->payload->cancel_stream.cancel_error;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
- calld, grpc_error_string(error));
- }
- /* Stash a copy of cancel_error in our call data, so that we can use
- it for subsequent operations. This ensures that if the call is
- cancelled before any batches are passed down (e.g., if the deadline
- is in the past when the call starts), we can return the right
- error to the caller when the first batch does get passed down. */
- set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
- if (calld->lb_policy != NULL) {
- pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error_ignored) {
+ GPR_TIMER_BEGIN("start_pick_locked", 0);
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ GPR_ASSERT(calld->connected_subchannel == NULL);
+ if (pick_subchannel_locked(exec_ctx, elem)) {
+ // Pick was returned synchronously.
+ if (calld->connected_subchannel == NULL) {
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy");
+ waiting_for_pick_batches_fail(exec_ctx, elem,
+ GRPC_ERROR_REF(calld->error));
} else {
- pick_after_resolver_result_cancel_locked(exec_ctx, elem,
- GRPC_ERROR_REF(error));
- }
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- goto done;
- }
- /* if we don't have a subchannel, try to get one */
- if (batch->send_initial_metadata) {
- GPR_ASSERT(calld->connected_subchannel == NULL);
- calld->initial_metadata_payload = batch->payload;
- GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
- /* If a subchannel is not available immediately, the polling entity from
- call_data should be provided to channel_data's interested_parties, so
- that IO of the lb_policy and resolver could be done under it. */
- if (pick_subchannel_locked(exec_ctx, elem)) {
- // Pick was returned synchronously.
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
- if (calld->connected_subchannel == NULL) {
- grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy");
- set_call_or_error(calld,
- (call_or_error){.error = GRPC_ERROR_REF(error)});
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, error);
- } else {
- // Create subchannel call.
- create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
- }
- } else {
- grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
- chand->interested_parties);
+ // Create subchannel call.
+ create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
+ } else {
+ // Pick will be done asynchronously. Add the call's polling entity to
+ // the channel's interested_parties, so that I/O for the resolver
+ // and LB policy can be done under it.
+ grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
}
-done:
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
- "start_transport_stream_op_batch");
- GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
+ GPR_TIMER_END("start_pick_locked", 0);
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -1365,27 +1313,49 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_ERROR_REF(error));
}
-/* The logic here is fairly complicated, due to (a) the fact that we
- need to handle the case where we receive the send op before the
- initial metadata op, and (b) the need for efficiency, especially in
- the streaming case.
-
- We use double-checked locking to initially see if initialization has been
- performed. If it has not, we acquire the combiner and perform initialization.
- If it has, we proceed on the fast path. */
static void cc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace) ||
- GRPC_TRACER_ON(grpc_trace_channel)) {
- grpc_call_log_op(GPR_INFO, elem, batch);
- }
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch);
}
+ GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
+ // If we've previously been cancelled, immediately fail any new batches.
+ if (calld->error != GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
+ chand, calld, grpc_error_string(calld->error));
+ }
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
+ goto done;
+ }
+ if (batch->cancel_stream) {
+ // Stash a copy of cancel_error in our call data, so that we can use
+ // it for subsequent operations. This ensures that if the call is
+ // cancelled before any batches are passed down (e.g., if the deadline
+ // is in the past when the call starts), we can return the right
+ // error to the caller when the first batch does get passed down.
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
+ calld, grpc_error_string(calld->error));
+ }
+ // If we have a subchannel call, send the cancellation batch down.
+ // Otherwise, fail all pending batches.
+ if (calld->subchannel_call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+ } else {
+ waiting_for_pick_batches_add(calld, batch);
+ waiting_for_pick_batches_fail(exec_ctx, elem,
+ GRPC_ERROR_REF(calld->error));
+ }
+ goto done;
+ }
// Intercept on_complete for recv_trailing_metadata so that we can
// check retry throttle status.
if (batch->recv_trailing_metadata) {
@@ -1395,38 +1365,43 @@ static void cc_start_transport_stream_op_batch(
grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete;
}
- /* try to (atomically) get the call */
- call_or_error coe = get_call_or_error(calld);
- GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
- if (coe.error != GRPC_ERROR_NONE) {
+ // Check if we've already gotten a subchannel call.
+ // Note that once we have completed the pick, we do not need to enter
+ // the channel combiner, which is more efficient (especially for
+ // streaming calls).
+ if (calld->subchannel_call != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(coe.error));
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
+ calld, calld->subchannel_call);
}
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch, GRPC_ERROR_REF(coe.error));
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
goto done;
}
- if (coe.subchannel_call != NULL) {
+ // We do not yet have a subchannel call.
+ // Add the batch to the waiting-for-pick list.
+ waiting_for_pick_batches_add(calld, batch);
+ // For batches containing a send_initial_metadata op, enter the channel
+ // combiner to start a pick.
+ if (batch->send_initial_metadata) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
+ }
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
+ elem, grpc_combiner_scheduler(chand->combiner)),
+ GRPC_ERROR_NONE);
+ } else {
+ // For all other batches, release the call combiner.
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
- calld, coe.subchannel_call);
+ "chand=%p calld=%p: saved batch, yeilding call combiner", chand,
+ calld);
}
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
- goto done;
- }
- /* we failed; lock and figure out what to do */
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "batch does not include send_initial_metadata");
}
- GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
- batch->handler_private.extra_arg = elem;
- GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
- start_transport_stream_op_batch_locked, batch,
- grpc_combiner_scheduler(chand->combiner)),
- GRPC_ERROR_NONE);
done:
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
}
@@ -1441,10 +1416,11 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
- calld->owning_call = args->call_stack;
calld->arena = args->arena;
+ calld->call_combiner = args->call_combiner;
if (chand->deadline_checking_enabled) {
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack, calld->deadline);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+ args->call_combiner, calld->deadline);
}
return GRPC_ERROR_NONE;
}
@@ -1463,13 +1439,12 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
if (calld->method_params != NULL) {
method_parameters_unref(calld->method_params);
}
- call_or_error coe = get_call_or_error(calld);
- GRPC_ERROR_UNREF(coe.error);
- if (coe.subchannel_call != NULL) {
- grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call,
+ GRPC_ERROR_UNREF(calld->error);
+ if (calld->subchannel_call != NULL) {
+ grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
then_schedule_closure);
then_schedule_closure = NULL;
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call,
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
"client_channel_destroy_call");
}
GPR_ASSERT(calld->lb_policy == NULL);
@@ -1508,7 +1483,6 @@ const grpc_channel_filter grpc_client_channel_filter = {
sizeof(channel_data),
cc_init_channel_elem,
cc_destroy_channel_elem,
- cc_get_peer,
cc_get_channel_info,
"client-channel",
};
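
Editor's note: the client_channel changes above replace the old atomic subchannel_call_or_error word with plain calld fields that are only touched under the new per-call combiner: batches arriving before a pick completes are stashed in call data and later re-injected one at a time by scheduling a per-batch closure on the call combiner. The fragment below is a minimal illustrative sketch of that idiom, not code from this commit; the example_* names are invented, and it assumes only the GRPC_CLOSURE_INIT / GRPC_CALL_COMBINER_START macros and headers that appear elsewhere in this diff.

#include <stddef.h>

#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/transport/transport.h"

#define EXAMPLE_MAX_PENDING 7

typedef struct {
  grpc_call_combiner *call_combiner;
  grpc_transport_stream_op_batch *pending[EXAMPLE_MAX_PENDING];
  grpc_closure pending_closures[EXAMPLE_MAX_PENDING];
  size_t pending_count;
} example_call_data;

// Runs under the call combiner, so access to example_call_data is
// synchronized.
static void example_resume_one(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *ignored) {
  example_call_data *calld = (example_call_data *)arg;
  if (calld->pending_count > 0) {
    --calld->pending_count;
    // Forward calld->pending[calld->pending_count] to the next element here;
    // whatever eventually completes that batch is responsible for yielding
    // the call combiner again (e.g. via GRPC_CALL_COMBINER_STOP).
  }
}

// Assumed to be called while already holding the call combiner, as
// waiting_for_pick_batches_resume() is above.
static void example_resume_all(grpc_exec_ctx *exec_ctx,
                               example_call_data *calld) {
  for (size_t i = 0; i < calld->pending_count; ++i) {
    GRPC_CLOSURE_INIT(&calld->pending_closures[i], example_resume_one, calld,
                      grpc_schedule_on_exec_ctx);
    // Each closure is queued on the call combiner and runs only after the
    // previous holder yields it.
    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
                             &calld->pending_closures[i], GRPC_ERROR_NONE,
                             "example_resume_all");
  }
}
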
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
index 568bb2ba8d..299f26b4de 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
@@ -132,6 +132,5 @@ const grpc_channel_filter grpc_client_load_reporting_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client_load_reporting"};
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c
index 5788819331..5cc8be7628 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.c
@@ -724,13 +724,6 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
-char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *call) {
- grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
- grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
- return top_elem->filter->get_peer(exec_ctx, top_elem);
-}
-
void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call,
grpc_transport_stream_op_batch *op) {
@@ -760,13 +753,15 @@ grpc_error *grpc_connected_subchannel_create_call(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
- const grpc_call_element_args call_args = {.call_stack = callstk,
- .server_transport_data = NULL,
- .context = args->context,
- .path = args->path,
- .start_time = args->start_time,
- .deadline = args->deadline,
- .arena = args->arena};
+ const grpc_call_element_args call_args = {
+ .call_stack = callstk,
+ .server_transport_data = NULL,
+ .context = args->context,
+ .path = args->path,
+ .start_time = args->start_time,
+ .deadline = args->deadline,
+ .arena = args->arena,
+ .call_combiner = args->call_combiner};
grpc_error *error = grpc_call_stack_init(
exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 6d2abb04df..51d712f6a7 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -106,6 +106,7 @@ typedef struct {
gpr_timespec deadline;
gpr_arena *arena;
grpc_call_context_element *context;
+ grpc_call_combiner *call_combiner;
} grpc_connected_subchannel_call_args;
grpc_error *grpc_connected_subchannel_create_call(
@@ -150,10 +151,6 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call,
grpc_transport_stream_op_batch *op);
-/** continue querying for peer */
-char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *subchannel_call);
-
/** Must be called once per call. Sets the 'then_schedule_closure' argument for
call stack destruction. */
void grpc_subchannel_call_set_cleanup_closure(
diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.c
index 6789903c95..565b0679dc 100644
--- a/src/core/ext/filters/deadline/deadline_filter.c
+++ b/src/core/ext/filters/deadline/deadline_filter.c
@@ -34,22 +34,56 @@
// grpc_deadline_state
//
+// The on_complete callback used when sending a cancel_error batch down the
+// filter stack. Yields the call combiner when the batch returns.
+static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* ignored) {
+ grpc_deadline_state* deadline_state = arg;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+ "got on_complete from cancel_stream batch");
+ GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
+}
+
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
+static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = arg;
+ grpc_deadline_state* deadline_state = elem->call_data;
+ grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
+ deadline_state, grpc_schedule_on_exec_ctx));
+ batch->cancel_stream = true;
+ batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+}
+
// Timer callback.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
if (error != GRPC_ERROR_CANCELLED) {
- grpc_call_element_signal_error(
- exec_ctx, elem,
- grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED));
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
+ grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner,
+ GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
+ send_cancel_op_in_call_combiner, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
+ &deadline_state->timer_callback, error,
+ "deadline exceeded -- sending cancel_stream op");
+ } else {
+ GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack,
+ "deadline_timer");
}
- GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
// Starts the deadline timer.
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
@@ -58,51 +92,39 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
return;
}
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
- grpc_deadline_timer_state cur_state;
grpc_closure* closure = NULL;
-retry:
- cur_state =
- (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
- switch (cur_state) {
+ switch (deadline_state->timer_state) {
case GRPC_DEADLINE_STATE_PENDING:
// Note: We do not start the timer if there is already a timer
return;
case GRPC_DEADLINE_STATE_FINISHED:
- if (gpr_atm_rel_cas(&deadline_state->timer_state,
- GRPC_DEADLINE_STATE_FINISHED,
- GRPC_DEADLINE_STATE_PENDING)) {
- // If we've already created and destroyed a timer, we always create a
- // new closure: we have no other guarantee that the inlined closure is
- // not in use (it may hold a pending call to timer_callback)
- closure = GRPC_CLOSURE_CREATE(timer_callback, elem,
- grpc_schedule_on_exec_ctx);
- } else {
- goto retry;
- }
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
+ // If we've already created and destroyed a timer, we always create a
+ // new closure: we have no other guarantee that the inlined closure is
+ // not in use (it may hold a pending call to timer_callback)
+ closure =
+ GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx);
break;
case GRPC_DEADLINE_STATE_INITIAL:
- if (gpr_atm_rel_cas(&deadline_state->timer_state,
- GRPC_DEADLINE_STATE_INITIAL,
- GRPC_DEADLINE_STATE_PENDING)) {
- closure =
- GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
- elem, grpc_schedule_on_exec_ctx);
- } else {
- goto retry;
- }
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
+ closure =
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
+ elem, grpc_schedule_on_exec_ctx);
break;
}
- GPR_ASSERT(closure);
+ GPR_ASSERT(closure != NULL);
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
gpr_now(GPR_CLOCK_MONOTONIC));
}
// Cancels the deadline timer.
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
- if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
- GRPC_DEADLINE_STATE_FINISHED)) {
+ if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) {
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED;
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
} else {
// timer was either in STATE_INITIAL (nothing to cancel)
@@ -131,6 +153,7 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
+ bool in_call_combiner;
grpc_call_element* elem;
gpr_timespec deadline;
grpc_closure closure;
@@ -138,15 +161,29 @@ struct start_timer_after_init_state {
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
struct start_timer_after_init_state* state = arg;
+ grpc_deadline_state* deadline_state = state->elem->call_data;
+ if (!state->in_call_combiner) {
+ // We are initially called without holding the call combiner, so we
+ // need to bounce ourselves into it.
+ state->in_call_combiner = true;
+ GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
+ &state->closure, GRPC_ERROR_REF(error),
+ "scheduling deadline timer");
+ return;
+ }
start_timer_if_needed(exec_ctx, state->elem, state->deadline);
gpr_free(state);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+ "done scheduling deadline timer");
}
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
deadline_state->call_stack = call_stack;
+ deadline_state->call_combiner = call_combiner;
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -158,7 +195,7 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
// call stack initialization is finished. To avoid that problem, we
// create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done.
- struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
+ struct start_timer_after_init_state* state = gpr_zalloc(sizeof(*state));
state->elem = elem;
state->deadline = deadline;
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
@@ -232,7 +269,8 @@ typedef struct server_call_data {
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_element_args* args) {
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack, args->deadline);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+ args->call_combiner, args->deadline);
return GRPC_ERROR_NONE;
}
@@ -310,7 +348,6 @@ const grpc_channel_filter grpc_client_deadline_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"deadline",
};
@@ -325,7 +362,6 @@ const grpc_channel_filter grpc_server_deadline_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"deadline",
};
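
Editor's note: the timer_callback() rewrite above illustrates the other recurring idiom in this change: a callback that fires outside the call combiner (here, from the timer subsystem) must first bounce itself onto the combiner before it may touch call data or send a cancel_stream batch down the stack; start_timer_after_init() does the same with an explicit flag. Below is a condensed, illustrative sketch of that bounce pattern, not code from this commit; the example_* names are invented, and only the call-combiner macros shown in the diff are assumed.

#include <stdbool.h>

#include "src/core/lib/iomgr/call_combiner.h"

typedef struct {
  bool in_call_combiner;
  grpc_call_combiner *call_combiner;
  grpc_closure closure;  // assumed initialized elsewhere with
                         // GRPC_CLOSURE_INIT(..., example_bounced_work, ...)
} example_bounce_state;

static void example_bounced_work(grpc_exec_ctx *exec_ctx, void *arg,
                                 grpc_error *error) {
  example_bounce_state *state = (example_bounce_state *)arg;
  if (!state->in_call_combiner) {
    // First invocation: we do not hold the call combiner yet, so reschedule
    // this same closure on it and return.
    state->in_call_combiner = true;
    GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner, &state->closure,
                             GRPC_ERROR_REF(error), "example bounce");
    return;
  }
  // Second invocation: now running under the call combiner, so it is safe
  // to touch per-call state or start a batch; yield when done.
  GRPC_CALL_COMBINER_STOP(exec_ctx, state->call_combiner, "example done");
}
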
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index 420bf7065a..3eb102ad28 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -31,7 +31,8 @@ typedef enum grpc_deadline_timer_state {
typedef struct grpc_deadline_state {
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
- gpr_atm timer_state;
+ grpc_call_combiner* call_combiner;
+ grpc_deadline_timer_state timer_state;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when the call is complete.
@@ -50,6 +51,7 @@ typedef struct grpc_deadline_state {
// assumes elem->call_data is zero'd
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner,
gpr_timespec deadline);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
@@ -61,6 +63,8 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
// to ensure that the timer callback is not invoked while it is in the
// process of being reset, which means that attempting to increase the
// deadline may result in the timer being called twice.
+//
+// Note: Must be called while holding the call combiner.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline);
@@ -70,6 +74,8 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
//
// Note: It is the caller's responsibility to chain to the next filter if
// necessary after this function returns.
+//
+// Note: Must be called while holding the call combiner.
void grpc_deadline_state_client_start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* op);
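
Editor's note: as a usage note for the new call_combiner parameter above, a filter embedding grpc_deadline_state now passes the per-call combiner to grpc_deadline_state_init(). The server-side init_call_elem() change in deadline_filter.c above is the canonical caller; the standalone sketch below merely restates it (the example_ prefix is invented).

static grpc_error *example_init_call_elem(grpc_exec_ctx *exec_ctx,
                                          grpc_call_element *elem,
                                          const grpc_call_element_args *args) {
  // args->call_combiner is the field added to grpc_call_element_args in this
  // change; grpc_deadline_state needs it so that the deadline timer can
  // synchronize with the rest of the call.
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
                           args->call_combiner, args->deadline);
  return GRPC_ERROR_NONE;
}
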
diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.c
index 3ca01a41b5..99ddd08e6a 100644
--- a/src/core/ext/filters/http/client/http_client_filter.c
+++ b/src/core/ext/filters/http/client/http_client_filter.c
@@ -36,6 +36,7 @@
static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
@@ -215,13 +216,13 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
return;
}
error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
return;
}
// There may or may not be more to read, but we don't care. If we got
@@ -414,7 +415,7 @@ static void hc_start_transport_stream_op_batch(
done:
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
} else if (!batch_will_be_handled_asynchronously) {
grpc_call_next_op(exec_ctx, elem, batch);
}
@@ -426,6 +427,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = (call_data *)elem->call_data;
+ calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -565,6 +567,5 @@ const grpc_channel_filter grpc_http_client_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"http-client"};
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.c
index 20a3488115..98a503cafc 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.c
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.c
@@ -35,33 +35,29 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
-#define INITIAL_METADATA_UNSEEN 0
-#define HAS_COMPRESSION_ALGORITHM 2
-#define NO_COMPRESSION_ALGORITHM 4
-
-#define CANCELLED_BIT ((gpr_atm)1)
+typedef enum {
+ // Initial metadata not yet seen.
+ INITIAL_METADATA_UNSEEN = 0,
+ // Initial metadata seen; compression algorithm set.
+ HAS_COMPRESSION_ALGORITHM,
+ // Initial metadata seen; no compression algorithm set.
+ NO_COMPRESSION_ALGORITHM,
+} initial_metadata_state;
typedef struct call_data {
- grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
+ grpc_call_combiner *call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
+ grpc_linked_mdelem stream_compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
- uint32_t remaining_slice_bytes;
+ grpc_linked_mdelem accept_stream_encoding_storage;
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
-
- /* Atomic recording the state of initial metadata; allowed values:
- INITIAL_METADATA_UNSEEN - initial metadata op not seen
- HAS_COMPRESSION_ALGORITHM - initial metadata seen; compression algorithm
- set
- NO_COMPRESSION_ALGORITHM - initial metadata seen; no compression algorithm
- set
- pointer - a stalled op containing a send_message that's waiting on initial
- metadata
- pointer | CANCELLED_BIT - request was cancelled with error pointed to */
- gpr_atm send_initial_metadata_state;
-
+ initial_metadata_state send_initial_metadata_state;
+ grpc_error *cancel_error;
+ grpc_closure start_send_message_batch_in_call_combiner;
grpc_transport_stream_op_batch *send_message_batch;
+ grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_slice_buffer_stream replacement_stream;
grpc_closure *original_send_message_on_complete;
grpc_closure send_message_on_complete;
@@ -75,6 +71,13 @@ typedef struct channel_data {
uint32_t enabled_algorithms_bitset;
/** Supported compression algorithms */
uint32_t supported_compression_algorithms;
+
+ /** The default, channel-level, stream compression algorithm */
+ grpc_stream_compression_algorithm default_stream_compression_algorithm;
+ /** Bitset of enabled stream compression algorithms */
+ uint32_t enabled_stream_compression_algorithms_bitset;
+ /** Supported stream compression algorithms */
+ uint32_t supported_stream_compression_algorithms;
} channel_data;
static bool skip_compression(grpc_call_element *elem, uint32_t flags,
@@ -83,13 +86,13 @@ static bool skip_compression(grpc_call_element *elem, uint32_t flags,
channel_data *channeld = elem->channel_data;
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
- return 1;
+ return true;
}
if (has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
- return 1;
+ return true;
}
- return 0; /* we have an actual call-specific algorithm */
+ return false; /* we have an actual call-specific algorithm */
}
/* no per-call compression override */
return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
@@ -106,31 +109,56 @@ static grpc_error *process_send_initial_metadata(
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
*has_compression_algorithm = false;
- /* Parse incoming request for compression. If any, it'll be available
- * at calld->compression_algorithm */
- if (initial_metadata->idx.named.grpc_internal_encoding_request != NULL) {
+ grpc_stream_compression_algorithm stream_compression_algorithm =
+ GRPC_STREAM_COMPRESS_NONE;
+ if (initial_metadata->idx.named.grpc_internal_stream_encoding_request !=
+ NULL) {
grpc_mdelem md =
- initial_metadata->idx.named.grpc_internal_encoding_request->md;
- if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md),
- &calld->compression_algorithm)) {
+ initial_metadata->idx.named.grpc_internal_stream_encoding_request->md;
+ if (!grpc_stream_compression_algorithm_parse(
+ GRPC_MDVALUE(md), &stream_compression_algorithm)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
- "Invalid compression algorithm: '%s' (unknown). Ignoring.", val);
+ "Invalid stream compression algorithm: '%s' (unknown). Ignoring.",
+ val);
gpr_free(val);
- calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE;
+ }
+ if (!GPR_BITGET(channeld->enabled_stream_compression_algorithms_bitset,
+ stream_compression_algorithm)) {
+ char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ gpr_log(
+ GPR_ERROR,
+ "Invalid stream compression algorithm: '%s' (previously disabled). "
+ "Ignoring.",
+ val);
+ gpr_free(val);
+ stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE;
+ }
+ *has_compression_algorithm = true;
+ grpc_metadata_batch_remove(
+ exec_ctx, initial_metadata,
+ initial_metadata->idx.named.grpc_internal_stream_encoding_request);
+ /* Disable message-wise compression */
+ calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ if (initial_metadata->idx.named.grpc_internal_encoding_request != NULL) {
+ grpc_metadata_batch_remove(
+ exec_ctx, initial_metadata,
+ initial_metadata->idx.named.grpc_internal_encoding_request);
}
- if (!GPR_BITGET(channeld->enabled_algorithms_bitset,
- calld->compression_algorithm)) {
+ } else if (initial_metadata->idx.named.grpc_internal_encoding_request !=
+ NULL) {
+ grpc_mdelem md =
+ initial_metadata->idx.named.grpc_internal_encoding_request->md;
+ if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md),
+ &calld->compression_algorithm)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
- "Invalid compression algorithm: '%s' (previously disabled). "
- "Ignoring.",
- val);
+ "Invalid compression algorithm: '%s' (unknown). Ignoring.", val);
gpr_free(val);
calld->compression_algorithm = GRPC_COMPRESS_NONE;
}
*has_compression_algorithm = true;
-
grpc_metadata_batch_remove(
exec_ctx, initial_metadata,
initial_metadata->idx.named.grpc_internal_encoding_request);
@@ -138,13 +166,25 @@ static grpc_error *process_send_initial_metadata(
/* If no algorithm was found in the metadata and we aren't
* exceptionally skipping compression, fall back to the channel
* default */
- calld->compression_algorithm = channeld->default_compression_algorithm;
+ if (channeld->default_stream_compression_algorithm !=
+ GRPC_STREAM_COMPRESS_NONE) {
+ stream_compression_algorithm =
+ channeld->default_stream_compression_algorithm;
+ calld->compression_algorithm = GRPC_COMPRESS_NONE;
+ } else {
+ calld->compression_algorithm = channeld->default_compression_algorithm;
+ }
*has_compression_algorithm = true;
}
grpc_error *error = GRPC_ERROR_NONE;
/* hint compression algorithm */
- if (calld->compression_algorithm != GRPC_COMPRESS_NONE) {
+ if (stream_compression_algorithm != GRPC_STREAM_COMPRESS_NONE) {
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, initial_metadata,
+ &calld->stream_compression_algorithm_storage,
+ grpc_stream_compression_encoding_mdelem(stream_compression_algorithm));
+ } else if (calld->compression_algorithm != GRPC_COMPRESS_NONE) {
error = grpc_metadata_batch_add_tail(
exec_ctx, initial_metadata, &calld->compression_algorithm_storage,
grpc_compression_encoding_mdelem(calld->compression_algorithm));
@@ -158,6 +198,16 @@ static grpc_error *process_send_initial_metadata(
GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(
channeld->supported_compression_algorithms));
+ if (error != GRPC_ERROR_NONE) return error;
+
+ /* Do not overwrite the accept-encoding header if it is already present. */
+ if (!initial_metadata->idx.named.accept_encoding) {
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, initial_metadata, &calld->accept_stream_encoding_storage,
+ GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(
+ channeld->supported_stream_compression_algorithms));
+ }
+
return error;
}
@@ -170,6 +220,18 @@ static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
+static void send_message_batch_continue(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *calld = (call_data *)elem->call_data;
+ // Note: The call to grpc_call_next_op() results in yielding the
+ // call combiner, so we need to clear calld->send_message_batch
+ // before we do that.
+ grpc_transport_stream_op_batch *send_message_batch =
+ calld->send_message_batch;
+ calld->send_message_batch = NULL;
+ grpc_call_next_op(exec_ctx, elem, send_message_batch);
+}
+
static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
@@ -178,8 +240,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_init(&tmp);
uint32_t send_flags =
calld->send_message_batch->payload->send_message.send_message->flags;
- const bool did_compress = grpc_msg_compress(
- exec_ctx, calld->compression_algorithm, &calld->slices, &tmp);
+ bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm,
+ &calld->slices, &tmp);
if (did_compress) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
char *algo_name;
@@ -217,7 +279,19 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
calld->original_send_message_on_complete =
calld->send_message_batch->on_complete;
calld->send_message_batch->on_complete = &calld->send_message_on_complete;
- grpc_call_next_op(exec_ctx, elem, calld->send_message_batch);
+ send_message_batch_continue(exec_ctx, elem);
+}
+
+static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error) {
+ call_data *calld = arg;
+ if (calld->send_message_batch != NULL) {
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
+ calld->call_combiner);
+ calld->send_message_batch = NULL;
+ }
}
// Pulls a slice from the send_message byte stream and adds it to calld->slices.
@@ -237,21 +311,25 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
// If all data has been read, invokes finish_send_message(). Otherwise,
// an async call to grpc_byte_stream_next() has been started, which will
// eventually result in calling on_send_message_next_done().
-static grpc_error *continue_reading_send_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
+static void continue_reading_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
while (grpc_byte_stream_next(
exec_ctx, calld->send_message_batch->payload->send_message.send_message,
~(size_t)0, &calld->on_send_message_next_done)) {
grpc_error *error = pull_slice_from_send_message(exec_ctx, calld);
- if (error != GRPC_ERROR_NONE) return error;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem);
break;
}
}
- return GRPC_ERROR_NONE;
}
// Async callback for grpc_byte_stream_next().
@@ -259,46 +337,37 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- if (error != GRPC_ERROR_NONE) goto fail;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ return;
+ }
error = pull_slice_from_send_message(exec_ctx, calld);
- if (error != GRPC_ERROR_NONE) goto fail;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem);
} else {
- // This will either finish reading all of the data and invoke
- // finish_send_message(), or else it will make an async call to
- // grpc_byte_stream_next(), which will eventually result in calling
- // this function again.
- error = continue_reading_send_message(exec_ctx, elem);
- if (error != GRPC_ERROR_NONE) goto fail;
+ continue_reading_send_message(exec_ctx, elem);
}
- return;
-fail:
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
}
-static void start_send_message_batch(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch,
- bool has_compression_algorithm) {
+static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *unused) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- if (!skip_compression(elem, batch->payload->send_message.send_message->flags,
- has_compression_algorithm)) {
- calld->send_message_batch = batch;
- // This will either finish reading all of the data and invoke
- // finish_send_message(), or else it will make an async call to
- // grpc_byte_stream_next(), which will eventually result in calling
- // on_send_message_next_done().
- grpc_error *error = continue_reading_send_message(exec_ctx, elem);
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
- }
+ if (skip_compression(
+ elem,
+ calld->send_message_batch->payload->send_message.send_message->flags,
+ calld->send_initial_metadata_state == HAS_COMPRESSION_ALGORITHM)) {
+ send_message_batch_continue(exec_ctx, elem);
} else {
- /* pass control down the stack */
- grpc_call_next_op(exec_ctx, elem, batch);
+ continue_reading_send_message(exec_ctx, elem);
}
}
@@ -306,95 +375,80 @@ static void compress_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
-
GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
-
+ // Handle cancel_stream.
if (batch->cancel_stream) {
- // TODO(roth): As part of the upcoming call combiner work, change
- // this to call grpc_byte_stream_shutdown() on the incoming byte
- // stream, to cancel any in-flight calls to grpc_byte_stream_next().
- GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
- gpr_atm cur = gpr_atm_full_xchg(
- &calld->send_initial_metadata_state,
- CANCELLED_BIT | (gpr_atm)batch->payload->cancel_stream.cancel_error);
- switch (cur) {
- case HAS_COMPRESSION_ALGORITHM:
- case NO_COMPRESSION_ALGORITHM:
- case INITIAL_METADATA_UNSEEN:
- break;
- default:
- if ((cur & CANCELLED_BIT) == 0) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, (grpc_transport_stream_op_batch *)cur,
- GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error));
- } else {
- GRPC_ERROR_UNREF((grpc_error *)(cur & ~CANCELLED_BIT));
- }
- break;
+ GRPC_ERROR_UNREF(calld->cancel_error);
+ calld->cancel_error =
+ GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ if (calld->send_message_batch != NULL) {
+ if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_REF(calld->cancel_error), "failing send_message op");
+ } else {
+ grpc_byte_stream_shutdown(
+ exec_ctx,
+ calld->send_message_batch->payload->send_message.send_message,
+ GRPC_ERROR_REF(calld->cancel_error));
+ }
}
+ } else if (calld->cancel_error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, batch, GRPC_ERROR_REF(calld->cancel_error),
+ calld->call_combiner);
+ goto done;
}
-
+ // Handle send_initial_metadata.
if (batch->send_initial_metadata) {
+ GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN);
bool has_compression_algorithm;
grpc_error *error = process_send_initial_metadata(
exec_ctx, elem,
batch->payload->send_initial_metadata.send_initial_metadata,
&has_compression_algorithm);
if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- error);
- return;
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+ calld->call_combiner);
+ goto done;
}
- gpr_atm cur;
- retry_send_im:
- cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
- GPR_ASSERT(cur != HAS_COMPRESSION_ALGORITHM &&
- cur != NO_COMPRESSION_ALGORITHM);
- if ((cur & CANCELLED_BIT) == 0) {
- if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
- has_compression_algorithm
- ? HAS_COMPRESSION_ALGORITHM
- : NO_COMPRESSION_ALGORITHM)) {
- goto retry_send_im;
- }
- if (cur != INITIAL_METADATA_UNSEEN) {
- start_send_message_batch(exec_ctx, elem,
- (grpc_transport_stream_op_batch *)cur,
- has_compression_algorithm);
- }
+ calld->send_initial_metadata_state = has_compression_algorithm
+ ? HAS_COMPRESSION_ALGORITHM
+ : NO_COMPRESSION_ALGORITHM;
+ // If we had previously received a batch containing a send_message op,
+ // handle it now. Note that we need to re-enter the call combiner
+ // for this, because we can't send two batches down while holding the
+ // call combiner: the connected_channel filter (at the bottom of the
+ // call stack) releases the call combiner for each batch it sees.
+ if (calld->send_message_batch != NULL) {
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, calld->call_combiner,
+ &calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE,
+ "starting send_message after send_initial_metadata");
}
}
+ // Handle send_message.
if (batch->send_message) {
- gpr_atm cur;
- retry_send:
- cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
- switch (cur) {
- case INITIAL_METADATA_UNSEEN:
- if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
- (gpr_atm)batch)) {
- goto retry_send;
- }
- break;
- case HAS_COMPRESSION_ALGORITHM:
- case NO_COMPRESSION_ALGORITHM:
- start_send_message_batch(exec_ctx, elem, batch,
- cur == HAS_COMPRESSION_ALGORITHM);
- break;
- default:
- if (cur & CANCELLED_BIT) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch,
- GRPC_ERROR_REF((grpc_error *)(cur & ~CANCELLED_BIT)));
- } else {
- /* >1 send_message concurrently */
- GPR_UNREACHABLE_CODE(break);
- }
+ GPR_ASSERT(calld->send_message_batch == NULL);
+ calld->send_message_batch = batch;
+ // If we have not yet seen send_initial_metadata, then we have to
+ // wait. We save the batch in calld and then drop the call
+ // combiner, which we'll have to pick up again later when we get
+ // send_initial_metadata.
+ if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
+ GRPC_CALL_COMBINER_STOP(
+ exec_ctx, calld->call_combiner,
+ "send_message batch pending send_initial_metadata");
+ goto done;
}
+ start_send_message_batch(exec_ctx, elem, GRPC_ERROR_NONE);
} else {
- /* pass control down the stack */
+ // Pass control down the stack.
grpc_call_next_op(exec_ctx, elem, batch);
}
-
+done:
GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
}
@@ -402,16 +456,16 @@ static void compress_start_transport_stream_op_batch(
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
-
- /* initialize members */
+ call_data *calld = (call_data *)elem->call_data;
+ calld->call_combiner = args->call_combiner;
+ calld->cancel_error = GRPC_ERROR_NONE;
grpc_slice_buffer_init(&calld->slices);
+ GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner,
+ start_send_message_batch, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
elem, grpc_schedule_on_exec_ctx);
-
return GRPC_ERROR_NONE;
}
@@ -419,14 +473,9 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- /* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
- gpr_atm imstate =
- gpr_atm_no_barrier_load(&calld->send_initial_metadata_state);
- if (imstate & CANCELLED_BIT) {
- GRPC_ERROR_UNREF((grpc_error *)(imstate & ~CANCELLED_BIT));
- }
+ GRPC_ERROR_UNREF(calld->cancel_error);
}
/* Constructor for channel_data */
@@ -435,6 +484,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element_args *args) {
channel_data *channeld = elem->channel_data;
+ /* Configuration for message compression */
channeld->enabled_algorithms_bitset =
grpc_channel_args_compression_algorithm_get_states(args->channel_args);
@@ -449,16 +499,32 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
channeld->default_compression_algorithm = GRPC_COMPRESS_NONE;
}
- channeld->supported_compression_algorithms = 1; /* always support identity */
- for (grpc_compression_algorithm algo_idx = 1;
- algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
- /* skip disabled algorithms */
- if (!GPR_BITGET(channeld->enabled_algorithms_bitset, algo_idx)) {
- continue;
- }
- channeld->supported_compression_algorithms |= 1u << algo_idx;
+ channeld->supported_compression_algorithms =
+ (((1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1) &
+ channeld->enabled_algorithms_bitset) |
+ 1u;
+
+ /* Configuration for stream compression */
+ channeld->enabled_stream_compression_algorithms_bitset =
+ grpc_channel_args_stream_compression_algorithm_get_states(
+ args->channel_args);
+
+ channeld->default_stream_compression_algorithm =
+ grpc_channel_args_get_stream_compression_algorithm(args->channel_args);
+
+ if (!GPR_BITGET(channeld->enabled_stream_compression_algorithms_bitset,
+ channeld->default_stream_compression_algorithm)) {
+ gpr_log(GPR_DEBUG,
+ "stream compression algorithm %d not enabled: switching to none",
+ channeld->default_stream_compression_algorithm);
+ channeld->default_stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE;
}
+ channeld->supported_stream_compression_algorithms =
+ (((1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1) &
+ channeld->enabled_stream_compression_algorithms_bitset) |
+ 1u;
+
GPR_ASSERT(!args->is_last);
return GRPC_ERROR_NONE;
}
@@ -477,6 +543,5 @@ const grpc_channel_filter grpc_message_compress_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
- "compress"};
+ "message_compress"};
diff --git a/src/core/ext/filters/http/server/http_server_filter.c b/src/core/ext/filters/http/server/http_server_filter.c
index b145f12aff..a10e69ba59 100644
--- a/src/core/ext/filters/http/server/http_server_filter.c
+++ b/src/core/ext/filters/http/server/http_server_filter.c
@@ -32,6 +32,8 @@
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
+
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
@@ -281,7 +283,11 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
- GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
+ // Re-enter call combiner for recv_message_ready, since the surface
+ // code will release the call combiner for each callback it receives.
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ calld->recv_message_ready, GRPC_ERROR_REF(err),
+ "resuming recv_message_ready from on_complete");
calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true;
}
@@ -293,15 +299,20 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
if (calld->seen_path_with_query) {
- /* do nothing. This is probably a GET request, and payload will be returned
- in hs_on_complete callback. */
+ // Do nothing. This is probably a GET request, and payload will be
+ // returned in hs_on_complete callback.
+ // Note that we release the call combiner here, so that other
+ // callbacks can run.
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "pausing recv_message_ready until on_complete");
} else {
GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
}
}
-static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
@@ -323,10 +334,7 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_initial_metadata.send_initial_metadata));
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return;
- }
+ if (error != GRPC_ERROR_NONE) return error;
}
if (op->recv_initial_metadata) {
@@ -359,21 +367,25 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_error *error = server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_trailing_metadata.send_trailing_metadata);
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return;
- }
+ if (error != GRPC_ERROR_NONE) return error;
}
+
+ return GRPC_ERROR_NONE;
}
-static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- GPR_TIMER_BEGIN("hs_start_transport_op", 0);
- hs_mutate_op(exec_ctx, elem, op);
- grpc_call_next_op(exec_ctx, elem, op);
- GPR_TIMER_END("hs_start_transport_op", 0);
+static void hs_start_transport_stream_op_batch(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
+ call_data *calld = elem->call_data;
+ GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
+ grpc_error *error = hs_mutate_op(exec_ctx, elem, op);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error,
+ calld->call_combiner);
+ } else {
+ grpc_call_next_op(exec_ctx, elem, op);
+ }
+ GPR_TIMER_END("hs_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
@@ -383,6 +395,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
+ calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem,
@@ -414,7 +427,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_http_server_filter = {
- hs_start_transport_op,
+ hs_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
@@ -423,6 +436,5 @@ const grpc_channel_filter grpc_http_server_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"http-server"};
diff --git a/src/core/ext/filters/load_reporting/load_reporting_filter.c b/src/core/ext/filters/load_reporting/load_reporting_filter.c
index 08474efb2e..17e946937f 100644
--- a/src/core/ext/filters/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/filters/load_reporting/load_reporting_filter.c
@@ -223,6 +223,5 @@ const grpc_channel_filter grpc_load_reporting_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"load_reporting"};
diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c
index 7d748b9c32..16c85a70d0 100644
--- a/src/core/ext/filters/max_age/max_age_filter.c
+++ b/src/core/ext/filters/max_age/max_age_filter.c
@@ -391,7 +391,6 @@ const grpc_channel_filter grpc_max_age_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"max_age"};
diff --git a/src/core/ext/filters/message_size/message_size_filter.c b/src/core/ext/filters/message_size/message_size_filter.c
index 846c7df69a..47763b1deb 100644
--- a/src/core/ext/filters/message_size/message_size_filter.c
+++ b/src/core/ext/filters/message_size/message_size_filter.c
@@ -68,6 +68,7 @@ static void* message_size_limits_create_from_json(const grpc_json* json) {
}
typedef struct call_data {
+ grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
@@ -131,7 +132,8 @@ static void start_transport_stream_op_batch(
exec_ctx, op,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_RESOURCE_EXHAUSTED));
+ GRPC_STATUS_RESOURCE_EXHAUSTED),
+ calld->call_combiner);
gpr_free(message_string);
return;
}
@@ -152,6 +154,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
const grpc_call_element_args* args) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
+ calld->call_combiner = args->call_combiner;
calld->next_recv_message_ready = NULL;
GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -259,7 +262,6 @@ const grpc_channel_filter grpc_message_size_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"message_size"};
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
index b4d2cb4b8c..c8b2fe5f99 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
@@ -177,7 +177,6 @@ const grpc_channel_filter grpc_workaround_cronet_compression_filter = {
0,
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"workaround_cronet_compression"};
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 7bad188f4e..2f0ac85152 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -1298,6 +1298,15 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (op->send_initial_metadata) {
GPR_ASSERT(s->send_initial_metadata_finished == NULL);
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
+
+ /* Identify stream compression */
+ if ((s->stream_compression_send_enabled =
+ (op_payload->send_initial_metadata.send_initial_metadata->idx.named
+ .content_encoding != NULL)) == true) {
+ s->compressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer));
+ grpc_slice_buffer_init(s->compressed_data_buffer);
+ }
+
s->send_initial_metadata_finished = add_closure_barrier(on_complete);
s->send_initial_metadata =
op_payload->send_initial_metadata.send_initial_metadata;
@@ -1361,17 +1370,28 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
"send_initial_metadata_finished");
}
}
+ if (op_payload->send_initial_metadata.peer_string != NULL) {
+ gpr_atm_rel_store(op_payload->send_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ }
}
if (op->send_message) {
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
if (s->write_closed) {
+ // Return an error unless the client has already received trailing
+ // metadata from the server, since an application using a
+ // streaming call might send another message before getting a
+ // recv_message failure, breaking out of its loop, and then
+ // starting recv_trailing_metadata.
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Attempt to send message after stream was closed",
- &s->write_closed_error, 1),
+ t->is_client && s->received_trailing_metadata
+ ? GRPC_ERROR_NONE
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Attempt to send message after stream was closed",
+ &s->write_closed_error, 1),
"fetching_send_message_finished");
} else {
GPR_ASSERT(s->fetching_send_message == NULL);
@@ -1457,6 +1477,10 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
op_payload->recv_initial_metadata.recv_initial_metadata;
s->trailing_metadata_available =
op_payload->recv_initial_metadata.trailing_metadata_available;
+ if (op_payload->recv_initial_metadata.peer_string != NULL) {
+ gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ }
grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
}
@@ -1815,8 +1839,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
}
}
- if (s->read_closed && s->frame_storage.length == 0 &&
- (!pending_data || s->seen_error) &&
+ if (s->read_closed && s->frame_storage.length == 0 && !pending_data &&
s->recv_trailing_metadata_finished != NULL) {
grpc_chttp2_incoming_metadata_buffer_publish(
exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata);
@@ -2703,6 +2726,9 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx,
grpc_stream_compression_context_destroy(s->stream_decompression_ctx);
s->stream_decompression_ctx = NULL;
}
+ if (s->unprocessed_incoming_frames_buffer.length == 0) {
+ *slice = grpc_empty_slice();
+ }
}
error = grpc_deframe_unprocessed_incoming_frames(
exec_ctx, &s->data_parser, s, &s->unprocessed_incoming_frames_buffer,
@@ -2920,14 +2946,6 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
/*******************************************************************************
- * INTEGRATION GLUE
- */
-
-static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
-}
-
-/*******************************************************************************
* MONITORING
*/
static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx,
@@ -2944,7 +2962,6 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
perform_transport_op,
destroy_stream,
destroy_transport,
- chttp2_get_peer,
chttp2_get_endpoint};
grpc_transport *grpc_create_chttp2_transport(
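
With chttp2_get_peer removed, the peer string is now delivered through the gpr_atm pointers stored above. A sketch of the consuming side (illustrative only; in this change the surface call code owns and eventually frees the strdup'd string, which is not shown here):

    gpr_atm peer_string_atm = 0;
    /* When building the batch: ask the transport to publish the peer. */
    batch->payload->recv_initial_metadata.peer_string = &peer_string_atm;
    /* Later, e.g. once recv_initial_metadata_ready has run: */
    char *peer = (char *)gpr_atm_acq_load(&peer_string_atm);
    if (peer != NULL) {
      gpr_log(GPR_INFO, "peer: %s", peer);
    }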
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 7f37365558..c21d76ba71 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -1655,6 +1655,23 @@ static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
}
+static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ grpc_metadata_batch *initial_metadata) {
+ if (initial_metadata->idx.named.content_encoding != NULL) {
+ grpc_slice content_encoding =
+ GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md);
+ if (!grpc_slice_eq(content_encoding, GRPC_MDSTR_IDENTITY)) {
+ if (grpc_slice_eq(content_encoding, GRPC_MDSTR_GZIP)) {
+ s->stream_compression_recv_enabled = true;
+ s->decompressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer));
+ grpc_slice_buffer_init(s->decompressed_data_buffer);
+ }
+ }
+ }
+}
+
grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
@@ -1681,9 +1698,16 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
if (s != NULL) {
if (parser->is_boundary) {
if (s->header_frames_received == GPR_ARRAY_SIZE(s->metadata_buffer)) {
+ GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Too many trailer frames");
}
+ /* Process stream compression md element if it exists */
+ if (s->header_frames_received ==
+ 0) { /* Only acts on initial metadata */
+ parse_stream_compression_md(exec_ctx, t, s,
+ &s->metadata_buffer[0].batch);
+ }
s->published_metadata[s->header_frames_received] =
GRPC_METADATA_PUBLISHED_FROM_WIRE;
maybe_complete_funcs[s->header_frames_received](exec_ctx, t, s);
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 3c41a8958f..9fff30d54f 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -509,6 +509,8 @@ struct grpc_chttp2_stream {
/** Are we buffering writes on this stream? If yes, we won't become writable
until there's enough queued up in the flow_controlled_buffer */
bool write_buffering;
+ /** Has trailing metadata been received. */
+ bool received_trailing_metadata;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 18d163ee98..19bd86fd0c 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -623,6 +623,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
*s->trailing_metadata_available = true;
}
t->hpack_parser.on_header = on_trailing_header;
+ s->received_trailing_metadata = true;
} else {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
t->hpack_parser.on_header = on_initial_header;
@@ -631,6 +632,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
case 1:
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
t->hpack_parser.on_header = on_trailing_header;
+ s->received_trailing_metadata = true;
break;
case 2:
gpr_log(GPR_ERROR, "too many header frames received");
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index abb558982b..09420d92e7 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -1386,10 +1386,6 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
- return NULL;
-}
-
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *gt) {
return NULL;
@@ -1408,7 +1404,6 @@ static const grpc_transport_vtable grpc_cronet_vtable = {
perform_op,
destroy_stream,
destroy_transport,
- get_peer,
get_endpoint};
grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.c
index 6f4b429ee2..b2d6f2d0c9 100644
--- a/src/core/ext/transport/inproc/inproc_transport.c
+++ b/src/core/ext/transport/inproc/inproc_transport.c
@@ -1251,20 +1251,14 @@ static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
// Nothing to do here
}
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return gpr_strdup("inproc");
-}
-
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
return NULL;
}
static const grpc_transport_vtable inproc_vtable = {
- sizeof(inproc_stream), "inproc",
- init_stream, set_pollset,
- set_pollset_set, perform_stream_op,
- perform_transport_op, destroy_stream,
- destroy_transport, get_peer,
+ sizeof(inproc_stream), "inproc", init_stream,
+ set_pollset, set_pollset_set, perform_stream_op,
+ perform_transport_op, destroy_stream, destroy_transport,
get_endpoint};
/*******************************************************************************
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index 8fdef0bc64..02db798b5c 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -221,6 +221,21 @@ grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
return GRPC_COMPRESS_NONE;
}
+grpc_stream_compression_algorithm
+grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a) {
+ size_t i;
+ if (a == NULL) return 0;
+ for (i = 0; i < a->num_args; ++i) {
+ if (a->args[i].type == GRPC_ARG_INTEGER &&
+ !strcmp(GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
+ a->args[i].key)) {
+ return (grpc_stream_compression_algorithm)a->args[i].value.integer;
+ break;
+ }
+ }
+ return GRPC_STREAM_COMPRESS_NONE;
+}
+
grpc_channel_args *grpc_channel_args_set_compression_algorithm(
grpc_channel_args *a, grpc_compression_algorithm algorithm) {
GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT);
@@ -231,6 +246,16 @@ grpc_channel_args *grpc_channel_args_set_compression_algorithm(
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
+grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
+ grpc_channel_args *a, grpc_stream_compression_algorithm algorithm) {
+ GPR_ASSERT(algorithm < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
+ grpc_arg tmp;
+ tmp.type = GRPC_ARG_INTEGER;
+ tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+ tmp.value.integer = algorithm;
+ return grpc_channel_args_copy_and_add(a, &tmp, 1);
+}
+
/** Returns 1 if the argument for compression algorithm's enabled states bitset
* was found in \a a, returning the arg's value in \a states. Otherwise, returns
* 0. */
@@ -251,6 +276,26 @@ static int find_compression_algorithm_states_bitset(const grpc_channel_args *a,
return 0; /* GPR_FALSE */
}
+/** Returns 1 if the argument for the stream compression algorithm's enabled
+ * states bitset was found in \a a, returning the arg's value in \a states.
+ * Otherwise, returns 0. */
+static int find_stream_compression_algorithm_states_bitset(
+ const grpc_channel_args *a, int **states_arg) {
+ if (a != NULL) {
+ size_t i;
+ for (i = 0; i < a->num_args; ++i) {
+ if (a->args[i].type == GRPC_ARG_INTEGER &&
+ !strcmp(GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET,
+ a->args[i].key)) {
+ *states_arg = &a->args[i].value.integer;
+ **states_arg |= 0x1; /* forcefully enable support for no compression */
+ return 1;
+ }
+ }
+ }
+ return 0; /* GPR_FALSE */
+}
+
grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
grpc_compression_algorithm algorithm, int state) {
@@ -292,6 +337,48 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
return result;
}
+grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
+ grpc_stream_compression_algorithm algorithm, int state) {
+ int *states_arg = NULL;
+ grpc_channel_args *result = *a;
+ const int states_arg_found =
+ find_stream_compression_algorithm_states_bitset(*a, &states_arg);
+
+ if (grpc_channel_args_get_stream_compression_algorithm(*a) == algorithm &&
+ state == 0) {
+ char *algo_name = NULL;
+ GPR_ASSERT(grpc_stream_compression_algorithm_name(algorithm, &algo_name) !=
+ 0);
+ gpr_log(GPR_ERROR,
+ "Tried to disable default stream compression algorithm '%s'. The "
+ "operation has been ignored.",
+ algo_name);
+ } else if (states_arg_found) {
+ if (state != 0) {
+ GPR_BITSET((unsigned *)states_arg, algorithm);
+ } else if (algorithm != GRPC_STREAM_COMPRESS_NONE) {
+ GPR_BITCLEAR((unsigned *)states_arg, algorithm);
+ }
+ } else {
+ /* create a new arg */
+ grpc_arg tmp;
+ tmp.type = GRPC_ARG_INTEGER;
+ tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+ /* all enabled by default */
+ tmp.value.integer = (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1;
+ if (state != 0) {
+ GPR_BITSET((unsigned *)&tmp.value.integer, algorithm);
+ } else if (algorithm != GRPC_STREAM_COMPRESS_NONE) {
+ GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm);
+ }
+ result = grpc_channel_args_copy_and_add(*a, &tmp, 1);
+ grpc_channel_args_destroy(exec_ctx, *a);
+ *a = result;
+ }
+ return result;
+}
+
uint32_t grpc_channel_args_compression_algorithm_get_states(
const grpc_channel_args *a) {
int *states_arg;
@@ -302,6 +389,17 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
}
}
+uint32_t grpc_channel_args_stream_compression_algorithm_get_states(
+ const grpc_channel_args *a) {
+ int *states_arg;
+ if (find_stream_compression_algorithm_states_bitset(a, &states_arg)) {
+ return (uint32_t)*states_arg;
+ } else {
+ return (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) -
+ 1; /* All algs. enabled */
+ }
+}
+
grpc_channel_args *grpc_channel_args_set_socket_mutator(
grpc_channel_args *a, grpc_socket_mutator *mutator) {
grpc_arg tmp = grpc_socket_mutator_to_arg(mutator);
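
A short usage sketch for the new stream compression channel-arg helpers (standalone snippet; it assumes only the functions added above plus the usual exec_ctx setup):

    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    /* Make gzip the channel-default stream compression algorithm. */
    grpc_channel_args *args = grpc_channel_args_set_stream_compression_algorithm(
        NULL, GRPC_STREAM_COMPRESS_GZIP);
    /* With no explicit states arg, every stream compression algorithm is
       reported as enabled (and identity can never be disabled). */
    uint32_t enabled =
        grpc_channel_args_stream_compression_algorithm_get_states(args);
    (void)enabled;
    grpc_channel_args_destroy(&exec_ctx, args);
    grpc_exec_ctx_finish(&exec_ctx);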
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index f649a8d9ec..0599e189c3 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -59,12 +59,24 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a);
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
const grpc_channel_args *a);
+/** Returns the stream compression algorithm set in \a a. */
+grpc_stream_compression_algorithm
+grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a);
+
/** Returns a channel arg instance with compression enabled. If \a a is
* non-NULL, its args are copied. N.B. GRPC_COMPRESS_NONE disables compression
* for the channel. */
grpc_channel_args *grpc_channel_args_set_compression_algorithm(
grpc_channel_args *a, grpc_compression_algorithm algorithm);
+/** Returns a channel arg instance with stream compression enabled. If \a a is
+ * non-NULL, its args are copied. N.B. GRPC_STREAM_COMPRESS_NONE disables
+ * stream compression for the channel. If a value other than
+ * GRPC_STREAM_COMPRESS_NONE is set, it takes precedence over message-wise
+ * compression algorithms. */
+grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
+ grpc_channel_args *a, grpc_stream_compression_algorithm algorithm);
+
/** Sets the support for the given compression algorithm. By default, all
* compression algorithms are enabled. It's an error to disable an algorithm set
* by grpc_channel_args_set_compression_algorithm.
@@ -76,6 +88,17 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
grpc_compression_algorithm algorithm, int enabled);
+/** Sets the support for the given stream compression algorithm. By default, all
+ * stream compression algorithms are enabled. It's an error to disable an
+ * algorithm set by grpc_channel_args_set_stream_compression_algorithm.
+ *
+ * Returns an instance with the updated algorithm states. The \a a pointer is
+ * modified to point to the returned instance (which may be different from the
+ * input value of \a a). */
+grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
+ grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
+ grpc_stream_compression_algorithm algorithm, int enabled);
+
/** Returns the bitset representing the support state (true for enabled, false
* for disabled) for compression algorithms.
*
@@ -84,6 +107,14 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
uint32_t grpc_channel_args_compression_algorithm_get_states(
const grpc_channel_args *a);
+/** Returns the bitset representing the support state (true for enabled, false
+ * for disabled) for stream compression algorithms.
+ *
+ * The i-th bit of the returned bitset corresponds to the i-th entry in the
+ * grpc_stream_compression_algorithm enum. */
+uint32_t grpc_channel_args_stream_compression_algorithm_get_states(
+ const grpc_channel_args *a);
+
int grpc_channel_args_compare(const grpc_channel_args *a,
const grpc_channel_args *b);
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 0f8e33c4be..775c8bc667 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -233,15 +233,10 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
grpc_call_element *next_elem = elem + 1;
+ GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op);
next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op);
}
-char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- grpc_call_element *next_elem = elem + 1;
- return next_elem->filter->get_peer(exec_ctx, next_elem);
-}
-
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *channel_info) {
@@ -265,12 +260,3 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_call_stack)));
}
-
-void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(NULL);
- op->cancel_stream = true;
- op->payload->cancel_stream.cancel_error = error;
- elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
-}
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index a80f8aa826..ae1cac31f7 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -40,6 +40,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/transport.h"
@@ -71,6 +72,7 @@ typedef struct {
gpr_timespec start_time;
gpr_timespec deadline;
gpr_arena *arena;
+ grpc_call_combiner *call_combiner;
} grpc_call_element_args;
typedef struct {
@@ -150,9 +152,6 @@ typedef struct {
void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem);
- /* Implement grpc_call_get_peer() */
- char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
-
/* Implement grpc_channel_get_info() */
void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
const grpc_channel_info *channel_info);
@@ -271,8 +270,6 @@ void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
stack */
void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_transport_op *op);
-/* Pass through a request to get_peer to the next child element */
-char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* Pass through a request to get_channel_info() to the next child element */
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -288,10 +285,6 @@ void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op);
-void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
- grpc_call_element *cur_elem,
- grpc_error *error);
-
extern grpc_tracer_flag grpc_trace_channel;
#define GRPC_CALL_LOG_OP(sev, elem, op) \
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index af06ca802e..8285226fc4 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -36,7 +36,57 @@ typedef struct connected_channel_channel_data {
grpc_transport *transport;
} channel_data;
-typedef struct connected_channel_call_data { void *unused; } call_data;
+typedef struct {
+ grpc_closure closure;
+ grpc_closure *original_closure;
+ grpc_call_combiner *call_combiner;
+ const char *reason;
+} callback_state;
+
+typedef struct connected_channel_call_data {
+ grpc_call_combiner *call_combiner;
+ // Closures used for returning results on the call combiner.
+ callback_state on_complete[6]; // Max number of pending batches.
+ callback_state recv_initial_metadata_ready;
+ callback_state recv_message_ready;
+} call_data;
+
+static void run_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ callback_state *state = (callback_state *)arg;
+ GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner,
+ state->original_closure, GRPC_ERROR_REF(error),
+ state->reason);
+}
+
+static void run_cancel_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ run_in_call_combiner(exec_ctx, arg, error);
+ gpr_free(arg);
+}
+
+static void intercept_callback(call_data *calld, callback_state *state,
+ bool free_when_done, const char *reason,
+ grpc_closure **original_closure) {
+ state->original_closure = *original_closure;
+ state->call_combiner = calld->call_combiner;
+ state->reason = reason;
+ *original_closure = GRPC_CLOSURE_INIT(
+ &state->closure,
+ free_when_done ? run_cancel_in_call_combiner : run_in_call_combiner,
+ state, grpc_schedule_on_exec_ctx);
+}
+
+static callback_state *get_state_for_batch(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ if (batch->send_initial_metadata) return &calld->on_complete[0];
+ if (batch->send_message) return &calld->on_complete[1];
+ if (batch->send_trailing_metadata) return &calld->on_complete[2];
+ if (batch->recv_initial_metadata) return &calld->on_complete[3];
+ if (batch->recv_message) return &calld->on_complete[4];
+ if (batch->recv_trailing_metadata) return &calld->on_complete[5];
+ GPR_UNREACHABLE_CODE(return NULL);
+}
/* We perform a small hack to locate transport data alongside the connected
channel data in call allocations, to allow everything to be pulled in minimal
@@ -49,13 +99,38 @@ typedef struct connected_channel_call_data { void *unused; } call_data;
into transport stream operations */
static void con_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+ grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-
+ if (batch->recv_initial_metadata) {
+ callback_state *state = &calld->recv_initial_metadata_ready;
+ intercept_callback(
+ calld, state, false, "recv_initial_metadata_ready",
+ &batch->payload->recv_initial_metadata.recv_initial_metadata_ready);
+ }
+ if (batch->recv_message) {
+ callback_state *state = &calld->recv_message_ready;
+ intercept_callback(calld, state, false, "recv_message_ready",
+ &batch->payload->recv_message.recv_message_ready);
+ }
+ if (batch->cancel_stream) {
+ // There can be more than one cancellation batch in flight at any
+ // given time, so we can't just pick out a fixed index into
+ // calld->on_complete like we can for the other ops. However,
+ // cancellation isn't in the fast path, so we just allocate a new
+ // closure for each one.
+ callback_state *state = (callback_state *)gpr_malloc(sizeof(*state));
+ intercept_callback(calld, state, true, "on_complete (cancel_stream)",
+ &batch->on_complete);
+ } else {
+ callback_state *state = get_state_for_batch(calld, batch);
+ intercept_callback(calld, state, false, "on_complete", &batch->on_complete);
+ }
grpc_transport_perform_stream_op(exec_ctx, chand->transport,
- TRANSPORT_STREAM_FROM_CALL_DATA(calld), op);
+ TRANSPORT_STREAM_FROM_CALL_DATA(calld),
+ batch);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "passed batch to transport");
}
static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -71,6 +146,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
+ calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
&args->call_stack->refcount, args->server_transport_data, args->arena);
@@ -118,11 +194,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
}
-static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- return grpc_transport_get_peer(exec_ctx, chand->transport);
-}
-
/* No-op. */
static void con_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -138,7 +209,6 @@ const grpc_channel_filter grpc_connected_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- con_get_peer,
con_get_channel_info,
"connected",
};
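
Since connected_channel now releases the call combiner once it hands a batch to the transport, a filter that short-circuits a batch is expected to pass its call combiner to the failure helper so the combiner is released exactly once. Illustrative filter code (validate_batch is a hypothetical check, not an API from this change):

    static void example_start_batch(grpc_exec_ctx *exec_ctx,
                                    grpc_call_element *elem,
                                    grpc_transport_stream_op_batch *batch) {
      call_data *calld = elem->call_data; /* assumed to cache args->call_combiner */
      grpc_error *error = validate_batch(batch); /* hypothetical */
      if (error != GRPC_ERROR_NONE) {
        /* Fails the batch's callbacks and yields the call combiner. */
        grpc_transport_stream_op_batch_finish_with_failure(
            exec_ctx, batch, error, calld->call_combiner);
        return;
      }
      grpc_call_next_op(exec_ctx, elem, batch);
    }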
diff --git a/src/core/lib/compression/algorithm_metadata.h b/src/core/lib/compression/algorithm_metadata.h
index 4717af6e2b..08feafc1bb 100644
--- a/src/core/lib/compression/algorithm_metadata.h
+++ b/src/core/lib/compression/algorithm_metadata.h
@@ -26,13 +26,27 @@
grpc_slice grpc_compression_algorithm_slice(
grpc_compression_algorithm algorithm);
+/** Return stream compression algorithm based metadata value */
+grpc_slice grpc_stream_compression_algorithm_slice(
+ grpc_stream_compression_algorithm algorithm);
+
/** Return compression algorithm based metadata element (grpc-encoding: xxx) */
grpc_mdelem grpc_compression_encoding_mdelem(
grpc_compression_algorithm algorithm);
+/** Return stream compression algorithm based metadata element
+ * (content-encoding: xxx) */
+grpc_mdelem grpc_stream_compression_encoding_mdelem(
+ grpc_stream_compression_algorithm algorithm);
+
/** Find compression algorithm based on passed in mdstr - returns
* GRPC_COMPRESS_ALGORITHM_COUNT on failure */
grpc_compression_algorithm grpc_compression_algorithm_from_slice(
grpc_slice str);
+/** Find stream compression algorithm based on the passed-in mdstr - returns
+ * GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT on failure */
+grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
+ grpc_slice str);
+
#endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */
diff --git a/src/core/lib/compression/compression.c b/src/core/lib/compression/compression.c
index 8deae2798f..ec84c01811 100644
--- a/src/core/lib/compression/compression.c
+++ b/src/core/lib/compression/compression.c
@@ -46,6 +46,19 @@ int grpc_compression_algorithm_parse(grpc_slice name,
}
}
+int grpc_stream_compression_algorithm_parse(
+ grpc_slice name, grpc_stream_compression_algorithm *algorithm) {
+ if (grpc_slice_eq(name, GRPC_MDSTR_IDENTITY)) {
+ *algorithm = GRPC_STREAM_COMPRESS_NONE;
+ return 1;
+ } else if (grpc_slice_eq(name, GRPC_MDSTR_GZIP)) {
+ *algorithm = GRPC_STREAM_COMPRESS_GZIP;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
char **name) {
GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
@@ -66,6 +79,24 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
return 0;
}
+int grpc_stream_compression_algorithm_name(
+ grpc_stream_compression_algorithm algorithm, char **name) {
+ GRPC_API_TRACE(
+ "grpc_stream_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
+ ((int)algorithm, name));
+ switch (algorithm) {
+ case GRPC_STREAM_COMPRESS_NONE:
+ *name = "identity";
+ return 1;
+ case GRPC_STREAM_COMPRESS_GZIP:
+ *name = "gzip";
+ return 1;
+ case GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT:
+ return 0;
+ }
+ return 0;
+}
+
grpc_compression_algorithm grpc_compression_algorithm_from_slice(
grpc_slice str) {
if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_COMPRESS_NONE;
@@ -74,6 +105,13 @@ grpc_compression_algorithm grpc_compression_algorithm_from_slice(
return GRPC_COMPRESS_ALGORITHMS_COUNT;
}
+grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
+ grpc_slice str) {
+ if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_STREAM_COMPRESS_NONE;
+ if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_STREAM_COMPRESS_GZIP;
+ return GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT;
+}
+
grpc_slice grpc_compression_algorithm_slice(
grpc_compression_algorithm algorithm) {
switch (algorithm) {
@@ -89,6 +127,19 @@ grpc_slice grpc_compression_algorithm_slice(
return grpc_empty_slice();
}
+grpc_slice grpc_stream_compression_algorithm_slice(
+ grpc_stream_compression_algorithm algorithm) {
+ switch (algorithm) {
+ case GRPC_STREAM_COMPRESS_NONE:
+ return GRPC_MDSTR_IDENTITY;
+ case GRPC_STREAM_COMPRESS_GZIP:
+ return GRPC_MDSTR_GZIP;
+ case GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT:
+ return grpc_empty_slice();
+ }
+ return grpc_empty_slice();
+}
+
grpc_mdelem grpc_compression_encoding_mdelem(
grpc_compression_algorithm algorithm) {
switch (algorithm) {
@@ -104,10 +155,25 @@ grpc_mdelem grpc_compression_encoding_mdelem(
return GRPC_MDNULL;
}
+grpc_mdelem grpc_stream_compression_encoding_mdelem(
+ grpc_stream_compression_algorithm algorithm) {
+ switch (algorithm) {
+ case GRPC_STREAM_COMPRESS_NONE:
+ return GRPC_MDELEM_CONTENT_ENCODING_IDENTITY;
+ case GRPC_STREAM_COMPRESS_GZIP:
+ return GRPC_MDELEM_CONTENT_ENCODING_GZIP;
+ default:
+ break;
+ }
+ return GRPC_MDNULL;
+}
+
void grpc_compression_options_init(grpc_compression_options *opts) {
memset(opts, 0, sizeof(*opts));
/* all enabled by default */
opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
+ opts->enabled_stream_compression_algorithms_bitset =
+ (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1;
}
void grpc_compression_options_enable_algorithm(
@@ -126,6 +192,13 @@ int grpc_compression_options_is_algorithm_enabled(
return GPR_BITGET(opts->enabled_algorithms_bitset, algorithm);
}
+int grpc_compression_options_is_stream_compression_algorithm_enabled(
+ const grpc_compression_options *opts,
+ grpc_stream_compression_algorithm algorithm) {
+ return GPR_BITGET(opts->enabled_stream_compression_algorithms_bitset,
+ algorithm);
+}
+
/* TODO(dgq): Add the ability to specify parameters to the individual
* compression algorithms */
grpc_compression_algorithm grpc_compression_algorithm_for_level(
@@ -181,3 +254,30 @@ grpc_compression_algorithm grpc_compression_algorithm_for_level(
abort();
};
}
+
+GRPCAPI grpc_stream_compression_algorithm
+grpc_stream_compression_algorithm_for_level(
+ grpc_stream_compression_level level, uint32_t accepted_stream_encodings) {
+ GRPC_API_TRACE("grpc_stream_compression_algorithm_for_level(level=%d)", 1,
+ ((int)level));
+ if (level > GRPC_STREAM_COMPRESS_LEVEL_HIGH) {
+ gpr_log(GPR_ERROR, "Unknown compression level %d.", (int)level);
+ abort();
+ }
+
+ switch (level) {
+ case GRPC_STREAM_COMPRESS_LEVEL_NONE:
+ return GRPC_STREAM_COMPRESS_NONE;
+ case GRPC_STREAM_COMPRESS_LEVEL_LOW:
+ case GRPC_STREAM_COMPRESS_LEVEL_MED:
+ case GRPC_STREAM_COMPRESS_LEVEL_HIGH:
+ if (GPR_BITGET(accepted_stream_encodings, GRPC_STREAM_COMPRESS_GZIP) ==
+ 1) {
+ return GRPC_STREAM_COMPRESS_GZIP;
+ } else {
+ return GRPC_STREAM_COMPRESS_NONE;
+ }
+ default:
+ abort();
+ }
+}
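A minimal sketch of how the new stream-compression helpers compose: parse the peer's advertised encodings into a bitset, then map a requested level onto the best accepted algorithm. This assumes the gRPC core tree with the functions added above; the include paths and the helper name pick_stream_algorithm are illustrative, not part of this change.

/* Sketch only: error handling omitted. */
#include <stdint.h>
#include <grpc/compression.h>
#include <grpc/slice.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>

static grpc_stream_compression_algorithm pick_stream_algorithm(void) {
  /* Build a bitset of encodings the peer advertised (here: identity,gzip). */
  uint32_t accepted = 0;
  GPR_BITSET(&accepted, GRPC_STREAM_COMPRESS_NONE);
  grpc_stream_compression_algorithm algo;
  if (grpc_stream_compression_algorithm_parse(
          grpc_slice_from_static_string("gzip"), &algo)) {
    GPR_BITSET(&accepted, algo);
  }
  /* Map a requested level onto the best accepted algorithm. */
  grpc_stream_compression_algorithm chosen =
      grpc_stream_compression_algorithm_for_level(
          GRPC_STREAM_COMPRESS_LEVEL_HIGH, accepted);
  char *name = NULL;
  if (grpc_stream_compression_algorithm_name(chosen, &name)) {
    gpr_log(GPR_DEBUG, "chosen stream compression: %s", name);
  }
  return chosen;
}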
diff --git a/src/core/lib/iomgr/call_combiner.c b/src/core/lib/iomgr/call_combiner.c
new file mode 100644
index 0000000000..899f98552d
--- /dev/null
+++ b/src/core/lib/iomgr/call_combiner.c
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/iomgr/call_combiner.h"
+
+#include <grpc/support/log.h>
+
+grpc_tracer_flag grpc_call_combiner_trace =
+ GRPC_TRACER_INITIALIZER(false, "call_combiner");
+
+static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) {
+ if (cancel_state & 1) {
+ return (grpc_error*)(cancel_state & ~(gpr_atm)1);
+ }
+ return GRPC_ERROR_NONE;
+}
+
+static gpr_atm encode_cancel_state_error(grpc_error* error) {
+ return (gpr_atm)1 | (gpr_atm)error;
+}
+
+void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
+ gpr_mpscq_init(&call_combiner->queue);
+}
+
+void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
+ gpr_mpscq_destroy(&call_combiner->queue);
+ GRPC_ERROR_UNREF(decode_cancel_state_error(call_combiner->cancel_state));
+}
+
+#ifndef NDEBUG
+#define DEBUG_ARGS , const char *file, int line
+#define DEBUG_FMT_STR "%s:%d: "
+#define DEBUG_FMT_ARGS , file, line
+#else
+#define DEBUG_ARGS
+#define DEBUG_FMT_STR
+#define DEBUG_FMT_ARGS
+#endif
+
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure,
+ grpc_error* error DEBUG_ARGS,
+ const char* reason) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
+ "%s] error=%s",
+ call_combiner, closure DEBUG_FMT_ARGS, reason,
+ grpc_error_string(error));
+ }
+ size_t prev_size =
+ (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ prev_size + 1);
+ }
+ if (prev_size == 0) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY");
+ }
+ // Queue was empty, so execute this closure immediately.
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
+ } else {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_INFO, " QUEUING");
+ }
+ // Queue was not empty, so add closure to queue.
+ closure->error_data.error = error;
+ gpr_mpscq_push(&call_combiner->queue, (gpr_mpscq_node*)closure);
+ }
+}
+
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner DEBUG_ARGS,
+ const char* reason) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
+ call_combiner DEBUG_FMT_ARGS, reason);
+ }
+ size_t prev_size =
+ (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ prev_size - 1);
+ }
+ GPR_ASSERT(prev_size >= 1);
+ if (prev_size > 1) {
+ while (true) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " checking queue");
+ }
+ bool empty;
+ grpc_closure* closure = (grpc_closure*)gpr_mpscq_pop_and_check_end(
+ &call_combiner->queue, &empty);
+ if (closure == NULL) {
+ // This can happen either due to a race condition within the mpscq
+ // code or because of a race with grpc_call_combiner_start().
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " queue returned no result; checking again");
+ }
+ continue;
+ }
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s",
+ closure, grpc_error_string(closure->error_data.error));
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error);
+ break;
+ }
+ } else if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " queue empty");
+ }
+}
+
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure) {
+ while (true) {
+ // Decode original state.
+ gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
+ grpc_error* original_error = decode_cancel_state_error(original_state);
+ // If error is set, invoke the cancellation closure immediately.
+ // Otherwise, store the new closure.
+ if (original_error != GRPC_ERROR_NONE) {
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error));
+ break;
+ } else {
+ if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
+ (gpr_atm)closure)) {
+ break;
+ }
+ }
+ // cas failed, try again.
+ }
+}
+
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_error* error) {
+ while (true) {
+ gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
+ grpc_error* original_error = decode_cancel_state_error(original_state);
+ if (original_error != GRPC_ERROR_NONE) {
+ GRPC_ERROR_UNREF(error);
+ break;
+ }
+ if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
+ encode_cancel_state_error(error))) {
+ if (original_state != 0) {
+ grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling notify_on_cancel callback=%p",
+ call_combiner, notify_on_cancel);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error));
+ }
+ break;
+ }
+ // cas failed, try again.
+ }
+}
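The cancel_state field above packs either a grpc_closure* or a grpc_error* into a single atomic word by tagging the low bit, which is free because heap pointers are at least 2-byte aligned. A standalone sketch of that tagging trick, with illustrative types that stand in for grpc_closure and grpc_error:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int dummy; } closure;
typedef struct { int code; } error;

static uintptr_t encode_error(error *e) { return (uintptr_t)e | 1u; }

static error *decode_error(uintptr_t state) {
  /* Bit 0 set means "error pointer"; otherwise it is a closure (or 0). */
  return (state & 1u) ? (error *)(state & ~(uintptr_t)1) : NULL;
}

int main(void) {
  static error cancelled = {1};
  closure cb = {0};
  uintptr_t state = (uintptr_t)&cb;  /* closure registered, no error yet */
  assert(decode_error(state) == NULL);
  state = encode_error(&cancelled);  /* cancellation recorded */
  assert(decode_error(state) == &cancelled);
  printf("decoded error code: %d\n", decode_error(state)->code);
  return 0;
}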
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
new file mode 100644
index 0000000000..621e2c3669
--- /dev/null
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
+#define GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
+
+#include <stddef.h>
+
+#include <grpc/support/atm.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/support/mpscq.h"
+
+// A simple, lock-free mechanism for serializing activity related to a
+// single call. This is similar to a combiner but is more lightweight.
+//
+// It requires the callback (or, in the common case where the callback
+// actually kicks off a chain of callbacks, the last callback in that
+// chain) to explicitly indicate (by calling GRPC_CALL_COMBINER_STOP())
+// when it is done with the action that was kicked off by the original
+// callback.
+
+extern grpc_tracer_flag grpc_call_combiner_trace;
+
+typedef struct {
+ gpr_atm size; // size_t, num closures in queue or currently executing
+ gpr_mpscq queue;
+ // Either 0 (if not cancelled and no cancellation closure set),
+ // a grpc_closure* (if the lowest bit is 0),
+ // or a grpc_error* (if the lowest bit is 1).
+ gpr_atm cancel_state;
+} grpc_call_combiner;
+
+// Assumes memory was initialized to zero.
+void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
+
+void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner);
+
+#ifndef NDEBUG
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ __FILE__, __LINE__, (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \
+ (reason))
+/// Starts processing \a closure on \a call_combiner.
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure, grpc_error* error,
+ const char* file, int line, const char* reason);
+/// Yields the call combiner to the next closure in the queue, if any.
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ const char* file, int line, const char* reason);
+#else
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason))
+/// Starts processing \a closure on \a call_combiner.
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure, grpc_error* error,
+ const char* reason);
+/// Yields the call combiner to the next closure in the queue, if any.
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ const char* reason);
+#endif
+
+/// Tells \a call_combiner to invoke \a closure when
+/// grpc_call_combiner_cancel() is called. If grpc_call_combiner_cancel()
+/// was previously called, \a closure will be invoked immediately.
+/// If \a closure is NULL, then no closure will be invoked on
+/// cancellation; this effectively unregisters the previously set closure.
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure);
+
+/// Indicates that the call has been cancelled.
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_error* error);
+
+#endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */
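A hypothetical usage sketch of the API declared above: work is entered through GRPC_CALL_COMBINER_START, and the last callback in the resulting chain must yield with GRPC_CALL_COMBINER_STOP. The function names below are illustrative, not part of this change.

#include "src/core/lib/iomgr/call_combiner.h"

static void my_filter_cb(grpc_exec_ctx *exec_ctx, void *arg,
                         grpc_error *error) {
  grpc_call_combiner *call_combiner = (grpc_call_combiner *)arg;
  /* ... do the work that must be serialized for this call ... */
  /* The last callback in the chain must yield the combiner explicitly. */
  GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, "my_filter_cb done");
}

static void kick_off(grpc_exec_ctx *exec_ctx,
                     grpc_call_combiner *call_combiner,
                     grpc_closure *closure /* initialized to my_filter_cb */) {
  GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, GRPC_ERROR_NONE,
                           "starting my_filter work");
}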
diff --git a/src/core/lib/profiling/timers.h b/src/core/lib/profiling/timers.h
index 58e6659e6d..4d1437f606 100644
--- a/src/core/lib/profiling/timers.h
+++ b/src/core/lib/profiling/timers.h
@@ -37,7 +37,8 @@ void gpr_timers_set_log_filename(const char *filename);
void gpr_timer_set_enabled(int enabled);
-#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
+#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER) + \
+ defined(GRPC_CUSTOM_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \
do { \
@@ -56,6 +57,12 @@ void gpr_timer_set_enabled(int enabled);
#if defined(GRPC_STAP_PROFILER) && defined(GRPC_BASIC_PROFILER)
#error "GRPC_STAP_PROFILER and GRPC_BASIC_PROFILER are mutually exclusive."
#endif
+#if defined(GRPC_STAP_PROFILER) && defined(GRPC_CUSTOM_PROFILER)
+#error "GRPC_STAP_PROFILER and GRPC_CUSTOM_PROFILER are mutually exclusive."
+#endif
+#if defined(GRPC_CUSTOM_PROFILER) && defined(GRPC_BASIC_PROFILER)
+#error "GRPC_CUSTOM_PROFILER and GRPC_BASIC_PROFILER are mutually exclusive."
+#endif
/* Generic profiling interface. */
#define GPR_TIMER_MARK(tag, important) \
@@ -80,22 +87,25 @@ void gpr_timer_set_enabled(int enabled);
#ifdef __cplusplus
}
-#if (defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
+#if (defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER) + \
+ defined(GRPC_CUSTOM_PROFILER))
namespace grpc {
class ProfileScope {
public:
- ProfileScope(const char *desc, bool important) : desc_(desc) {
- GPR_TIMER_BEGIN(desc_, important ? 1 : 0);
+ ProfileScope(const char *desc, bool important, const char *file, int line)
+ : desc_(desc) {
+ gpr_timer_begin(desc_, important ? 1 : 0, file, line);
}
- ~ProfileScope() { GPR_TIMER_END(desc_, 0); }
+ ~ProfileScope() { gpr_timer_end(desc_, 0, "n/a", 0); }
private:
const char *const desc_;
};
-}
+} // namespace grpc
-#define GPR_TIMER_SCOPE(tag, important) \
- ::grpc::ProfileScope _profile_scope_##__LINE__((tag), (important))
+#define GPR_TIMER_SCOPE(tag, important) \
+ ::grpc::ProfileScope _profile_scope_##__LINE__((tag), (important), __FILE__, \
+ __LINE__)
#else
#define GPR_TIMER_SCOPE(tag, important) \
do { \
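Regardless of which profiler backend is compiled in (STAP, basic, or the new custom one), call sites mark regions with the same macros. A minimal sketch; the function name is illustrative:

#include "src/core/lib/profiling/timers.h"

static void do_some_work(void) {
  GPR_TIMER_BEGIN("do_some_work", 0);
  /* ... work attributed to this region in the profile ... */
  GPR_TIMER_END("do_some_work", 0);
}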
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index 531a88434f..e3f0163a6c 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -39,6 +39,7 @@
/* We can have a per-call credentials. */
typedef struct {
+ grpc_call_combiner *call_combiner;
grpc_call_credentials *creds;
bool have_host;
bool have_method;
@@ -49,17 +50,11 @@ typedef struct {
pollset_set so that work can progress when this call wants work to progress
*/
grpc_polling_entity *pollent;
- gpr_atm security_context_set;
- gpr_mu security_context_mu;
grpc_credentials_mdelem_array md_array;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
- grpc_closure closure;
- // Either 0 (no cancellation and no async operation in flight),
- // a grpc_closure* (if the lowest bit is 0),
- // or a grpc_error* (if the lowest bit is 1).
- gpr_atm cancellation_state;
- grpc_closure cancel_closure;
+ grpc_closure async_cancel_closure;
+ grpc_closure async_result_closure;
} call_data;
/* We can have a per-channel credentials. */
@@ -68,43 +63,6 @@ typedef struct {
grpc_auth_context *auth_context;
} channel_data;
-static void decode_cancel_state(gpr_atm cancel_state, grpc_closure **func,
- grpc_error **error) {
- // If the lowest bit is 1, the value is a grpc_error*.
- // Otherwise, if non-zdero, the value is a grpc_closure*.
- if (cancel_state & 1) {
- *error = (grpc_error *)(cancel_state & ~(gpr_atm)1);
- } else if (cancel_state != 0) {
- *func = (grpc_closure *)cancel_state;
- }
-}
-
-static gpr_atm encode_cancel_state_error(grpc_error *error) {
- // Set the lowest bit to 1 to indicate that it's an error.
- return (gpr_atm)1 | (gpr_atm)error;
-}
-
-// Returns an error if the call has been cancelled. Otherwise, sets the
-// cancellation function to be called upon cancellation.
-static grpc_error *set_cancel_func(grpc_call_element *elem,
- grpc_iomgr_cb_func func) {
- call_data *calld = (call_data *)elem->call_data;
- // Decode original state.
- gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *original_error = GRPC_ERROR_NONE;
- grpc_closure *original_func = NULL;
- decode_cancel_state(original_state, &original_func, &original_error);
- // If error is set, return it.
- if (original_error != GRPC_ERROR_NONE) return GRPC_ERROR_REF(original_error);
- // Otherwise, store func.
- GRPC_CLOSURE_INIT(&calld->cancel_closure, func, elem,
- grpc_schedule_on_exec_ctx);
- GPR_ASSERT(((gpr_atm)&calld->cancel_closure & (gpr_atm)1) == 0);
- gpr_atm_rel_store(&calld->cancellation_state,
- (gpr_atm)&calld->cancel_closure);
- return GRPC_ERROR_NONE;
-}
-
static void reset_auth_metadata_context(
grpc_auth_metadata_context *auth_md_context) {
if (auth_md_context->service_url != NULL) {
@@ -135,6 +93,7 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
grpc_call_element *elem = batch->handler_private.extra_arg;
call_data *calld = elem->call_data;
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL);
reset_auth_metadata_context(&calld->auth_md_context);
grpc_error *error = GRPC_ERROR_REF(input_error);
if (error == GRPC_ERROR_NONE) {
@@ -153,7 +112,8 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
} else {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAUTHENTICATED);
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error);
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+ calld->call_combiner);
}
}
@@ -223,7 +183,8 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Incompatible credentials set on channel and call."),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED));
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED),
+ calld->call_combiner);
return;
}
} else {
@@ -234,22 +195,23 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
build_auth_metadata_context(&chand->security_connector->base,
chand->auth_context, calld);
- grpc_error *cancel_error = set_cancel_func(elem, cancel_get_request_metadata);
- if (cancel_error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- cancel_error);
- return;
- }
GPR_ASSERT(calld->pollent != NULL);
- GRPC_CLOSURE_INIT(&calld->closure, on_credentials_metadata, batch,
- grpc_schedule_on_exec_ctx);
+
+ GRPC_CLOSURE_INIT(&calld->async_result_closure, on_credentials_metadata,
+ batch, grpc_schedule_on_exec_ctx);
grpc_error *error = GRPC_ERROR_NONE;
if (grpc_call_credentials_get_request_metadata(
exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
- &calld->md_array, &calld->closure, &error)) {
+ &calld->md_array, &calld->async_result_closure, &error)) {
// Synchronous return; invoke on_credentials_metadata() directly.
on_credentials_metadata(exec_ctx, batch, error);
GRPC_ERROR_UNREF(error);
+ } else {
+ // Async return; register cancellation closure with call combiner.
+ GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_get_request_metadata,
+ elem, grpc_schedule_on_exec_ctx);
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
+ &calld->async_cancel_closure);
}
}
@@ -258,7 +220,7 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
grpc_call_element *elem = batch->handler_private.extra_arg;
call_data *calld = elem->call_data;
-
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL);
if (error == GRPC_ERROR_NONE) {
send_security_metadata(exec_ctx, elem, batch);
} else {
@@ -271,7 +233,8 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, batch,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg),
GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAUTHENTICATED));
+ GRPC_STATUS_UNAUTHENTICATED),
+ calld->call_combiner);
gpr_free(error_msg);
}
}
@@ -282,7 +245,7 @@ static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg,
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_channel_security_connector_cancel_check_call_host(
- exec_ctx, chand->security_connector, &calld->closure,
+ exec_ctx, chand->security_connector, &calld->async_result_closure,
GRPC_ERROR_REF(error));
}
@@ -295,52 +258,19 @@ static void auth_start_transport_stream_op_batch(
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- if (batch->cancel_stream) {
- while (true) {
- // Decode the original cancellation state.
- gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *cancel_error = GRPC_ERROR_NONE;
- grpc_closure *func = NULL;
- decode_cancel_state(original_state, &func, &cancel_error);
- // If we had already set a cancellation error, there's nothing
- // more to do.
- if (cancel_error != GRPC_ERROR_NONE) break;
- // If there's a cancel func, call it.
- // Note that even if the cancel func has been changed by some
- // other thread between when we decoded it and now, it will just
- // be a no-op.
- cancel_error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
- if (func != NULL) {
- GRPC_CLOSURE_SCHED(exec_ctx, func, GRPC_ERROR_REF(cancel_error));
- }
- // Encode the new error into cancellation state.
- if (gpr_atm_full_cas(&calld->cancellation_state, original_state,
- encode_cancel_state_error(cancel_error))) {
- break; // Success.
- }
- // The cas failed, so try again.
- }
- } else {
- /* double checked lock over security context to ensure it's set once */
- if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
- gpr_mu_lock(&calld->security_context_mu);
- if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
- GPR_ASSERT(batch->payload->context != NULL);
- if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
- batch->payload->context[GRPC_CONTEXT_SECURITY].value =
- grpc_client_security_context_create();
- batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
- grpc_client_security_context_destroy;
- }
- grpc_client_security_context *sec_ctx =
- batch->payload->context[GRPC_CONTEXT_SECURITY].value;
- GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
- sec_ctx->auth_context =
- GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
- gpr_atm_rel_store(&calld->security_context_set, 1);
- }
- gpr_mu_unlock(&calld->security_context_mu);
+ if (!batch->cancel_stream) {
+ GPR_ASSERT(batch->payload->context != NULL);
+ if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
+ batch->payload->context[GRPC_CONTEXT_SECURITY].value =
+ grpc_client_security_context_create();
+ batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
+ grpc_client_security_context_destroy;
}
+ grpc_client_security_context *sec_ctx =
+ batch->payload->context[GRPC_CONTEXT_SECURITY].value;
+ GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
+ sec_ctx->auth_context =
+ GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
}
if (batch->send_initial_metadata) {
@@ -365,26 +295,25 @@ static void auth_start_transport_stream_op_batch(
}
}
if (calld->have_host) {
- grpc_error *cancel_error = set_cancel_func(elem, cancel_check_call_host);
- if (cancel_error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- cancel_error);
+ batch->handler_private.extra_arg = elem;
+ GRPC_CLOSURE_INIT(&calld->async_result_closure, on_host_checked, batch,
+ grpc_schedule_on_exec_ctx);
+ char *call_host = grpc_slice_to_c_string(calld->host);
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (grpc_channel_security_connector_check_call_host(
+ exec_ctx, chand->security_connector, call_host,
+ chand->auth_context, &calld->async_result_closure, &error)) {
+ // Synchronous return; invoke on_host_checked() directly.
+ on_host_checked(exec_ctx, batch, error);
+ GRPC_ERROR_UNREF(error);
} else {
- char *call_host = grpc_slice_to_c_string(calld->host);
- batch->handler_private.extra_arg = elem;
- grpc_error *error = GRPC_ERROR_NONE;
- if (grpc_channel_security_connector_check_call_host(
- exec_ctx, chand->security_connector, call_host,
- chand->auth_context,
- GRPC_CLOSURE_INIT(&calld->closure, on_host_checked, batch,
- grpc_schedule_on_exec_ctx),
- &error)) {
- // Synchronous return; invoke on_host_checked() directly.
- on_host_checked(exec_ctx, batch, error);
- GRPC_ERROR_UNREF(error);
- }
- gpr_free(call_host);
+ // Async return; register cancellation closure with call combiner.
+ GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_check_call_host,
+ elem, grpc_schedule_on_exec_ctx);
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
+ &calld->async_cancel_closure);
}
+ gpr_free(call_host);
GPR_TIMER_END("auth_start_transport_stream_op_batch", 0);
return; /* early exit */
}
@@ -400,8 +329,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
- memset(calld, 0, sizeof(*calld));
- gpr_mu_init(&calld->security_context_mu);
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
@@ -426,12 +354,6 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_unref_internal(exec_ctx, calld->method);
}
reset_auth_metadata_context(&calld->auth_md_context);
- gpr_mu_destroy(&calld->security_context_mu);
- gpr_atm cancel_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *cancel_error = GRPC_ERROR_NONE;
- grpc_closure *cancel_func = NULL;
- decode_cancel_state(cancel_state, &cancel_func, &cancel_error);
- GRPC_ERROR_UNREF(cancel_error);
}
/* Constructor for channel_data */
@@ -490,6 +412,5 @@ const grpc_channel_filter grpc_client_auth_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client-auth"};
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index 9bf3f0ca0f..b721ce4a22 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -26,7 +26,15 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
+typedef enum {
+ STATE_INIT = 0,
+ STATE_DONE,
+ STATE_CANCELLED,
+} async_state;
+
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
+ grpc_call_stack *owning_call;
grpc_transport_stream_op_batch *recv_initial_metadata_batch;
grpc_closure *original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
@@ -34,6 +42,8 @@ typedef struct call_data {
const grpc_metadata *consumed_md;
size_t num_consumed_md;
grpc_auth_context *auth_context;
+ grpc_closure cancel_closure;
+ gpr_atm state; // async_state
} call_data;
typedef struct channel_data {
@@ -78,54 +88,92 @@ static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
return GRPC_FILTERED_MDELEM(md);
}
-/* called from application code */
-static void on_md_processing_done(
- void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
- const grpc_metadata *response_md, size_t num_response_md,
- grpc_status_code status, const char *error_details) {
- grpc_call_element *elem = user_data;
+static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const grpc_metadata *consumed_md,
+ size_t num_consumed_md,
+ const grpc_metadata *response_md,
+ size_t num_response_md,
+ grpc_error *error) {
call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL);
/* TODO(jboeuf): Implement support for response_md. */
if (response_md != NULL && num_response_md > 0) {
gpr_log(GPR_INFO,
"response_md in auth metadata processing not supported for now. "
"Ignoring...");
}
- grpc_error *error = GRPC_ERROR_NONE;
- if (status == GRPC_STATUS_OK) {
+ if (error == GRPC_ERROR_NONE) {
calld->consumed_md = consumed_md;
calld->num_consumed_md = num_consumed_md;
error = grpc_metadata_batch_filter(
- &exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
+ exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
remove_consumed_md, elem, "Response metadata filtering error");
- } else {
- if (error_details == NULL) {
- error_details = "Authentication metadata processing failed.";
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, calld->original_recv_initial_metadata_ready,
+ error);
+}
+
+// Called from application code.
+static void on_md_processing_done(
+ void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
+ const grpc_metadata *response_md, size_t num_response_md,
+ grpc_status_code status, const char *error_details) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ // If the call was not cancelled while we were in flight, process the result.
+ if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
+ (gpr_atm)STATE_DONE)) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (status != GRPC_STATUS_OK) {
+ if (error_details == NULL) {
+ error_details = "Authentication metadata processing failed.";
+ }
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
+ GRPC_ERROR_INT_GRPC_STATUS, status);
}
- error =
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
- GRPC_ERROR_INT_GRPC_STATUS, status);
+ on_md_processing_done_inner(&exec_ctx, elem, consumed_md, num_consumed_md,
+ response_md, num_response_md, error);
}
+ // Clean up.
for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
}
grpc_metadata_array_destroy(&calld->md);
- GRPC_CLOSURE_SCHED(&exec_ctx, calld->original_recv_initial_metadata_ready,
- error);
+ GRPC_CALL_STACK_UNREF(&exec_ctx, calld->owning_call, "server_auth_metadata");
grpc_exec_ctx_finish(&exec_ctx);
}
+static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = elem->call_data;
+ // If the result was not already processed, invoke the callback now.
+ if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
+ (gpr_atm)STATE_CANCELLED)) {
+ on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0,
+ GRPC_ERROR_REF(error));
+ }
+}
+
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *elem = arg;
+ grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
if (error == GRPC_ERROR_NONE) {
if (chand->creds != NULL && chand->creds->processor.process != NULL) {
+ // We're calling out to the application, so we need to make sure
+ // to drop the call combiner early if we get cancelled.
+ GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
+ &calld->cancel_closure);
+ GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata");
calld->md = metadata_batch_to_md_array(
batch->payload->recv_initial_metadata.recv_initial_metadata);
chand->creds->processor.process(
@@ -159,6 +207,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
+ calld->call_combiner = args->call_combiner;
+ calld->owning_call = args->call_stack;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -218,6 +268,5 @@ const grpc_channel_filter grpc_server_auth_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server-auth"};
diff --git a/src/core/lib/support/block_annotate.h b/src/core/lib/support/block_annotate.h
index 0a2cb45018..8e3ef7df65 100644
--- a/src/core/lib/support/block_annotate.h
+++ b/src/core/lib/support/block_annotate.h
@@ -19,15 +19,37 @@
#ifndef GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H
#define GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void gpr_thd_start_blocking_region();
+void gpr_thd_end_blocking_region();
+
+#ifdef __cplusplus
+}
+#endif
+
/* These annotations identify the beginning and end of regions where
the code may block for reasons other than synchronization functions.
These include poll, epoll, and getaddrinfo. */
+#ifdef GRPC_SCHEDULING_MARK_BLOCKING_REGION
+#define GRPC_SCHEDULING_START_BLOCKING_REGION \
+ do { \
+ gpr_thd_start_blocking_region(); \
+ } while (0)
+#define GRPC_SCHEDULING_END_BLOCKING_REGION \
+ do { \
+ gpr_thd_end_blocking_region(); \
+ } while (0)
+#else
#define GRPC_SCHEDULING_START_BLOCKING_REGION \
do { \
} while (0)
#define GRPC_SCHEDULING_END_BLOCKING_REGION \
do { \
} while (0)
+#endif
#endif /* GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H */
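With GRPC_SCHEDULING_MARK_BLOCKING_REGION defined, the macros above call into the new gpr_thd hooks around syscalls that may block; otherwise they compile away. A sketch of wrapping one such call; the function name is illustrative and error handling is omitted.

#include <netdb.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "src/core/lib/support/block_annotate.h"

static int resolve_sketch(const char *host, struct addrinfo **result) {
  struct addrinfo hints = {0};
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  int rc = getaddrinfo(host, NULL, &hints, result);  /* may block */
  GRPC_SCHEDULING_END_BLOCKING_REGION;
  return rc;
}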
diff --git a/src/core/lib/support/thd_internal.h b/src/core/lib/support/thd_internal.h
deleted file mode 100644
index cc468c7846..0000000000
--- a/src/core/lib/support/thd_internal.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_SUPPORT_THD_INTERNAL_H
-#define GRPC_CORE_LIB_SUPPORT_THD_INTERNAL_H
-
-/* Internal interfaces between modules within the gpr support library. */
-
-#endif /* GRPC_CORE_LIB_SUPPORT_THD_INTERNAL_H */
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 00ec9c7c9a..4a7152ea60 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -121,6 +121,7 @@ typedef struct batch_control {
bool is_closure;
} notify_tag;
} completion_data;
+ grpc_closure start_batch;
grpc_closure finish_batch;
gpr_refcount steps_to_complete;
@@ -147,6 +148,7 @@ typedef struct {
struct grpc_call {
gpr_refcount ext_ref;
gpr_arena *arena;
+ grpc_call_combiner call_combiner;
grpc_completion_queue *cq;
grpc_polling_entity pollent;
grpc_channel *channel;
@@ -183,6 +185,11 @@ struct grpc_call {
Element 0 is initial metadata, element 1 is trailing metadata. */
grpc_metadata_array *buffered_metadata[2];
+ grpc_metadata compression_md;
+
+ // A char* indicating the peer name.
+ gpr_atm peer_string;
+
/* Packed received call statuses from various sources */
gpr_atm status[STATUS_SOURCE_COUNT];
@@ -192,8 +199,12 @@ struct grpc_call {
/* Compression algorithm for *incoming* data */
grpc_compression_algorithm incoming_compression_algorithm;
+ /* Stream compression algorithm for *incoming* data */
+ grpc_stream_compression_algorithm incoming_stream_compression_algorithm;
/* Supported encodings (compression algorithms), a bitset */
uint32_t encodings_accepted_by_peer;
+ /* Supported stream encodings (stream compression algorithms), a bitset */
+ uint32_t stream_encodings_accepted_by_peer;
/* Contexts for various subsystems (security, tracing, ...). */
grpc_call_context_element context[GRPC_CONTEXT_COUNT];
@@ -241,8 +252,9 @@ grpc_tracer_flag grpc_compression_trace =
#define CALL_FROM_TOP_ELEM(top_elem) \
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
-static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *op);
+static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op_batch *op,
+ grpc_closure *start_batch_closure);
static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_status_code status,
const char *description);
@@ -307,6 +319,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
call->arena = arena;
+ grpc_call_combiner_init(&call->call_combiner);
*out_call = call;
call->channel = args->channel;
call->cq = args->cq;
@@ -410,7 +423,8 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
.path = path,
.start_time = call->start_time,
.deadline = send_deadline,
- .arena = call->arena};
+ .arena = call->arena,
+ .call_combiner = &call->call_combiner};
add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1,
destroy_call, call, &call_args));
if (error != GRPC_ERROR_NONE) {
@@ -477,6 +491,8 @@ static void release_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
grpc_call *c = call;
grpc_channel *channel = c->channel;
+ grpc_call_combiner_destroy(&c->call_combiner);
+ gpr_free((char *)c->peer_string);
grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
}
@@ -576,30 +592,37 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
return GRPC_CALL_OK;
}
-static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *op) {
- grpc_call_element *elem;
-
- GPR_TIMER_BEGIN("execute_op", 0);
- elem = CALL_ELEM_FROM_CALL(call, 0);
- elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
- GPR_TIMER_END("execute_op", 0);
+// This is called via the call combiner to start sending a batch down
+// the filter stack.
+static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *ignored) {
+ grpc_transport_stream_op_batch *batch = arg;
+ grpc_call *call = batch->handler_private.extra_arg;
+ GPR_TIMER_BEGIN("execute_batch", 0);
+ grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+ GPR_TIMER_END("execute_batch", 0);
+}
+
+// start_batch_closure points to a caller-allocated closure to be used
+// for entering the call combiner.
+static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op_batch *batch,
+ grpc_closure *start_batch_closure) {
+ batch->handler_private.extra_arg = call;
+ GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner, start_batch_closure,
+ GRPC_ERROR_NONE, "executing batch");
}
char *grpc_call_get_peer(grpc_call *call) {
- grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- char *result;
- GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
- result = elem->filter->get_peer(&exec_ctx, elem);
- if (result == NULL) {
- result = grpc_channel_get_target(call->channel);
- }
- if (result == NULL) {
- result = gpr_strdup("unknown");
- }
- grpc_exec_ctx_finish(&exec_ctx);
- return result;
+ char *peer_string = (char *)gpr_atm_acq_load(&call->peer_string);
+ if (peer_string != NULL) return gpr_strdup(peer_string);
+ peer_string = grpc_channel_get_target(call->channel);
+ if (peer_string != NULL) return peer_string;
+ return gpr_strdup("unknown");
}
grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
@@ -626,20 +649,41 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
return GRPC_CALL_OK;
}
-static void done_termination(grpc_exec_ctx *exec_ctx, void *call,
+typedef struct {
+ grpc_call *call;
+ grpc_closure start_batch;
+ grpc_closure finish_batch;
+} cancel_state;
+
+// The on_complete callback used when sending a cancel_stream batch down
+// the filter stack. Yields the call combiner when the batch is done.
+static void done_termination(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "termination");
+ cancel_state *state = (cancel_state *)arg;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner,
+ "on_complete for cancel_stream op");
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination");
+ gpr_free(state);
}
static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_error *error) {
GRPC_CALL_INTERNAL_REF(c, "termination");
+ // Inform the call combiner of the cancellation, so that it can cancel
+ // any in-flight asynchronous actions that may be holding the call
+ // combiner. This ensures that the cancel_stream batch can be sent
+ // down the filter stack in a timely manner.
+ grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error));
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
- grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(
- GRPC_CLOSURE_CREATE(done_termination, c, grpc_schedule_on_exec_ctx));
+ cancel_state *state = (cancel_state *)gpr_malloc(sizeof(*state));
+ state->call = c;
+ GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state,
+ grpc_schedule_on_exec_ctx);
+ grpc_transport_stream_op_batch *op =
+ grpc_make_transport_stream_op(&state->finish_batch);
op->cancel_stream = true;
op->payload->cancel_stream.cancel_error = error;
- execute_op(exec_ctx, c, op);
+ execute_batch(exec_ctx, c, op, &state->start_batch);
}
static grpc_error *error_from_status(grpc_status_code status,
@@ -752,6 +796,12 @@ static void set_incoming_compression_algorithm(
call->incoming_compression_algorithm = algo;
}
+static void set_incoming_stream_compression_algorithm(
+ grpc_call *call, grpc_stream_compression_algorithm algo) {
+ GPR_ASSERT(algo < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
+ call->incoming_stream_compression_algorithm = algo;
+}
+
grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
grpc_call *call) {
grpc_compression_algorithm algorithm;
@@ -765,6 +815,13 @@ static grpc_compression_algorithm compression_algorithm_for_level_locked(
call->encodings_accepted_by_peer);
}
+static grpc_stream_compression_algorithm
+stream_compression_algorithm_for_level_locked(
+ grpc_call *call, grpc_stream_compression_level level) {
+ return grpc_stream_compression_algorithm_for_level(
+ level, call->stream_encodings_accepted_by_peer);
+}
+
uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) {
uint32_t flags;
flags = call->test_only_last_message_flags;
@@ -819,12 +876,70 @@ static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
(void *)(((uintptr_t)call->encodings_accepted_by_peer) + 1));
}
+static void set_stream_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
+ grpc_call *call,
+ grpc_mdelem mdel) {
+ size_t i;
+ grpc_stream_compression_algorithm algorithm;
+ grpc_slice_buffer accept_encoding_parts;
+ grpc_slice accept_encoding_slice;
+ void *accepted_user_data;
+
+ accepted_user_data =
+ grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer);
+ if (accepted_user_data != NULL) {
+ call->stream_encodings_accepted_by_peer =
+ (uint32_t)(((uintptr_t)accepted_user_data) - 1);
+ return;
+ }
+
+ accept_encoding_slice = GRPC_MDVALUE(mdel);
+ grpc_slice_buffer_init(&accept_encoding_parts);
+ grpc_slice_split(accept_encoding_slice, ",", &accept_encoding_parts);
+
+ /* Always support no compression */
+ GPR_BITSET(&call->stream_encodings_accepted_by_peer,
+ GRPC_STREAM_COMPRESS_NONE);
+ for (i = 0; i < accept_encoding_parts.count; i++) {
+ grpc_slice accept_encoding_entry_slice = accept_encoding_parts.slices[i];
+ if (grpc_stream_compression_algorithm_parse(accept_encoding_entry_slice,
+ &algorithm)) {
+ GPR_BITSET(&call->stream_encodings_accepted_by_peer, algorithm);
+ } else {
+ char *accept_encoding_entry_str =
+ grpc_slice_to_c_string(accept_encoding_entry_slice);
+ gpr_log(GPR_ERROR,
+ "Invalid entry in accept encoding metadata: '%s'. Ignoring.",
+ accept_encoding_entry_str);
+ gpr_free(accept_encoding_entry_str);
+ }
+ }
+
+ grpc_slice_buffer_destroy_internal(exec_ctx, &accept_encoding_parts);
+
+ grpc_mdelem_set_user_data(
+ mdel, destroy_encodings_accepted_by_peer,
+ (void *)(((uintptr_t)call->stream_encodings_accepted_by_peer) + 1));
+}
+
uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
uint32_t encodings_accepted_by_peer;
encodings_accepted_by_peer = call->encodings_accepted_by_peer;
return encodings_accepted_by_peer;
}
+uint32_t grpc_call_test_only_get_stream_encodings_accepted_by_peer(
+ grpc_call *call) {
+ uint32_t stream_encodings_accepted_by_peer;
+ stream_encodings_accepted_by_peer = call->stream_encodings_accepted_by_peer;
+ return stream_encodings_accepted_by_peer;
+}
+
+grpc_stream_compression_algorithm
+grpc_call_test_only_get_incoming_stream_encodings(grpc_call *call) {
+ return call->incoming_stream_compression_algorithm;
+}
+
static grpc_linked_mdelem *linked_from_md(const grpc_metadata *md) {
return (grpc_linked_mdelem *)&md->internal_data;
}
@@ -936,6 +1051,22 @@ static grpc_compression_algorithm decode_compression(grpc_mdelem md) {
return algorithm;
}
+static grpc_stream_compression_algorithm decode_stream_compression(
+ grpc_mdelem md) {
+ grpc_stream_compression_algorithm algorithm =
+ grpc_stream_compression_algorithm_from_slice(GRPC_MDVALUE(md));
+ if (algorithm == GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) {
+ char *md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ gpr_log(GPR_ERROR,
+ "Invalid incoming stream compression algorithm: '%s'. Interpreting "
+ "incoming data as uncompressed.",
+ md_c_str);
+ gpr_free(md_c_str);
+ return GRPC_STREAM_COMPRESS_NONE;
+ }
+ return algorithm;
+}
+
static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
int is_trailing) {
if (b->list.count == 0) return;
@@ -960,7 +1091,19 @@ static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_metadata_batch *b) {
- if (b->idx.named.grpc_encoding != NULL) {
+ if (b->idx.named.content_encoding != NULL) {
+ if (b->idx.named.grpc_encoding != NULL) {
+ gpr_log(GPR_ERROR,
+ "Received both content-encoding and grpc-encoding header. "
+ "Ignoring grpc-encoding.");
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding);
+ }
+ GPR_TIMER_BEGIN("incoming_stream_compression_algorithm", 0);
+ set_incoming_stream_compression_algorithm(
+ call, decode_stream_compression(b->idx.named.content_encoding->md));
+ GPR_TIMER_END("incoming_stream_compression_algorithm", 0);
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_encoding);
+ } else if (b->idx.named.grpc_encoding != NULL) {
GPR_TIMER_BEGIN("incoming_compression_algorithm", 0);
set_incoming_compression_algorithm(
call, decode_compression(b->idx.named.grpc_encoding->md));
@@ -974,6 +1117,13 @@ static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_accept_encoding);
GPR_TIMER_END("encodings_accepted_by_peer", 0);
}
+ if (b->idx.named.accept_encoding != NULL) {
+ GPR_TIMER_BEGIN("stream_encodings_accepted_by_peer", 0);
+ set_stream_encodings_accepted_by_peer(exec_ctx, call,
+ b->idx.named.accept_encoding->md);
+ grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.accept_encoding);
+ GPR_TIMER_END("stream_encodings_accepted_by_peer", 0);
+ }
publish_app_metadata(call, b, false);
}
@@ -1298,11 +1448,64 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
}
}
+// The recv_message_ready callback used when sending a batch containing
+// a recv_message op down the filter stack. Yields the call combiner
+// before processing the received message.
+static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *bctlp,
+ grpc_error *error) {
+ batch_control *bctl = bctlp;
+ grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready");
+ receiving_stream_ready(exec_ctx, bctlp, error);
+}
+
static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
- /* validate call->incoming_compression_algorithm */
- if (call->incoming_compression_algorithm != GRPC_COMPRESS_NONE) {
+ /* validate compression algorithms */
+ if (call->incoming_stream_compression_algorithm !=
+ GRPC_STREAM_COMPRESS_NONE) {
+ const grpc_stream_compression_algorithm algo =
+ call->incoming_stream_compression_algorithm;
+ char *error_msg = NULL;
+ const grpc_compression_options compression_options =
+ grpc_channel_compression_options(call->channel);
+ if (algo >= GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) {
+ gpr_asprintf(&error_msg,
+ "Invalid stream compression algorithm value '%d'.", algo);
+ gpr_log(GPR_ERROR, "%s", error_msg);
+ cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE,
+ GRPC_STATUS_UNIMPLEMENTED, error_msg);
+ } else if (grpc_compression_options_is_stream_compression_algorithm_enabled(
+ &compression_options, algo) == 0) {
+ /* check if algorithm is supported by current channel config */
+ char *algo_name = NULL;
+ grpc_stream_compression_algorithm_name(algo, &algo_name);
+ gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.",
+ algo_name);
+ gpr_log(GPR_ERROR, "%s", error_msg);
+ cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE,
+ GRPC_STATUS_UNIMPLEMENTED, error_msg);
+ }
+ gpr_free(error_msg);
+
+ GPR_ASSERT(call->stream_encodings_accepted_by_peer != 0);
+ if (!GPR_BITGET(call->stream_encodings_accepted_by_peer,
+ call->incoming_stream_compression_algorithm)) {
+ if (GRPC_TRACER_ON(grpc_compression_trace)) {
+ char *algo_name = NULL;
+ grpc_stream_compression_algorithm_name(
+ call->incoming_stream_compression_algorithm, &algo_name);
+ gpr_log(
+ GPR_ERROR,
+ "Stream compression algorithm (content-encoding = '%s') not "
+ "present in the bitset of accepted encodings (accept-encodings: "
+ "'0x%x')",
+ algo_name, call->stream_encodings_accepted_by_peer);
+ }
+ }
+ } else if (call->incoming_compression_algorithm != GRPC_COMPRESS_NONE) {
const grpc_compression_algorithm algo =
call->incoming_compression_algorithm;
char *error_msg = NULL;
@@ -1329,22 +1532,20 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
call->incoming_compression_algorithm = algo;
}
gpr_free(error_msg);
- }
- /* make sure the received grpc-encoding is amongst the ones listed in
- * grpc-accept-encoding */
- GPR_ASSERT(call->encodings_accepted_by_peer != 0);
- if (!GPR_BITGET(call->encodings_accepted_by_peer,
- call->incoming_compression_algorithm)) {
- if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name = NULL;
- grpc_compression_algorithm_name(call->incoming_compression_algorithm,
- &algo_name);
- gpr_log(GPR_ERROR,
- "Compression algorithm (grpc-encoding = '%s') not present in "
- "the bitset of accepted encodings (grpc-accept-encodings: "
- "'0x%x')",
- algo_name, call->encodings_accepted_by_peer);
+ GPR_ASSERT(call->encodings_accepted_by_peer != 0);
+ if (!GPR_BITGET(call->encodings_accepted_by_peer,
+ call->incoming_compression_algorithm)) {
+ if (GRPC_TRACER_ON(grpc_compression_trace)) {
+ char *algo_name = NULL;
+ grpc_compression_algorithm_name(call->incoming_compression_algorithm,
+ &algo_name);
+ gpr_log(GPR_ERROR,
+ "Compression algorithm (grpc-encoding = '%s') not present in "
+ "the bitset of accepted encodings (grpc-accept-encodings: "
+ "'0x%x')",
+ algo_name, call->encodings_accepted_by_peer);
+ }
}
}
}
@@ -1365,6 +1566,9 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
batch_control *bctl = bctlp;
grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner,
+ "recv_initial_metadata_ready");
+
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
@@ -1399,7 +1603,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
batch_control *bctl = bctlp;
-
+ grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete");
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
finish_batch_step(exec_ctx, bctl);
}
@@ -1419,9 +1624,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
int num_completion_callbacks_needed = 1;
grpc_call_error error = GRPC_CALL_OK;
- // sent_initial_metadata guards against variable reuse.
- grpc_metadata compression_md;
-
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
@@ -1469,31 +1671,60 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error;
}
/* process compression level */
- memset(&compression_md, 0, sizeof(compression_md));
+ memset(&call->compression_md, 0, sizeof(call->compression_md));
size_t additional_metadata_count = 0;
- grpc_compression_level effective_compression_level;
+ grpc_compression_level effective_compression_level =
+ GRPC_COMPRESS_LEVEL_NONE;
+ grpc_stream_compression_level effective_stream_compression_level =
+ GRPC_STREAM_COMPRESS_LEVEL_NONE;
bool level_set = false;
- if (op->data.send_initial_metadata.maybe_compression_level.is_set) {
+ bool stream_compression = false;
+ if (op->data.send_initial_metadata.maybe_stream_compression_level
+ .is_set) {
+ effective_stream_compression_level =
+ op->data.send_initial_metadata.maybe_stream_compression_level
+ .level;
+ level_set = true;
+ stream_compression = true;
+ } else if (op->data.send_initial_metadata.maybe_compression_level
+ .is_set) {
effective_compression_level =
op->data.send_initial_metadata.maybe_compression_level.level;
level_set = true;
} else {
const grpc_compression_options copts =
grpc_channel_compression_options(call->channel);
- level_set = copts.default_level.is_set;
- if (level_set) {
+ if (copts.default_stream_compression_level.is_set) {
+ level_set = true;
+ effective_stream_compression_level =
+ copts.default_stream_compression_level.level;
+ stream_compression = true;
+ } else if (copts.default_level.is_set) {
+ level_set = true;
effective_compression_level = copts.default_level.level;
}
}
if (level_set && !call->is_client) {
- const grpc_compression_algorithm calgo =
- compression_algorithm_for_level_locked(
- call, effective_compression_level);
- // the following will be picked up by the compress filter and used as
- // the call's compression algorithm.
- compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
- compression_md.value = grpc_compression_algorithm_slice(calgo);
- additional_metadata_count++;
+ if (stream_compression) {
+ const grpc_stream_compression_algorithm calgo =
+ stream_compression_algorithm_for_level_locked(
+ call, effective_stream_compression_level);
+ call->compression_md.key =
+ GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST;
+ call->compression_md.value =
+ grpc_stream_compression_algorithm_slice(calgo);
+ } else {
+ const grpc_compression_algorithm calgo =
+ compression_algorithm_for_level_locked(
+ call, effective_compression_level);
+ /* the following will be picked up by the compress filter and used
+ * as the call's compression algorithm. */
+ call->compression_md.key =
+ GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
+ call->compression_md.value =
+ grpc_compression_algorithm_slice(calgo);
+ additional_metadata_count++;
+ }
}
if (op->data.send_initial_metadata.count + additional_metadata_count >
@@ -1506,7 +1737,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (!prepare_application_metadata(
exec_ctx, call, (int)op->data.send_initial_metadata.count,
op->data.send_initial_metadata.metadata, 0, call->is_client,
- &compression_md, (int)additional_metadata_count)) {
+ &call->compression_md, (int)additional_metadata_count)) {
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
@@ -1518,6 +1749,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->send_initial_metadata.send_initial_metadata_flags =
op->flags;
+ if (call->is_client) {
+ stream_op_payload->send_initial_metadata.peer_string =
+ &call->peer_string;
+ }
break;
case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)) {
@@ -1650,6 +1885,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->recv_initial_metadata.recv_initial_metadata_ready =
&call->receiving_initial_metadata_ready;
+ if (!call->is_client) {
+ stream_op_payload->recv_initial_metadata.peer_string =
+ &call->peer_string;
+ }
num_completion_callbacks_needed++;
break;
case GRPC_OP_RECV_MESSAGE:
@@ -1666,8 +1905,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->recv_message = true;
call->receiving_buffer = op->data.recv_message.recv_message;
stream_op_payload->recv_message.recv_message = &call->receiving_stream;
- GRPC_CLOSURE_INIT(&call->receiving_stream_ready, receiving_stream_ready,
- bctl, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&call->receiving_stream_ready,
+ receiving_stream_ready_in_call_combiner, bctl,
+ grpc_schedule_on_exec_ctx);
stream_op_payload->recv_message.recv_message_ready =
&call->receiving_stream_ready;
num_completion_callbacks_needed++;
@@ -1737,7 +1977,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->on_complete = &bctl->finish_batch;
gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
- execute_op(exec_ctx, call, stream_op);
+ execute_batch(exec_ctx, call, stream_op, &bctl->start_batch);
done:
GPR_TIMER_END("grpc_call_start_batch", 0);
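For readers following the precedence implemented in call_start_batch above: an explicit per-op stream-compression level wins over an explicit per-op message-compression level, and the channel's defaults are consulted only when neither is set. The following standalone sketch mirrors that branch order; it is illustrative only (pick_compression_source and the pick_t enum are hypothetical, the grpc_compression_options fields are the ones read above, and the header path is assumed).

#include <stdbool.h>
#include <grpc/compression.h> /* grpc_compression_options (include path assumed) */

typedef enum { PICK_NONE, PICK_STREAM_LEVEL, PICK_MESSAGE_LEVEL } pick_t;

/* Mirrors the order of the if/else chain in call_start_batch above. */
static pick_t pick_compression_source(bool op_stream_level_set,
                                      bool op_message_level_set,
                                      const grpc_compression_options *copts) {
  if (op_stream_level_set) return PICK_STREAM_LEVEL;   /* per-op stream level */
  if (op_message_level_set) return PICK_MESSAGE_LEVEL; /* per-op message level */
  if (copts->default_stream_compression_level.is_set)
    return PICK_STREAM_LEVEL;                          /* channel default (stream) */
  if (copts->default_level.is_set)
    return PICK_MESSAGE_LEVEL;                         /* channel default (message) */
  return PICK_NONE; /* nothing set: no compression metadata is attached */
}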
diff --git a/src/core/lib/surface/call_test_only.h b/src/core/lib/surface/call_test_only.h
index 2f1b80bfd7..a5a01b3679 100644
--- a/src/core/lib/surface/call_test_only.h
+++ b/src/core/lib/surface/call_test_only.h
@@ -42,6 +42,18 @@ uint32_t grpc_call_test_only_get_message_flags(grpc_call *call);
* To be indexed by grpc_compression_algorithm enum values. */
uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call);
+/** Returns a bitset for the stream encodings (stream compression algorithms)
+ * supported by \a call's peer.
+ *
+ * To be indexed by grpc_stream_compression_algorithm enum values. */
+uint32_t grpc_call_test_only_get_stream_encodings_accepted_by_peer(
+ grpc_call *call);
+
+/** Returns the incoming stream compression algorithm (content-encoding header)
+ * received by a call. */
+grpc_stream_compression_algorithm
+grpc_call_test_only_get_incoming_stream_encodings(grpc_call *call);
+
#ifdef __cplusplus
}
#endif
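A hedged usage sketch for the two test-only accessors declared above; only the logging call and include paths are assumed beyond what this header shows.

#include <grpc/support/log.h>
#include "src/core/lib/surface/call_test_only.h"

/* Illustrative test helper: after initial metadata has been exchanged,
 * report which stream encodings the peer accepts and which one arrived. */
static void log_stream_compression_state(grpc_call *call) {
  uint32_t accepted =
      grpc_call_test_only_get_stream_encodings_accepted_by_peer(call);
  grpc_stream_compression_algorithm incoming =
      grpc_call_test_only_get_incoming_stream_encodings(call);
  gpr_log(GPR_INFO, "peer accepts stream-encoding bitset 0x%x, incoming=%d",
          accepted, (int)incoming);
}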
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 5780a18ce8..850fbe6a69 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -142,6 +142,16 @@ grpc_channel *grpc_channel_create_with_builder(
GRPC_COMPRESS_LEVEL_NONE,
GRPC_COMPRESS_LEVEL_COUNT - 1});
} else if (0 == strcmp(args->args[i].key,
+ GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) {
+ channel->compression_options.default_stream_compression_level.is_set =
+ true;
+ channel->compression_options.default_stream_compression_level.level =
+ (grpc_stream_compression_level)grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){GRPC_STREAM_COMPRESS_LEVEL_NONE,
+ GRPC_STREAM_COMPRESS_LEVEL_NONE,
+ GRPC_STREAM_COMPRESS_LEVEL_COUNT - 1});
+ } else if (0 == strcmp(args->args[i].key,
GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
channel->compression_options.default_algorithm.is_set = true;
channel->compression_options.default_algorithm.algorithm =
@@ -149,12 +159,31 @@ grpc_channel *grpc_channel_create_with_builder(
&args->args[i],
(grpc_integer_options){GRPC_COMPRESS_NONE, GRPC_COMPRESS_NONE,
GRPC_COMPRESS_ALGORITHMS_COUNT - 1});
+ } else if (0 == strcmp(args->args[i].key,
+ GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
+ channel->compression_options.default_stream_compression_algorithm.is_set =
+ true;
+ channel->compression_options.default_stream_compression_algorithm
+ .algorithm =
+ (grpc_stream_compression_algorithm)grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){
+ GRPC_STREAM_COMPRESS_NONE, GRPC_STREAM_COMPRESS_NONE,
+ GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT - 1});
} else if (0 ==
strcmp(args->args[i].key,
GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
channel->compression_options.enabled_algorithms_bitset =
(uint32_t)args->args[i].value.integer |
0x1; /* always support no compression */
+ } else if (0 ==
+ strcmp(
+ args->args[i].key,
+ GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
+ channel->compression_options
+ .enabled_stream_compression_algorithms_bitset =
+ (uint32_t)args->args[i].value.integer |
+ 0x1; /* always support no compression */
}
}
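For context on the new arg keys handled in the loop above, here is a hedged sketch of supplying a default stream-compression level at channel-creation time. The arg keys and enum values are the identifiers parsed above; the channel-creation call and header path are assumptions, not part of this change.

#include <grpc/grpc.h>

/* Illustrative: request a default stream-compression level via channel args. */
static grpc_channel *create_channel_with_stream_compression_default(
    const char *target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_LEVEL;
  arg.value.integer = GRPC_STREAM_COMPRESS_LEVEL_NONE; /* or a stronger level */
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, NULL /* reserved */);
}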
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index c20cfbc740..10e4e5ab0c 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -976,7 +976,6 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
if (cqd->shutdown_called) {
gpr_mu_unlock(cq->mu);
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
- GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
return;
}
cqd->shutdown_called = true;
@@ -1208,7 +1207,6 @@ static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
if (cqd->shutdown_called) {
gpr_mu_unlock(cq->mu);
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down (pluck cq)");
- GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
return;
}
cqd->shutdown_called = true;
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index d199ac060e..75a13d28fc 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -30,6 +30,7 @@
#include "src/core/lib/channel/handshaker_registry.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/http/parser.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
@@ -127,6 +128,7 @@ void grpc_init(void) {
grpc_register_tracer(&grpc_trace_channel_stack_builder);
grpc_register_tracer(&grpc_http1_trace);
grpc_register_tracer(&grpc_cq_pluck_trace); // default on
+ grpc_register_tracer(&grpc_call_combiner_trace);
grpc_register_tracer(&grpc_combiner_trace);
grpc_register_tracer(&grpc_server_channel_trace);
grpc_register_tracer(&grpc_bdp_estimator_trace);
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index a0791080a9..6286f9159d 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -40,6 +40,7 @@ namespace grpc_core {
namespace {
struct CallData {
+ grpc_call_combiner *call_combiner;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
grpc_core::atomic<bool> filled_metadata;
@@ -52,14 +53,14 @@ struct ChannelData {
static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *mdb) {
- CallData *calld = static_cast<CallData *>(elem->call_data);
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
bool expected = false;
if (!calld->filled_metadata.compare_exchange_strong(
expected, true, grpc_core::memory_order_relaxed,
grpc_core::memory_order_relaxed)) {
return;
}
- ChannelData *chand = static_cast<ChannelData *>(elem->channel_data);
+ ChannelData *chand = reinterpret_cast<ChannelData *>(elem->channel_data);
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_slices(
@@ -79,6 +80,7 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void lame_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
if (op->recv_initial_metadata) {
fill_metadata(exec_ctx, elem,
op->payload->recv_initial_metadata.recv_initial_metadata);
@@ -87,12 +89,8 @@ static void lame_start_transport_stream_op_batch(
op->payload->recv_trailing_metadata.recv_trailing_metadata);
}
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, op,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
-}
-
-static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return NULL;
+ exec_ctx, op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"),
+ calld->call_combiner);
}
static void lame_get_channel_info(grpc_exec_ctx *exec_ctx,
@@ -122,6 +120,8 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
@@ -156,7 +156,6 @@ extern "C" const grpc_channel_filter grpc_lame_filter = {
sizeof(grpc_core::ChannelData),
grpc_core::init_channel_elem,
grpc_core::destroy_channel_elem,
- grpc_core::lame_get_peer,
grpc_core::lame_get_channel_info,
"lame-client",
};
@@ -176,7 +175,7 @@ grpc_channel *grpc_lame_client_channel_create(const char *target,
"error_message=%s)",
3, (target, (int)error_code, error_message));
GPR_ASSERT(elem->filter == &grpc_lame_filter);
- auto chand = static_cast<grpc_core::ChannelData *>(elem->channel_data);
+ auto chand = reinterpret_cast<grpc_core::ChannelData *>(elem->channel_data);
chand->error_code = error_code;
chand->error_message = error_message;
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 66dcc299aa..8582d826ca 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -789,7 +789,6 @@ static void server_mutate_op(grpc_call_element *elem,
static void server_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
@@ -962,7 +961,6 @@ const grpc_channel_filter grpc_server_top_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server",
};
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 2388f19f81..28f05d5c44 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -40,65 +40,68 @@ static uint8_t g_bytes[] = {
114, 45, 115, 116, 97, 116, 115, 45, 98, 105, 110, 103, 114, 112, 99,
45, 116, 97, 103, 115, 45, 98, 105, 110, 103, 114, 112, 99, 45, 116,
114, 97, 99, 101, 45, 98, 105, 110, 99, 111, 110, 116, 101, 110, 116,
- 45, 116, 121, 112, 101, 103, 114, 112, 99, 45, 105, 110, 116, 101, 114,
- 110, 97, 108, 45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114, 101,
- 113, 117, 101, 115, 116, 117, 115, 101, 114, 45, 97, 103, 101, 110, 116,
- 104, 111, 115, 116, 108, 98, 45, 116, 111, 107, 101, 110, 103, 114, 112,
- 99, 45, 116, 105, 109, 101, 111, 117, 116, 103, 114, 112, 99, 46, 119,
- 97, 105, 116, 95, 102, 111, 114, 95, 114, 101, 97, 100, 121, 103, 114,
- 112, 99, 46, 116, 105, 109, 101, 111, 117, 116, 103, 114, 112, 99, 46,
- 109, 97, 120, 95, 114, 101, 113, 117, 101, 115, 116, 95, 109, 101, 115,
- 115, 97, 103, 101, 95, 98, 121, 116, 101, 115, 103, 114, 112, 99, 46,
- 109, 97, 120, 95, 114, 101, 115, 112, 111, 110, 115, 101, 95, 109, 101,
- 115, 115, 97, 103, 101, 95, 98, 121, 116, 101, 115, 47, 103, 114, 112,
- 99, 46, 108, 98, 46, 118, 49, 46, 76, 111, 97, 100, 66, 97, 108,
- 97, 110, 99, 101, 114, 47, 66, 97, 108, 97, 110, 99, 101, 76, 111,
- 97, 100, 48, 49, 50, 105, 100, 101, 110, 116, 105, 116, 121, 103, 122,
- 105, 112, 100, 101, 102, 108, 97, 116, 101, 116, 114, 97, 105, 108, 101,
- 114, 115, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 103,
- 114, 112, 99, 80, 79, 83, 84, 50, 48, 48, 52, 48, 52, 104, 116,
- 116, 112, 104, 116, 116, 112, 115, 103, 114, 112, 99, 71, 69, 84, 80,
- 85, 84, 47, 47, 105, 110, 100, 101, 120, 46, 104, 116, 109, 108, 50,
- 48, 52, 50, 48, 54, 51, 48, 52, 52, 48, 48, 53, 48, 48, 97,
- 99, 99, 101, 112, 116, 45, 99, 104, 97, 114, 115, 101, 116, 97, 99,
- 99, 101, 112, 116, 45, 101, 110, 99, 111, 100, 105, 110, 103, 103, 122,
- 105, 112, 44, 32, 100, 101, 102, 108, 97, 116, 101, 97, 99, 99, 101,
- 112, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 97, 99, 99, 101,
- 112, 116, 45, 114, 97, 110, 103, 101, 115, 97, 99, 99, 101, 112, 116,
- 97, 99, 99, 101, 115, 115, 45, 99, 111, 110, 116, 114, 111, 108, 45,
- 97, 108, 108, 111, 119, 45, 111, 114, 105, 103, 105, 110, 97, 103, 101,
- 97, 108, 108, 111, 119, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116,
- 105, 111, 110, 99, 97, 99, 104, 101, 45, 99, 111, 110, 116, 114, 111,
- 108, 99, 111, 110, 116, 101, 110, 116, 45, 100, 105, 115, 112, 111, 115,
- 105, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, 45, 101, 110,
- 99, 111, 100, 105, 110, 103, 99, 111, 110, 116, 101, 110, 116, 45, 108,
- 97, 110, 103, 117, 97, 103, 101, 99, 111, 110, 116, 101, 110, 116, 45,
- 108, 101, 110, 103, 116, 104, 99, 111, 110, 116, 101, 110, 116, 45, 108,
- 111, 99, 97, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, 45,
- 114, 97, 110, 103, 101, 99, 111, 111, 107, 105, 101, 100, 97, 116, 101,
- 101, 116, 97, 103, 101, 120, 112, 101, 99, 116, 101, 120, 112, 105, 114,
- 101, 115, 102, 114, 111, 109, 105, 102, 45, 109, 97, 116, 99, 104, 105,
- 102, 45, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, 110, 99,
- 101, 105, 102, 45, 110, 111, 110, 101, 45, 109, 97, 116, 99, 104, 105,
- 102, 45, 114, 97, 110, 103, 101, 105, 102, 45, 117, 110, 109, 111, 100,
- 105, 102, 105, 101, 100, 45, 115, 105, 110, 99, 101, 108, 97, 115, 116,
- 45, 109, 111, 100, 105, 102, 105, 101, 100, 108, 98, 45, 99, 111, 115,
- 116, 45, 98, 105, 110, 108, 105, 110, 107, 108, 111, 99, 97, 116, 105,
- 111, 110, 109, 97, 120, 45, 102, 111, 114, 119, 97, 114, 100, 115, 112,
- 114, 111, 120, 121, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97,
- 116, 101, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, 111, 114, 105,
- 122, 97, 116, 105, 111, 110, 114, 97, 110, 103, 101, 114, 101, 102, 101,
- 114, 101, 114, 114, 101, 102, 114, 101, 115, 104, 114, 101, 116, 114, 121,
- 45, 97, 102, 116, 101, 114, 115, 101, 114, 118, 101, 114, 115, 101, 116,
- 45, 99, 111, 111, 107, 105, 101, 115, 116, 114, 105, 99, 116, 45, 116,
- 114, 97, 110, 115, 112, 111, 114, 116, 45, 115, 101, 99, 117, 114, 105,
- 116, 121, 116, 114, 97, 110, 115, 102, 101, 114, 45, 101, 110, 99, 111,
- 100, 105, 110, 103, 118, 97, 114, 121, 118, 105, 97, 119, 119, 119, 45,
- 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 105, 100, 101,
- 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, 105, 100,
- 101, 110, 116, 105, 116, 121, 44, 103, 122, 105, 112, 100, 101, 102, 108,
- 97, 116, 101, 44, 103, 122, 105, 112, 105, 100, 101, 110, 116, 105, 116,
- 121, 44, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112};
+ 45, 116, 121, 112, 101, 99, 111, 110, 116, 101, 110, 116, 45, 101, 110,
+ 99, 111, 100, 105, 110, 103, 97, 99, 99, 101, 112, 116, 45, 101, 110,
+ 99, 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 105, 110, 116, 101,
+ 114, 110, 97, 108, 45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114,
+ 101, 113, 117, 101, 115, 116, 103, 114, 112, 99, 45, 105, 110, 116, 101,
+ 114, 110, 97, 108, 45, 115, 116, 114, 101, 97, 109, 45, 101, 110, 99,
+ 111, 100, 105, 110, 103, 45, 114, 101, 113, 117, 101, 115, 116, 117, 115,
+ 101, 114, 45, 97, 103, 101, 110, 116, 104, 111, 115, 116, 108, 98, 45,
+ 116, 111, 107, 101, 110, 103, 114, 112, 99, 45, 116, 105, 109, 101, 111,
+ 117, 116, 103, 114, 112, 99, 46, 119, 97, 105, 116, 95, 102, 111, 114,
+ 95, 114, 101, 97, 100, 121, 103, 114, 112, 99, 46, 116, 105, 109, 101,
+ 111, 117, 116, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 113,
+ 117, 101, 115, 116, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98, 121,
+ 116, 101, 115, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 115,
+ 112, 111, 110, 115, 101, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98,
+ 121, 116, 101, 115, 47, 103, 114, 112, 99, 46, 108, 98, 46, 118, 49,
+ 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99, 101, 114, 47, 66,
+ 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 48, 49, 50, 105, 100,
+ 101, 110, 116, 105, 116, 121, 103, 122, 105, 112, 100, 101, 102, 108, 97,
+ 116, 101, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, 105,
+ 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 80, 79, 83, 84,
+ 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104, 116, 116, 112, 115,
+ 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47, 47, 105, 110, 100,
+ 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50, 48, 54, 51, 48,
+ 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101, 112, 116, 45, 99,
+ 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44, 32, 100, 101, 102,
+ 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, 103,
+ 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, 103,
+ 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, 45,
+ 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, 111,
+ 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, 117,
+ 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, 104,
+ 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, 110,
+ 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, 111,
+ 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 99,
+ 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99, 111,
+ 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 99,
+ 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111, 111,
+ 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112, 101,
+ 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105, 102,
+ 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102, 105,
+ 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110, 101,
+ 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101, 105,
+ 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105,
+ 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105, 101,
+ 100, 108, 98, 45, 99, 111, 115, 116, 45, 98, 105, 110, 108, 105, 110,
+ 107, 108, 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, 111,
+ 114, 119, 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, 116,
+ 104, 101, 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, 45,
+ 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, 97,
+ 110, 103, 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, 101,
+ 115, 104, 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, 101,
+ 114, 118, 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, 115,
+ 116, 114, 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, 116,
+ 45, 115, 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, 102,
+ 101, 114, 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, 121,
+ 118, 105, 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, 105,
+ 99, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101,
+ 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 103,
+ 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112,
+ 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116,
+ 101, 44, 103, 122, 105, 112};
static void static_ref(void *unused) {}
static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {}
@@ -209,6 +212,7 @@ grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
};
const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
@@ -243,193 +247,194 @@ const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
{.refcount = &grpc_static_metadata_refcounts[14],
.data.refcounted = {g_bytes + 158, 12}},
{.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 30}},
+ .data.refcounted = {g_bytes + 170, 16}},
{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 200, 10}},
+ .data.refcounted = {g_bytes + 186, 15}},
{.refcount = &grpc_static_metadata_refcounts[17],
- .data.refcounted = {g_bytes + 210, 4}},
+ .data.refcounted = {g_bytes + 201, 30}},
{.refcount = &grpc_static_metadata_refcounts[18],
- .data.refcounted = {g_bytes + 214, 8}},
+ .data.refcounted = {g_bytes + 231, 37}},
{.refcount = &grpc_static_metadata_refcounts[19],
- .data.refcounted = {g_bytes + 222, 12}},
+ .data.refcounted = {g_bytes + 268, 10}},
{.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}},
+ .data.refcounted = {g_bytes + 278, 4}},
{.refcount = &grpc_static_metadata_refcounts[21],
- .data.refcounted = {g_bytes + 234, 19}},
+ .data.refcounted = {g_bytes + 282, 8}},
{.refcount = &grpc_static_metadata_refcounts[22],
- .data.refcounted = {g_bytes + 253, 12}},
+ .data.refcounted = {g_bytes + 290, 12}},
{.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 265, 30}},
+ .data.refcounted = {g_bytes + 302, 0}},
{.refcount = &grpc_static_metadata_refcounts[24],
- .data.refcounted = {g_bytes + 295, 31}},
+ .data.refcounted = {g_bytes + 302, 19}},
{.refcount = &grpc_static_metadata_refcounts[25],
- .data.refcounted = {g_bytes + 326, 36}},
+ .data.refcounted = {g_bytes + 321, 12}},
{.refcount = &grpc_static_metadata_refcounts[26],
- .data.refcounted = {g_bytes + 362, 1}},
+ .data.refcounted = {g_bytes + 333, 30}},
{.refcount = &grpc_static_metadata_refcounts[27],
- .data.refcounted = {g_bytes + 363, 1}},
+ .data.refcounted = {g_bytes + 363, 31}},
{.refcount = &grpc_static_metadata_refcounts[28],
- .data.refcounted = {g_bytes + 364, 1}},
+ .data.refcounted = {g_bytes + 394, 36}},
{.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 365, 8}},
+ .data.refcounted = {g_bytes + 430, 1}},
{.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 373, 4}},
+ .data.refcounted = {g_bytes + 431, 1}},
{.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 377, 7}},
+ .data.refcounted = {g_bytes + 432, 1}},
{.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 384, 8}},
+ .data.refcounted = {g_bytes + 433, 8}},
{.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 392, 16}},
+ .data.refcounted = {g_bytes + 441, 4}},
{.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 408, 4}},
+ .data.refcounted = {g_bytes + 445, 7}},
{.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 412, 3}},
+ .data.refcounted = {g_bytes + 452, 8}},
{.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 415, 3}},
+ .data.refcounted = {g_bytes + 460, 16}},
{.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 418, 4}},
+ .data.refcounted = {g_bytes + 476, 4}},
{.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 422, 5}},
+ .data.refcounted = {g_bytes + 480, 3}},
{.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 427, 4}},
+ .data.refcounted = {g_bytes + 483, 3}},
{.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 431, 3}},
+ .data.refcounted = {g_bytes + 486, 4}},
{.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 434, 3}},
+ .data.refcounted = {g_bytes + 490, 5}},
{.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 437, 1}},
+ .data.refcounted = {g_bytes + 495, 4}},
{.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 438, 11}},
+ .data.refcounted = {g_bytes + 499, 3}},
{.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 449, 3}},
+ .data.refcounted = {g_bytes + 502, 3}},
{.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 452, 3}},
+ .data.refcounted = {g_bytes + 505, 1}},
{.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 455, 3}},
+ .data.refcounted = {g_bytes + 506, 11}},
{.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 458, 3}},
+ .data.refcounted = {g_bytes + 517, 3}},
{.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 461, 3}},
+ .data.refcounted = {g_bytes + 520, 3}},
{.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 464, 14}},
+ .data.refcounted = {g_bytes + 523, 3}},
{.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 478, 15}},
+ .data.refcounted = {g_bytes + 526, 3}},
{.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 493, 13}},
+ .data.refcounted = {g_bytes + 529, 3}},
{.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 506, 15}},
+ .data.refcounted = {g_bytes + 532, 14}},
{.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 521, 13}},
+ .data.refcounted = {g_bytes + 546, 13}},
{.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 534, 6}},
+ .data.refcounted = {g_bytes + 559, 15}},
{.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 540, 27}},
+ .data.refcounted = {g_bytes + 574, 13}},
{.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 567, 3}},
+ .data.refcounted = {g_bytes + 587, 6}},
{.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 570, 5}},
+ .data.refcounted = {g_bytes + 593, 27}},
{.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 575, 13}},
+ .data.refcounted = {g_bytes + 620, 3}},
{.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 588, 13}},
+ .data.refcounted = {g_bytes + 623, 5}},
{.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 601, 19}},
+ .data.refcounted = {g_bytes + 628, 13}},
{.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 620, 16}},
+ .data.refcounted = {g_bytes + 641, 13}},
{.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 636, 16}},
+ .data.refcounted = {g_bytes + 654, 19}},
{.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 652, 14}},
+ .data.refcounted = {g_bytes + 673, 16}},
{.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 666, 16}},
+ .data.refcounted = {g_bytes + 689, 14}},
{.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 682, 13}},
+ .data.refcounted = {g_bytes + 703, 16}},
{.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 695, 6}},
+ .data.refcounted = {g_bytes + 719, 13}},
{.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 701, 4}},
+ .data.refcounted = {g_bytes + 732, 6}},
{.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 705, 4}},
+ .data.refcounted = {g_bytes + 738, 4}},
{.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 709, 6}},
+ .data.refcounted = {g_bytes + 742, 4}},
{.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 715, 7}},
+ .data.refcounted = {g_bytes + 746, 6}},
{.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 722, 4}},
+ .data.refcounted = {g_bytes + 752, 7}},
{.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 726, 8}},
+ .data.refcounted = {g_bytes + 759, 4}},
{.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 734, 17}},
+ .data.refcounted = {g_bytes + 763, 8}},
{.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 751, 13}},
+ .data.refcounted = {g_bytes + 771, 17}},
{.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 764, 8}},
+ .data.refcounted = {g_bytes + 788, 13}},
{.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 772, 19}},
+ .data.refcounted = {g_bytes + 801, 8}},
{.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 791, 13}},
+ .data.refcounted = {g_bytes + 809, 19}},
{.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 804, 11}},
+ .data.refcounted = {g_bytes + 828, 13}},
{.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 815, 4}},
+ .data.refcounted = {g_bytes + 841, 11}},
{.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 819, 8}},
+ .data.refcounted = {g_bytes + 852, 4}},
{.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 827, 12}},
+ .data.refcounted = {g_bytes + 856, 8}},
{.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 839, 18}},
+ .data.refcounted = {g_bytes + 864, 12}},
{.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 857, 19}},
+ .data.refcounted = {g_bytes + 876, 18}},
{.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 876, 5}},
+ .data.refcounted = {g_bytes + 894, 19}},
{.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 881, 7}},
+ .data.refcounted = {g_bytes + 913, 5}},
{.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 888, 7}},
+ .data.refcounted = {g_bytes + 918, 7}},
{.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 895, 11}},
+ .data.refcounted = {g_bytes + 925, 7}},
{.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 906, 6}},
+ .data.refcounted = {g_bytes + 932, 11}},
{.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 912, 10}},
+ .data.refcounted = {g_bytes + 943, 6}},
{.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 922, 25}},
+ .data.refcounted = {g_bytes + 949, 10}},
{.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 947, 17}},
+ .data.refcounted = {g_bytes + 959, 25}},
{.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 964, 4}},
+ .data.refcounted = {g_bytes + 984, 17}},
{.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 968, 3}},
+ .data.refcounted = {g_bytes + 1001, 4}},
{.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 971, 16}},
+ .data.refcounted = {g_bytes + 1005, 3}},
{.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 987, 16}},
+ .data.refcounted = {g_bytes + 1008, 16}},
{.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 1003, 13}},
+ .data.refcounted = {g_bytes + 1024, 16}},
{.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1016, 12}},
+ .data.refcounted = {g_bytes + 1040, 13}},
{.refcount = &grpc_static_metadata_refcounts[98],
- .data.refcounted = {g_bytes + 1028, 21}},
+ .data.refcounted = {g_bytes + 1053, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[99],
+ .data.refcounted = {g_bytes + 1065, 21}},
};
uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8};
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4};
static const int8_t elems_r[] = {
- 10, 8, -3, 0, 9, 21, -77, 22, 0, 10, -7, 0, 0, 0,
- 14, 0, 13, 12, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, -50, -51, 16, -53, -54, -55, -56,
- -56, -57, -58, -59, 0, 37, 36, 35, 34, 33, 32, 31, 30, 29,
- 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
- 14, 13, 12, 11, 10, 13, 12, 11, 10, 9, 8, 7, 0};
+ 11, 9, -3, 0, 10, 27, -74, 28, 0, 14, -7, 0, 0, 0, 18, 8, -2,
+ 0, 0, 13, 12, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, -50, 0, -33, -55, -56, -57, -58, -57, 0, 40, 39, 38, 37, 36, 35, 34,
+ 33, 32, 31, 30, 29, 28, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 22,
+ 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 12, 11, 0};
static uint32_t elems_phash(uint32_t i) {
- i -= 42;
- uint32_t x = i % 97;
- uint32_t y = i / 97;
+ i -= 45;
+ uint32_t x = i % 98;
+ uint32_t y = i / 98;
uint32_t h = x;
if (y < GPR_ARRAY_SIZE(elems_r)) {
uint32_t delta = (uint32_t)elems_r[y];
@@ -439,30 +444,31 @@ static uint32_t elems_phash(uint32_t i) {
}
static const uint16_t elem_keys[] = {
- 1019, 1020, 1021, 242, 243, 244, 245, 246, 139, 140, 42, 43,
- 433, 434, 435, 920, 921, 922, 719, 720, 1406, 527, 721, 1604,
- 1703, 1802, 4871, 4970, 5001, 5168, 5267, 5366, 5465, 1419, 5564, 5663,
- 5762, 5861, 5960, 6059, 6158, 6257, 6356, 6455, 6554, 6653, 6752, 6851,
- 6950, 7049, 7148, 7247, 7346, 7445, 7544, 7643, 7742, 7841, 7940, 8039,
- 8138, 8237, 8336, 8435, 8534, 8633, 1085, 1086, 1087, 1088, 8732, 8831,
- 8930, 9029, 9128, 9227, 9326, 0, 317, 0, 0, 0, 0, 0,
+ 1032, 1033, 1034, 247, 248, 249, 250, 251, 1623, 143, 144, 45,
+ 46, 440, 441, 442, 1523, 1632, 1633, 932, 933, 934, 729, 730,
+ 1423, 1532, 1533, 535, 731, 1923, 2023, 2123, 5223, 5523, 5623, 5723,
+ 5823, 1436, 1653, 5923, 6023, 6123, 6223, 6323, 6423, 6523, 6623, 6723,
+ 6823, 6923, 7023, 7123, 7223, 5423, 7323, 7423, 7523, 7623, 7723, 7823,
+ 7923, 8023, 8123, 8223, 1096, 1097, 1098, 1099, 8323, 8423, 8523, 8623,
+ 8723, 8823, 8923, 9023, 9123, 9223, 9323, 323, 9423, 9523, 1697, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 133, 233, 234, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 137, 238, 239, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0};
+ 0, 0, 0, 0, 0};
static const uint8_t elem_idxs[] = {
- 74, 77, 75, 19, 20, 21, 22, 23, 15, 16, 17, 18, 11, 12, 13,
- 3, 4, 5, 0, 1, 41, 6, 2, 70, 48, 55, 24, 25, 26, 27,
- 28, 29, 30, 7, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42,
- 43, 44, 45, 46, 47, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59,
- 60, 61, 62, 63, 64, 65, 76, 78, 79, 80, 66, 67, 68, 69, 71,
- 72, 73, 255, 14, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 8, 9, 10};
+ 76, 79, 77, 19, 20, 21, 22, 23, 25, 15, 16, 17, 18, 11,
+ 12, 13, 38, 83, 84, 3, 4, 5, 0, 1, 43, 36, 37, 6,
+ 2, 72, 50, 57, 24, 28, 29, 30, 31, 7, 26, 32, 33, 34,
+ 35, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 27, 51, 52,
+ 53, 54, 55, 56, 58, 59, 60, 61, 78, 80, 81, 82, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 73, 14, 74, 75, 85, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 8, 9, 10};
grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
if (a == -1 || b == -1) return GRPC_MDNULL;
- uint32_t k = (uint32_t)(a * 99 + b);
+ uint32_t k = (uint32_t)(a * 100 + b);
uint32_t h = elems_phash(k);
return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k &&
elem_idxs[h] != 255
@@ -474,328 +480,350 @@ grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
{{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[26],
- .data.refcounted = {g_bytes + 362, 1}}},
+ {.refcount = &grpc_static_metadata_refcounts[29],
+ .data.refcounted = {g_bytes + 430, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[27],
- .data.refcounted = {g_bytes + 363, 1}}},
+ {.refcount = &grpc_static_metadata_refcounts[30],
+ .data.refcounted = {g_bytes + 431, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[28],
- .data.refcounted = {g_bytes + 364, 1}}},
+ {.refcount = &grpc_static_metadata_refcounts[31],
+ .data.refcounted = {g_bytes + 432, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[9],
.data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 365, 8}}},
+ {.refcount = &grpc_static_metadata_refcounts[32],
+ .data.refcounted = {g_bytes + 433, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[9],
.data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 373, 4}}},
+ {.refcount = &grpc_static_metadata_refcounts[33],
+ .data.refcounted = {g_bytes + 441, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[9],
.data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 377, 7}}},
+ {.refcount = &grpc_static_metadata_refcounts[34],
+ .data.refcounted = {g_bytes + 445, 7}}},
{{.refcount = &grpc_static_metadata_refcounts[5],
.data.refcounted = {g_bytes + 36, 2}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 384, 8}}},
+ {.refcount = &grpc_static_metadata_refcounts[35],
+ .data.refcounted = {g_bytes + 452, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[14],
.data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 392, 16}}},
+ {.refcount = &grpc_static_metadata_refcounts[36],
+ .data.refcounted = {g_bytes + 460, 16}}},
{{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 408, 4}}},
+ {.refcount = &grpc_static_metadata_refcounts[37],
+ .data.refcounted = {g_bytes + 476, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 412, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[38],
+ .data.refcounted = {g_bytes + 480, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 415, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[39],
+ .data.refcounted = {g_bytes + 483, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[4],
.data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 418, 4}}},
+ {.refcount = &grpc_static_metadata_refcounts[40],
+ .data.refcounted = {g_bytes + 486, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[4],
.data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 422, 5}}},
+ {.refcount = &grpc_static_metadata_refcounts[41],
+ .data.refcounted = {g_bytes + 490, 5}}},
{{.refcount = &grpc_static_metadata_refcounts[4],
.data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 427, 4}}},
+ {.refcount = &grpc_static_metadata_refcounts[42],
+ .data.refcounted = {g_bytes + 495, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[3],
.data.refcounted = {g_bytes + 19, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 431, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[43],
+ .data.refcounted = {g_bytes + 499, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 434, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[44],
+ .data.refcounted = {g_bytes + 502, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[0],
.data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 437, 1}}},
+ {.refcount = &grpc_static_metadata_refcounts[45],
+ .data.refcounted = {g_bytes + 505, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[0],
.data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 438, 11}}},
+ {.refcount = &grpc_static_metadata_refcounts[46],
+ .data.refcounted = {g_bytes + 506, 11}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 449, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[47],
+ .data.refcounted = {g_bytes + 517, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 452, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[48],
+ .data.refcounted = {g_bytes + 520, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 455, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[49],
+ .data.refcounted = {g_bytes + 523, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 458, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[50],
+ .data.refcounted = {g_bytes + 526, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 461, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 464, 14}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 478, 15}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 478, 15}},
{.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 493, 13}}},
+ .data.refcounted = {g_bytes + 529, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 506, 15}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 521, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 532, 14}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[16],
+ .data.refcounted = {g_bytes + 186, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[16],
+ .data.refcounted = {g_bytes + 186, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[53],
+ .data.refcounted = {g_bytes + 546, 13}}},
{{.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 534, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 559, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 540, 27}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 574, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 567, 3}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 587, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 570, 5}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 593, 27}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 575, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 620, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 588, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 623, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 601, 19}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 628, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 620, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 641, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 636, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 654, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[15],
+ .data.refcounted = {g_bytes + 170, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[32],
+ .data.refcounted = {g_bytes + 433, 8}}},
+ {{.refcount = &grpc_static_metadata_refcounts[15],
+ .data.refcounted = {g_bytes + 170, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[33],
+ .data.refcounted = {g_bytes + 441, 4}}},
+ {{.refcount = &grpc_static_metadata_refcounts[15],
+ .data.refcounted = {g_bytes + 170, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 652, 14}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 673, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 666, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 689, 14}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 682, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 703, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[66],
+ .data.refcounted = {g_bytes + 719, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[14],
.data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 695, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 701, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 732, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 705, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 738, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 709, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 742, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 715, 7}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 746, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 722, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[17],
- .data.refcounted = {g_bytes + 210, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 752, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 726, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 759, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[20],
+ .data.refcounted = {g_bytes + 278, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 734, 17}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 763, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 751, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 771, 17}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 764, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 788, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 772, 19}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 801, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 791, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[18],
- .data.refcounted = {g_bytes + 214, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 809, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 804, 11}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 828, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[21],
+ .data.refcounted = {g_bytes + 282, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 815, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 841, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 819, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 852, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 827, 12}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 856, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 839, 18}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 864, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 857, 19}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 876, 18}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 876, 5}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 894, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 881, 7}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 913, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 888, 7}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 918, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 895, 11}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 925, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 906, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 932, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 912, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 943, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 922, 25}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 949, 10}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 947, 17}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 200, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 959, 25}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 964, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 984, 17}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 268, 10}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 968, 3}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
+ .data.refcounted = {g_bytes + 1001, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 971, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 234, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 365, 8}}},
+ .data.refcounted = {g_bytes + 1005, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[95],
+ .data.refcounted = {g_bytes + 1008, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[23],
+ .data.refcounted = {g_bytes + 302, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 377, 7}}},
+ {.refcount = &grpc_static_metadata_refcounts[32],
+ .data.refcounted = {g_bytes + 433, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 987, 16}}},
+ {.refcount = &grpc_static_metadata_refcounts[34],
+ .data.refcounted = {g_bytes + 445, 7}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 373, 4}}},
+ {.refcount = &grpc_static_metadata_refcounts[96],
+ .data.refcounted = {g_bytes + 1024, 16}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 1003, 13}}},
+ {.refcount = &grpc_static_metadata_refcounts[33],
+ .data.refcounted = {g_bytes + 441, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1016, 12}}},
+ .data.refcounted = {g_bytes + 1040, 13}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
{.refcount = &grpc_static_metadata_refcounts[98],
- .data.refcounted = {g_bytes + 1028, 21}}},
+ .data.refcounted = {g_bytes + 1053, 12}}},
+ {{.refcount = &grpc_static_metadata_refcounts[10],
+ .data.refcounted = {g_bytes + 90, 20}},
+ {.refcount = &grpc_static_metadata_refcounts[99],
+ .data.refcounted = {g_bytes + 1065, 21}}},
+ {{.refcount = &grpc_static_metadata_refcounts[16],
+ .data.refcounted = {g_bytes + 186, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[32],
+ .data.refcounted = {g_bytes + 433, 8}}},
+ {{.refcount = &grpc_static_metadata_refcounts[16],
+ .data.refcounted = {g_bytes + 186, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[33],
+ .data.refcounted = {g_bytes + 441, 4}}},
+ {{.refcount = &grpc_static_metadata_refcounts[16],
+ .data.refcounted = {g_bytes + 186, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[97],
+ .data.refcounted = {g_bytes + 1040, 13}}},
};
-const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 74, 75, 76,
- 77, 78, 79, 80};
+const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78,
+ 79, 80, 81, 82};
+
+const uint8_t grpc_static_accept_stream_encoding_metadata[4] = {0, 83, 84, 85};
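The two index tables above are how the generated code avoids metadata interning on the hot path: a bitmask of accepted algorithms indexes straight into grpc_static_mdelem_table. A minimal sketch of consuming the new stream-encoding table via the GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS macro added in static_metadata.h below, assuming bit 0 of the mask stands for identity and bit 1 for gzip (the bit assignment is not spelled out in this diff):

  /* Sketch only; relies on static_metadata.h and metadata.h. */
  uint32_t accepted_stream_algs = (1u << 0) | (1u << 1); /* identity | gzip */
  /* Mask 0x3 selects entry 3 of grpc_static_accept_stream_encoding_metadata,
     i.e. mdelem 85: "accept-encoding: identity,gzip". */
  grpc_mdelem md =
      GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(accepted_stream_algs);
  /* Static storage, so the usual GRPC_MDELEM_UNREF is effectively free here. */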
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index baa86de142..93ab90dff8 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -29,7 +29,7 @@
#include "src/core/lib/transport/metadata.h"
-#define GRPC_STATIC_MDSTR_COUNT 99
+#define GRPC_STATIC_MDSTR_COUNT 100
extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
/* ":path" */
#define GRPC_MDSTR_PATH (grpc_static_slice_table[0])
@@ -61,178 +61,181 @@ extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
#define GRPC_MDSTR_GRPC_TRACE_BIN (grpc_static_slice_table[13])
/* "content-type" */
#define GRPC_MDSTR_CONTENT_TYPE (grpc_static_slice_table[14])
+/* "content-encoding" */
+#define GRPC_MDSTR_CONTENT_ENCODING (grpc_static_slice_table[15])
+/* "accept-encoding" */
+#define GRPC_MDSTR_ACCEPT_ENCODING (grpc_static_slice_table[16])
/* "grpc-internal-encoding-request" */
-#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (grpc_static_slice_table[15])
+#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (grpc_static_slice_table[17])
+/* "grpc-internal-stream-encoding-request" */
+#define GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST \
+ (grpc_static_slice_table[18])
/* "user-agent" */
-#define GRPC_MDSTR_USER_AGENT (grpc_static_slice_table[16])
+#define GRPC_MDSTR_USER_AGENT (grpc_static_slice_table[19])
/* "host" */
-#define GRPC_MDSTR_HOST (grpc_static_slice_table[17])
+#define GRPC_MDSTR_HOST (grpc_static_slice_table[20])
/* "lb-token" */
-#define GRPC_MDSTR_LB_TOKEN (grpc_static_slice_table[18])
+#define GRPC_MDSTR_LB_TOKEN (grpc_static_slice_table[21])
/* "grpc-timeout" */
-#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[19])
+#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[22])
/* "" */
-#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[20])
+#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[23])
/* "grpc.wait_for_ready" */
-#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[21])
+#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[24])
/* "grpc.timeout" */
-#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[22])
+#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[25])
/* "grpc.max_request_message_bytes" */
#define GRPC_MDSTR_GRPC_DOT_MAX_REQUEST_MESSAGE_BYTES \
- (grpc_static_slice_table[23])
+ (grpc_static_slice_table[26])
/* "grpc.max_response_message_bytes" */
#define GRPC_MDSTR_GRPC_DOT_MAX_RESPONSE_MESSAGE_BYTES \
- (grpc_static_slice_table[24])
+ (grpc_static_slice_table[27])
/* "/grpc.lb.v1.LoadBalancer/BalanceLoad" */
#define GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD \
- (grpc_static_slice_table[25])
+ (grpc_static_slice_table[28])
/* "0" */
-#define GRPC_MDSTR_0 (grpc_static_slice_table[26])
+#define GRPC_MDSTR_0 (grpc_static_slice_table[29])
/* "1" */
-#define GRPC_MDSTR_1 (grpc_static_slice_table[27])
+#define GRPC_MDSTR_1 (grpc_static_slice_table[30])
/* "2" */
-#define GRPC_MDSTR_2 (grpc_static_slice_table[28])
+#define GRPC_MDSTR_2 (grpc_static_slice_table[31])
/* "identity" */
-#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[29])
+#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[32])
/* "gzip" */
-#define GRPC_MDSTR_GZIP (grpc_static_slice_table[30])
+#define GRPC_MDSTR_GZIP (grpc_static_slice_table[33])
/* "deflate" */
-#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[31])
+#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[34])
/* "trailers" */
-#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[32])
+#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[35])
/* "application/grpc" */
-#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[33])
+#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[36])
/* "POST" */
-#define GRPC_MDSTR_POST (grpc_static_slice_table[34])
+#define GRPC_MDSTR_POST (grpc_static_slice_table[37])
/* "200" */
-#define GRPC_MDSTR_200 (grpc_static_slice_table[35])
+#define GRPC_MDSTR_200 (grpc_static_slice_table[38])
/* "404" */
-#define GRPC_MDSTR_404 (grpc_static_slice_table[36])
+#define GRPC_MDSTR_404 (grpc_static_slice_table[39])
/* "http" */
-#define GRPC_MDSTR_HTTP (grpc_static_slice_table[37])
+#define GRPC_MDSTR_HTTP (grpc_static_slice_table[40])
/* "https" */
-#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[38])
+#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[41])
/* "grpc" */
-#define GRPC_MDSTR_GRPC (grpc_static_slice_table[39])
+#define GRPC_MDSTR_GRPC (grpc_static_slice_table[42])
/* "GET" */
-#define GRPC_MDSTR_GET (grpc_static_slice_table[40])
+#define GRPC_MDSTR_GET (grpc_static_slice_table[43])
/* "PUT" */
-#define GRPC_MDSTR_PUT (grpc_static_slice_table[41])
+#define GRPC_MDSTR_PUT (grpc_static_slice_table[44])
/* "/" */
-#define GRPC_MDSTR_SLASH (grpc_static_slice_table[42])
+#define GRPC_MDSTR_SLASH (grpc_static_slice_table[45])
/* "/index.html" */
-#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[43])
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[46])
/* "204" */
-#define GRPC_MDSTR_204 (grpc_static_slice_table[44])
+#define GRPC_MDSTR_204 (grpc_static_slice_table[47])
/* "206" */
-#define GRPC_MDSTR_206 (grpc_static_slice_table[45])
+#define GRPC_MDSTR_206 (grpc_static_slice_table[48])
/* "304" */
-#define GRPC_MDSTR_304 (grpc_static_slice_table[46])
+#define GRPC_MDSTR_304 (grpc_static_slice_table[49])
/* "400" */
-#define GRPC_MDSTR_400 (grpc_static_slice_table[47])
+#define GRPC_MDSTR_400 (grpc_static_slice_table[50])
/* "500" */
-#define GRPC_MDSTR_500 (grpc_static_slice_table[48])
+#define GRPC_MDSTR_500 (grpc_static_slice_table[51])
/* "accept-charset" */
-#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[49])
-/* "accept-encoding" */
-#define GRPC_MDSTR_ACCEPT_ENCODING (grpc_static_slice_table[50])
+#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[52])
/* "gzip, deflate" */
-#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[51])
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[53])
/* "accept-language" */
-#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[52])
+#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[54])
/* "accept-ranges" */
-#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[53])
+#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[55])
/* "accept" */
-#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[54])
+#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[56])
/* "access-control-allow-origin" */
-#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[55])
+#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[57])
/* "age" */
-#define GRPC_MDSTR_AGE (grpc_static_slice_table[56])
+#define GRPC_MDSTR_AGE (grpc_static_slice_table[58])
/* "allow" */
-#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[57])
+#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[59])
/* "authorization" */
-#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[58])
+#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[60])
/* "cache-control" */
-#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[59])
+#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[61])
/* "content-disposition" */
-#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[60])
-/* "content-encoding" */
-#define GRPC_MDSTR_CONTENT_ENCODING (grpc_static_slice_table[61])
+#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[62])
/* "content-language" */
-#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[62])
+#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[63])
/* "content-length" */
-#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[63])
+#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[64])
/* "content-location" */
-#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[64])
+#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[65])
/* "content-range" */
-#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[65])
+#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[66])
/* "cookie" */
-#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[66])
+#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[67])
/* "date" */
-#define GRPC_MDSTR_DATE (grpc_static_slice_table[67])
+#define GRPC_MDSTR_DATE (grpc_static_slice_table[68])
/* "etag" */
-#define GRPC_MDSTR_ETAG (grpc_static_slice_table[68])
+#define GRPC_MDSTR_ETAG (grpc_static_slice_table[69])
/* "expect" */
-#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[69])
+#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[70])
/* "expires" */
-#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[70])
+#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[71])
/* "from" */
-#define GRPC_MDSTR_FROM (grpc_static_slice_table[71])
+#define GRPC_MDSTR_FROM (grpc_static_slice_table[72])
/* "if-match" */
-#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[72])
+#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[73])
/* "if-modified-since" */
-#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[73])
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[74])
/* "if-none-match" */
-#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[74])
+#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[75])
/* "if-range" */
-#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[75])
+#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[76])
/* "if-unmodified-since" */
-#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[76])
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[77])
/* "last-modified" */
-#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[77])
+#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[78])
/* "lb-cost-bin" */
-#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[78])
+#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[79])
/* "link" */
-#define GRPC_MDSTR_LINK (grpc_static_slice_table[79])
+#define GRPC_MDSTR_LINK (grpc_static_slice_table[80])
/* "location" */
-#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[80])
+#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[81])
/* "max-forwards" */
-#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[81])
+#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[82])
/* "proxy-authenticate" */
-#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[82])
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[83])
/* "proxy-authorization" */
-#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[83])
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[84])
/* "range" */
-#define GRPC_MDSTR_RANGE (grpc_static_slice_table[84])
+#define GRPC_MDSTR_RANGE (grpc_static_slice_table[85])
/* "referer" */
-#define GRPC_MDSTR_REFERER (grpc_static_slice_table[85])
+#define GRPC_MDSTR_REFERER (grpc_static_slice_table[86])
/* "refresh" */
-#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[86])
+#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[87])
/* "retry-after" */
-#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[87])
+#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[88])
/* "server" */
-#define GRPC_MDSTR_SERVER (grpc_static_slice_table[88])
+#define GRPC_MDSTR_SERVER (grpc_static_slice_table[89])
/* "set-cookie" */
-#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[89])
+#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[90])
/* "strict-transport-security" */
-#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[90])
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[91])
/* "transfer-encoding" */
-#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[91])
+#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[92])
/* "vary" */
-#define GRPC_MDSTR_VARY (grpc_static_slice_table[92])
+#define GRPC_MDSTR_VARY (grpc_static_slice_table[93])
/* "via" */
-#define GRPC_MDSTR_VIA (grpc_static_slice_table[93])
+#define GRPC_MDSTR_VIA (grpc_static_slice_table[94])
/* "www-authenticate" */
-#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[94])
+#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[95])
/* "identity,deflate" */
-#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[95])
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[96])
/* "identity,gzip" */
-#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[96])
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[97])
/* "deflate,gzip" */
-#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[97])
+#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[98])
/* "identity,deflate,gzip" */
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (grpc_static_slice_table[98])
+ (grpc_static_slice_table[99])
extern const grpc_slice_refcount_vtable grpc_static_metadata_vtable;
extern grpc_slice_refcount
@@ -244,7 +247,7 @@ extern grpc_slice_refcount
#define GRPC_STATIC_METADATA_INDEX(static_slice) \
((int)((static_slice).refcount - grpc_static_metadata_refcounts))
-#define GRPC_STATIC_MDELEM_COUNT 81
+#define GRPC_STATIC_MDELEM_COUNT 86
extern grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
/* "grpc-status": "0" */
@@ -355,141 +358,156 @@ extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
/* "content-disposition": "" */
#define GRPC_MDELEM_CONTENT_DISPOSITION_EMPTY \
(GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[35], GRPC_MDELEM_STORAGE_STATIC))
+/* "content-encoding": "identity" */
+#define GRPC_MDELEM_CONTENT_ENCODING_IDENTITY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[36], GRPC_MDELEM_STORAGE_STATIC))
+/* "content-encoding": "gzip" */
+#define GRPC_MDELEM_CONTENT_ENCODING_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[37], GRPC_MDELEM_STORAGE_STATIC))
/* "content-encoding": "" */
#define GRPC_MDELEM_CONTENT_ENCODING_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[36], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[38], GRPC_MDELEM_STORAGE_STATIC))
/* "content-language": "" */
#define GRPC_MDELEM_CONTENT_LANGUAGE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[37], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[39], GRPC_MDELEM_STORAGE_STATIC))
/* "content-length": "" */
#define GRPC_MDELEM_CONTENT_LENGTH_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[38], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[40], GRPC_MDELEM_STORAGE_STATIC))
/* "content-location": "" */
#define GRPC_MDELEM_CONTENT_LOCATION_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[39], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[41], GRPC_MDELEM_STORAGE_STATIC))
/* "content-range": "" */
#define GRPC_MDELEM_CONTENT_RANGE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[40], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[42], GRPC_MDELEM_STORAGE_STATIC))
/* "content-type": "" */
#define GRPC_MDELEM_CONTENT_TYPE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[41], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[43], GRPC_MDELEM_STORAGE_STATIC))
/* "cookie": "" */
#define GRPC_MDELEM_COOKIE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[42], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[44], GRPC_MDELEM_STORAGE_STATIC))
/* "date": "" */
#define GRPC_MDELEM_DATE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[43], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[45], GRPC_MDELEM_STORAGE_STATIC))
/* "etag": "" */
#define GRPC_MDELEM_ETAG_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[44], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[46], GRPC_MDELEM_STORAGE_STATIC))
/* "expect": "" */
#define GRPC_MDELEM_EXPECT_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[45], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[47], GRPC_MDELEM_STORAGE_STATIC))
/* "expires": "" */
#define GRPC_MDELEM_EXPIRES_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[46], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[48], GRPC_MDELEM_STORAGE_STATIC))
/* "from": "" */
#define GRPC_MDELEM_FROM_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[47], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[49], GRPC_MDELEM_STORAGE_STATIC))
/* "host": "" */
#define GRPC_MDELEM_HOST_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[48], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[50], GRPC_MDELEM_STORAGE_STATIC))
/* "if-match": "" */
#define GRPC_MDELEM_IF_MATCH_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[49], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[51], GRPC_MDELEM_STORAGE_STATIC))
/* "if-modified-since": "" */
#define GRPC_MDELEM_IF_MODIFIED_SINCE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[50], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[52], GRPC_MDELEM_STORAGE_STATIC))
/* "if-none-match": "" */
#define GRPC_MDELEM_IF_NONE_MATCH_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[51], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[53], GRPC_MDELEM_STORAGE_STATIC))
/* "if-range": "" */
#define GRPC_MDELEM_IF_RANGE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[52], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[54], GRPC_MDELEM_STORAGE_STATIC))
/* "if-unmodified-since": "" */
#define GRPC_MDELEM_IF_UNMODIFIED_SINCE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[53], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[55], GRPC_MDELEM_STORAGE_STATIC))
/* "last-modified": "" */
#define GRPC_MDELEM_LAST_MODIFIED_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[54], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[56], GRPC_MDELEM_STORAGE_STATIC))
/* "lb-token": "" */
#define GRPC_MDELEM_LB_TOKEN_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[55], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[57], GRPC_MDELEM_STORAGE_STATIC))
/* "lb-cost-bin": "" */
#define GRPC_MDELEM_LB_COST_BIN_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[56], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[58], GRPC_MDELEM_STORAGE_STATIC))
/* "link": "" */
#define GRPC_MDELEM_LINK_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[57], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[59], GRPC_MDELEM_STORAGE_STATIC))
/* "location": "" */
#define GRPC_MDELEM_LOCATION_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[58], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[60], GRPC_MDELEM_STORAGE_STATIC))
/* "max-forwards": "" */
#define GRPC_MDELEM_MAX_FORWARDS_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[59], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[61], GRPC_MDELEM_STORAGE_STATIC))
/* "proxy-authenticate": "" */
#define GRPC_MDELEM_PROXY_AUTHENTICATE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[60], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[62], GRPC_MDELEM_STORAGE_STATIC))
/* "proxy-authorization": "" */
#define GRPC_MDELEM_PROXY_AUTHORIZATION_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[61], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[63], GRPC_MDELEM_STORAGE_STATIC))
/* "range": "" */
#define GRPC_MDELEM_RANGE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[62], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[64], GRPC_MDELEM_STORAGE_STATIC))
/* "referer": "" */
#define GRPC_MDELEM_REFERER_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[63], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[65], GRPC_MDELEM_STORAGE_STATIC))
/* "refresh": "" */
#define GRPC_MDELEM_REFRESH_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[64], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[66], GRPC_MDELEM_STORAGE_STATIC))
/* "retry-after": "" */
#define GRPC_MDELEM_RETRY_AFTER_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[65], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[67], GRPC_MDELEM_STORAGE_STATIC))
/* "server": "" */
#define GRPC_MDELEM_SERVER_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[66], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[68], GRPC_MDELEM_STORAGE_STATIC))
/* "set-cookie": "" */
#define GRPC_MDELEM_SET_COOKIE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[67], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[69], GRPC_MDELEM_STORAGE_STATIC))
/* "strict-transport-security": "" */
#define GRPC_MDELEM_STRICT_TRANSPORT_SECURITY_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[68], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[70], GRPC_MDELEM_STORAGE_STATIC))
/* "transfer-encoding": "" */
#define GRPC_MDELEM_TRANSFER_ENCODING_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[69], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[71], GRPC_MDELEM_STORAGE_STATIC))
/* "user-agent": "" */
#define GRPC_MDELEM_USER_AGENT_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[70], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[72], GRPC_MDELEM_STORAGE_STATIC))
/* "vary": "" */
#define GRPC_MDELEM_VARY_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[71], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[73], GRPC_MDELEM_STORAGE_STATIC))
/* "via": "" */
#define GRPC_MDELEM_VIA_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[72], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[74], GRPC_MDELEM_STORAGE_STATIC))
/* "www-authenticate": "" */
#define GRPC_MDELEM_WWW_AUTHENTICATE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[73], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[75], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[74], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[76], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "deflate" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[75], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[77], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity,deflate" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[76], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[78], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[77], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[79], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity,gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[78], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[80], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "deflate,gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE_COMMA_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[79], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[81], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity,deflate,gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[80], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[82], GRPC_MDELEM_STORAGE_STATIC))
+/* "accept-encoding": "identity" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_IDENTITY \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[83], GRPC_MDELEM_STORAGE_STATIC))
+/* "accept-encoding": "gzip" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[84], GRPC_MDELEM_STORAGE_STATIC))
+/* "accept-encoding": "identity,gzip" */
+#define GRPC_MDELEM_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[85], GRPC_MDELEM_STORAGE_STATIC))
grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b);
typedef enum {
@@ -508,7 +526,10 @@ typedef enum {
GRPC_BATCH_GRPC_TAGS_BIN,
GRPC_BATCH_GRPC_TRACE_BIN,
GRPC_BATCH_CONTENT_TYPE,
+ GRPC_BATCH_CONTENT_ENCODING,
+ GRPC_BATCH_ACCEPT_ENCODING,
GRPC_BATCH_GRPC_INTERNAL_ENCODING_REQUEST,
+ GRPC_BATCH_GRPC_INTERNAL_STREAM_ENCODING_REQUEST,
GRPC_BATCH_USER_AGENT,
GRPC_BATCH_HOST,
GRPC_BATCH_LB_TOKEN,
@@ -533,7 +554,10 @@ typedef union {
struct grpc_linked_mdelem *grpc_tags_bin;
struct grpc_linked_mdelem *grpc_trace_bin;
struct grpc_linked_mdelem *content_type;
+ struct grpc_linked_mdelem *content_encoding;
+ struct grpc_linked_mdelem *accept_encoding;
struct grpc_linked_mdelem *grpc_internal_encoding_request;
+ struct grpc_linked_mdelem *grpc_internal_stream_encoding_request;
struct grpc_linked_mdelem *user_agent;
struct grpc_linked_mdelem *host;
struct grpc_linked_mdelem *lb_token;
@@ -552,4 +576,10 @@ extern const uint8_t grpc_static_accept_encoding_metadata[8];
(GRPC_MAKE_MDELEM( \
&grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]], \
GRPC_MDELEM_STORAGE_STATIC))
+
+extern const uint8_t grpc_static_accept_stream_encoding_metadata[4];
+#define GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(algs) \
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table \
+ [grpc_static_accept_stream_encoding_metadata[(algs)]], \
+ GRPC_MDELEM_STORAGE_STATIC))
#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */
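The new content-encoding elements and the grpc_internal_stream_encoding_request callout are the pieces a stream-compression filter would key off. A rough, hypothetical sketch (not the actual message_compress filter code), assuming the 2017-era metadata_batch.h helpers with their exec_ctx parameter and a caller-owned grpc_linked_mdelem slot:

  /* Sketch: if the internal stream-encoding request header is present,
     strip it and advertise gzip on the wire instead. */
  static grpc_error *maybe_enable_stream_gzip(grpc_exec_ctx *exec_ctx,
                                              grpc_metadata_batch *initial_md,
                                              grpc_linked_mdelem *storage) {
    if (initial_md->idx.named.grpc_internal_stream_encoding_request == NULL) {
      return GRPC_ERROR_NONE; /* nothing requested */
    }
    grpc_metadata_batch_remove(
        exec_ctx, initial_md,
        initial_md->idx.named.grpc_internal_stream_encoding_request);
    return grpc_metadata_batch_add_tail(exec_ctx, initial_md, storage,
                                        GRPC_MDELEM_CONTENT_ENCODING_GZIP);
  }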
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 6c61f4b8d9..650b0559aa 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -197,11 +197,6 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
then_schedule_closure);
}
-char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport) {
- return transport->vtable->get_peer(exec_ctx, transport);
-}
-
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport) {
return transport->vtable->get_endpoint(exec_ctx, transport);
@@ -214,24 +209,24 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
// is a function that must always unref cancel_error
// though it lives in lib, it handles transport stream ops sure
// it's grpc_transport_stream_op_batch_finish_with_failure
-
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch,
- grpc_error *error) {
+ grpc_error *error, grpc_call_combiner *call_combiner) {
if (batch->send_message) {
grpc_byte_stream_destroy(exec_ctx,
batch->payload->send_message.send_message);
}
if (batch->recv_message) {
- GRPC_CLOSURE_SCHED(exec_ctx,
- batch->payload->recv_message.recv_message_ready,
- GRPC_ERROR_REF(error));
+ GRPC_CALL_COMBINER_START(exec_ctx, call_combiner,
+ batch->payload->recv_message.recv_message_ready,
+ GRPC_ERROR_REF(error),
+ "failing recv_message_ready");
}
if (batch->recv_initial_metadata) {
- GRPC_CLOSURE_SCHED(
- exec_ctx,
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, call_combiner,
batch->payload->recv_initial_metadata.recv_initial_metadata_ready,
- GRPC_ERROR_REF(error));
+ GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready");
}
GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error);
if (batch->cancel_stream) {
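The extra call_combiner argument changes how a batch is failed: the recv callbacks are now re-entered through the call combiner instead of being scheduled directly, consistent with the call-combiner plumbing added elsewhere in this commit. A minimal caller-side sketch, assuming a filter whose (hypothetical) call_data holds the call's grpc_call_combiner*:

  /* Sketch: fail a batch that can no longer be sent down the stack. */
  static void fail_batch(grpc_exec_ctx *exec_ctx, call_data *calld,
                         grpc_transport_stream_op_batch *batch) {
    grpc_transport_stream_op_batch_finish_with_failure(
        exec_ctx, batch,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("call dropped by filter"),
        calld->call_combiner);
  }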
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 099138ea14..fbf5dcb8b5 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -22,6 +22,7 @@
#include <stddef.h>
#include "src/core/lib/channel/context.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset.h"
@@ -152,6 +153,9 @@ struct grpc_transport_stream_op_batch_payload {
/** Iff send_initial_metadata != NULL, flags associated with
send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
uint32_t send_initial_metadata_flags;
+ // If non-NULL, will be set by the transport to the peer string
+ // (a char*, which the caller takes ownership of).
+ gpr_atm *peer_string;
} send_initial_metadata;
struct {
@@ -176,6 +180,9 @@ struct grpc_transport_stream_op_batch_payload {
// immediately available. This may be a signal that we received a
// Trailers-Only response.
bool *trailing_metadata_available;
+ // If non-NULL, will be set by the transport to the peer string
+ // (a char*, which the caller takes ownership of).
+ gpr_atm *peer_string;
} recv_initial_metadata;
struct {
@@ -293,7 +300,7 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
- grpc_error *error);
+ grpc_error *error, grpc_call_combiner *call_combiner);
char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op);
char *grpc_transport_op_string(grpc_transport_op *op);
@@ -332,10 +339,6 @@ void grpc_transport_close(grpc_transport *transport);
/* Destroy the transport */
void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
-/* Get the transports peer */
-char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport);
-
/* Get the endpoint used by \a transport */
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport);
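With get_peer removed from the transport API, the peer string now travels through the new gpr_atm fields in the batch payload: the transport publishes it once and readers load it lock-free. A transport-side sketch, assuming the transport keeps its own copy in a field named peer_string (illustrative names, not taken from this diff):

  /* Sketch: publish the peer string when the caller asked for it; the
     caller takes ownership of the strdup'd copy, per the comments above. */
  if (batch->recv_initial_metadata &&
      batch->payload->recv_initial_metadata.peer_string != NULL) {
    gpr_atm_rel_store(batch->payload->recv_initial_metadata.peer_string,
                      (gpr_atm)gpr_strdup(t->peer_string));
  }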
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index fc772c6dd1..bbae69c223 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -59,9 +59,6 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_destroy */
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
- /* implementation of grpc_transport_get_peer */
- char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
-
/* implementation of grpc_transport_get_endpoint */
grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
} grpc_transport_vtable;
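Dropping get_peer shrinks every transport vtable by one slot, so positional initializers simply lose that entry. A hypothetical initializer under the trimmed vtable; the field order follows the full grpc_transport_vtable definition, of which only the tail is visible in this hunk, and all my_* names are placeholders:

  static const grpc_transport_vtable my_transport_vtable = {
      sizeof(my_stream),       /* per-stream storage size */
      "my_transport",          /* transport name */
      my_init_stream,
      my_set_pollset,
      my_set_pollset_set,
      my_perform_stream_op,
      my_perform_transport_op,
      my_destroy_stream,
      my_destroy_transport,
      my_get_endpoint          /* the get_peer slot no longer exists */
  };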
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index 7b18229ba6..409a6c4103 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -112,6 +112,13 @@ char *grpc_transport_stream_op_batch_string(
gpr_strvec_add(&b, tmp);
}
+ if (op->collect_stats) {
+ gpr_strvec_add(&b, gpr_strdup(" "));
+ gpr_asprintf(&tmp, "COLLECT_STATS:%p",
+ op->payload->collect_stats.collect_stats);
+ gpr_strvec_add(&b, tmp);
+ }
+
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
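grpc_transport_stream_op_batch_string is a debugging aid, and the new COLLECT_STATS fragment just adds one more payload pointer to its output. A typical, hypothetical use in a filter's trace path, assuming a tracer flag named grpc_example_trace:

  if (GRPC_TRACER_ON(grpc_example_trace)) {
    char *str = grpc_transport_stream_op_batch_string(batch);
    gpr_log(GPR_DEBUG, "op batch: %s", str);
    gpr_free(str); /* the string is heap-allocated by the helper */
  }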