Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/census/gen/README.md                        4
-rw-r--r--  src/core/ext/census/gen/trace_context.pb.c              81
-rw-r--r--  src/core/ext/census/gen/trace_context.pb.h              99
-rw-r--r--  src/core/ext/client_config/client_channel.c             470
-rw-r--r--  src/core/ext/client_config/subchannel_call_holder.c     296
-rw-r--r--  src/core/ext/client_config/subchannel_call_holder.h      99
-rw-r--r--  src/core/lib/channel/http_client_filter.c               177
-rw-r--r--  src/core/lib/channel/http_client_filter.h                 3
-rw-r--r--  src/core/lib/channel/http_server_filter.c                79
-rw-r--r--  src/core/lib/security/context/security_context.c          6
-rw-r--r--  src/core/lib/security/context/security_context.h         12
-rw-r--r--  src/core/lib/surface/server.c                            11
-rw-r--r--  src/core/lib/transport/static_metadata.c                 15
-rw-r--r--  src/core/lib/transport/static_metadata.h                 98
-rw-r--r--  src/core/lib/transport/transport.h                        1
15 files changed, 877 insertions, 574 deletions
diff --git a/src/core/ext/census/gen/README.md b/src/core/ext/census/gen/README.md
index 72bef6542d..fdbac1084c 100644
--- a/src/core/ext/census/gen/README.md
+++ b/src/core/ext/census/gen/README.md
@@ -4,3 +4,7 @@ Files generated for use by Census stats and trace recording subsystem.
* census.pb.{h,c} - Generated from src/core/ext/census/census.proto, using the
script `tools/codegen/core/gen_nano_proto.sh src/proto/census/census.proto
$PWD/src/core/ext/census/gen src/core/ext/census/gen`
+* trace_context.pb.{h,c} - Generated from
+ src/core/ext/census/trace_context.proto, using the script
+ `tools/codegen/core/gen_nano_proto.sh src/proto/census/trace_context.proto
+ $PWD/src/core/ext/census/gen src/core/ext/census/gen`
diff --git a/src/core/ext/census/gen/trace_context.pb.c b/src/core/ext/census/gen/trace_context.pb.c
new file mode 100644
index 0000000000..c8aea324ce
--- /dev/null
+++ b/src/core/ext/census/gen/trace_context.pb.c
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/* Automatically generated nanopb constant definitions */
+/* Generated by nanopb-0.3.5-dev */
+
+#include "src/core/ext/census/gen/trace_context.pb.h"
+
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+
+
+const pb_field_t google_trace_TraceId_fields[3] = {
+ PB_FIELD( 1, FIXED64 , OPTIONAL, STATIC , FIRST, google_trace_TraceId, hi, hi, 0),
+ PB_FIELD( 2, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceId, lo, hi, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_trace_TraceContext_fields[4] = {
+ PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, google_trace_TraceContext, trace_id, trace_id, &google_trace_TraceId_fields),
+ PB_FIELD( 2, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_id, trace_id, 0),
+ PB_FIELD( 3, BOOL , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, is_sampled, span_id, 0),
+ PB_LAST_FIELD
+};
+
+
+/* Check that field information fits in pb_field_t */
+#if !defined(PB_FIELD_32BIT)
+/* If you get an error here, it means that you need to define PB_FIELD_32BIT
+ * compile-time option. You can do that in pb.h or on compiler command line.
+ *
+ * The reason you need to do this is that some of your messages contain tag
+ * numbers or field sizes that are larger than what can fit in 8 or 16 bit
+ * field descriptors.
+ */
+PB_STATIC_ASSERT((pb_membersize(google_trace_TraceContext, trace_id) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_google_trace_TraceId_google_trace_TraceContext)
+#endif
+
+#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
+/* If you get an error here, it means that you need to define PB_FIELD_16BIT
+ * compile-time option. You can do that in pb.h or on compiler command line.
+ *
+ * The reason you need to do this is that some of your messages contain tag
+ * numbers or field sizes that are larger than what can fit in the default
+ * 8 bit descriptors.
+ */
+PB_STATIC_ASSERT((pb_membersize(google_trace_TraceContext, trace_id) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_google_trace_TraceId_google_trace_TraceContext)
+#endif
+
+
diff --git a/src/core/ext/census/gen/trace_context.pb.h b/src/core/ext/census/gen/trace_context.pb.h
new file mode 100644
index 0000000000..263c4c58cb
--- /dev/null
+++ b/src/core/ext/census/gen/trace_context.pb.h
@@ -0,0 +1,99 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/* Automatically generated nanopb header */
+/* Generated by nanopb-0.3.5-dev */
+
+#ifndef GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
+#define GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
+#include "third_party/nanopb/pb.h"
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Struct definitions */
+typedef struct _google_trace_TraceId {
+ bool has_hi;
+ uint64_t hi;
+ bool has_lo;
+ uint64_t lo;
+} google_trace_TraceId;
+
+typedef struct _google_trace_TraceContext {
+ bool has_trace_id;
+ google_trace_TraceId trace_id;
+ bool has_span_id;
+ uint64_t span_id;
+ bool has_is_sampled;
+ bool is_sampled;
+} google_trace_TraceContext;
+
+/* Default values for struct fields */
+
+/* Initializer values for message structs */
+#define google_trace_TraceId_init_default {false, 0, false, 0}
+#define google_trace_TraceContext_init_default {false, google_trace_TraceId_init_default, false, 0, false, 0}
+#define google_trace_TraceId_init_zero {false, 0, false, 0}
+#define google_trace_TraceContext_init_zero {false, google_trace_TraceId_init_zero, false, 0, false, 0}
+
+/* Field tags (for use in manual encoding/decoding) */
+#define google_trace_TraceId_hi_tag 1
+#define google_trace_TraceId_lo_tag 2
+#define google_trace_TraceContext_trace_id_tag 1
+#define google_trace_TraceContext_span_id_tag 2
+#define google_trace_TraceContext_is_sampled_tag 3
+
+/* Struct field encoding specification for nanopb */
+extern const pb_field_t google_trace_TraceId_fields[3];
+extern const pb_field_t google_trace_TraceContext_fields[4];
+
+/* Maximum encoded size of messages (where known) */
+#define google_trace_TraceId_size 18
+#define google_trace_TraceContext_size 31
+
+/* Message IDs (where set with "msgid" option) */
+#ifdef PB_MSGID
+
+#define TRACE_CONTEXT_MESSAGES \
+
+
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
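
The generated TraceContext message above is driven with nanopb's generic encode/decode entry points rather than any hand-written serializer. A minimal standalone sketch, not part of this commit; it assumes the vendored third_party/nanopb headers are on the include path and uses only field names declared in the header added above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "third_party/nanopb/pb_encode.h"
#include "third_party/nanopb/pb_decode.h"
#include "src/core/ext/census/gen/trace_context.pb.h"

int main(void) {
  /* populate a TraceContext; every field is optional, so set has_* as well */
  google_trace_TraceContext ctx = google_trace_TraceContext_init_zero;
  ctx.has_trace_id = true;
  ctx.trace_id.has_hi = true;
  ctx.trace_id.hi = 0x0123456789abcdefULL;
  ctx.trace_id.has_lo = true;
  ctx.trace_id.lo = 0xfedcba9876543210ULL;
  ctx.has_span_id = true;
  ctx.span_id = 42;
  ctx.has_is_sampled = true;
  ctx.is_sampled = true;

  /* encode into a fixed buffer; TraceContext_size bounds the wire size */
  uint8_t buf[google_trace_TraceContext_size];
  pb_ostream_t out = pb_ostream_from_buffer(buf, sizeof(buf));
  if (!pb_encode(&out, google_trace_TraceContext_fields, &ctx)) {
    fprintf(stderr, "encode failed: %s\n", PB_GET_ERROR(&out));
    return 1;
  }

  /* decode it back from the same buffer */
  google_trace_TraceContext decoded = google_trace_TraceContext_init_zero;
  pb_istream_t in = pb_istream_from_buffer(buf, out.bytes_written);
  if (!pb_decode(&in, google_trace_TraceContext_fields, &decoded)) {
    fprintf(stderr, "decode failed: %s\n", PB_GET_ERROR(&in));
    return 1;
  }
  return 0;
}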
diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_config/client_channel.c
index 566d3d5ce4..be333f4e0d 100644
--- a/src/core/ext/client_config/client_channel.c
+++ b/src/core/ext/client_config/client_channel.c
@@ -33,6 +33,7 @@
#include "src/core/ext/client_config/client_channel.h"
+#include <stdbool.h>
#include <stdio.h>
#include <string.h>
@@ -41,10 +42,11 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
-#include "src/core/ext/client_config/subchannel_call_holder.h"
+#include "src/core/ext/client_config/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
@@ -52,13 +54,15 @@
/* Client channel implementation */
-typedef grpc_subchannel_call_holder call_data;
+/*************************************************************************
+ * CHANNEL-WIDE FUNCTIONS
+ */
typedef struct client_channel_channel_data {
/** resolver for this channel */
grpc_resolver *resolver;
/** have we started resolving this channel */
- int started_resolving;
+ bool started_resolving;
/** mutex protecting client configuration, including all
variables below in this data structure */
@@ -74,7 +78,7 @@ typedef struct client_channel_channel_data {
/** connectivity state being tracked */
grpc_connectivity_state_tracker state_tracker;
/** when an lb_policy arrives, should we try to exit idle */
- int exit_idle_when_lb_policy_arrives;
+ bool exit_idle_when_lb_policy_arrives;
/** owning stack */
grpc_channel_stack *owning_stack;
/** interested parties (owned) */
@@ -82,10 +86,8 @@ typedef struct client_channel_channel_data {
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
- resolver,
- to watch for state changes from the lb_policy. When a state change is seen,
- we
- update the channel, and create a new watcher */
+ resolver, to watch for state changes from the lb_policy. When a state
+ change is seen, we update the channel, and create a new watcher. */
typedef struct {
channel_data *chand;
grpc_closure on_changed;
@@ -93,22 +95,6 @@ typedef struct {
grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
-typedef struct {
- grpc_closure closure;
- grpc_call_element *elem;
-} waiting_call;
-
-static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
-}
-
-static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
-}
-
static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state);
@@ -177,13 +163,13 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
&w->on_changed);
}
-static void cc_on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
channel_data *chand = arg;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
- int exit_idle = 0;
+ bool exit_idle = false;
grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
if (chand->resolver_result != NULL) {
@@ -221,8 +207,8 @@ static void cc_on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
- exit_idle = 1;
- chand->exit_idle_when_lb_policy_arrives = 0;
+ exit_idle = true;
+ chand->exit_idle_when_lb_policy_arrives = false;
}
if (error == GRPC_ERROR_NONE && chand->resolver) {
@@ -330,6 +316,192 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&chand->mu);
}
+/* Constructor for channel_data */
+static void cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ channel_data *chand = elem->channel_data;
+
+ memset(chand, 0, sizeof(*chand));
+
+ GPR_ASSERT(args->is_last);
+ GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
+
+ gpr_mu_init(&chand->mu);
+ grpc_closure_init(&chand->on_resolver_result_changed,
+ on_resolver_result_changed, chand);
+ chand->owning_stack = args->channel_stack;
+
+ grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
+ "client_channel");
+ chand->interested_parties = grpc_pollset_set_create();
+}
+
+/* Destructor for channel_data */
+static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
+ channel_data *chand = elem->channel_data;
+
+ if (chand->resolver != NULL) {
+ grpc_resolver_shutdown(exec_ctx, chand->resolver);
+ GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
+ }
+ if (chand->lb_policy != NULL) {
+ grpc_pollset_set_del_pollset_set(exec_ctx,
+ chand->lb_policy->interested_parties,
+ chand->interested_parties);
+ GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+ }
+ grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
+ grpc_pollset_set_destroy(chand->interested_parties);
+ gpr_mu_destroy(&chand->mu);
+}
+
+/*************************************************************************
+ * PER-CALL FUNCTIONS
+ */
+
+#define GET_CALL(call_data) \
+ ((grpc_subchannel_call *)(gpr_atm_acq_load(&(call_data)->subchannel_call)))
+
+#define CANCELLED_CALL ((grpc_subchannel_call *)1)
+
+typedef enum {
+ GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
+ GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
+} subchannel_creation_phase;
+
+/** Call data. Holds a pointer to grpc_subchannel_call and the
+ associated machinery to create such a pointer.
+ Handles queueing of stream ops until a call object is ready, waiting
+ for initial metadata before trying to create a call object,
+ and handling cancellation gracefully. */
+typedef struct client_channel_call_data {
+ /** either 0 for no call, 1 for cancelled, or a pointer to a
+ grpc_subchannel_call */
+ gpr_atm subchannel_call;
+
+ gpr_mu mu;
+
+ subchannel_creation_phase creation_phase;
+ grpc_connected_subchannel *connected_subchannel;
+ grpc_polling_entity *pollent;
+
+ grpc_transport_stream_op **waiting_ops;
+ size_t waiting_ops_count;
+ size_t waiting_ops_capacity;
+
+ grpc_closure next_step;
+
+ grpc_call_stack *owning_call;
+} call_data;
+
+static void add_waiting_locked(call_data *calld, grpc_transport_stream_op *op) {
+ GPR_TIMER_BEGIN("add_waiting_locked", 0);
+ if (calld->waiting_ops_count == calld->waiting_ops_capacity) {
+ calld->waiting_ops_capacity = GPR_MAX(3, 2 * calld->waiting_ops_capacity);
+ calld->waiting_ops =
+ gpr_realloc(calld->waiting_ops,
+ calld->waiting_ops_capacity * sizeof(*calld->waiting_ops));
+ }
+ calld->waiting_ops[calld->waiting_ops_count++] = op;
+ GPR_TIMER_END("add_waiting_locked", 0);
+}
+
+static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld,
+ grpc_error *error) {
+ size_t i;
+ for (i = 0; i < calld->waiting_ops_count; i++) {
+ grpc_transport_stream_op_finish_with_failure(
+ exec_ctx, calld->waiting_ops[i], GRPC_ERROR_REF(error));
+ }
+ calld->waiting_ops_count = 0;
+ GRPC_ERROR_UNREF(error);
+}
+
+typedef struct {
+ grpc_transport_stream_op **ops;
+ size_t nops;
+ grpc_subchannel_call *call;
+} retry_ops_args;
+
+static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
+ retry_ops_args *a = args;
+ size_t i;
+ for (i = 0; i < a->nops; i++) {
+ grpc_subchannel_call_process_op(exec_ctx, a->call, a->ops[i]);
+ }
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
+ gpr_free(a->ops);
+ gpr_free(a);
+}
+
+static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
+ if (calld->waiting_ops_count == 0) {
+ return;
+ }
+
+ retry_ops_args *a = gpr_malloc(sizeof(*a));
+ a->ops = calld->waiting_ops;
+ a->nops = calld->waiting_ops_count;
+ a->call = GET_CALL(calld);
+ if (a->call == CANCELLED_CALL) {
+ gpr_free(a);
+ fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED);
+ return;
+ }
+ calld->waiting_ops = NULL;
+ calld->waiting_ops_count = 0;
+ calld->waiting_ops_capacity = 0;
+ GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
+ grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a),
+ GRPC_ERROR_NONE, NULL);
+}
+
+static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ call_data *calld = arg;
+ gpr_mu_lock(&calld->mu);
+ GPR_ASSERT(calld->creation_phase ==
+ GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
+ calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ if (calld->connected_subchannel == NULL) {
+ gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
+ fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING(
+ "Failed to create subchannel", &error, 1));
+ } else if (1 == gpr_atm_acq_load(&calld->subchannel_call)) {
+ /* already cancelled before subchannel became ready */
+ fail_locked(exec_ctx, calld,
+ GRPC_ERROR_CREATE_REFERENCING(
+ "Cancelled before creating subchannel", &error, 1));
+ } else {
+ grpc_subchannel_call *subchannel_call = NULL;
+ grpc_error *new_error = grpc_connected_subchannel_create_call(
+ exec_ctx, calld->connected_subchannel, calld->pollent,
+ &subchannel_call);
+ if (new_error != GRPC_ERROR_NONE) {
+ new_error = grpc_error_add_child(new_error, error);
+ subchannel_call = CANCELLED_CALL;
+ fail_locked(exec_ctx, calld, new_error);
+ }
+ gpr_atm_rel_store(&calld->subchannel_call,
+ (gpr_atm)(uintptr_t)subchannel_call);
+ retry_waiting_locked(exec_ctx, calld);
+ }
+ gpr_mu_unlock(&calld->mu);
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+}
+
+static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ grpc_subchannel_call *subchannel_call = GET_CALL(calld);
+ if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) {
+ return NULL;
+ } else {
+ return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
+ }
+}
+
typedef struct {
grpc_metadata_batch *initial_metadata;
uint32_t initial_metadata_flags;
@@ -339,11 +511,11 @@ typedef struct {
grpc_closure closure;
} continue_picking_args;
-static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_metadata_batch *initial_metadata,
- uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel,
- grpc_closure *on_ready);
+static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_metadata_batch *initial_metadata,
+ uint32_t initial_metadata_flags,
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_closure *on_ready);
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
@@ -352,22 +524,21 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
- } else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
- cpa->initial_metadata_flags,
- cpa->connected_subchannel, cpa->on_ready)) {
+ } else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
+ cpa->initial_metadata_flags,
+ cpa->connected_subchannel, cpa->on_ready)) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
}
gpr_free(cpa);
}
-static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
- grpc_metadata_batch *initial_metadata,
- uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel,
- grpc_closure *on_ready) {
- GPR_TIMER_BEGIN("cc_pick_subchannel", 0);
+static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_metadata_batch *initial_metadata,
+ uint32_t initial_metadata_flags,
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_closure *on_ready) {
+ GPR_TIMER_BEGIN("pick_subchannel", 0);
- grpc_call_element *elem = elemp;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
continue_picking_args *cpa;
@@ -391,23 +562,23 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
}
}
gpr_mu_unlock(&chand->mu);
- GPR_TIMER_END("cc_pick_subchannel", 0);
- return 1;
+ GPR_TIMER_END("pick_subchannel", 0);
+ return true;
}
if (chand->lb_policy != NULL) {
grpc_lb_policy *lb_policy = chand->lb_policy;
int r;
- GRPC_LB_POLICY_REF(lb_policy, "cc_pick_subchannel");
+ GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
gpr_mu_unlock(&chand->mu);
r = grpc_lb_policy_pick(exec_ctx, lb_policy, calld->pollent,
initial_metadata, initial_metadata_flags,
connected_subchannel, on_ready);
- GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "cc_pick_subchannel");
- GPR_TIMER_END("cc_pick_subchannel", 0);
+ GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
+ GPR_TIMER_END("pick_subchannel", 0);
return r;
}
if (chand->resolver != NULL && !chand->started_resolving) {
- chand->started_resolving = 1;
+ chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
@@ -428,66 +599,143 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
}
gpr_mu_unlock(&chand->mu);
- GPR_TIMER_END("cc_pick_subchannel", 0);
- return 0;
+ GPR_TIMER_END("pick_subchannel", 0);
+ return false;
+}
+
+// The logic here is fairly complicated, due to (a) the fact that we
+// need to handle the case where we receive the send op before the
+// initial metadata op, and (b) the need for efficiency, especially in
+// the streaming case.
+// TODO(ctiller): Explain this more thoroughly.
+static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ /* try to (atomically) get the call */
+ grpc_subchannel_call *call = GET_CALL(calld);
+ GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
+ if (call == CANCELLED_CALL) {
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
+ GRPC_ERROR_CANCELLED);
+ GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ return;
+ }
+ if (call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, call, op);
+ GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ return;
+ }
+ /* we failed; lock and figure out what to do */
+ gpr_mu_lock(&calld->mu);
+retry:
+ /* need to recheck that another thread hasn't set the call */
+ call = GET_CALL(calld);
+ if (call == CANCELLED_CALL) {
+ gpr_mu_unlock(&calld->mu);
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
+ GRPC_ERROR_CANCELLED);
+ GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ return;
+ }
+ if (call != NULL) {
+ gpr_mu_unlock(&calld->mu);
+ grpc_subchannel_call_process_op(exec_ctx, call, op);
+ GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ return;
+ }
+ /* if this is a cancellation, then we can raise our cancelled flag */
+ if (op->cancel_error != GRPC_ERROR_NONE) {
+ if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
+ (gpr_atm)(uintptr_t)CANCELLED_CALL)) {
+ goto retry;
+ } else {
+ switch (calld->creation_phase) {
+ case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
+ fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
+ break;
+ case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
+ pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel,
+ NULL);
+ break;
+ }
+ gpr_mu_unlock(&calld->mu);
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
+ GRPC_ERROR_CANCELLED);
+ GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ return;
+ }
+ }
+ /* if we don't have a subchannel, try to get one */
+ if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
+ calld->connected_subchannel == NULL &&
+ op->send_initial_metadata != NULL) {
+ calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
+ grpc_closure_init(&calld->next_step, subchannel_ready, calld);
+ GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
+ if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
+ op->send_initial_metadata_flags,
+ &calld->connected_subchannel, &calld->next_step)) {
+ calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+ }
+ }
+ /* if we've got a subchannel, then let's ask it to create a call */
+ if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
+ calld->connected_subchannel != NULL) {
+ grpc_subchannel_call *subchannel_call = NULL;
+ grpc_error *error = grpc_connected_subchannel_create_call(
+ exec_ctx, calld->connected_subchannel, calld->pollent,
+ &subchannel_call);
+ if (error != GRPC_ERROR_NONE) {
+ subchannel_call = CANCELLED_CALL;
+ fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
+ grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
+ }
+ gpr_atm_rel_store(&calld->subchannel_call,
+ (gpr_atm)(uintptr_t)subchannel_call);
+ retry_waiting_locked(exec_ctx, calld);
+ goto retry;
+ }
+ /* nothing to be done but wait */
+ add_waiting_locked(calld, op);
+ gpr_mu_unlock(&calld->mu);
+ GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_call_element_args *args) {
- grpc_subchannel_call_holder_init(elem->call_data, cc_pick_subchannel, elem,
- args->call_stack);
+static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_call_element_args *args) {
+ call_data *calld = elem->call_data;
+ gpr_atm_rel_store(&calld->subchannel_call, 0);
+ gpr_mu_init(&calld->mu);
+ calld->connected_subchannel = NULL;
+ calld->waiting_ops = NULL;
+ calld->waiting_ops_count = 0;
+ calld->waiting_ops_capacity = 0;
+ calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ calld->owning_call = args->call_stack;
+ calld->pollent = NULL;
return GRPC_ERROR_NONE;
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- void *and_free_memory) {
- grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
- gpr_free(and_free_memory);
-}
-
-/* Constructor for channel_data */
-static void init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
-
- memset(chand, 0, sizeof(*chand));
-
- GPR_ASSERT(args->is_last);
- GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
-
- gpr_mu_init(&chand->mu);
- grpc_closure_init(&chand->on_resolver_result_changed,
- cc_on_resolver_result_changed, chand);
- chand->owning_stack = args->channel_stack;
-
- grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
- "client_channel");
- chand->interested_parties = grpc_pollset_set_create();
-}
-
-/* Destructor for channel_data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
-
- if (chand->resolver != NULL) {
- grpc_resolver_shutdown(exec_ctx, chand->resolver);
- GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
- }
- if (chand->lb_policy != NULL) {
- grpc_pollset_set_del_pollset_set(exec_ctx,
- chand->lb_policy->interested_parties,
- chand->interested_parties);
- GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const grpc_call_final_info *final_info,
+ void *and_free_memory) {
+ call_data *calld = elem->call_data;
+ grpc_subchannel_call *call = GET_CALL(calld);
+ if (call != NULL && call != CANCELLED_CALL) {
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
}
- grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
- grpc_pollset_set_destroy(chand->interested_parties);
- gpr_mu_destroy(&chand->mu);
+ GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
+ gpr_mu_destroy(&calld->mu);
+ GPR_ASSERT(calld->waiting_ops_count == 0);
+ gpr_free(calld->waiting_ops);
+ gpr_free(and_free_memory);
}
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
@@ -497,16 +745,20 @@ static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
calld->pollent = pollent;
}
+/*************************************************************************
+ * EXPORTED SYMBOLS
+ */
+
const grpc_channel_filter grpc_client_channel_filter = {
cc_start_transport_stream_op,
cc_start_transport_op,
sizeof(call_data),
- init_call_elem,
+ cc_init_call_elem,
cc_set_pollset_or_pollset_set,
- destroy_call_elem,
+ cc_destroy_call_elem,
sizeof(channel_data),
- init_channel_elem,
- destroy_channel_elem,
+ cc_init_channel_elem,
+ cc_destroy_channel_elem,
cc_get_peer,
"client-channel",
};
@@ -523,7 +775,7 @@ void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
GRPC_RESOLVER_REF(resolver, "channel");
if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
chand->exit_idle_when_lb_policy_arrives) {
- chand->started_resolving = 1;
+ chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
@@ -541,10 +793,10 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
} else {
- chand->exit_idle_when_lb_policy_arrives = 1;
+ chand->exit_idle_when_lb_policy_arrives = true;
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- chand->started_resolving = 1;
+ chand->started_resolving = true;
grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
&chand->on_resolver_result_changed);
}
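
The refactored call_data above folds the old subchannel_call_holder state machine directly into the client channel: the subchannel call lives in a single atomic word that is 0 (no call yet), 1 (CANCELLED_CALL), or a real pointer, and cancellation races are resolved with a compare-and-swap before ops are failed or queued. A minimal sketch of that tagged-atomic-word pattern, using C11 atomics in place of the gpr_atm helpers; illustrative only, with a hypothetical fake_call type standing in for grpc_subchannel_call:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct fake_call fake_call; /* hypothetical stand-in for grpc_subchannel_call */

#define SLOT_CANCELLED ((uintptr_t)1) /* same trick as CANCELLED_CALL above */

typedef struct {
  _Atomic uintptr_t call; /* 0 = no call, 1 = cancelled, else a fake_call* */
} call_slot;

/* Try to mark the slot cancelled (mirrors gpr_atm_rel_cas on 0). Fails if a
   call was already installed, in which case the caller re-reads the slot. */
static bool slot_try_cancel(call_slot *s) {
  uintptr_t expected = 0;
  return atomic_compare_exchange_strong_explicit(
      &s->call, &expected, SLOT_CANCELLED,
      memory_order_release, memory_order_relaxed);
}

/* Acquire-load the slot; both "no call" and "cancelled" read as NULL here. */
static fake_call *slot_get(call_slot *s) {
  uintptr_t v = atomic_load_explicit(&s->call, memory_order_acquire);
  return (v == 0 || v == SLOT_CANCELLED) ? NULL : (fake_call *)v;
}

/* Publish a newly created call with release semantics so queued ops retried
   afterwards observe a fully initialized object. */
static void slot_publish(call_slot *s, fake_call *c) {
  atomic_store_explicit(&s->call, (uintptr_t)c, memory_order_release);
}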
diff --git a/src/core/ext/client_config/subchannel_call_holder.c b/src/core/ext/client_config/subchannel_call_holder.c
deleted file mode 100644
index cda939b4a0..0000000000
--- a/src/core/ext/client_config/subchannel_call_holder.c
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/ext/client_config/subchannel_call_holder.h"
-
-#include <grpc/support/alloc.h>
-
-#include "src/core/lib/profiling/timers.h"
-
-#define GET_CALL(holder) \
- ((grpc_subchannel_call *)(gpr_atm_acq_load(&(holder)->subchannel_call)))
-
-#define CANCELLED_CALL ((grpc_subchannel_call *)1)
-
-static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *holder,
- grpc_error *error);
-static void retry_ops(grpc_exec_ctx *exec_ctx, void *retry_ops_args,
- grpc_error *error);
-
-static void add_waiting_locked(grpc_subchannel_call_holder *holder,
- grpc_transport_stream_op *op);
-static void fail_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call_holder *holder, grpc_error *error);
-static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call_holder *holder);
-
-void grpc_subchannel_call_holder_init(
- grpc_subchannel_call_holder *holder,
- grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
- void *pick_subchannel_arg, grpc_call_stack *owning_call) {
- gpr_atm_rel_store(&holder->subchannel_call, 0);
- holder->pick_subchannel = pick_subchannel;
- holder->pick_subchannel_arg = pick_subchannel_arg;
- gpr_mu_init(&holder->mu);
- holder->connected_subchannel = NULL;
- holder->waiting_ops = NULL;
- holder->waiting_ops_count = 0;
- holder->waiting_ops_capacity = 0;
- holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
- holder->owning_call = owning_call;
- holder->pollent = NULL;
-}
-
-void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call_holder *holder) {
- grpc_subchannel_call *call = GET_CALL(holder);
- if (call != NULL && call != CANCELLED_CALL) {
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "holder");
- }
- GPR_ASSERT(holder->creation_phase ==
- GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
- gpr_mu_destroy(&holder->mu);
- GPR_ASSERT(holder->waiting_ops_count == 0);
- gpr_free(holder->waiting_ops);
-}
-
-// The logic here is fairly complicated, due to (a) the fact that we
-// need to handle the case where we receive the send op before the
-// initial metadata op, and (b) the need for efficiency, especially in
-// the streaming case.
-// TODO(ctiller): Explain this more thoroughly.
-void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call_holder *holder,
- grpc_transport_stream_op *op) {
- /* try to (atomically) get the call */
- grpc_subchannel_call *call = GET_CALL(holder);
- GPR_TIMER_BEGIN("grpc_subchannel_call_holder_perform_op", 0);
- if (call == CANCELLED_CALL) {
- grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
- GRPC_ERROR_CANCELLED);
- GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
- return;
- }
- if (call != NULL) {
- grpc_subchannel_call_process_op(exec_ctx, call, op);
- GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
- return;
- }
- /* we failed; lock and figure out what to do */
- gpr_mu_lock(&holder->mu);
-retry:
- /* need to recheck that another thread hasn't set the call */
- call = GET_CALL(holder);
- if (call == CANCELLED_CALL) {
- gpr_mu_unlock(&holder->mu);
- grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
- GRPC_ERROR_CANCELLED);
- GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
- return;
- }
- if (call != NULL) {
- gpr_mu_unlock(&holder->mu);
- grpc_subchannel_call_process_op(exec_ctx, call, op);
- GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
- return;
- }
- /* if this is a cancellation, then we can raise our cancelled flag */
- if (op->cancel_error != GRPC_ERROR_NONE) {
- if (!gpr_atm_rel_cas(&holder->subchannel_call, 0,
- (gpr_atm)(uintptr_t)CANCELLED_CALL)) {
- goto retry;
- } else {
- switch (holder->creation_phase) {
- case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
- fail_locked(exec_ctx, holder, GRPC_ERROR_REF(op->cancel_error));
- break;
- case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
- holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
- 0, &holder->connected_subchannel, NULL);
- break;
- }
- gpr_mu_unlock(&holder->mu);
- grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
- GRPC_ERROR_CANCELLED);
- GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
- return;
- }
- }
- /* if we don't have a subchannel, try to get one */
- if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
- holder->connected_subchannel == NULL &&
- op->send_initial_metadata != NULL) {
- holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
- grpc_closure_init(&holder->next_step, subchannel_ready, holder);
- GRPC_CALL_STACK_REF(holder->owning_call, "pick_subchannel");
- if (holder->pick_subchannel(
- exec_ctx, holder->pick_subchannel_arg, op->send_initial_metadata,
- op->send_initial_metadata_flags, &holder->connected_subchannel,
- &holder->next_step)) {
- holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
- GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
- }
- }
- /* if we've got a subchannel, then let's ask it to create a call */
- if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
- holder->connected_subchannel != NULL) {
- grpc_subchannel_call *subchannel_call = NULL;
- grpc_error *error = grpc_connected_subchannel_create_call(
- exec_ctx, holder->connected_subchannel, holder->pollent,
- &subchannel_call);
- if (error != GRPC_ERROR_NONE) {
- subchannel_call = CANCELLED_CALL;
- fail_locked(exec_ctx, holder, GRPC_ERROR_REF(error));
- grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
- }
- gpr_atm_rel_store(&holder->subchannel_call,
- (gpr_atm)(uintptr_t)subchannel_call);
- retry_waiting_locked(exec_ctx, holder);
- goto retry;
- }
- /* nothing to be done but wait */
- add_waiting_locked(holder, op);
- gpr_mu_unlock(&holder->mu);
- GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
-}
-
-static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_subchannel_call_holder *holder = arg;
- gpr_mu_lock(&holder->mu);
- GPR_ASSERT(holder->creation_phase ==
- GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
- holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
- if (holder->connected_subchannel == NULL) {
- gpr_atm_no_barrier_store(&holder->subchannel_call, 1);
- fail_locked(exec_ctx, holder,
- GRPC_ERROR_CREATE_REFERENCING("Failed to create subchannel",
- &error, 1));
- } else if (1 == gpr_atm_acq_load(&holder->subchannel_call)) {
- /* already cancelled before subchannel became ready */
- fail_locked(exec_ctx, holder,
- GRPC_ERROR_CREATE_REFERENCING(
- "Cancelled before creating subchannel", &error, 1));
- } else {
- grpc_subchannel_call *subchannel_call = NULL;
- grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, holder->connected_subchannel, holder->pollent,
- &subchannel_call);
- if (new_error != GRPC_ERROR_NONE) {
- new_error = grpc_error_add_child(new_error, error);
- subchannel_call = CANCELLED_CALL;
- fail_locked(exec_ctx, holder, new_error);
- }
- gpr_atm_rel_store(&holder->subchannel_call,
- (gpr_atm)(uintptr_t)subchannel_call);
- retry_waiting_locked(exec_ctx, holder);
- }
- gpr_mu_unlock(&holder->mu);
- GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
-}
-
-typedef struct {
- grpc_transport_stream_op **ops;
- size_t nops;
- grpc_subchannel_call *call;
-} retry_ops_args;
-
-static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call_holder *holder) {
- if (holder->waiting_ops_count == 0) {
- return;
- }
-
- retry_ops_args *a = gpr_malloc(sizeof(*a));
- a->ops = holder->waiting_ops;
- a->nops = holder->waiting_ops_count;
- a->call = GET_CALL(holder);
- if (a->call == CANCELLED_CALL) {
- gpr_free(a);
- fail_locked(exec_ctx, holder, GRPC_ERROR_CANCELLED);
- return;
- }
- holder->waiting_ops = NULL;
- holder->waiting_ops_count = 0;
- holder->waiting_ops_capacity = 0;
- GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
- grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a),
- GRPC_ERROR_NONE, NULL);
-}
-
-static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
- retry_ops_args *a = args;
- size_t i;
- for (i = 0; i < a->nops; i++) {
- grpc_subchannel_call_process_op(exec_ctx, a->call, a->ops[i]);
- }
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
- gpr_free(a->ops);
- gpr_free(a);
-}
-
-static void add_waiting_locked(grpc_subchannel_call_holder *holder,
- grpc_transport_stream_op *op) {
- GPR_TIMER_BEGIN("add_waiting_locked", 0);
- if (holder->waiting_ops_count == holder->waiting_ops_capacity) {
- holder->waiting_ops_capacity = GPR_MAX(3, 2 * holder->waiting_ops_capacity);
- holder->waiting_ops =
- gpr_realloc(holder->waiting_ops, holder->waiting_ops_capacity *
- sizeof(*holder->waiting_ops));
- }
- holder->waiting_ops[holder->waiting_ops_count++] = op;
- GPR_TIMER_END("add_waiting_locked", 0);
-}
-
-static void fail_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call_holder *holder,
- grpc_error *error) {
- size_t i;
- for (i = 0; i < holder->waiting_ops_count; i++) {
- grpc_transport_stream_op_finish_with_failure(
- exec_ctx, holder->waiting_ops[i], GRPC_ERROR_REF(error));
- }
- holder->waiting_ops_count = 0;
- GRPC_ERROR_UNREF(error);
-}
-
-char *grpc_subchannel_call_holder_get_peer(
- grpc_exec_ctx *exec_ctx, grpc_subchannel_call_holder *holder) {
- grpc_subchannel_call *subchannel_call = GET_CALL(holder);
-
- if (subchannel_call == NULL || subchannel_call == CANCELLED_CALL) {
- return NULL;
- } else {
- return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
- }
-}
diff --git a/src/core/ext/client_config/subchannel_call_holder.h b/src/core/ext/client_config/subchannel_call_holder.h
deleted file mode 100644
index 19b22a2905..0000000000
--- a/src/core/ext/client_config/subchannel_call_holder.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_CALL_HOLDER_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_CALL_HOLDER_H
-
-#include "src/core/ext/client_config/subchannel.h"
-#include "src/core/lib/iomgr/polling_entity.h"
-
-/** Pick a subchannel for grpc_subchannel_call_holder;
- Return 1 if subchannel is available immediately (in which case on_ready
- should not be called), or 0 otherwise (in which case on_ready should be
- called when the subchannel is available) */
-typedef int (*grpc_subchannel_call_holder_pick_subchannel)(
- grpc_exec_ctx *exec_ctx, void *arg, grpc_metadata_batch *initial_metadata,
- uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready);
-
-typedef enum {
- GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
- GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
-} grpc_subchannel_call_holder_creation_phase;
-
-/** Wrapper for holding a pointer to grpc_subchannel_call, and the
- associated machinery to create such a pointer.
- Handles queueing of stream ops until a call object is ready, waiting
- for initial metadata before trying to create a call object,
- and handling cancellation gracefully.
-
- The channel filter uses this as their call_data. */
-typedef struct grpc_subchannel_call_holder {
- /** either 0 for no call, 1 for cancelled, or a pointer to a
- grpc_subchannel_call */
- gpr_atm subchannel_call;
- /** Helper function to choose the subchannel on which to create
- the call object. Channel filter delegates to the load
- balancing policy (once it's ready). */
- grpc_subchannel_call_holder_pick_subchannel pick_subchannel;
- void *pick_subchannel_arg;
-
- gpr_mu mu;
-
- grpc_subchannel_call_holder_creation_phase creation_phase;
- grpc_connected_subchannel *connected_subchannel;
- grpc_polling_entity *pollent;
-
- grpc_transport_stream_op **waiting_ops;
- size_t waiting_ops_count;
- size_t waiting_ops_capacity;
-
- grpc_closure next_step;
-
- grpc_call_stack *owning_call;
-} grpc_subchannel_call_holder;
-
-void grpc_subchannel_call_holder_init(
- grpc_subchannel_call_holder *holder,
- grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
- void *pick_subchannel_arg, grpc_call_stack *owning_call);
-void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call_holder *holder);
-
-void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call_holder *holder,
- grpc_transport_stream_op *op);
-char *grpc_subchannel_call_holder_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call_holder *holder);
-
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_CALL_HOLDER_H */
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index 9e67df8a9c..ef68cc86ea 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -43,6 +43,9 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
+/* default maximum size of payload eligible for GET request */
+static const size_t kMaxPayloadSizeForGet = 2048;
+
typedef struct call_data {
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
@@ -50,20 +53,39 @@ typedef struct call_data {
grpc_linked_mdelem te_trailers;
grpc_linked_mdelem content_type;
grpc_linked_mdelem user_agent;
+ grpc_linked_mdelem payload_bin;
grpc_metadata_batch *recv_initial_metadata;
+ uint8_t *payload_bytes;
+
+ /* Vars to read data off of send_message */
+ grpc_transport_stream_op send_op;
+ uint32_t send_length;
+ uint32_t send_flags;
+ gpr_slice incoming_slice;
+ grpc_slice_buffer_stream replacement_stream;
+ gpr_slice_buffer slices;
+  /* flag indicating that not all slices of send_message are available yet */
+ bool send_message_blocked;
/** Closure to call when finished with the hc_on_recv hook */
grpc_closure *on_done_recv;
+ grpc_closure *on_complete;
+ grpc_closure *post_send;
+
/** Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member
after handling it. */
grpc_closure hc_on_recv;
+ grpc_closure hc_on_complete;
+ grpc_closure got_slice;
+ grpc_closure send_done;
} call_data;
typedef struct channel_data {
grpc_mdelem *static_scheme;
grpc_mdelem *user_agent;
+ size_t max_payload_size_for_get;
} channel_data;
typedef struct {
@@ -119,6 +141,24 @@ static void hc_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, error);
}
+static void hc_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_error *error) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ if (calld->payload_bytes) {
+ gpr_free(calld->payload_bytes);
+ calld->payload_bytes = NULL;
+ }
+ calld->on_complete->cb(exec_ctx, calld->on_complete->cb_arg, error);
+}
+
+static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
+ grpc_call_element *elem = elemp;
+ call_data *calld = elem->call_data;
+ gpr_slice_buffer_reset_and_unref(&calld->slices);
+ calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, error);
+}
+
static grpc_mdelem *client_strip_filter(void *user_data, grpc_mdelem *md) {
/* eat the things we'd like to set ourselves */
if (md->key == GRPC_MDSTR_METHOD) return NULL;
@@ -129,22 +169,105 @@ static grpc_mdelem *client_strip_filter(void *user_data, grpc_mdelem *md) {
return md;
}
-static void hc_mutate_op(grpc_call_element *elem,
+static void continue_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *calld = elem->call_data;
+ uint8_t *wrptr = calld->payload_bytes;
+ while (grpc_byte_stream_next(exec_ctx, calld->send_op.send_message,
+ &calld->incoming_slice, ~(size_t)0,
+ &calld->got_slice)) {
+ memcpy(wrptr, GPR_SLICE_START_PTR(calld->incoming_slice),
+ GPR_SLICE_LENGTH(calld->incoming_slice));
+ wrptr += GPR_SLICE_LENGTH(calld->incoming_slice);
+ gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ if (calld->send_length == calld->slices.length) {
+ calld->send_message_blocked = false;
+ break;
+ }
+ }
+}
+
+static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
+ grpc_call_element *elem = elemp;
+ call_data *calld = elem->call_data;
+ calld->send_message_blocked = false;
+ gpr_slice_buffer_add(&calld->slices, calld->incoming_slice);
+ if (calld->send_length == calld->slices.length) {
+ /* Pass down the original send_message op that was blocked.*/
+ grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
+ calld->send_flags);
+ calld->send_op.send_message = &calld->replacement_stream.base;
+ calld->post_send = calld->send_op.on_complete;
+ calld->send_op.on_complete = &calld->send_done;
+ grpc_call_next_op(exec_ctx, elem, &calld->send_op);
+ } else {
+ continue_send_message(exec_ctx, elem);
+ }
+}
+
+static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
+
if (op->send_initial_metadata != NULL) {
+ /* Decide which HTTP VERB to use. We use GET if the request is marked
+ cacheable, and the operation contains both initial metadata and send
+ message, and the payload is below the size threshold, and all the data
+ for this request is immediately available. */
+ grpc_mdelem *method = GRPC_MDELEM_METHOD_POST;
+ calld->send_message_blocked = false;
+ if ((op->send_initial_metadata_flags &
+ GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) &&
+ op->send_message != NULL &&
+ op->send_message->length < channeld->max_payload_size_for_get) {
+ method = GRPC_MDELEM_METHOD_GET;
+ calld->send_message_blocked = true;
+ } else if (op->send_initial_metadata_flags &
+ GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) {
+ method = GRPC_MDELEM_METHOD_PUT;
+ }
+
+ /* Attempt to read the data from send_message and create a header field. */
+ if (method == GRPC_MDELEM_METHOD_GET) {
+ /* allocate memory to hold the entire payload */
+ calld->payload_bytes = gpr_malloc(op->send_message->length);
+ GPR_ASSERT(calld->payload_bytes);
+
+ /* read slices of send_message and copy into payload_bytes */
+ calld->send_op = *op;
+ calld->send_length = op->send_message->length;
+ calld->send_flags = op->send_message->flags;
+ continue_send_message(exec_ctx, elem);
+
+ if (calld->send_message_blocked == false) {
+        /* when all the send_message data is available, create an mdelem and
+           append it to the headers */
+ grpc_mdelem *payload_bin = grpc_mdelem_from_metadata_strings(
+ GRPC_MDSTR_GRPC_PAYLOAD_BIN,
+ grpc_mdstr_from_buffer(calld->payload_bytes,
+ op->send_message->length));
+ grpc_metadata_batch_add_tail(op->send_initial_metadata,
+ &calld->payload_bin, payload_bin);
+ calld->on_complete = op->on_complete;
+ op->on_complete = &calld->hc_on_complete;
+ op->send_message = NULL;
+ } else {
+ /* Not all data is available. Fall back to POST. */
+ gpr_log(GPR_DEBUG,
+ "Request is marked Cacheable but not all data is available.\
+ Falling back to POST");
+ method = GRPC_MDELEM_METHOD_POST;
+ }
+ }
+
grpc_metadata_batch_filter(op->send_initial_metadata, client_strip_filter,
elem);
/* Send : prefixed headers, which have to be before any application
layer headers. */
- grpc_metadata_batch_add_head(
- op->send_initial_metadata, &calld->method,
- op->send_initial_metadata_flags &
- GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST
- ? GRPC_MDELEM_METHOD_PUT
- : GRPC_MDELEM_METHOD_POST);
+ grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->method,
+ method);
grpc_metadata_batch_add_head(op->send_initial_metadata, &calld->scheme,
channeld->static_scheme);
grpc_metadata_batch_add_tail(op->send_initial_metadata, &calld->te_trailers,
@@ -169,9 +292,16 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("hc_start_transport_op", 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- hc_mutate_op(elem, op);
+ hc_mutate_op(exec_ctx, elem, op);
GPR_TIMER_END("hc_start_transport_op", 0);
- grpc_call_next_op(exec_ctx, elem, op);
+ call_data *calld = elem->call_data;
+ if (op->send_message != NULL && calld->send_message_blocked) {
+ /* Don't forward the op. send_message contains slices that aren't ready
+ yet. The call will be forwarded by the op_complete of slice read call.
+ */
+ } else {
+ grpc_call_next_op(exec_ctx, elem, op);
+ }
}
/* Constructor for call_data */
@@ -180,14 +310,23 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
calld->on_done_recv = NULL;
+ calld->on_complete = NULL;
+ calld->payload_bytes = NULL;
+ gpr_slice_buffer_init(&calld->slices);
grpc_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
+ grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem);
+ grpc_closure_init(&calld->got_slice, got_slice, elem);
+ grpc_closure_init(&calld->send_done, send_done, elem);
return GRPC_ERROR_NONE;
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {}
+ void *ignored) {
+ call_data *calld = elem->call_data;
+ gpr_slice_buffer_destroy(&calld->slices);
+}
static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
unsigned i;
@@ -210,6 +349,22 @@ static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
return GRPC_MDELEM_SCHEME_HTTP;
}
+static size_t max_payload_size_from_args(const grpc_channel_args *args) {
+ if (args != NULL) {
+ for (size_t i = 0; i < args->num_args; ++i) {
+ if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET)) {
+ if (args->args[i].type != GRPC_ARG_INTEGER) {
+ gpr_log(GPR_ERROR, "%s: must be an integer",
+ GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET);
+ } else {
+ return (size_t)args->args[i].value.integer;
+ }
+ }
+ }
+ }
+ return kMaxPayloadSizeForGet;
+}
+
static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args,
const char *transport_name) {
gpr_strvec v;
@@ -268,6 +423,8 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != NULL);
chand->static_scheme = scheme_from_args(args->channel_args);
+ chand->max_payload_size_for_get =
+ max_payload_size_from_args(args->channel_args);
chand->user_agent = grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_USER_AGENT,
user_agent_from_args(args->channel_args,
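
Illustrative sketch of the :method selection the client filter now performs (not code from the patch; choose_method and its parameters are made-up names, and the size comparison is approximate):

/* Illustrative only: summarizes how the filter picks the :method mdelem. */
static grpc_mdelem *choose_method(bool cacheable, bool idempotent,
                                  bool all_payload_available,
                                  size_t payload_len,
                                  size_t max_payload_size_for_get) {
  if (cacheable && all_payload_available &&
      payload_len <= max_payload_size_for_get) {
    /* The payload rides in the grpc-payload-bin header of a GET request. */
    return GRPC_MDELEM_METHOD_GET;
  }
  if (idempotent) {
    return GRPC_MDELEM_METHOD_PUT;
  }
  return GRPC_MDELEM_METHOD_POST;
}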
diff --git a/src/core/lib/channel/http_client_filter.h b/src/core/lib/channel/http_client_filter.h
index 47081175ea..9e6e106e9c 100644
--- a/src/core/lib/channel/http_client_filter.h
+++ b/src/core/lib/channel/http_client_filter.h
@@ -41,4 +41,7 @@ extern const grpc_channel_filter grpc_http_client_filter;
/* Channel arg to override the http2 :scheme header */
#define GRPC_ARG_HTTP2_SCHEME "grpc.http2_scheme"
+/* Channel arg to determine the maximum payload size eligible for a GET request */
+#define GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET "grpc.max_payload_size_for_get"
+
#endif /* GRPC_CORE_LIB_CHANNEL_HTTP_CLIENT_FILTER_H */
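
Minimal usage sketch for the new channel arg, assuming an insecure channel; the target address, the wrapper function, and the 16 KiB limit are placeholders:

#include <grpc/grpc.h>
#include "src/core/lib/channel/http_client_filter.h" /* for the arg key */

static grpc_channel *create_channel_with_get_limit(void) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET;
  arg.value.integer = 16 * 1024; /* placeholder: cap GET-eligible payloads at 16 KiB */
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create("localhost:50051", &args, NULL);
}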
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index 5ce51f9016..0f2bf97824 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -49,17 +49,32 @@ typedef struct call_data {
uint8_t seen_scheme;
uint8_t seen_te_trailers;
uint8_t seen_authority;
+ uint8_t seen_payload_bin;
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
+ /* flag to ensure payload_bin is delivered only once */
+ uint8_t payload_bin_delivered;
+
grpc_metadata_batch *recv_initial_metadata;
bool *recv_idempotent_request;
+ bool *recv_cacheable_request;
/** Closure to call when finished with the hs_on_recv hook */
grpc_closure *on_done_recv;
+ /** Closure to call when we retrieve the message read from the payload-bin
+ header field */
+ grpc_closure *recv_message_ready;
+ grpc_closure *on_complete;
+ grpc_byte_stream **pp_recv_message;
+ gpr_slice_buffer read_slice_buffer;
+ grpc_slice_buffer_stream read_stream;
+
/** Receive closures are chained: we inject this closure as the on_done_recv
up-call on transport_op, and remember to call our on_done_recv member
after handling it. */
grpc_closure hs_on_recv;
+ grpc_closure hs_on_complete;
+ grpc_closure hs_recv_message_ready;
} call_data;
typedef struct channel_data { uint8_t unused; } channel_data;
@@ -76,16 +91,20 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
/* Check if it is one of the headers we care about. */
if (md == GRPC_MDELEM_TE_TRAILERS || md == GRPC_MDELEM_METHOD_POST ||
- md == GRPC_MDELEM_METHOD_PUT || md == GRPC_MDELEM_SCHEME_HTTP ||
- md == GRPC_MDELEM_SCHEME_HTTPS ||
+ md == GRPC_MDELEM_METHOD_PUT || md == GRPC_MDELEM_METHOD_GET ||
+ md == GRPC_MDELEM_SCHEME_HTTP || md == GRPC_MDELEM_SCHEME_HTTPS ||
md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
/* swallow it */
if (md == GRPC_MDELEM_METHOD_POST) {
calld->seen_method = 1;
*calld->recv_idempotent_request = false;
+ *calld->recv_cacheable_request = false;
} else if (md == GRPC_MDELEM_METHOD_PUT) {
calld->seen_method = 1;
*calld->recv_idempotent_request = true;
+ } else if (md == GRPC_MDELEM_METHOD_GET) {
+ calld->seen_method = 1;
+ *calld->recv_cacheable_request = true;
} else if (md->key == GRPC_MDSTR_SCHEME) {
calld->seen_scheme = 1;
} else if (md == GRPC_MDELEM_TE_TRAILERS) {
@@ -137,6 +156,16 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
GRPC_MDSTR_AUTHORITY, GRPC_MDSTR_REF(md->value));
calld->seen_authority = 1;
return authority;
+ } else if (md->key == GRPC_MDSTR_GRPC_PAYLOAD_BIN) {
+ /* Retrieve the payload from the value of the 'grpc-payload-bin' header
+ field */
+ calld->seen_payload_bin = 1;
+ gpr_slice_buffer_init(&calld->read_slice_buffer);
+ gpr_slice_buffer_add(&calld->read_slice_buffer,
+ gpr_slice_ref(md->value->slice));
+ grpc_slice_buffer_stream_init(&calld->read_stream,
+ &calld->read_slice_buffer, 0);
+ return NULL;
} else {
return md;
}
@@ -189,6 +218,36 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_ERROR_UNREF(err);
}
+static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_error *err) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ /* Call recv_message_ready if we got the payload via the header field */
+ if (calld->seen_payload_bin && calld->recv_message_ready != NULL) {
+ *calld->pp_recv_message = calld->payload_bin_delivered
+ ? NULL
+ : (grpc_byte_stream *)&calld->read_stream;
+ calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
+ err);
+ calld->recv_message_ready = NULL;
+ calld->payload_bin_delivered = true;
+ }
+ calld->on_complete->cb(exec_ctx, calld->on_complete->cb_arg, err);
+}
+
+static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_error *err) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+ if (calld->seen_payload_bin) {
+ /* Do nothing: this is likely a GET request whose payload was carried in
+ the payload-bin header and will be delivered in the hs_on_complete
+ callback. */
+ } else {
+ calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
+ err);
+ }
+}
+
static void hs_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
@@ -206,11 +265,25 @@ static void hs_mutate_op(grpc_call_element *elem,
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
GPR_ASSERT(op->recv_idempotent_request != NULL);
+ GPR_ASSERT(op->recv_cacheable_request != NULL);
calld->recv_initial_metadata = op->recv_initial_metadata;
calld->recv_idempotent_request = op->recv_idempotent_request;
+ calld->recv_cacheable_request = op->recv_cacheable_request;
calld->on_done_recv = op->recv_initial_metadata_ready;
op->recv_initial_metadata_ready = &calld->hs_on_recv;
}
+
+ if (op->recv_message) {
+ calld->recv_message_ready = op->recv_message_ready;
+ calld->pp_recv_message = op->recv_message;
+ if (op->recv_message_ready) {
+ op->recv_message_ready = &calld->hs_recv_message_ready;
+ }
+ if (op->on_complete) {
+ calld->on_complete = op->on_complete;
+ op->on_complete = &calld->hs_on_complete;
+ }
+ }
}
static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
@@ -232,6 +305,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */
memset(calld, 0, sizeof(*calld));
grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
+ grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem);
+ grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem);
return GRPC_ERROR_NONE;
}
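
Illustrative sketch of the header-to-byte-stream conversion the server filter performs for grpc-payload-bin (byte_stream_from_mdelem is a made-up helper; the real code inlines this in server_filter):

/* Sketch: expose a header value as a byte stream, as the hunk above does. */
static void byte_stream_from_mdelem(grpc_mdelem *md, gpr_slice_buffer *slices,
                                    grpc_slice_buffer_stream *stream) {
  gpr_slice_buffer_init(slices);
  /* Take a ref so the stream owns its own copy of the header bytes. */
  gpr_slice_buffer_add(slices, gpr_slice_ref(md->value->slice));
  grpc_slice_buffer_stream_init(stream, slices, 0);
}

The resulting stream is parked on the call data and handed to the saved recv_message_ready closure from hs_on_complete, with payload_bin_delivered ensuring it is delivered at most once.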
diff --git a/src/core/lib/security/context/security_context.c b/src/core/lib/security/context/security_context.c
index 127b13ee50..2204fadf54 100644
--- a/src/core/lib/security/context/security_context.c
+++ b/src/core/lib/security/context/security_context.c
@@ -99,6 +99,9 @@ void grpc_client_security_context_destroy(void *ctx) {
grpc_client_security_context *c = (grpc_client_security_context *)ctx;
grpc_call_credentials_unref(c->creds);
GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context");
+ if (c->extension.instance != NULL && c->extension.destroy != NULL) {
+ c->extension.destroy(c->extension.instance);
+ }
gpr_free(ctx);
}
@@ -114,6 +117,9 @@ grpc_server_security_context *grpc_server_security_context_create(void) {
void grpc_server_security_context_destroy(void *ctx) {
grpc_server_security_context *c = (grpc_server_security_context *)ctx;
GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "server_security_context");
+ if (c->extension.instance != NULL && c->extension.destroy != NULL) {
+ c->extension.destroy(c->extension.instance);
+ }
gpr_free(ctx);
}
diff --git a/src/core/lib/security/context/security_context.h b/src/core/lib/security/context/security_context.h
index 4e7666dfe3..1e131a0c23 100644
--- a/src/core/lib/security/context/security_context.h
+++ b/src/core/lib/security/context/security_context.h
@@ -84,6 +84,16 @@ void grpc_auth_context_unref(grpc_auth_context *policy);
void grpc_auth_property_reset(grpc_auth_property *property);
+/* --- grpc_security_context_extension ---
+
+ Extension to the security context that may be set by a filter and accessed
+ later by a higher-level method on a grpc_call object. */
+
+typedef struct {
+ void *instance;
+ void (*destroy)(void *);
+} grpc_security_context_extension;
+
/* --- grpc_client_security_context ---
Internal client-side security context. */
@@ -91,6 +101,7 @@ void grpc_auth_property_reset(grpc_auth_property *property);
typedef struct {
grpc_call_credentials *creds;
grpc_auth_context *auth_context;
+ grpc_security_context_extension extension;
} grpc_client_security_context;
grpc_client_security_context *grpc_client_security_context_create(void);
@@ -102,6 +113,7 @@ void grpc_client_security_context_destroy(void *ctx);
typedef struct {
grpc_auth_context *auth_context;
+ grpc_security_context_extension extension;
} grpc_server_security_context;
grpc_server_security_context *grpc_server_security_context_create(void);
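
Hypothetical example of a filter using the new extension hook; my_auth_state, my_auth_state_destroy, attach_auth_state, and the token value are illustrative names only:

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/security/context/security_context.h"

typedef struct { char *token; } my_auth_state;

static void my_auth_state_destroy(void *p) {
  my_auth_state *s = p;
  gpr_free(s->token);
  gpr_free(s);
}

/* Called from a filter that owns the context; ctx outlives this call. */
static void attach_auth_state(grpc_client_security_context *ctx) {
  my_auth_state *state = gpr_malloc(sizeof(*state));
  state->token = gpr_strdup("example-token");
  ctx->extension.instance = state;
  ctx->extension.destroy = my_auth_state_destroy;
  /* grpc_client_security_context_destroy() will now invoke
     my_auth_state_destroy(state) when the context is torn down. */
}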
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 0827a1e181..56fb80e92e 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -149,6 +149,7 @@ struct call_data {
grpc_metadata_batch *recv_initial_metadata;
bool recv_idempotent_request;
+ bool recv_cacheable_request;
grpc_metadata_array initial_metadata;
request_matcher *request_matcher;
@@ -501,9 +502,12 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
&rc->data.batch.details->method_capacity, calld->path);
rc->data.batch.details->deadline = calld->deadline;
rc->data.batch.details->flags =
- 0 | (calld->recv_idempotent_request
- ? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST
- : 0);
+ (calld->recv_idempotent_request
+ ? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST
+ : 0) |
+ (calld->recv_cacheable_request
+ ? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST
+ : 0);
break;
case REGISTERED_CALL:
*rc->data.registered.deadline = calld->deadline;
@@ -783,6 +787,7 @@ static void server_mutate_op(grpc_call_element *elem,
calld->on_done_recv_initial_metadata = op->recv_initial_metadata_ready;
op->recv_initial_metadata_ready = &calld->server_on_recv_initial_metadata;
op->recv_idempotent_request = &calld->recv_idempotent_request;
+ op->recv_cacheable_request = &calld->recv_cacheable_request;
}
}
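
Sketch of how a server application can observe the new bit alongside the existing idempotent bit in grpc_call_details.flags; completion-queue plumbing is omitted:

grpc_call_details details;
grpc_call_details_init(&details);
/* ... grpc_server_request_call(...) and completion-queue handling omitted ... */
if (details.flags & GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) {
  /* The request arrived as an HTTP GET (payload carried in grpc-payload-bin). */
}
if (details.flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) {
  /* The request arrived as an HTTP PUT. */
}
grpc_call_details_destroy(&details);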
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 8f3e5b5b40..fce591f346 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -51,15 +51,15 @@ uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2] =
- {11, 33, 10, 33, 12, 33, 12, 49, 13, 33, 14, 33, 15, 33, 16, 33, 17, 33,
+ {11, 33, 10, 33, 12, 33, 12, 50, 13, 33, 14, 33, 15, 33, 16, 33, 17, 33,
19, 33, 20, 33, 21, 33, 22, 33, 23, 33, 24, 33, 25, 33, 26, 33, 27, 33,
28, 18, 28, 33, 29, 33, 30, 33, 34, 33, 35, 33, 36, 33, 37, 33, 40, 31,
- 40, 32, 40, 48, 40, 53, 40, 54, 40, 55, 40, 56, 42, 31, 42, 48, 42, 53,
- 45, 0, 45, 1, 45, 2, 50, 33, 57, 33, 58, 33, 59, 33, 60, 33, 61, 33,
- 62, 33, 63, 33, 64, 33, 65, 33, 66, 33, 67, 33, 68, 38, 68, 70, 68, 73,
- 69, 81, 69, 82, 71, 33, 72, 33, 74, 33, 75, 33, 76, 33, 77, 33, 78, 39,
- 78, 51, 78, 52, 79, 33, 80, 33, 83, 3, 83, 4, 83, 5, 83, 6, 83, 7,
- 83, 8, 83, 9, 84, 33, 85, 86, 87, 33, 88, 33, 89, 33, 90, 33, 91, 33};
+ 40, 32, 40, 49, 40, 54, 40, 55, 40, 56, 40, 57, 42, 31, 42, 49, 42, 54,
+ 46, 0, 46, 1, 46, 2, 51, 33, 58, 33, 59, 33, 60, 33, 61, 33, 62, 33,
+ 63, 33, 64, 33, 65, 33, 66, 33, 67, 33, 68, 33, 69, 38, 69, 71, 69, 74,
+ 70, 82, 70, 83, 72, 33, 73, 33, 75, 33, 76, 33, 77, 33, 78, 33, 79, 39,
+ 79, 52, 79, 53, 80, 33, 81, 33, 84, 3, 84, 4, 84, 5, 84, 6, 84, 7,
+ 84, 8, 84, 9, 85, 33, 86, 87, 88, 33, 89, 33, 90, 33, 91, 33, 92, 33};
const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
"0",
@@ -107,6 +107,7 @@ const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
"grpc-encoding",
"grpc-internal-encoding-request",
"grpc-message",
+ "grpc-payload-bin",
"grpc-status",
"grpc-timeout",
"grpc-tracing-bin",
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index b51bacac50..54b6f38be1 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -44,7 +44,7 @@
#include "src/core/lib/transport/metadata.h"
-#define GRPC_STATIC_MDSTR_COUNT 92
+#define GRPC_STATIC_MDSTR_COUNT 93
extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
/* "0" */
#define GRPC_MDSTR_0 (&grpc_static_mdstr_table[0])
@@ -136,101 +136,103 @@ extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
#define GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST (&grpc_static_mdstr_table[43])
/* "grpc-message" */
#define GRPC_MDSTR_GRPC_MESSAGE (&grpc_static_mdstr_table[44])
+/* "grpc-payload-bin" */
+#define GRPC_MDSTR_GRPC_PAYLOAD_BIN (&grpc_static_mdstr_table[45])
/* "grpc-status" */
-#define GRPC_MDSTR_GRPC_STATUS (&grpc_static_mdstr_table[45])
+#define GRPC_MDSTR_GRPC_STATUS (&grpc_static_mdstr_table[46])
/* "grpc-timeout" */
-#define GRPC_MDSTR_GRPC_TIMEOUT (&grpc_static_mdstr_table[46])
+#define GRPC_MDSTR_GRPC_TIMEOUT (&grpc_static_mdstr_table[47])
/* "grpc-tracing-bin" */
-#define GRPC_MDSTR_GRPC_TRACING_BIN (&grpc_static_mdstr_table[47])
+#define GRPC_MDSTR_GRPC_TRACING_BIN (&grpc_static_mdstr_table[48])
/* "gzip" */
-#define GRPC_MDSTR_GZIP (&grpc_static_mdstr_table[48])
+#define GRPC_MDSTR_GZIP (&grpc_static_mdstr_table[49])
/* "gzip, deflate" */
-#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (&grpc_static_mdstr_table[49])
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (&grpc_static_mdstr_table[50])
/* "host" */
-#define GRPC_MDSTR_HOST (&grpc_static_mdstr_table[50])
+#define GRPC_MDSTR_HOST (&grpc_static_mdstr_table[51])
/* "http" */
-#define GRPC_MDSTR_HTTP (&grpc_static_mdstr_table[51])
+#define GRPC_MDSTR_HTTP (&grpc_static_mdstr_table[52])
/* "https" */
-#define GRPC_MDSTR_HTTPS (&grpc_static_mdstr_table[52])
+#define GRPC_MDSTR_HTTPS (&grpc_static_mdstr_table[53])
/* "identity" */
-#define GRPC_MDSTR_IDENTITY (&grpc_static_mdstr_table[53])
+#define GRPC_MDSTR_IDENTITY (&grpc_static_mdstr_table[54])
/* "identity,deflate" */
-#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (&grpc_static_mdstr_table[54])
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (&grpc_static_mdstr_table[55])
/* "identity,deflate,gzip" */
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (&grpc_static_mdstr_table[55])
+ (&grpc_static_mdstr_table[56])
/* "identity,gzip" */
-#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (&grpc_static_mdstr_table[56])
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (&grpc_static_mdstr_table[57])
/* "if-match" */
-#define GRPC_MDSTR_IF_MATCH (&grpc_static_mdstr_table[57])
+#define GRPC_MDSTR_IF_MATCH (&grpc_static_mdstr_table[58])
/* "if-modified-since" */
-#define GRPC_MDSTR_IF_MODIFIED_SINCE (&grpc_static_mdstr_table[58])
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (&grpc_static_mdstr_table[59])
/* "if-none-match" */
-#define GRPC_MDSTR_IF_NONE_MATCH (&grpc_static_mdstr_table[59])
+#define GRPC_MDSTR_IF_NONE_MATCH (&grpc_static_mdstr_table[60])
/* "if-range" */
-#define GRPC_MDSTR_IF_RANGE (&grpc_static_mdstr_table[60])
+#define GRPC_MDSTR_IF_RANGE (&grpc_static_mdstr_table[61])
/* "if-unmodified-since" */
-#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[61])
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[62])
/* "last-modified" */
-#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[62])
+#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[63])
/* "link" */
-#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[63])
+#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[64])
/* "load-reporting-initial" */
-#define GRPC_MDSTR_LOAD_REPORTING_INITIAL (&grpc_static_mdstr_table[64])
+#define GRPC_MDSTR_LOAD_REPORTING_INITIAL (&grpc_static_mdstr_table[65])
/* "load-reporting-trailing" */
-#define GRPC_MDSTR_LOAD_REPORTING_TRAILING (&grpc_static_mdstr_table[65])
+#define GRPC_MDSTR_LOAD_REPORTING_TRAILING (&grpc_static_mdstr_table[66])
/* "location" */
-#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[66])
+#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[67])
/* "max-forwards" */
-#define GRPC_MDSTR_MAX_FORWARDS (&grpc_static_mdstr_table[67])
+#define GRPC_MDSTR_MAX_FORWARDS (&grpc_static_mdstr_table[68])
/* ":method" */
-#define GRPC_MDSTR_METHOD (&grpc_static_mdstr_table[68])
+#define GRPC_MDSTR_METHOD (&grpc_static_mdstr_table[69])
/* ":path" */
-#define GRPC_MDSTR_PATH (&grpc_static_mdstr_table[69])
+#define GRPC_MDSTR_PATH (&grpc_static_mdstr_table[70])
/* "POST" */
-#define GRPC_MDSTR_POST (&grpc_static_mdstr_table[70])
+#define GRPC_MDSTR_POST (&grpc_static_mdstr_table[71])
/* "proxy-authenticate" */
-#define GRPC_MDSTR_PROXY_AUTHENTICATE (&grpc_static_mdstr_table[71])
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (&grpc_static_mdstr_table[72])
/* "proxy-authorization" */
-#define GRPC_MDSTR_PROXY_AUTHORIZATION (&grpc_static_mdstr_table[72])
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (&grpc_static_mdstr_table[73])
/* "PUT" */
-#define GRPC_MDSTR_PUT (&grpc_static_mdstr_table[73])
+#define GRPC_MDSTR_PUT (&grpc_static_mdstr_table[74])
/* "range" */
-#define GRPC_MDSTR_RANGE (&grpc_static_mdstr_table[74])
+#define GRPC_MDSTR_RANGE (&grpc_static_mdstr_table[75])
/* "referer" */
-#define GRPC_MDSTR_REFERER (&grpc_static_mdstr_table[75])
+#define GRPC_MDSTR_REFERER (&grpc_static_mdstr_table[76])
/* "refresh" */
-#define GRPC_MDSTR_REFRESH (&grpc_static_mdstr_table[76])
+#define GRPC_MDSTR_REFRESH (&grpc_static_mdstr_table[77])
/* "retry-after" */
-#define GRPC_MDSTR_RETRY_AFTER (&grpc_static_mdstr_table[77])
+#define GRPC_MDSTR_RETRY_AFTER (&grpc_static_mdstr_table[78])
/* ":scheme" */
-#define GRPC_MDSTR_SCHEME (&grpc_static_mdstr_table[78])
+#define GRPC_MDSTR_SCHEME (&grpc_static_mdstr_table[79])
/* "server" */
-#define GRPC_MDSTR_SERVER (&grpc_static_mdstr_table[79])
+#define GRPC_MDSTR_SERVER (&grpc_static_mdstr_table[80])
/* "set-cookie" */
-#define GRPC_MDSTR_SET_COOKIE (&grpc_static_mdstr_table[80])
+#define GRPC_MDSTR_SET_COOKIE (&grpc_static_mdstr_table[81])
/* "/" */
-#define GRPC_MDSTR_SLASH (&grpc_static_mdstr_table[81])
+#define GRPC_MDSTR_SLASH (&grpc_static_mdstr_table[82])
/* "/index.html" */
-#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (&grpc_static_mdstr_table[82])
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (&grpc_static_mdstr_table[83])
/* ":status" */
-#define GRPC_MDSTR_STATUS (&grpc_static_mdstr_table[83])
+#define GRPC_MDSTR_STATUS (&grpc_static_mdstr_table[84])
/* "strict-transport-security" */
-#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (&grpc_static_mdstr_table[84])
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (&grpc_static_mdstr_table[85])
/* "te" */
-#define GRPC_MDSTR_TE (&grpc_static_mdstr_table[85])
+#define GRPC_MDSTR_TE (&grpc_static_mdstr_table[86])
/* "trailers" */
-#define GRPC_MDSTR_TRAILERS (&grpc_static_mdstr_table[86])
+#define GRPC_MDSTR_TRAILERS (&grpc_static_mdstr_table[87])
/* "transfer-encoding" */
-#define GRPC_MDSTR_TRANSFER_ENCODING (&grpc_static_mdstr_table[87])
+#define GRPC_MDSTR_TRANSFER_ENCODING (&grpc_static_mdstr_table[88])
/* "user-agent" */
-#define GRPC_MDSTR_USER_AGENT (&grpc_static_mdstr_table[88])
+#define GRPC_MDSTR_USER_AGENT (&grpc_static_mdstr_table[89])
/* "vary" */
-#define GRPC_MDSTR_VARY (&grpc_static_mdstr_table[89])
+#define GRPC_MDSTR_VARY (&grpc_static_mdstr_table[90])
/* "via" */
-#define GRPC_MDSTR_VIA (&grpc_static_mdstr_table[90])
+#define GRPC_MDSTR_VIA (&grpc_static_mdstr_table[91])
/* "www-authenticate" */
-#define GRPC_MDSTR_WWW_AUTHENTICATE (&grpc_static_mdstr_table[91])
+#define GRPC_MDSTR_WWW_AUTHENTICATE (&grpc_static_mdstr_table[92])
#define GRPC_STATIC_MDELEM_COUNT 81
extern grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index d0d0c2a461..8dc393fd61 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -129,6 +129,7 @@ typedef struct grpc_transport_stream_op {
/** Receive initial metadata from the stream, into provided metadata batch. */
grpc_metadata_batch *recv_initial_metadata;
bool *recv_idempotent_request;
+ bool *recv_cacheable_request;
/** Should be enqueued when initial metadata is ready to be processed. */
grpc_closure *recv_initial_metadata_ready;