author     Yuchen Zeng <zyc@google.com>  2017-02-27 13:35:41 -0800
committer  Yuchen Zeng <zyc@google.com>  2017-03-17 15:57:23 -0700
commit     2c977084c8fec4bc395976338683d1d480bab91e
tree       cf696a8403371b308a45d02445864159ba527e08 /src/core
parent     dc6b569d97aa54e987681f869f13acdd9c89834f
parent     dc720ca6bf27181c040cefcdb298d9dee8bf3058
Merge remote-tracking branch 'upstream/master' into cares_bazel_rule
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/census/grpc_filter.c  4
-rw-r--r--  src/core/ext/client_channel/client_channel.c  579
-rw-r--r--  src/core/ext/client_channel/lb_policy.c  91
-rw-r--r--  src/core/ext/client_channel/lb_policy.h  85
-rw-r--r--  src/core/ext/client_channel/lb_policy_factory.h  1
-rw-r--r--  src/core/ext/client_channel/resolver.c  25
-rw-r--r--  src/core/ext/client_channel/resolver.h  35
-rw-r--r--  src/core/ext/client_channel/resolver_factory.h  1
-rw-r--r--  src/core/ext/client_channel/resolver_registry.c  4
-rw-r--r--  src/core/ext/client_channel/resolver_registry.h  3
-rw-r--r--  src/core/ext/client_channel/subchannel.c  6
-rw-r--r--  src/core/ext/lb_policy/grpclb/grpclb.c  173
-rw-r--r--  src/core/ext/lb_policy/pick_first/pick_first.c  181
-rw-r--r--  src/core/ext/lb_policy/round_robin/round_robin.c  100
-rw-r--r--  src/core/ext/load_reporting/load_reporting.c  20
-rw-r--r--  src/core/ext/load_reporting/load_reporting.h  17
-rw-r--r--  src/core/ext/load_reporting/load_reporting_filter.c  20
-rw-r--r--  src/core/ext/resolver/dns/native/dns_resolver.c  61
-rw-r--r--  src/core/ext/resolver/sockaddr/sockaddr_resolver.c  44
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.c  57
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c  39
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_settings.h  2
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_encoder.c  1
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h  54
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c  20
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.c  18
-rw-r--r--  src/core/ext/transport/cronet/client/secure/cronet_channel_create.c  13
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_api_dummy.c  12
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.c  246
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.h  43
-rw-r--r--  src/core/lib/channel/channel_stack.c  15
-rw-r--r--  src/core/lib/channel/channel_stack.h  2
-rw-r--r--  src/core/lib/channel/compress_filter.c  12
-rw-r--r--  src/core/lib/channel/connected_channel.c  2
-rw-r--r--  src/core/lib/channel/context.h  3
-rw-r--r--  src/core/lib/channel/deadline_filter.c  104
-rw-r--r--  src/core/lib/channel/deadline_filter.h  12
-rw-r--r--  src/core/lib/channel/handshaker.c  37
-rw-r--r--  src/core/lib/channel/handshaker.h  16
-rw-r--r--  src/core/lib/channel/http_client_filter.c  2
-rw-r--r--  src/core/lib/channel/http_server_filter.c  25
-rw-r--r--  src/core/lib/channel/message_size_filter.c  2
-rw-r--r--  src/core/lib/http/httpcli.c  5
-rw-r--r--  src/core/lib/http/httpcli.h  3
-rw-r--r--  src/core/lib/iomgr/error.h  4
-rw-r--r--  src/core/lib/iomgr/ev_epoll_linux.c  356
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c  70
-rw-r--r--  src/core/lib/iomgr/ev_posix.c  9
-rw-r--r--  src/core/lib/iomgr/ev_posix.h  4
-rw-r--r--  src/core/lib/iomgr/network_status_tracker.c  87
-rw-r--r--  src/core/lib/iomgr/pollset.h  3
-rw-r--r--  src/core/lib/iomgr/pollset_set.h  3
-rw-r--r--  src/core/lib/iomgr/pollset_set_uv.c  3
-rw-r--r--  src/core/lib/iomgr/pollset_set_windows.c  3
-rw-r--r--  src/core/lib/iomgr/pollset_uv.c  5
-rw-r--r--  src/core/lib/iomgr/pollset_windows.c  10
-rw-r--r--  src/core/lib/iomgr/resolve_address_uv.c  5
-rw-r--r--  src/core/lib/iomgr/socket_utils_windows.c  4
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.c  14
-rw-r--r--  src/core/lib/iomgr/tcp_uv.c  10
-rw-r--r--  src/core/lib/iomgr/timer_generic.c  28
-rw-r--r--  src/core/lib/iomgr/timer_generic.h  2
-rw-r--r--  src/core/lib/iomgr/timer_uv.c  12
-rw-r--r--  src/core/lib/iomgr/timer_uv.h  2
-rw-r--r--  src/core/lib/security/credentials/google_default/google_default_credentials.c  2
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_verifier.c  4
-rw-r--r--  src/core/lib/security/credentials/jwt/jwt_verifier.h  3
-rw-r--r--  src/core/lib/security/credentials/oauth2/oauth2_credentials.c  2
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.c  2
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.c  2
-rw-r--r--  src/core/lib/support/cpu_posix.c  9
-rw-r--r--  src/core/lib/support/sync_posix.c  10
-rw-r--r--  src/core/lib/surface/call.c  174
-rw-r--r--  src/core/lib/surface/call.h  1
-rw-r--r--  src/core/lib/surface/channel.c  6
-rw-r--r--  src/core/lib/surface/completion_queue.c  46
-rw-r--r--  src/core/lib/surface/completion_queue.h  3
-rw-r--r--  src/core/lib/surface/init.c  2
-rw-r--r--  src/core/lib/surface/lame_client.c  2
-rw-r--r--  src/core/lib/surface/server.c  9
-rw-r--r--  src/core/lib/transport/connectivity_state.c  45
-rw-r--r--  src/core/lib/transport/connectivity_state.h  20
-rw-r--r--  src/core/lib/transport/static_metadata.c  763
-rw-r--r--  src/core/lib/transport/static_metadata.h  221
-rw-r--r--  src/core/lib/transport/transport.h  4
-rw-r--r--  src/core/plugin_registry/grpc_cronet_plugin_registry.c  4
86 files changed, 2274 insertions, 1889 deletions
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 65cfe1fa90..b80d831557 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -127,7 +127,7 @@ static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
@@ -146,7 +146,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
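
The two hunks above track an upstream API change carried in by this merge: call-element constructors now take a const grpc_call_element_args *, making explicit that filters may read but must not mutate the args. A minimal sketch of a filter constructor under the new signature (the filter and its call_data layout here are hypothetical, not taken from the tree):

/* Sketch only: a no-op filter's call-element constructor using the
   const-qualified args introduced by this merge. */
typedef struct { int initialized; } call_data;

static grpc_error *noop_init_call_elem(grpc_exec_ctx *exec_ctx,
                                       grpc_call_element *elem,
                                       const grpc_call_element_args *args) {
  call_data *d = elem->call_data; /* per-call storage from the channel stack */
  GPR_ASSERT(d != NULL);
  memset(d, 0, sizeof(*d));
  d->initialized = 1;
  /* args is read-only: copy out anything needed after this returns. */
  return GRPC_ERROR_NONE;
}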
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index 06038bb5ba..21ba4301ff 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -51,6 +51,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/deadline_filter.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
@@ -160,13 +161,10 @@ typedef struct client_channel_channel_data {
/** client channel factory */
grpc_client_channel_factory *client_channel_factory;
- /** mutex protecting all variables below in this data structure */
- gpr_mu mu;
+ /** combiner protecting all variables below in this data structure */
+ grpc_combiner *combiner;
/** currently active load balancer */
- char *lb_policy_name;
grpc_lb_policy *lb_policy;
- /** service config in JSON form */
- char *service_config_json;
/** maps method names to method_parameters structs */
grpc_slice_hash_table *method_params_table;
/** incoming resolver result - set by resolver.next() */
@@ -183,6 +181,13 @@ typedef struct client_channel_channel_data {
grpc_channel_stack *owning_stack;
/** interested parties (owned) */
grpc_pollset_set *interested_parties;
+
+ /* the following properties are guarded by a mutex since APIs require them
+ to be instantaneously available */
+ gpr_mu info_mu;
+ char *info_lb_policy_name;
+ /** service config in JSON form */
+ char *info_service_config_json;
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
@@ -195,9 +200,9 @@ typedef struct {
grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
-static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
- grpc_lb_policy *lb_policy,
- grpc_connectivity_state current_state);
+static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
+ grpc_lb_policy *lb_policy,
+ grpc_connectivity_state current_state);
static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
channel_data *chand,
@@ -208,7 +213,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
state == GRPC_CHANNEL_SHUTDOWN) &&
chand->lb_policy != NULL) {
/* cancel picks with wait_for_ready=false */
- grpc_lb_policy_cancel_picks(
+ grpc_lb_policy_cancel_picks_locked(
exec_ctx, chand->lb_policy,
/* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
/* check= */ 0, GRPC_ERROR_REF(error));
@@ -218,54 +223,45 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
}
static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
- lb_policy_connectivity_watcher *w,
- grpc_error *error) {
+ void *arg, grpc_error *error) {
+ lb_policy_connectivity_watcher *w = arg;
grpc_connectivity_state publish_state = w->state;
- /* check if the notification is for a stale policy */
- if (w->lb_policy != w->chand->lb_policy) return;
-
- if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
- publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
- grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
- GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
- w->chand->lb_policy = NULL;
- }
- set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
- GRPC_ERROR_REF(error), "lb_changed");
- if (w->state != GRPC_CHANNEL_SHUTDOWN) {
- watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
+ /* check if the notification is for the latest policy */
+ if (w->lb_policy == w->chand->lb_policy) {
+ if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
+ publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
+ grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver);
+ GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
+ w->chand->lb_policy = NULL;
+ }
+ set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
+ GRPC_ERROR_REF(error), "lb_changed");
+ if (w->state != GRPC_CHANNEL_SHUTDOWN) {
+ watch_lb_policy_locked(exec_ctx, w->chand, w->lb_policy, w->state);
+ }
}
-}
-
-static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- lb_policy_connectivity_watcher *w = arg;
-
- gpr_mu_lock(&w->chand->mu);
- on_lb_policy_state_changed_locked(exec_ctx, w, error);
- gpr_mu_unlock(&w->chand->mu);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
gpr_free(w);
}
-static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
- grpc_lb_policy *lb_policy,
- grpc_connectivity_state current_state) {
+static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
+ grpc_lb_policy *lb_policy,
+ grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
- grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w,
- grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&w->on_changed, on_lb_policy_state_changed_locked, w,
+ grpc_combiner_scheduler(chand->combiner, false));
w->state = current_state;
w->lb_policy = lb_policy;
- grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
- &w->on_changed);
+ grpc_lb_policy_notify_on_state_change_locked(exec_ctx, lb_policy, &w->state,
+ &w->on_changed);
}
-static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
channel_data *chand = arg;
char *lb_policy_name = NULL;
grpc_lb_policy *lb_policy = NULL;
@@ -317,13 +313,14 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_lb_policy_args lb_policy_args;
lb_policy_args.args = chand->resolver_result;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
+ lb_policy_args.combiner = chand->combiner;
lb_policy =
grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
if (lb_policy != NULL) {
GRPC_LB_POLICY_REF(lb_policy, "config_change");
GRPC_ERROR_UNREF(state_error);
- state =
- grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
+ state = grpc_lb_policy_check_connectivity_locked(exec_ctx, lb_policy,
+ &state_error);
}
// Find service config.
channel_arg =
@@ -353,17 +350,18 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
chand->interested_parties);
}
- gpr_mu_lock(&chand->mu);
+ gpr_mu_lock(&chand->info_mu);
if (lb_policy_name != NULL) {
- gpr_free(chand->lb_policy_name);
- chand->lb_policy_name = lb_policy_name;
+ gpr_free(chand->info_lb_policy_name);
+ chand->info_lb_policy_name = lb_policy_name;
}
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
if (service_config_json != NULL) {
- gpr_free(chand->service_config_json);
- chand->service_config_json = service_config_json;
+ gpr_free(chand->info_service_config_json);
+ chand->info_service_config_json = service_config_json;
}
+ gpr_mu_unlock(&chand->info_mu);
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
@@ -386,15 +384,15 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
set_channel_connectivity_state_locked(
exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
if (lb_policy != NULL) {
- watch_lb_policy(exec_ctx, chand, lb_policy, state);
+ watch_lb_policy_locked(exec_ctx, chand, lb_policy, state);
}
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
- &chand->on_resolver_result_changed);
- gpr_mu_unlock(&chand->mu);
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
} else {
if (chand->resolver != NULL) {
- grpc_resolver_shutdown(exec_ctx, chand->resolver);
+ grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
}
@@ -404,11 +402,10 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
GPR_ARRAY_SIZE(refs)),
"resolver_gone");
- gpr_mu_unlock(&chand->mu);
}
if (exit_idle) {
- grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
+ grpc_lb_policy_exit_idle_locked(exec_ctx, lb_policy);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
}
@@ -426,20 +423,12 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_UNREF(state_error);
}
-static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_transport_op *op) {
+static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error_ignored) {
+ grpc_transport_op *op = arg;
+ grpc_channel_element *elem = op->transport_private.args[0];
channel_data *chand = elem->channel_data;
- grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
-
- GPR_ASSERT(op->set_accept_stream == false);
- if (op->bind_pollset != NULL) {
- grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
- op->bind_pollset);
- }
-
- gpr_mu_lock(&chand->mu);
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &chand->state_tracker, op->connectivity_state,
@@ -453,7 +442,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_closure_sched(exec_ctx, op->send_ping,
GRPC_ERROR_CREATE("Ping with no load balancing"));
} else {
- grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
+ grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
}
op->send_ping = NULL;
@@ -464,7 +453,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
set_channel_connectivity_state_locked(
exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
- grpc_resolver_shutdown(exec_ctx, chand->resolver);
+ grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
chand->resolver = NULL;
if (!chand->started_resolving) {
@@ -482,25 +471,48 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
}
- gpr_mu_unlock(&chand->mu);
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "start_transport_op");
+
+ grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+}
+
+static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_transport_op *op) {
+ channel_data *chand = elem->channel_data;
+
+ GPR_ASSERT(op->set_accept_stream == false);
+ if (op->bind_pollset != NULL) {
+ grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
+ op->bind_pollset);
+ }
+
+ op->transport_private.args[0] = elem;
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op");
+ grpc_closure_sched(
+ exec_ctx, grpc_closure_init(
+ &op->transport_private.closure, start_transport_op_locked,
+ op, grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_ERROR_NONE);
}
static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *info) {
channel_data *chand = elem->channel_data;
- gpr_mu_lock(&chand->mu);
+ gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != NULL) {
- *info->lb_policy_name = chand->lb_policy_name == NULL
+ *info->lb_policy_name = chand->info_lb_policy_name == NULL
? NULL
- : gpr_strdup(chand->lb_policy_name);
+ : gpr_strdup(chand->info_lb_policy_name);
}
if (info->service_config_json != NULL) {
- *info->service_config_json = chand->service_config_json == NULL
- ? NULL
- : gpr_strdup(chand->service_config_json);
+ *info->service_config_json =
+ chand->info_service_config_json == NULL
+ ? NULL
+ : gpr_strdup(chand->info_service_config_json);
}
- gpr_mu_unlock(&chand->mu);
+ gpr_mu_unlock(&chand->info_mu);
}
/* Constructor for channel_data */
@@ -512,11 +524,12 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
- gpr_mu_init(&chand->mu);
+ chand->combiner = grpc_combiner_create(NULL);
+ gpr_mu_init(&chand->info_mu);
chand->owning_stack = args->channel_stack;
grpc_closure_init(&chand->on_resolver_result_changed,
- on_resolver_result_changed, chand,
- grpc_schedule_on_exec_ctx);
+ on_resolver_result_changed_locked, chand,
+ grpc_combiner_scheduler(chand->combiner, false));
chand->interested_parties = grpc_pollset_set_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
@@ -539,7 +552,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->resolver = grpc_resolver_create(
exec_ctx, proxy_name != NULL ? proxy_name : arg->value.string,
new_args != NULL ? new_args : args->channel_args,
- chand->interested_parties);
+ chand->interested_parties, chand->combiner);
if (proxy_name != NULL) gpr_free(proxy_name);
if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
if (chand->resolver == NULL) {
@@ -548,13 +561,23 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
+static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_resolver *resolver = arg;
+ grpc_resolver_shutdown_locked(exec_ctx, resolver);
+ GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
+}
+
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
if (chand->resolver != NULL) {
- grpc_resolver_shutdown(exec_ctx, chand->resolver);
- GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_create(shutdown_resolver_locked, chand->resolver,
+ grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_ERROR_NONE);
}
if (chand->client_channel_factory != NULL) {
grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory);
@@ -565,14 +588,15 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
- gpr_free(chand->lb_policy_name);
- gpr_free(chand->service_config_json);
+ gpr_free(chand->info_lb_policy_name);
+ gpr_free(chand->info_service_config_json);
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
- grpc_pollset_set_destroy(chand->interested_parties);
- gpr_mu_destroy(&chand->mu);
+ grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
+ GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel");
+ gpr_mu_destroy(&chand->info_mu);
}
/*************************************************************************
@@ -615,8 +639,6 @@ typedef struct client_channel_call_data {
grpc_subchannel_call */
gpr_atm subchannel_call;
- gpr_mu mu;
-
subchannel_creation_phase creation_phase;
grpc_connected_subchannel *connected_subchannel;
grpc_polling_entity *pollent;
@@ -661,52 +683,32 @@ static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld,
GRPC_ERROR_UNREF(error);
}
-typedef struct {
- grpc_transport_stream_op **ops;
- size_t nops;
- grpc_subchannel_call *call;
-} retry_ops_args;
-
-static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
- retry_ops_args *a = args;
- size_t i;
- for (i = 0; i < a->nops; i++) {
- grpc_subchannel_call_process_op(exec_ctx, a->call, a->ops[i]);
- }
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
- gpr_free(a->ops);
- gpr_free(a);
-}
-
static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
if (calld->waiting_ops_count == 0) {
return;
}
- retry_ops_args *a = gpr_malloc(sizeof(*a));
- a->ops = calld->waiting_ops;
- a->nops = calld->waiting_ops_count;
- a->call = GET_CALL(calld);
- if (a->call == CANCELLED_CALL) {
- gpr_free(a);
+ grpc_subchannel_call *call = GET_CALL(calld);
+ grpc_transport_stream_op **ops = calld->waiting_ops;
+ size_t nops = calld->waiting_ops_count;
+ if (call == CANCELLED_CALL) {
fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED);
return;
}
calld->waiting_ops = NULL;
calld->waiting_ops_count = 0;
calld->waiting_ops_capacity = 0;
- GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
- grpc_closure_sched(
- exec_ctx, grpc_closure_create(retry_ops, a, grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
+ for (size_t i = 0; i < nops; i++) {
+ grpc_subchannel_call_process_op(exec_ctx, call, ops[i]);
+ }
+ gpr_free(ops);
}
-static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
grpc_call_element *elem = arg;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- gpr_mu_lock(&calld->mu);
GPR_ASSERT(calld->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
@@ -742,7 +744,6 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
(gpr_atm)(uintptr_t)subchannel_call);
retry_waiting_locked(exec_ctx, calld);
}
- gpr_mu_unlock(&calld->mu);
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
}
@@ -768,37 +769,35 @@ typedef struct {
/** Return true if subchannel is available immediately (in which case on_ready
should not be called), or false otherwise (in which case on_ready should be
called when the subchannel is available). */
-static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata,
- uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel,
- grpc_closure *on_ready, grpc_error *error);
-
-static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static bool pick_subchannel_locked(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
+ grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready,
+ grpc_error *error);
+
+static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
continue_picking_args *cpa = arg;
if (cpa->connected_subchannel == NULL) {
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
} else {
- call_data *calld = cpa->elem->call_data;
- gpr_mu_lock(&calld->mu);
- if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
- cpa->initial_metadata_flags, cpa->connected_subchannel,
- cpa->on_ready, GRPC_ERROR_NONE)) {
+ if (pick_subchannel_locked(exec_ctx, cpa->elem, cpa->initial_metadata,
+ cpa->initial_metadata_flags,
+ cpa->connected_subchannel, cpa->on_ready,
+ GRPC_ERROR_NONE)) {
grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
}
- gpr_mu_unlock(&calld->mu);
}
gpr_free(cpa);
}
-static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata,
- uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel,
- grpc_closure *on_ready, grpc_error *error) {
+static bool pick_subchannel_locked(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
+ grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready,
+ grpc_error *error) {
GPR_TIMER_BEGIN("pick_subchannel", 0);
channel_data *chand = elem->channel_data;
@@ -808,11 +807,11 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GPR_ASSERT(connected_subchannel);
- gpr_mu_lock(&chand->mu);
if (initial_metadata == NULL) {
if (chand->lb_policy != NULL) {
- grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
- connected_subchannel, GRPC_ERROR_REF(error));
+ grpc_lb_policy_cancel_pick_locked(exec_ctx, chand->lb_policy,
+ connected_subchannel,
+ GRPC_ERROR_REF(error));
}
for (closure = chand->waiting_for_config_closures.head; closure != NULL;
closure = closure->next_data.next) {
@@ -824,7 +823,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
}
}
- gpr_mu_unlock(&chand->mu);
GPR_TIMER_END("pick_subchannel", 0);
GRPC_ERROR_UNREF(error);
return true;
@@ -833,7 +831,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
if (chand->lb_policy != NULL) {
grpc_lb_policy *lb_policy = chand->lb_policy;
GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
- gpr_mu_unlock(&chand->mu);
// If the application explicitly set wait_for_ready, use that.
// Otherwise, if the service config specified a value for this
// method, use that.
@@ -853,7 +850,7 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_lb_policy_pick_args inputs = {
initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
gpr_inf_future(GPR_CLOCK_MONOTONIC)};
- const bool result = grpc_lb_policy_pick(
+ const bool result = grpc_lb_policy_pick_locked(
exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
GPR_TIMER_END("pick_subchannel", 0);
@@ -862,8 +859,9 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = true;
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
- &chand->on_resolver_result_changed);
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
}
if (chand->resolver != NULL) {
cpa = gpr_malloc(sizeof(*cpa));
@@ -872,88 +870,66 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
cpa->connected_subchannel = connected_subchannel;
cpa->on_ready = on_ready;
cpa->elem = elem;
- grpc_closure_init(&cpa->closure, continue_picking, cpa,
- grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&cpa->closure, continue_picking_locked, cpa,
+ grpc_combiner_scheduler(chand->combiner, true));
grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
GRPC_ERROR_NONE);
} else {
grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"));
}
- gpr_mu_unlock(&chand->mu);
GPR_TIMER_END("pick_subchannel", 0);
return false;
}
-// The logic here is fairly complicated, due to (a) the fact that we
-// need to handle the case where we receive the send op before the
-// initial metadata op, and (b) the need for efficiency, especially in
-// the streaming case.
-// TODO(ctiller): Explain this more thoroughly.
-static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op) {
- call_data *calld = elem->call_data;
+static void start_transport_stream_op_locked_inner(grpc_exec_ctx *exec_ctx,
+ grpc_transport_stream_op *op,
+ grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
- /* try to (atomically) get the call */
- grpc_subchannel_call *call = GET_CALL(calld);
- GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
- if (call == CANCELLED_CALL) {
- grpc_transport_stream_op_finish_with_failure(
- exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
- GPR_TIMER_END("cc_start_transport_stream_op", 0);
- return;
- }
- if (call != NULL) {
- grpc_subchannel_call_process_op(exec_ctx, call, op);
- GPR_TIMER_END("cc_start_transport_stream_op", 0);
- return;
- }
- /* we failed; lock and figure out what to do */
- gpr_mu_lock(&calld->mu);
-retry:
+ call_data *calld = elem->call_data;
+ grpc_subchannel_call *call;
+
/* need to recheck that another thread hasn't set the call */
call = GET_CALL(calld);
if (call == CANCELLED_CALL) {
- gpr_mu_unlock(&calld->mu);
grpc_transport_stream_op_finish_with_failure(
exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
- GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ /* early out */
return;
}
if (call != NULL) {
- gpr_mu_unlock(&calld->mu);
grpc_subchannel_call_process_op(exec_ctx, call, op);
- GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ /* early out */
return;
}
/* if this is a cancellation, then we can raise our cancelled flag */
if (op->cancel_error != GRPC_ERROR_NONE) {
if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
(gpr_atm)(uintptr_t)CANCELLED_CALL)) {
- goto retry;
+ /* recurse to retry */
+ start_transport_stream_op_locked_inner(exec_ctx, op, elem);
+ /* early out */
+ return;
} else {
- // Stash a copy of cancel_error in our call data, so that we can use
- // it for subsequent operations. This ensures that if the call is
- // cancelled before any ops are passed down (e.g., if the deadline
- // is in the past when the call starts), we can return the right
- // error to the caller when the first op does get passed down.
+ /* Stash a copy of cancel_error in our call data, so that we can use
+ it for subsequent operations. This ensures that if the call is
+ cancelled before any ops are passed down (e.g., if the deadline
+ is in the past when the call starts), we can return the right
+ error to the caller when the first op does get passed down. */
calld->cancel_error = GRPC_ERROR_REF(op->cancel_error);
switch (calld->creation_phase) {
case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
- pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel,
- NULL, GRPC_ERROR_REF(op->cancel_error));
+ pick_subchannel_locked(exec_ctx, elem, NULL, 0,
+ &calld->connected_subchannel, NULL,
+ GRPC_ERROR_REF(op->cancel_error));
break;
}
- gpr_mu_unlock(&calld->mu);
grpc_transport_stream_op_finish_with_failure(
exec_ctx, op, GRPC_ERROR_REF(op->cancel_error));
- GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ /* early out */
return;
}
}
@@ -962,16 +938,16 @@ retry:
calld->connected_subchannel == NULL &&
op->send_initial_metadata != NULL) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
- grpc_closure_init(&calld->next_step, subchannel_ready, elem,
- grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&calld->next_step, subchannel_ready_locked, elem,
+ grpc_combiner_scheduler(chand->combiner, true));
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
/* If a subchannel is not available immediately, the polling entity from
call_data should be provided to channel_data's interested_parties, so
that IO of the lb_policy and resolver could be done under it. */
- if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
- op->send_initial_metadata_flags,
- &calld->connected_subchannel, &calld->next_step,
- GRPC_ERROR_NONE)) {
+ if (pick_subchannel_locked(exec_ctx, elem, op->send_initial_metadata,
+ op->send_initial_metadata_flags,
+ &calld->connected_subchannel, &calld->next_step,
+ GRPC_ERROR_NONE)) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
} else {
@@ -994,31 +970,89 @@ retry:
gpr_atm_rel_store(&calld->subchannel_call,
(gpr_atm)(uintptr_t)subchannel_call);
retry_waiting_locked(exec_ctx, calld);
- goto retry;
+ /* recurse to retry */
+ start_transport_stream_op_locked_inner(exec_ctx, op, elem);
+ /* early out */
+ return;
}
/* nothing to be done but wait */
add_waiting_locked(calld, op);
- gpr_mu_unlock(&calld->mu);
+}
+
+static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error_ignored) {
+ GPR_TIMER_BEGIN("cc_start_transport_stream_op_locked", 0);
+
+ grpc_transport_stream_op *op = arg;
+ grpc_call_element *elem = op->handler_private.args[0];
+ call_data *calld = elem->call_data;
+
+ start_transport_stream_op_locked_inner(exec_ctx, op, elem);
+
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+ "start_transport_stream_op");
+ GPR_TIMER_END("cc_start_transport_stream_op_locked", 0);
+}
+
+/* The logic here is fairly complicated, due to (a) the fact that we
+ need to handle the case where we receive the send op before the
+ initial metadata op, and (b) the need for efficiency, especially in
+ the streaming case.
+
+ We use double-checked locking to initially see if initialization has been
+ performed. If it has not, we acquire the combiner and perform initialization.
+ If it has, we proceed on the fast path. */
+static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+ grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
+ /* try to (atomically) get the call */
+ grpc_subchannel_call *call = GET_CALL(calld);
+ GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
+ if (call == CANCELLED_CALL) {
+ grpc_transport_stream_op_finish_with_failure(
+ exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
+ GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ /* early out */
+ return;
+ }
+ if (call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, call, op);
+ GPR_TIMER_END("cc_start_transport_stream_op", 0);
+ /* early out */
+ return;
+ }
+ /* we failed; lock and figure out what to do */
+ GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op");
+ op->handler_private.args[0] = elem;
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_init(&op->handler_private.closure,
+ cc_start_transport_stream_op_locked, op,
+ grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_ERROR_NONE);
GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
// Gets data from the service config. Invoked when the resolver returns
// its initial result.
-static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
// If this is an error, there's no point in looking at the service config.
if (error == GRPC_ERROR_NONE) {
// Get the method config table from channel data.
- gpr_mu_lock(&chand->mu);
grpc_slice_hash_table *method_params_table = NULL;
if (chand->method_params_table != NULL) {
method_params_table =
grpc_slice_hash_table_ref(chand->method_params_table);
}
- gpr_mu_unlock(&chand->mu);
// If the method config table was present, use it.
if (method_params_table != NULL) {
const method_parameters *method_params = grpc_method_config_table_get(
@@ -1028,7 +1062,6 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
gpr_time_cmp(method_params->timeout, gpr_time_0(GPR_TIMESPAN)) != 0;
if (have_method_timeout ||
method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
- gpr_mu_lock(&calld->mu);
if (have_method_timeout) {
const gpr_timespec per_method_deadline =
gpr_time_add(calld->call_start_time, method_params->timeout);
@@ -1042,7 +1075,6 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
calld->wait_for_ready_from_service_config =
method_params->wait_for_ready;
}
- gpr_mu_unlock(&calld->mu);
}
}
grpc_slice_hash_table_unref(exec_ctx, method_params_table);
@@ -1051,43 +1083,25 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
}
-/* Constructor for call_data */
-static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_call_element_args *args) {
+static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error_ignored) {
+ grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- // Initialize data members.
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
- calld->path = grpc_slice_ref_internal(args->path);
- calld->call_start_time = args->start_time;
- calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
- calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
- calld->cancel_error = GRPC_ERROR_NONE;
- gpr_atm_rel_store(&calld->subchannel_call, 0);
- gpr_mu_init(&calld->mu);
- calld->connected_subchannel = NULL;
- calld->waiting_ops = NULL;
- calld->waiting_ops_count = 0;
- calld->waiting_ops_capacity = 0;
- calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
- calld->owning_call = args->call_stack;
- calld->pollent = NULL;
// If the resolver has already returned results, then we can access
// the service config parameters immediately. Otherwise, we need to
// defer that work until the resolver returns an initial result.
// TODO(roth): This code is almost but not quite identical to the code
// in read_service_config() above. It would be nice to find a way to
// combine them, to avoid having to maintain it twice.
- gpr_mu_lock(&chand->mu);
if (chand->lb_policy != NULL) {
// We already have a resolver result, so check for service config.
if (chand->method_params_table != NULL) {
grpc_slice_hash_table *method_params_table =
grpc_slice_hash_table_ref(chand->method_params_table);
- gpr_mu_unlock(&chand->mu);
method_parameters *method_params = grpc_method_config_table_get(
- exec_ctx, method_params_table, args->path);
+ exec_ctx, method_params_table, calld->path);
if (method_params != NULL) {
if (gpr_time_cmp(method_params->timeout,
gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) {
@@ -1101,24 +1115,53 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
}
}
grpc_slice_hash_table_unref(exec_ctx, method_params_table);
- } else {
- gpr_mu_unlock(&chand->mu);
}
} else {
// We don't yet have a resolver result, so register a callback to
// get the service config data once the resolver returns.
// Take a reference to the call stack to be owned by the callback.
GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
- grpc_closure_init(&calld->read_service_config, read_service_config, elem,
- grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&calld->read_service_config, read_service_config_locked,
+ elem, grpc_combiner_scheduler(chand->combiner, false));
grpc_closure_list_append(&chand->waiting_for_config_closures,
&calld->read_service_config, GRPC_ERROR_NONE);
- gpr_mu_unlock(&chand->mu);
}
// Start the deadline timer with the current deadline value. If we
// do not yet have service config data, then the timer may be reset
// later.
grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+ "initial_read_service_config");
+}
+
+/* Constructor for call_data */
+static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const grpc_call_element_args *args) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ // Initialize data members.
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
+ calld->path = grpc_slice_ref_internal(args->path);
+ calld->call_start_time = args->start_time;
+ calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+ calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
+ calld->cancel_error = GRPC_ERROR_NONE;
+ gpr_atm_rel_store(&calld->subchannel_call, 0);
+ calld->connected_subchannel = NULL;
+ calld->waiting_ops = NULL;
+ calld->waiting_ops_count = 0;
+ calld->waiting_ops_capacity = 0;
+ calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ calld->owning_call = args->call_stack;
+ calld->pollent = NULL;
+ GRPC_CALL_STACK_REF(calld->owning_call, "initial_read_service_config");
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_init(&calld->read_service_config,
+ initial_read_service_config_locked, elem,
+ grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_ERROR_NONE);
return GRPC_ERROR_NONE;
}
@@ -1136,7 +1179,6 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
}
GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
- gpr_mu_destroy(&calld->mu);
GPR_ASSERT(calld->waiting_ops_count == 0);
if (calld->connected_subchannel != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
@@ -1172,26 +1214,37 @@ const grpc_channel_filter grpc_client_channel_filter = {
"client-channel",
};
+static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error_ignored) {
+ channel_data *chand = arg;
+ if (chand->lb_policy != NULL) {
+ grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
+ } else {
+ chand->exit_idle_when_lb_policy_arrives = true;
+ if (!chand->started_resolving && chand->resolver != NULL) {
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
+ chand->started_resolving = true;
+ grpc_resolver_next_locked(exec_ctx, chand->resolver,
+ &chand->resolver_result,
+ &chand->on_resolver_result_changed);
+ }
+ }
+ GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect");
+}
+
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
channel_data *chand = elem->channel_data;
- grpc_connectivity_state out;
- gpr_mu_lock(&chand->mu);
- out = grpc_connectivity_state_check(&chand->state_tracker, NULL);
+ grpc_connectivity_state out =
+ grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
- if (chand->lb_policy != NULL) {
- grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
- } else {
- chand->exit_idle_when_lb_policy_arrives = true;
- if (!chand->started_resolving && chand->resolver != NULL) {
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
- chand->started_resolving = true;
- grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
- &chand->on_resolver_result_changed);
- }
- }
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_create(try_to_connect_locked, chand,
+ grpc_combiner_scheduler(chand->combiner, false)),
+ GRPC_ERROR_NONE);
}
- gpr_mu_unlock(&chand->mu);
return out;
}
@@ -1199,6 +1252,7 @@ typedef struct {
channel_data *chand;
grpc_pollset *pollset;
grpc_closure *on_complete;
+ grpc_connectivity_state *state;
grpc_closure my_closure;
} external_connectivity_watcher;
@@ -1211,7 +1265,16 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
"external_connectivity_watcher");
gpr_free(w);
- follow_up->cb(exec_ctx, follow_up->cb_arg, error);
+ grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));
+}
+
+static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error_ignored) {
+ external_connectivity_watcher *w = arg;
+ grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
+ grpc_schedule_on_exec_ctx);
+ grpc_connectivity_state_notify_on_state_change(
+ exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
}
void grpc_client_channel_watch_connectivity_state(
@@ -1222,13 +1285,13 @@ void grpc_client_channel_watch_connectivity_state(
w->chand = chand;
w->pollset = pollset;
w->on_complete = on_complete;
+ w->state = state;
grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
- grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
- grpc_schedule_on_exec_ctx);
GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
"external_connectivity_watcher");
- gpr_mu_lock(&chand->mu);
- grpc_connectivity_state_notify_on_state_change(
- exec_ctx, &chand->state_tracker, state, &w->my_closure);
- gpr_mu_unlock(&chand->mu);
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_init(&w->my_closure, watch_connectivity_state_locked, w,
+ grpc_combiner_scheduler(chand->combiner, true)),
+ GRPC_ERROR_NONE);
}
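
The thread running through this file is the replacement of chand->mu with a combiner: rather than locking, each entry point initializes a closure with grpc_combiner_scheduler(chand->combiner, ...) and grpc_closure_sched()s it, so every *_locked callback runs serialized with the others. A minimal sketch of that hop, under hypothetical names (my_state, do_work_locked, kick_work); the scheduler's boolean flag is passed as false to match most call sites in this diff:

/* Sketch: serializing state access through a combiner, in the style
   client_channel.c now uses.  my_state and both functions are
   hypothetical. */
typedef struct {
  grpc_combiner *combiner; /* e.g. from grpc_combiner_create(NULL) */
  grpc_closure closure;
  int counter; /* state guarded by the combiner, not by a mutex */
} my_state;

static void do_work_locked(grpc_exec_ctx *exec_ctx, void *arg,
                           grpc_error *error) {
  my_state *s = arg;
  /* Runs serialized with every other closure scheduled on s->combiner,
     so plain reads and writes of the guarded state are safe. */
  s->counter++;
}

static void kick_work(grpc_exec_ctx *exec_ctx, my_state *s) {
  grpc_closure_sched(
      exec_ctx,
      grpc_closure_init(&s->closure, do_work_locked, s,
                        grpc_combiner_scheduler(s->combiner, false)),
      GRPC_ERROR_NONE);
}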
diff --git a/src/core/ext/client_channel/lb_policy.c b/src/core/ext/client_channel/lb_policy.c
index 45ee72e2f0..aba51add53 100644
--- a/src/core/ext/client_channel/lb_policy.c
+++ b/src/core/ext/client_channel/lb_policy.c
@@ -32,14 +32,17 @@
*/
#include "src/core/ext/client_channel/lb_policy.h"
+#include "src/core/lib/iomgr/combiner.h"
#define WEAK_REF_BITS 16
void grpc_lb_policy_init(grpc_lb_policy *policy,
- const grpc_lb_policy_vtable *vtable) {
+ const grpc_lb_policy_vtable *vtable,
+ grpc_combiner *combiner) {
policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
policy->interested_parties = grpc_pollset_set_create();
+ policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy");
}
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
@@ -71,6 +74,13 @@ void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
+static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_lb_policy *policy = arg;
+ policy->vtable->shutdown_locked(exec_ctx, policy);
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
+}
+
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
@@ -79,10 +89,15 @@ void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
- policy->vtable->shutdown(exec_ctx, policy);
+ grpc_closure_sched(
+ exec_ctx,
+ grpc_closure_create(shutdown_locked, policy,
+ grpc_combiner_scheduler(policy->combiner, false)),
+ GRPC_ERROR_NONE);
+ } else {
+ grpc_lb_policy_weak_unref(exec_ctx,
+ policy REF_FUNC_PASS_ARGS("strong-unref"));
}
- grpc_lb_policy_weak_unref(exec_ctx,
- policy REF_FUNC_PASS_ARGS("strong-unref"));
}
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
@@ -94,53 +109,59 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
- grpc_pollset_set_destroy(policy->interested_parties);
+ grpc_pollset_set_destroy(exec_ctx, policy->interested_parties);
+ grpc_combiner *combiner = policy->combiner;
policy->vtable->destroy(exec_ctx, policy);
+ GRPC_COMBINER_UNREF(exec_ctx, combiner, "lb_policy");
}
}
-int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete) {
- return policy->vtable->pick(exec_ctx, policy, pick_args, target, user_data,
- on_complete);
+int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target,
+ void **user_data, grpc_closure *on_complete) {
+ return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
+ user_data, on_complete);
}
-void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error) {
- policy->vtable->cancel_pick(exec_ctx, policy, target, error);
+void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_connected_subchannel **target,
+ grpc_error *error) {
+ policy->vtable->cancel_pick_locked(exec_ctx, policy, target, error);
}
-void grpc_lb_policy_cancel_picks(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
- policy->vtable->cancel_picks(exec_ctx, policy, initial_metadata_flags_mask,
- initial_metadata_flags_eq, error);
+void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error) {
+ policy->vtable->cancel_picks_locked(exec_ctx, policy,
+ initial_metadata_flags_mask,
+ initial_metadata_flags_eq, error);
}
-void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
- policy->vtable->exit_idle(exec_ctx, policy);
+void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy) {
+ policy->vtable->exit_idle_locked(exec_ctx, policy);
}
-void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_closure *closure) {
- policy->vtable->ping_one(exec_ctx, policy, closure);
+void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_closure *closure) {
+ policy->vtable->ping_one_locked(exec_ctx, policy, closure);
}
-void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connectivity_state *state,
- grpc_closure *closure) {
- policy->vtable->notify_on_state_change(exec_ctx, policy, state, closure);
+void grpc_lb_policy_notify_on_state_change_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connectivity_state *state, grpc_closure *closure) {
+ policy->vtable->notify_on_state_change_locked(exec_ctx, policy, state,
+ closure);
}
-grpc_connectivity_state grpc_lb_policy_check_connectivity(
+grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error) {
- return policy->vtable->check_connectivity(exec_ctx, policy,
- connectivity_error);
+ return policy->vtable->check_connectivity_locked(exec_ctx, policy,
+ connectivity_error);
}
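
grpc_lb_policy_unref can no longer run shutdown inline: when the last strong ref drops, it schedules shutdown_locked onto the policy's combiner, and the matching weak unref moves inside that closure. A sketch of the packed strong/weak refcount this leans on; the fetch-add helper below is an assumption standing in for ref_mutate, whose body is not shown in this diff:

/* Sketch: strong and weak counts packed into one gpr_atm, as in
   grpc_lb_policy.  Strong refs live in the bits above WEAK_REF_BITS,
   weak refs below.  gpr_atm_full_fetch_add is an assumed stand-in for
   the ref_mutate helper this diff does not show. */
#define WEAK_REF_BITS 16

static bool drop_strong_ref_take_weak(gpr_atm *ref_pair) {
  /* -1 strong, +1 weak in one atomic step, so the policy stays weakly
     alive until the scheduled shutdown_locked releases that weak ref. */
  gpr_atm old_val = gpr_atm_full_fetch_add(
      ref_pair, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS));
  gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
  gpr_atm check = 1 << WEAK_REF_BITS;
  return (old_val & mask) == check; /* true: that was the last strong ref */
}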
diff --git a/src/core/ext/client_channel/lb_policy.h b/src/core/ext/client_channel/lb_policy.h
index 120c641edc..3405709c2c 100644
--- a/src/core/ext/client_channel/lb_policy.h
+++ b/src/core/ext/client_channel/lb_policy.h
@@ -51,6 +51,8 @@ struct grpc_lb_policy {
gpr_atm ref_pair;
/* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set *interested_parties;
+ /* combiner under which lb_policy actions take place */
+ grpc_combiner *combiner;
};
/** Extra arguments for an LB pick */
@@ -69,42 +71,44 @@ typedef struct grpc_lb_policy_pick_args {
struct grpc_lb_policy_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+ void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** \see grpc_lb_policy_pick */
- int (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete);
+ int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, void **user_data,
+ grpc_closure *on_complete);
/** \see grpc_lb_policy_cancel_pick */
- void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connected_subchannel **target, grpc_error *error);
+ void (*cancel_pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connected_subchannel **target,
+ grpc_error *error);
/** \see grpc_lb_policy_cancel_picks */
- void (*cancel_picks)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq, grpc_error *error);
+ void (*cancel_picks_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error);
/** \see grpc_lb_policy_ping_one */
- void (*ping_one)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_closure *closure);
+ void (*ping_one_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_closure *closure);
/** Try to enter a READY connectivity state */
- void (*exit_idle)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+ void (*exit_idle_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** check the current connectivity of the lb_policy */
- grpc_connectivity_state (*check_connectivity)(
+ grpc_connectivity_state (*check_connectivity_locked)(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the policy. Calling with a NULL \a
state cancels the subscription. */
- void (*notify_on_state_change)(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connectivity_state *state,
- grpc_closure *closure);
+ void (*notify_on_state_change_locked)(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_connectivity_state *state,
+ grpc_closure *closure);
};
/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
@@ -144,7 +148,8 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** called by concrete implementations to initialize the base struct */
void grpc_lb_policy_init(grpc_lb_policy *policy,
- const grpc_lb_policy_vtable *vtable);
+ const grpc_lb_policy_vtable *vtable,
+ grpc_combiner *combiner);
/** Finds an appropriate subchannel for a call, based on \a pick_args.
@@ -159,43 +164,45 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
-int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete);
+int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target,
+ void **user_data, grpc_closure *on_complete);
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
against one of the connected subchannels managed by \a policy. */
-void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_closure *closure);
+void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_closure *closure);
/** Cancel picks for \a target.
The \a on_complete callback of the pending picks will be invoked with \a
*target set to NULL. */
-void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error);
+void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ grpc_connected_subchannel **target,
+ grpc_error *error);
/** Cancel all pending picks whose \a initial_metadata_flags (as given
in the call to \a grpc_lb_policy_pick_locked) match \a initial_metadata_flags_eq
when AND'd with \a initial_metadata_flags_mask */
-void grpc_lb_policy_cancel_picks(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error);
+void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error);
/** Try to enter a READY connectivity state */
-void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *policy);
/* Call notify when the connectivity state of a channel changes from \a *state.
* Updates \a *state with the new state of the policy */
-void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connectivity_state *state,
- grpc_closure *closure);
+void grpc_lb_policy_notify_on_state_change_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ grpc_connectivity_state *state, grpc_closure *closure);
-grpc_connectivity_state grpc_lb_policy_check_connectivity(
+grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
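Every method in this vtable now carries a _locked suffix: the patch's contract that it must be invoked from inside the policy's combiner rather than under a per-policy mutex. Below is a minimal sketch (not part of the patch) of how code outside the combiner can honor that contract; do_exit_idle_locked and request_exit_idle are hypothetical names, policy->combiner is the member initialized by grpc_lb_policy_init above, and only calls that appear elsewhere in this diff are used.

    #include "src/core/ext/client_channel/lb_policy.h"
    #include "src/core/lib/iomgr/combiner.h"

    /* Runs inside policy->combiner, so the *_locked API is legal here. */
    static void do_exit_idle_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                    grpc_error *error) {
      grpc_lb_policy_exit_idle_locked(exec_ctx, (grpc_lb_policy *)arg);
    }

    /* Callable from any thread: hops into the combiner first. */
    static void request_exit_idle(grpc_exec_ctx *exec_ctx,
                                  grpc_lb_policy *policy) {
      grpc_closure_sched(
          exec_ctx,
          grpc_closure_create(do_exit_idle_locked, policy,
                              grpc_combiner_scheduler(policy->combiner, false)),
          GRPC_ERROR_NONE);
    }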
diff --git a/src/core/ext/client_channel/lb_policy_factory.h b/src/core/ext/client_channel/lb_policy_factory.h
index 9b8b03f982..27c12c0d73 100644
--- a/src/core/ext/client_channel/lb_policy_factory.h
+++ b/src/core/ext/client_channel/lb_policy_factory.h
@@ -107,6 +107,7 @@ grpc_arg grpc_lb_addresses_create_channel_arg(
typedef struct grpc_lb_policy_args {
grpc_client_channel_factory *client_channel_factory;
grpc_channel_args *args;
+ grpc_combiner *combiner;
} grpc_lb_policy_args;
struct grpc_lb_policy_factory_vtable {
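Every constructor of an LB policy must now thread a combiner through the factory args; create_rr_locked() in grpclb.c below shows the in-tree example. A hedged sketch of a call site, assuming grpc_lb_policy_factory_create_lb_policy is the usual factory entry point and make_policy is a hypothetical helper:

    #include <string.h>
    #include "src/core/ext/client_channel/lb_policy_factory.h"

    static grpc_lb_policy *make_policy(grpc_exec_ctx *exec_ctx,
                                       grpc_lb_policy_factory *factory,
                                       grpc_client_channel_factory *cc_factory,
                                       grpc_channel_args *channel_args,
                                       grpc_combiner *combiner) {
      grpc_lb_policy_args args;
      memset(&args, 0, sizeof(args));
      args.client_channel_factory = cc_factory;
      args.args = channel_args;
      args.combiner = combiner; /* the field added by this patch */
      return grpc_lb_policy_factory_create_lb_policy(exec_ctx, factory, &args);
    }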
diff --git a/src/core/ext/client_channel/resolver.c b/src/core/ext/client_channel/resolver.c
index 2ae4fe862e..b1a1faa6c9 100644
--- a/src/core/ext/client_channel/resolver.c
+++ b/src/core/ext/client_channel/resolver.c
@@ -32,10 +32,13 @@
*/
#include "src/core/ext/client_channel/resolver.h"
+#include "src/core/lib/iomgr/combiner.h"
void grpc_resolver_init(grpc_resolver *resolver,
- const grpc_resolver_vtable *vtable) {
+ const grpc_resolver_vtable *vtable,
+ grpc_combiner *combiner) {
resolver->vtable = vtable;
+ resolver->combiner = GRPC_COMBINER_REF(combiner, "resolver");
gpr_ref_init(&resolver->refs, 1);
}
@@ -62,20 +65,24 @@ void grpc_resolver_unref(grpc_resolver *resolver,
void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
#endif
if (gpr_unref(&resolver->refs)) {
+ grpc_combiner *combiner = resolver->combiner;
resolver->vtable->destroy(exec_ctx, resolver);
+ GRPC_COMBINER_UNREF(exec_ctx, combiner, "resolver");
}
}
-void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
- resolver->vtable->shutdown(exec_ctx, resolver);
+void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ resolver->vtable->shutdown_locked(exec_ctx, resolver);
}
-void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- resolver->vtable->channel_saw_error(exec_ctx, resolver);
+void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ resolver->vtable->channel_saw_error_locked(exec_ctx, resolver);
}
-void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result, grpc_closure *on_complete) {
- resolver->vtable->next(exec_ctx, resolver, result, on_complete);
+void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_channel_args **result,
+ grpc_closure *on_complete) {
+ resolver->vtable->next_locked(exec_ctx, resolver, result, on_complete);
}
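The unref path above is order-sensitive: vtable->destroy frees the resolver object, so the combiner pointer has to be copied out beforehand. The same sequence, restated as a sketch with the reasoning spelled out in comments:

    if (gpr_unref(&resolver->refs)) {
      /* 1. save the combiner: 'resolver' is about to be freed */
      grpc_combiner *combiner = resolver->combiner;
      /* 2. destroy() releases the resolver and everything it owns */
      resolver->vtable->destroy(exec_ctx, resolver);
      /* 3. only now drop the combiner ref taken in grpc_resolver_init */
      GRPC_COMBINER_UNREF(exec_ctx, combiner, "resolver");
    }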
diff --git a/src/core/ext/client_channel/resolver.h b/src/core/ext/client_channel/resolver.h
index 96ece92b9d..bbba424ca5 100644
--- a/src/core/ext/client_channel/resolver.h
+++ b/src/core/ext/client_channel/resolver.h
@@ -44,14 +44,16 @@ typedef struct grpc_resolver_vtable grpc_resolver_vtable;
struct grpc_resolver {
const grpc_resolver_vtable *vtable;
gpr_refcount refs;
+ grpc_combiner *combiner;
};
struct grpc_resolver_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result, grpc_closure *on_complete);
+ void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+ void (*channel_saw_error_locked)(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver);
+ void (*next_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_channel_args **result, grpc_closure *on_complete);
};
#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
@@ -70,21 +72,30 @@ void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
#endif
void grpc_resolver_init(grpc_resolver *resolver,
- const grpc_resolver_vtable *vtable);
+ const grpc_resolver_vtable *vtable,
+ grpc_combiner *combiner);
-void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
+void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver);
/** Notification that the channel has seen an error on some address.
- Can be used as a hint that re-resolution is desirable soon. */
-void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver);
+ Can be used as a hint that re-resolution is desirable soon.
+
+ Must be called from the combiner passed as a resolver_arg at construction
+ time. */
+void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver);
/** Get the next result from the resolver. Expected to set \a *result with
new channel args and then schedule \a on_complete for execution.
If resolution is fatally broken, set \a *result to NULL and
- schedule \a on_complete. */
-void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result, grpc_closure *on_complete);
+ schedule \a on_complete.
+
+ Must be called from the combiner passed as a resolver_arg at construction
+ time. */
+void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_channel_args **result,
+ grpc_closure *on_complete);
#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_H */
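Both grpc_resolver_channel_saw_error_locked and grpc_resolver_next_locked now carry an explicit "must be called from the combiner" contract. A minimal sketch of honoring it from outside the combiner; next_args, start_next_locked and on_resolved are hypothetical names:

    typedef struct {
      grpc_resolver *resolver;
      grpc_channel_args *result;
      grpc_closure on_resolved; /* completion handed to next_locked */
    } next_args;

    /* Scheduled onto resolver->combiner, so next_locked is legal here. */
    static void start_next_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                  grpc_error *error) {
      next_args *na = arg;
      grpc_resolver_next_locked(exec_ctx, na->resolver, &na->result,
                                &na->on_resolved);
    }

The closure wrapping start_next_locked would be created with grpc_combiner_scheduler(resolver->combiner, false) and dispatched via grpc_closure_sched, mirroring what the LB policies below do for their connectivity callbacks.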
diff --git a/src/core/ext/client_channel/resolver_factory.h b/src/core/ext/client_channel/resolver_factory.h
index 3792ddca18..e3cd99ec5a 100644
--- a/src/core/ext/client_channel/resolver_factory.h
+++ b/src/core/ext/client_channel/resolver_factory.h
@@ -50,6 +50,7 @@ typedef struct grpc_resolver_args {
grpc_uri *uri;
const grpc_channel_args *args;
grpc_pollset_set *pollset_set;
+ grpc_combiner *combiner;
} grpc_resolver_args;
struct grpc_resolver_factory_vtable {
diff --git a/src/core/ext/client_channel/resolver_registry.c b/src/core/ext/client_channel/resolver_registry.c
index 8e716e8e7e..1b93d0b3c9 100644
--- a/src/core/ext/client_channel/resolver_registry.c
+++ b/src/core/ext/client_channel/resolver_registry.c
@@ -132,7 +132,8 @@ static grpc_resolver_factory *resolve_factory(const char *target,
grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_args *args,
- grpc_pollset_set *pollset_set) {
+ grpc_pollset_set *pollset_set,
+ grpc_combiner *combiner) {
grpc_uri *uri = NULL;
char *canonical_target = NULL;
grpc_resolver_factory *factory =
@@ -143,6 +144,7 @@ grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
resolver_args.uri = uri;
resolver_args.args = args;
resolver_args.pollset_set = pollset_set;
+ resolver_args.combiner = combiner;
resolver =
grpc_resolver_factory_create_resolver(exec_ctx, factory, &resolver_args);
grpc_uri_destroy(uri);
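For callers of grpc_resolver_create (in practice the client channel), the only visible change is the extra argument. A hedged sketch, assuming target, channel_args, interested_parties and combiner are in scope at the call site:

    grpc_resolver *resolver = grpc_resolver_create(
        exec_ctx, target, channel_args, interested_parties, combiner);
    if (resolver == NULL) {
      /* no registered factory matched the target's URI scheme */
      gpr_log(GPR_ERROR, "could not instantiate resolver for %s", target);
    }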
diff --git a/src/core/ext/client_channel/resolver_registry.h b/src/core/ext/client_channel/resolver_registry.h
index a4606463eb..e2c189cf0c 100644
--- a/src/core/ext/client_channel/resolver_registry.h
+++ b/src/core/ext/client_channel/resolver_registry.h
@@ -65,7 +65,8 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory);
should not be NULL. */
grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_args *args,
- grpc_pollset_set *pollset_set);
+ grpc_pollset_set *pollset_set,
+ grpc_combiner *combiner);
/** Find a resolver factory given a name and return an (owned-by-the-caller)
* reference to it */
diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c
index aa036e883b..f2da148e49 100644
--- a/src/core/ext/client_channel/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -217,7 +217,7 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_slice_unref_internal(exec_ctx, c->initial_connect_string);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
grpc_connector_unref(exec_ctx, c->connector);
- grpc_pollset_set_destroy(c->pollset_set);
+ grpc_pollset_set_destroy(exec_ctx, c->pollset_set);
grpc_subchannel_key_destroy(exec_ctx, c->key);
gpr_free(c);
}
@@ -419,7 +419,7 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
grpc_error **error) {
grpc_connectivity_state state;
gpr_mu_lock(&c->mu);
- state = grpc_connectivity_state_check(&c->state_tracker, error);
+ state = grpc_connectivity_state_get(&c->state_tracker, error);
gpr_mu_unlock(&c->mu);
return state;
}
@@ -438,7 +438,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&w->subchannel->mu);
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher");
gpr_free(w);
- follow_up->cb(exec_ctx, follow_up->cb_arg, error);
+ grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
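One subtle change above: on_external_state_watcher_done no longer invokes follow_up->cb directly but goes through grpc_closure_run. That both honors the follow-up closure's own scheduler (a combiner-bound closure stays serialized) and explains the added GRPC_ERROR_REF: scheduling a closure transfers ownership of an error reference, while the error parameter of the callback is only borrowed. In sketch form:

    /* before: bypasses the closure's scheduler, borrows the error */
    follow_up->cb(exec_ctx, follow_up->cb_arg, error);

    /* after: respects the scheduler; run() consumes an error ref, so a
       borrowed error must be ref'd first */
    grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));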
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index ab62e5ed6a..b5210cb046 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -115,6 +115,7 @@
#include "src/core/ext/lb_policy/grpclb/grpclb_channel.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
@@ -285,9 +286,6 @@ typedef struct glb_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
- /** mutex protecting remaining members */
- gpr_mu mu;
-
/** who the client is trying to communicate with */
const char *server_name;
grpc_client_channel_factory *cc_factory;
@@ -492,9 +490,8 @@ static grpc_lb_addresses *process_serverlist_locked(
static bool update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_connectivity_state new_rr_state, grpc_error *new_rr_state_error) {
- grpc_error *curr_state_error;
- const grpc_connectivity_state curr_glb_state = grpc_connectivity_state_check(
- &glb_policy->state_tracker, &curr_state_error);
+ const grpc_connectivity_state curr_glb_state =
+ grpc_connectivity_state_check(&glb_policy->state_tracker);
/* The new connectivity status is a function of the previous one and the new
* input coming from the status of the RR policy.
@@ -558,9 +555,9 @@ static bool pick_from_internal_rr_locked(
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
GPR_ASSERT(rr_policy != NULL);
- const bool pick_done =
- grpc_lb_policy_pick(exec_ctx, rr_policy, pick_args, target,
- (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+ const bool pick_done = grpc_lb_policy_pick_locked(
+ exec_ctx, rr_policy, pick_args, target, (void **)&wc_arg->lb_token,
+ &wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick_locked call. Unref the RR policy. */
if (grpc_lb_glb_trace) {
@@ -591,6 +588,7 @@ static grpc_lb_policy *create_rr_locked(
grpc_lb_policy_args args;
memset(&args, 0, sizeof(args));
args.client_channel_factory = glb_policy->cc_factory;
+ args.combiner = glb_policy->base.combiner;
grpc_lb_addresses *addresses =
process_serverlist_locked(exec_ctx, serverlist);
@@ -609,8 +607,8 @@ static grpc_lb_policy *create_rr_locked(
return rr;
}
-static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error);
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
@@ -634,8 +632,8 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
grpc_error *new_rr_state_error = NULL;
const grpc_connectivity_state new_rr_state =
- grpc_lb_policy_check_connectivity(exec_ctx, new_rr_policy,
- &new_rr_state_error);
+ grpc_lb_policy_check_connectivity_locked(exec_ctx, new_rr_policy,
+ &new_rr_state_error);
/* Connectivity state is a function of the new RR policy just created */
const bool replace_old_rr = update_lb_connectivity_status_locked(
exec_ctx, glb_policy, new_rr_state, new_rr_state_error);
@@ -678,17 +676,18 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
rr_connectivity_data *rr_connectivity =
gpr_malloc(sizeof(rr_connectivity_data));
memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
- grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
- rr_connectivity, grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&rr_connectivity->on_change,
+ glb_rr_connectivity_changed_locked, rr_connectivity,
+ grpc_combiner_scheduler(glb_policy->base.combiner, false));
rr_connectivity->glb_policy = glb_policy;
rr_connectivity->state = new_rr_state;
/* Subscribe to changes to the connectivity of the new RR */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
- grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
- &rr_connectivity->state,
- &rr_connectivity->on_change);
- grpc_lb_policy_exit_idle(exec_ctx, glb_policy->rr_policy);
+ grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
+ &rr_connectivity->state,
+ &rr_connectivity->on_change);
+ grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
/* Update picks and pings in wait */
pending_pick *pp;
@@ -714,17 +713,16 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
- grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy,
- &pping->wrapped_notify_arg.wrapper_closure);
+ grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
+ &pping->wrapped_notify_arg.wrapper_closure);
}
}
-static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
rr_connectivity_data *rr_connectivity = arg;
glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
- gpr_mu_lock(&glb_policy->mu);
const bool shutting_down = glb_policy->shutting_down;
bool unref_needed = false;
GRPC_ERROR_REF(error);
@@ -741,11 +739,10 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
update_lb_connectivity_status_locked(exec_ctx, glb_policy,
rr_connectivity->state, error);
/* Resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
- grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
- &rr_connectivity->state,
- &rr_connectivity->on_change);
+ grpc_lb_policy_notify_on_state_change_locked(
+ exec_ctx, glb_policy->rr_policy, &rr_connectivity->state,
+ &rr_connectivity->on_change);
}
- gpr_mu_unlock(&glb_policy->mu);
if (unref_needed) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"rr_connectivity_cb");
@@ -900,8 +897,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
gpr_free(glb_policy);
return NULL;
}
- grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable);
- gpr_mu_init(&glb_policy->mu);
+ grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
"grpclb");
return &glb_policy->base;
@@ -919,13 +915,11 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
- gpr_mu_destroy(&glb_policy->mu);
gpr_free(glb_policy);
}
-static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
glb_policy->shutting_down = true;
pending_pick *pp = glb_policy->pending_picks;
@@ -942,7 +936,6 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
* while in the combiner: lb_on_server_status_received_locked, invoked due to
* the cancel, is scheduled on that same combiner */
grpc_call *lb_call = glb_policy->lb_call;
- gpr_mu_unlock(&glb_policy->mu);
/* glb_policy->lb_call and this local lb_call must be consistent at this point
* because glb_policy->lb_call is only assigned in lb_call_init_locked as part
@@ -968,11 +961,10 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
}
-static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
+static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target,
+ grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
@@ -988,16 +980,15 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&glb_policy->mu);
GRPC_ERROR_UNREF(error);
}
-static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
+static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
@@ -1013,7 +1004,6 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&glb_policy->mu);
GRPC_ERROR_UNREF(error);
}
@@ -1026,19 +1016,17 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
query_for_backends_locked(exec_ctx, glb_policy);
}
-static void glb_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
- gpr_mu_unlock(&glb_policy->mu);
}
-static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete) {
+static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, void **user_data,
+ grpc_closure *on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
grpc_closure_sched(
@@ -1049,7 +1037,6 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
glb_policy->deadline = pick_args->deadline;
bool pick_done;
@@ -1088,53 +1075,43 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pick_done = false;
}
- gpr_mu_unlock(&glb_policy->mu);
return pick_done;
}
-static grpc_connectivity_state glb_check_connectivity(
+static grpc_connectivity_state glb_check_connectivity_locked(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_error **connectivity_error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- grpc_connectivity_state st;
- gpr_mu_lock(&glb_policy->mu);
- st = grpc_connectivity_state_check(&glb_policy->state_tracker,
+ return grpc_connectivity_state_get(&glb_policy->state_tracker,
connectivity_error);
- gpr_mu_unlock(&glb_policy->mu);
- return st;
}
-static void glb_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
+static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
if (glb_policy->rr_policy) {
- grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy, closure);
+ grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
} else {
add_pending_ping(&glb_policy->pending_pings, closure);
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
}
- gpr_mu_unlock(&glb_policy->mu);
}
-static void glb_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
+static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- gpr_mu_lock(&glb_policy->mu);
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &glb_policy->state_tracker, current, notify);
-
- gpr_mu_unlock(&glb_policy->mu);
}
-static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error);
+static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
@@ -1163,11 +1140,11 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_grpclb_request_destroy(request);
grpc_closure_init(&glb_policy->lb_on_server_status_received,
- lb_on_server_status_received, glb_policy,
- grpc_schedule_on_exec_ctx);
+ lb_on_server_status_received_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_closure_init(&glb_policy->lb_on_response_received,
- lb_on_response_received, glb_policy,
- grpc_schedule_on_exec_ctx);
+ lb_on_response_received_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner, false));
gpr_backoff_init(&glb_policy->lb_call_backoff_state,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
@@ -1262,14 +1239,13 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
-static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
glb_lb_policy *glb_policy = arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
- gpr_mu_lock(&glb_policy->mu);
if (glb_policy->lb_response_payload != NULL) {
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
@@ -1343,20 +1319,17 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
- gpr_mu_unlock(&glb_policy->mu);
} else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received" weak ref taken in
* query_for_backends_locked() and reused in every reception loop */
- gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_response_received_empty_payload");
}
}
-static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
glb_lb_policy *glb_policy = arg;
- gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->shutting_down) {
if (grpc_lb_glb_trace) {
@@ -1366,15 +1339,13 @@ static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
}
- gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_on_retry_timer");
}
-static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = arg;
- gpr_mu_lock(&glb_policy->mu);
GPR_ASSERT(glb_policy->lb_call != NULL);
@@ -1409,21 +1380,27 @@ static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
- grpc_closure_init(&glb_policy->lb_on_call_retry, lb_call_on_retry_timer,
- glb_policy, grpc_schedule_on_exec_ctx);
+ grpc_closure_init(
+ &glb_policy->lb_on_call_retry, lb_call_on_retry_timer_locked,
+ glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner, false));
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
&glb_policy->lb_on_call_retry, now);
}
- gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received");
}
/* Code wiring the policy with the rest of the core */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
- glb_destroy, glb_shutdown, glb_pick,
- glb_cancel_pick, glb_cancel_picks, glb_ping_one,
- glb_exit_idle, glb_check_connectivity, glb_notify_on_state_change};
+ glb_destroy,
+ glb_shutdown_locked,
+ glb_pick_locked,
+ glb_cancel_pick_locked,
+ glb_cancel_picks_locked,
+ glb_ping_one_locked,
+ glb_exit_idle_locked,
+ glb_check_connectivity_locked,
+ glb_notify_on_state_change_locked};
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
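grpclb establishes the recipe that pick_first and round_robin repeat below: delete the gpr_mu, drop every lock/unlock pair, rename the affected functions *_locked, and re-create any closure that touches policy state with the combiner scheduler. The essential before/after, as a sketch (on_event/on_event_locked are stand-ins; the second scheduler argument is assumed to be the covered-by-poller flag, passed as false throughout this patch):

    /* before: the callback acquired the policy mutex itself */
    grpc_closure_init(&cb, on_event, p, grpc_schedule_on_exec_ctx);

    /* after: the combiner serializes the callback; no mutex needed */
    grpc_closure_init(&cb, on_event_locked, p,
                      grpc_combiner_scheduler(p->base.combiner, false));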
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index 9f2aa461be..501cb6d94d 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -38,6 +38,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
@@ -57,11 +58,11 @@ typedef struct {
grpc_closure connectivity_changed;
- /** the selected channel (a grpc_connected_subchannel) */
- gpr_atm selected;
+ /** remaining members are protected by the combiner */
+
+ /** the selected channel */
+ grpc_connected_subchannel *selected;
- /** mutex protecting remaining members */
- gpr_mu mu;
/** have we started picking? */
int started_picking;
/** are we shut down? */
@@ -77,32 +78,24 @@ typedef struct {
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
-#define GET_SELECTED(p) \
- ((grpc_connected_subchannel *)gpr_atm_acq_load(&(p)->selected))
-
static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- grpc_connected_subchannel *selected = GET_SELECTED(p);
size_t i;
GPR_ASSERT(p->pending_picks == NULL);
for (i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
- if (selected != NULL) {
- GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, selected, "picked_first");
+ if (p->selected != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
- gpr_mu_destroy(&p->mu);
gpr_free(p);
}
-static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
- grpc_connected_subchannel *selected;
- gpr_mu_lock(&p->mu);
- selected = GET_SELECTED(p);
p->shutdown = 1;
pp = p->pending_picks;
p->pending_picks = NULL;
@@ -110,15 +103,14 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE("Channel shutdown"), "shutdown");
/* cancel subscription */
- if (selected != NULL) {
+ if (p->selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, selected, NULL, NULL, &p->connectivity_changed);
+ exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
} else if (p->num_subchannels > 0) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
}
- gpr_mu_unlock(&p->mu);
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
@@ -128,12 +120,11 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
}
-static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
+static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target,
+ grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
- gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@@ -150,17 +141,15 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
-static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
+static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
- gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@@ -177,7 +166,6 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
@@ -192,63 +180,48 @@ static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
&p->connectivity_changed);
}
-static void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- gpr_mu_lock(&p->mu);
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- gpr_mu_unlock(&p->mu);
}
-static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete) {
+static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, void **user_data,
+ grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
/* Check for a selected channel: reads are serialized by the combiner */
- grpc_connected_subchannel *selected = GET_SELECTED(p);
- if (selected != NULL) {
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
+ if (p->selected != NULL) {
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
return 1;
}
- /* No subchannel selected yet, so acquire lock and then attempt again */
- gpr_mu_lock(&p->mu);
- selected = GET_SELECTED(p);
- if (selected) {
- gpr_mu_unlock(&p->mu);
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
- return 1;
- } else {
- if (!p->started_picking) {
- start_picking(exec_ctx, p);
- }
- pp = gpr_malloc(sizeof(*pp));
- pp->next = p->pending_picks;
- pp->target = target;
- pp->initial_metadata_flags = pick_args->initial_metadata_flags;
- pp->on_complete = on_complete;
- p->pending_picks = pp;
- gpr_mu_unlock(&p->mu);
- return 0;
+ /* No subchannel selected yet: start picking if needed and queue the pick */
+ if (!p->started_picking) {
+ start_picking(exec_ctx, p);
}
+ pp = gpr_malloc(sizeof(*pp));
+ pp->next = p->pending_picks;
+ pp->target = target;
+ pp->initial_metadata_flags = pick_args->initial_metadata_flags;
+ pp->on_complete = on_complete;
+ p->pending_picks = pp;
+ return 0;
}
-static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- pick_first_lb_policy *p = arg;
+static void destroy_subchannels_locked(grpc_exec_ctx *exec_ctx,
+ pick_first_lb_policy *p) {
size_t i;
size_t num_subchannels = p->num_subchannels;
grpc_subchannel **subchannels;
- gpr_mu_lock(&p->mu);
subchannels = p->subchannels;
p->num_subchannels = 0;
p->subchannels = NULL;
- gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
for (i = 0; i < num_subchannels; i++) {
@@ -258,25 +231,19 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(subchannels);
}
-static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
pick_first_lb_policy *p = arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
- grpc_connected_subchannel *selected;
GRPC_ERROR_REF(error);
- gpr_mu_lock(&p->mu);
-
- selected = GET_SELECTED(p);
-
if (p->shutdown) {
- gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_ERROR_UNREF(error);
return;
- } else if (selected != NULL) {
+ } else if (p->selected != NULL) {
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */
p->checking_connectivity = GRPC_CHANNEL_SHUTDOWN;
@@ -286,7 +253,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_SHUTDOWN) {
grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, selected, p->base.interested_parties,
+ exec_ctx, p->selected, p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
} else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
@@ -301,26 +268,21 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
"connecting_ready");
selected_subchannel = p->subchannels[p->checking_subchannel];
- selected =
- grpc_subchannel_get_connected_subchannel(selected_subchannel);
- GPR_ASSERT(selected != NULL);
- GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked_first");
+ p->selected = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected_subchannel),
+ "picked_first");
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
- gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
- grpc_closure_sched(exec_ctx,
- grpc_closure_create(destroy_subchannels, p,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
+ destroy_subchannels_locked(exec_ctx, p);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
- *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, selected, p->base.interested_parties,
+ exec_ctx, p->selected, p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
@@ -387,48 +349,44 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
}
- gpr_mu_unlock(&p->mu);
-
GRPC_ERROR_UNREF(error);
}
-static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_error **error) {
+static grpc_connectivity_state pf_check_connectivity_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- grpc_connectivity_state st;
- gpr_mu_lock(&p->mu);
- st = grpc_connectivity_state_check(&p->state_tracker, error);
- gpr_mu_unlock(&p->mu);
- return st;
+ return grpc_connectivity_state_get(&p->state_tracker, error);
}
-static void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
+static void pf_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- gpr_mu_lock(&p->mu);
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
- gpr_mu_unlock(&p->mu);
}
-static void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
+static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- grpc_connected_subchannel *selected = GET_SELECTED(p);
- if (selected) {
- grpc_connected_subchannel_ping(exec_ctx, selected, closure);
+ if (p->selected) {
+ grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
} else {
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"));
}
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
- pf_destroy, pf_shutdown, pf_pick,
- pf_cancel_pick, pf_cancel_picks, pf_ping_one,
- pf_exit_idle, pf_check_connectivity, pf_notify_on_state_change};
+ pf_destroy,
+ pf_shutdown_locked,
+ pf_pick_locked,
+ pf_cancel_pick_locked,
+ pf_cancel_picks_locked,
+ pf_ping_one_locked,
+ pf_exit_idle_locked,
+ pf_check_connectivity_locked,
+ pf_notify_on_state_change_locked};
static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
@@ -489,10 +447,9 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
}
p->num_subchannels = subchannel_idx;
- grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
- grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p,
- grpc_schedule_on_exec_ctx);
- gpr_mu_init(&p->mu);
+ grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
+ grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed_locked, p,
+ grpc_combiner_scheduler(args->combiner, false));
return &p->base;
}
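The most instructive simplification in pick_first is the selected field: previously a gpr_atm read with acquire/release ordering so the pick fast path could skip the mutex, it becomes a plain pointer because every reader and writer now runs under the combiner. Side by side, as a sketch of the lines changed above:

    /* before: lock-free fast path */
    grpc_connected_subchannel *selected =
        (grpc_connected_subchannel *)gpr_atm_acq_load(&p->selected);
    ...
    gpr_atm_rel_store(&p->selected, (gpr_atm)selected);

    /* after: combiner-serialized, ordinary loads and stores suffice */
    if (p->selected != NULL) {
      *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
    }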
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index 3e060d189a..687df170ad 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -67,6 +67,7 @@
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -134,7 +135,6 @@ typedef struct {
struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
- gpr_mu mu;
/** total number of addresses received at creation time */
size_t num_addresses;
@@ -293,7 +293,6 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
- gpr_mu_destroy(&p->mu);
elem = p->ready_list.next;
while (elem != NULL && elem != &p->ready_list) {
@@ -309,12 +308,11 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
}
-static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
size_t i;
- gpr_mu_lock(&p->mu);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
}
@@ -335,15 +333,13 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}
- gpr_mu_unlock(&p->mu);
}
-static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
+static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target,
+ grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
- gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@@ -360,17 +356,15 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
-static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
+static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
- gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
@@ -388,11 +382,11 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
pp = next;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
-static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
+static void start_picking_locked(grpc_exec_ctx *exec_ctx,
+ round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
@@ -411,23 +405,20 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
}
}
-static void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+static void rr_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- gpr_mu_lock(&p->mu);
if (!p->started_picking) {
- start_picking(exec_ctx, p);
+ start_picking_locked(exec_ctx, p);
}
- gpr_mu_unlock(&p->mu);
}
-static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target, void **user_data,
- grpc_closure *on_complete) {
+static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, void **user_data,
+ grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
ready_list *selected;
- gpr_mu_lock(&p->mu);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
@@ -449,12 +440,11 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
- gpr_mu_unlock(&p->mu);
return 1;
} else {
/* no pick currently available. Save for later in list of pending picks */
if (!p->started_picking) {
- start_picking(exec_ctx, p);
+ start_picking_locked(exec_ctx, p);
}
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
@@ -463,7 +453,6 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->user_data = user_data;
p->pending_picks = pp;
- gpr_mu_unlock(&p->mu);
return 0;
}
}
@@ -538,17 +527,15 @@ static grpc_connectivity_state update_lb_connectivity_status(
return sd->curr_connectivity_state;
}
-static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
GRPC_ERROR_REF(error);
- gpr_mu_lock(&p->mu);
if (p->shutdown) {
- gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
GRPC_ERROR_UNREF(error);
return;
@@ -645,56 +632,51 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
break;
}
- gpr_mu_unlock(&p->mu);
GRPC_ERROR_UNREF(error);
}
-static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_error **error) {
+static grpc_connectivity_state rr_check_connectivity_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- grpc_connectivity_state st;
- gpr_mu_lock(&p->mu);
- st = grpc_connectivity_state_check(&p->state_tracker, error);
- gpr_mu_unlock(&p->mu);
- return st;
+ return grpc_connectivity_state_get(&p->state_tracker, error);
}
-static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
+static void rr_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- gpr_mu_lock(&p->mu);
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
- gpr_mu_unlock(&p->mu);
}
-static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
+static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *selected;
grpc_connected_subchannel *target;
- gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
- gpr_mu_unlock(&p->mu);
target = GRPC_CONNECTED_SUBCHANNEL_REF(
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"rr_picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
} else {
- gpr_mu_unlock(&p->mu);
grpc_closure_sched(exec_ctx, closure,
GRPC_ERROR_CREATE("Round Robin not connected"));
}
}
static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
- rr_destroy, rr_shutdown, rr_pick,
- rr_cancel_pick, rr_cancel_picks, rr_ping_one,
- rr_exit_idle, rr_check_connectivity, rr_notify_on_state_change};
+ rr_destroy,
+ rr_shutdown_locked,
+ rr_pick_locked,
+ rr_cancel_pick_locked,
+ rr_cancel_picks_locked,
+ rr_ping_one_locked,
+ rr_exit_idle_locked,
+ rr_check_connectivity_locked,
+ rr_notify_on_state_change_locked};
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
@@ -762,7 +744,8 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
}
++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
- rr_connectivity_changed, sd, grpc_schedule_on_exec_ctx);
+ rr_connectivity_changed_locked, sd,
+ grpc_combiner_scheduler(args->combiner, false));
}
}
if (subchannel_idx == 0) {
@@ -779,7 +762,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
p->ready_list.next = NULL;
p->ready_list_last_pick = &p->ready_list;
- grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
+ grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
@@ -787,7 +770,6 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "Created RR policy at %p with %lu subchannels",
(void *)p, (unsigned long)p->num_subchannels);
}
- gpr_mu_init(&p->mu);
return &p->base;
}
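All three policies now spell their vtable one entry per line, but correctness still hinges on matching the member order declared in lb_policy.h. If the build allows C99 designated initializers, a more self-documenting form is possible; a sketch (the last six member names come from the vtable definition earlier in this diff, the first three are inferred from the implementations):

    static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
        .destroy = rr_destroy,
        .shutdown_locked = rr_shutdown_locked,
        .pick_locked = rr_pick_locked,
        .cancel_pick_locked = rr_cancel_pick_locked,
        .cancel_picks_locked = rr_cancel_picks_locked,
        .ping_one_locked = rr_ping_one_locked,
        .exit_idle_locked = rr_exit_idle_locked,
        .check_connectivity_locked = rr_check_connectivity_locked,
        .notify_on_state_change_locked = rr_notify_on_state_change_locked,
    };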
diff --git a/src/core/ext/load_reporting/load_reporting.c b/src/core/ext/load_reporting/load_reporting.c
index 37b06a737f..942aea4fd1 100644
--- a/src/core/ext/load_reporting/load_reporting.c
+++ b/src/core/ext/load_reporting/load_reporting.c
@@ -34,14 +34,34 @@
#include <limits.h>
#include <string.h>
+#include <grpc/load_reporting.h>
#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>
#include "src/core/ext/load_reporting/load_reporting.h"
#include "src/core/ext/load_reporting/load_reporting_filter.h"
#include "src/core/lib/channel/channel_stack_builder.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
+static void destroy_lr_cost_context(void *c) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_load_reporting_cost_context *cost_ctx = c;
+ for (size_t i = 0; i < cost_ctx->values_count; ++i) {
+ grpc_slice_unref_internal(&exec_ctx, cost_ctx->values[i]);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ gpr_free(cost_ctx->values);
+ gpr_free(cost_ctx);
+}
+
+void grpc_call_set_load_reporting_cost_context(
+ grpc_call *call, grpc_load_reporting_cost_context *ctx) {
+ grpc_call_context_set(call, GRPC_CONTEXT_LR_COST, ctx,
+ destroy_lr_cost_context);
+}
+
static bool is_load_reporting_enabled(const grpc_channel_args *a) {
if (a == NULL) return false;
for (size_t i = 0; i < a->num_args; i++) {
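grpc_call_set_load_reporting_cost_context replaces the old approach, removed from the filter below, of smuggling per-call cost data through lb-cost-bin trailing metadata. A sketch of server-side usage; report_cost is a hypothetical helper, and the values/values_count members are taken from destroy_lr_cost_context above:

    #include <grpc/load_reporting.h>
    #include <grpc/slice.h>
    #include <grpc/support/alloc.h>

    static void report_cost(grpc_call *call, const char *blob, size_t len) {
      grpc_load_reporting_cost_context *ctx = gpr_malloc(sizeof(*ctx));
      ctx->values_count = 1;
      ctx->values = gpr_malloc(sizeof(*ctx->values));
      ctx->values[0] = grpc_slice_from_copied_buffer(blob, len);
      /* the call takes ownership; destroy_lr_cost_context frees it */
      grpc_call_set_load_reporting_cost_context(call, ctx);
    }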
diff --git a/src/core/ext/load_reporting/load_reporting.h b/src/core/ext/load_reporting/load_reporting.h
index a157844734..22859a599a 100644
--- a/src/core/ext/load_reporting/load_reporting.h
+++ b/src/core/ext/load_reporting/load_reporting.h
@@ -35,23 +35,8 @@
#define GRPC_CORE_EXT_LOAD_REPORTING_LOAD_REPORTING_H
#include <grpc/impl/codegen/grpc_types.h>
-#include "src/core/lib/channel/channel_stack.h"
-
-/** Metadata key for the gRPC LB load balancer token.
- *
- * The value corresponding to this key is an opaque token that is given to the
- * frontend as part of each pick; the frontend sends this token to the backend
- * in each request it sends when using that pick. The token is used by the
- * backend to verify the request and to allow the backend to report load to the
- * gRPC LB system. */
-#define GRPC_LB_TOKEN_MD_KEY "lb-token"
-/** Metadata key for gRPC LB cost reporting.
- *
- * The value corresponding to this key is an opaque binary blob reported by the
- * backend as part of its trailing metadata containing cost information for the
- * call. */
-#define GRPC_LB_COST_MD_KEY "lb-cost-bin"
+#include "src/core/lib/channel/channel_stack.h"
/** Identifiers for the invocation point of the user's LR callback */
typedef enum grpc_load_reporting_source {
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index 8af6191c3b..c6386a8942 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -31,11 +31,13 @@
*
*/
+#include <string.h>
+
+#include <grpc/load_reporting.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
-#include <string.h>
#include "src/core/ext/load_reporting/load_reporting.h"
#include "src/core/ext/load_reporting/load_reporting_filter.h"
@@ -46,8 +48,6 @@
typedef struct call_data {
intptr_t id; /**< an id unique to the call */
- bool have_trailing_md_string;
- grpc_slice trailing_md_string;
bool have_initial_md_string;
grpc_slice initial_md_string;
bool have_service_method;
@@ -100,7 +100,7 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(call_data));
@@ -142,9 +142,6 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
if (calld->have_initial_md_string) {
grpc_slice_unref_internal(exec_ctx, calld->initial_md_string);
}
- if (calld->have_trailing_md_string) {
- grpc_slice_unref_internal(exec_ctx, calld->trailing_md_string);
- }
if (calld->have_service_method) {
grpc_slice_unref_internal(exec_ctx, calld->service_method);
}
@@ -201,15 +198,6 @@ static void lr_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
/* substitute our callback for the higher callback */
calld->ops_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
op->recv_initial_metadata_ready = &calld->on_initial_md_ready;
- } else if (op->send_trailing_metadata) {
- if (op->send_trailing_metadata->idx.named.lb_cost_bin != NULL) {
- calld->trailing_md_string = grpc_slice_ref_internal(
- GRPC_MDVALUE(op->send_trailing_metadata->idx.named.lb_cost_bin->md));
- calld->have_trailing_md_string = true;
- grpc_metadata_batch_remove(
- exec_ctx, op->send_trailing_metadata,
- op->send_trailing_metadata->idx.named.lb_cost_bin);
- }
}
grpc_call_next_op(exec_ctx, elem, op);
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index 7b473cff02..6e6a804eaa 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -40,6 +40,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/backoff.h"
@@ -64,8 +65,6 @@ typedef struct {
/** pollset_set to drive the name resolution process */
grpc_pollset_set *interested_parties;
- /** mutex guarding the rest of the state */
- gpr_mu mu;
/** are we currently resolving? */
bool resolving;
/** which version of the result have we published? */
@@ -96,18 +95,20 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r);
-static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r);
+static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete);
static const grpc_resolver_vtable dns_resolver_vtable = {
- dns_destroy, dns_shutdown, dns_channel_saw_error, dns_next};
+ dns_destroy, dns_shutdown_locked, dns_channel_saw_error_locked,
+ dns_next_locked};
-static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
+static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
- gpr_mu_lock(&r->mu);
if (r->have_retry_timer) {
grpc_timer_cancel(exec_ctx, &r->retry_timer);
}
@@ -117,25 +118,21 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
GRPC_ERROR_CREATE("Resolver Shutdown"));
r->next_completion = NULL;
}
- gpr_mu_unlock(&r->mu);
}
-static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
- gpr_mu_lock(&r->mu);
if (!r->resolving) {
gpr_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
}
- gpr_mu_unlock(&r->mu);
}
-static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
+static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete) {
dns_resolver *r = (dns_resolver *)resolver;
- gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
@@ -145,30 +142,26 @@ static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
} else {
dns_maybe_finish_next_locked(exec_ctx, r);
}
- gpr_mu_unlock(&r->mu);
}
-static void dns_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
dns_resolver *r = arg;
- gpr_mu_lock(&r->mu);
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
dns_start_resolving_locked(exec_ctx, r);
}
}
- gpr_mu_unlock(&r->mu);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
}
-static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
dns_resolver *r = arg;
grpc_channel_args *result = NULL;
- gpr_mu_lock(&r->mu);
GPR_ASSERT(r->resolving);
r->resolving = false;
if (r->addresses != NULL) {
@@ -199,8 +192,8 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
- grpc_closure_init(&r->on_retry, dns_on_retry_timer, r,
- grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&r->on_retry, dns_on_retry_timer_locked, r,
+ grpc_combiner_scheduler(r->base.combiner, false));
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
}
if (r->resolved_result != NULL) {
@@ -209,7 +202,6 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
r->resolved_result = result;
r->resolved_version++;
dns_maybe_finish_next_locked(exec_ctx, r);
- gpr_mu_unlock(&r->mu);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
@@ -222,7 +214,8 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
r->addresses = NULL;
grpc_resolve_address(
exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
- grpc_closure_create(dns_on_resolved, r, grpc_schedule_on_exec_ctx),
+ grpc_closure_create(dns_on_resolved_locked, r,
+ grpc_combiner_scheduler(r->base.combiner, false)),
&r->addresses);
}
@@ -241,11 +234,10 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
dns_resolver *r = (dns_resolver *)gr;
- gpr_mu_destroy(&r->mu);
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
}
- grpc_pollset_set_destroy(r->interested_parties);
+ grpc_pollset_set_destroy(exec_ctx, r->interested_parties);
gpr_free(r->name_to_resolve);
gpr_free(r->default_port);
grpc_channel_args_destroy(exec_ctx, r->channel_args);
@@ -265,8 +257,7 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
// Create resolver.
dns_resolver *r = gpr_malloc(sizeof(dns_resolver));
memset(r, 0, sizeof(*r));
- gpr_mu_init(&r->mu);
- grpc_resolver_init(&r->base, &dns_resolver_vtable);
+ grpc_resolver_init(&r->base, &dns_resolver_vtable, args->combiner);
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
r->channel_args = grpc_channel_args_copy(args->args);
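The conversion above (and the sockaddr_resolver.c change below) follows one recipe: drop the per-resolver mutex and create every state-touching closure with grpc_combiner_scheduler, so the channel's combiner serializes all of the *_locked callbacks. A minimal sketch of the pattern, using only the closure/combiner calls visible in this diff; my_resolver and its fields are illustrative, not types from the tree:

    typedef struct {
      grpc_combiner *combiner; /* serializes every *_locked callback */
      grpc_closure on_done;
      bool resolving; /* only touched from combiner-scheduled closures */
    } my_resolver;

    static void on_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
      my_resolver *r = arg;
      r->resolving = false; /* no mutex needed: the combiner excludes others */
    }

    /* Assumed to itself be called from a combiner-scheduled closure. */
    static void start_locked(grpc_exec_ctx *exec_ctx, my_resolver *r) {
      r->resolving = true;
      grpc_closure_init(&r->on_done, on_done_locked, r,
                        grpc_combiner_scheduler(r->combiner, false));
      grpc_closure_sched(exec_ctx, &r->on_done, GRPC_ERROR_NONE);
    }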
diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
index a1365f6465..e7f66649b5 100644
--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -45,6 +45,7 @@
#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -58,8 +59,6 @@ typedef struct {
grpc_lb_addresses *addresses;
/** channel args */
grpc_channel_args *channel_args;
- /** mutex guarding the rest of the state */
- gpr_mu mu;
/** have we published? */
bool published;
/** pending next completion, or NULL */
@@ -73,48 +72,43 @@ static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
sockaddr_resolver *r);
-static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r);
-static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r);
+static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete);
static const grpc_resolver_vtable sockaddr_resolver_vtable = {
- sockaddr_destroy, sockaddr_shutdown, sockaddr_channel_saw_error,
- sockaddr_next};
+ sockaddr_destroy, sockaddr_shutdown_locked,
+ sockaddr_channel_saw_error_locked, sockaddr_next_locked};
-static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
- gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_result = NULL;
grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
r->next_completion = NULL;
}
- gpr_mu_unlock(&r->mu);
}
-static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
- gpr_mu_lock(&r->mu);
r->published = false;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
- gpr_mu_unlock(&r->mu);
}
-static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
+static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
- gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
- gpr_mu_unlock(&r->mu);
}
static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
@@ -131,7 +125,6 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
sockaddr_resolver *r = (sockaddr_resolver *)gr;
- gpr_mu_destroy(&r->mu);
grpc_lb_addresses_destroy(exec_ctx, r->addresses);
grpc_channel_args_destroy(exec_ctx, r->channel_args);
gpr_free(r);
@@ -201,8 +194,7 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
memset(r, 0, sizeof(*r));
r->addresses = addresses;
r->channel_args = grpc_channel_args_copy(args->args);
- gpr_mu_init(&r->mu);
- grpc_resolver_init(&r->base, &sockaddr_resolver_vtable);
+ grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);
return &r->base;
}
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index ae2c3838ed..0fc180d52f 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -55,11 +55,6 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/server.h"
-typedef struct pending_handshake_manager_node {
- grpc_handshake_manager *handshake_mgr;
- struct pending_handshake_manager_node *next;
-} pending_handshake_manager_node;
-
typedef struct {
grpc_server *server;
grpc_tcp_server *tcp_server;
@@ -68,7 +63,7 @@ typedef struct {
bool shutdown;
grpc_closure tcp_server_shutdown_complete;
grpc_closure *server_destroy_listener_done;
- pending_handshake_manager_node *pending_handshake_mgrs;
+ grpc_handshake_manager *pending_handshake_mgrs;
} server_state;
typedef struct {
@@ -78,44 +73,6 @@ typedef struct {
grpc_handshake_manager *handshake_mgr;
} server_connection_state;
-static void pending_handshake_manager_add_locked(
- server_state *state, grpc_handshake_manager *handshake_mgr) {
- pending_handshake_manager_node *node = gpr_malloc(sizeof(*node));
- node->handshake_mgr = handshake_mgr;
- node->next = state->pending_handshake_mgrs;
- state->pending_handshake_mgrs = node;
-}
-
-static void pending_handshake_manager_remove_locked(
- server_state *state, grpc_handshake_manager *handshake_mgr) {
- pending_handshake_manager_node **prev_node = &state->pending_handshake_mgrs;
- for (pending_handshake_manager_node *node = state->pending_handshake_mgrs;
- node != NULL; node = node->next) {
- if (node->handshake_mgr == handshake_mgr) {
- *prev_node = node->next;
- gpr_free(node);
- break;
- }
- prev_node = &node->next;
- }
-}
-
-static void pending_handshake_manager_shutdown_locked(grpc_exec_ctx *exec_ctx,
- server_state *state,
- grpc_error *why) {
- pending_handshake_manager_node *prev_node = NULL;
- for (pending_handshake_manager_node *node = state->pending_handshake_mgrs;
- node != NULL; node = node->next) {
- grpc_handshake_manager_shutdown(exec_ctx, node->handshake_mgr,
- GRPC_ERROR_REF(why));
- gpr_free(prev_node);
- prev_node = node;
- }
- gpr_free(prev_node);
- state->pending_handshake_mgrs = NULL;
- GRPC_ERROR_UNREF(why);
-}
-
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_handshaker_args *args = arg;
@@ -153,8 +110,9 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args_destroy(exec_ctx, args->args);
}
}
- pending_handshake_manager_remove_locked(connection_state->server_state,
- connection_state->handshake_mgr);
+ grpc_handshake_manager_pending_list_remove(
+ &connection_state->server_state->pending_handshake_mgrs,
+ connection_state->handshake_mgr);
gpr_mu_unlock(&connection_state->server_state->mu);
grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp_server);
@@ -174,7 +132,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
return;
}
grpc_handshake_manager *handshake_mgr = grpc_handshake_manager_create();
- pending_handshake_manager_add_locked(state, handshake_mgr);
+ grpc_handshake_manager_pending_list_add(&state->pending_handshake_mgrs,
+ handshake_mgr);
gpr_mu_unlock(&state->mu);
grpc_tcp_server_ref(state->tcp_server);
server_connection_state *connection_state =
@@ -213,8 +172,8 @@ static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&state->mu);
grpc_closure *destroy_done = state->server_destroy_listener_done;
GPR_ASSERT(state->shutdown);
- pending_handshake_manager_shutdown_locked(exec_ctx, state,
- GRPC_ERROR_REF(error));
+ grpc_handshake_manager_pending_list_shutdown_all(
+ exec_ctx, state->pending_handshake_mgrs, GRPC_ERROR_REF(error));
gpr_mu_unlock(&state->mu);
// Flush queued work before destroying handshaker factory, since that
// may do a synchronous unref.
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 3ee5e976f8..28a3166832 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -265,7 +265,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
.gain_d = 0,
.initial_control_value = log2(DEFAULT_WINDOW),
.min_control_value = -1,
- .max_control_value = 22,
+ .max_control_value = 25,
.integral_range = 10});
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
@@ -569,6 +569,14 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GRPC_ERROR_UNREF(s->read_closed_error);
GRPC_ERROR_UNREF(s->write_closed_error);
+ if (s->incoming_window_delta > 0) {
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM_INCOMING_WINDOW_DELTA(
+ "destroy", t, s, s->incoming_window_delta);
+ } else if (s->incoming_window_delta < 0) {
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM_INCOMING_WINDOW_DELTA(
+ "destroy", t, s, -s->incoming_window_delta);
+ }
+
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "stream");
GPR_TIMER_END("destroy_stream", 0);
@@ -1033,8 +1041,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
grpc_transport_stream_op *op = stream_op;
- grpc_chttp2_transport *t = op->transport_private.args[0];
- grpc_chttp2_stream *s = op->transport_private.args[1];
+ grpc_chttp2_transport *t = op->handler_private.args[0];
+ grpc_chttp2_stream *s = op->handler_private.args[1];
if (grpc_http_trace) {
char *str = grpc_transport_stream_op_string(op);
@@ -1106,8 +1114,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_chttp2_list_add_waiting_for_concurrency(t, s);
maybe_start_some_streams(exec_ctx, t);
} else {
- grpc_chttp2_cancel_stream(exec_ctx, t, s,
- GRPC_ERROR_CREATE("Transport closed"));
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
+ grpc_error_set_int(GRPC_ERROR_CREATE("Transport closed"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAVAILABLE));
}
} else {
GPR_ASSERT(s->id != 0);
@@ -1255,13 +1266,13 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
gpr_free(str);
}
- op->transport_private.args[0] = gt;
- op->transport_private.args[1] = gs;
+ op->handler_private.args[0] = gt;
+ op->handler_private.args[1] = gs;
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
grpc_closure_sched(
exec_ctx,
grpc_closure_init(
- &op->transport_private.closure, perform_stream_op_locked, op,
+ &op->handler_private.closure, perform_stream_op_locked, op,
grpc_combiner_scheduler(t->combiner, op->covered_by_poller)),
GRPC_ERROR_NONE);
GPR_TIMER_END("perform_stream_op", 0);
@@ -1801,13 +1812,13 @@ static void update_bdp(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (delta == 0 || (bdp != 0 && delta > -1024 && delta < 1024)) {
return;
}
+ if (grpc_bdp_estimator_trace) {
+ gpr_log(GPR_DEBUG, "%s: update initial window size to %d", t->peer_string,
+ (int)bdp);
+ }
push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, bdp);
}
-/*******************************************************************************
- * INPUT PROCESSING - PARSING
- */
-
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_http_parser parser;
@@ -2054,8 +2065,8 @@ static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
(int64_t)have_already) {
write_type = GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED;
}
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, incoming_window_delta,
- add_max_recv_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM_INCOMING_WINDOW_DELTA("op", t, s,
+ add_max_recv_bytes);
GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, announce_window,
add_max_recv_bytes);
if ((int64_t)s->incoming_window_delta + (int64_t)initial_window_size -
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.h b/src/core/ext/transport/chttp2/transport/frame_settings.h
index a29dc82106..44137798c0 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.h
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.h
@@ -87,7 +87,7 @@ extern const grpc_chttp2_setting_parameters
grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS];
/* Create a settings frame by diffing old & new, and updating old to be new */
-grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
+grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *newval,
uint32_t force_mask, size_t count);
/* Create an ack settings frame */
grpc_slice grpc_chttp2_settings_ack_create(void);
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
index 63df8e135f..84586cd998 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
@@ -173,6 +173,7 @@ static void add_header_data(framer_state *st, grpc_slice slice) {
static uint8_t *add_tiny_header_data(framer_state *st, size_t len) {
ensure_space(st, len);
+ st->stats->header_bytes += len;
return grpc_slice_buffer_tiny_add(st->output, len);
}
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 075d421dd4..5d41f4bfda 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -271,10 +271,6 @@ struct grpc_chttp2_transport {
/** data to write next write */
grpc_slice_buffer qbuf;
- /** window available to announce to peer */
- int64_t announce_incoming_window;
- /** how much window would we like to have for incoming_window */
- uint32_t connection_window_target;
/** how much data are we willing to buffer when the WRITE_BUFFER_HINT is set?
*/
uint32_t write_buffer_size;
@@ -328,6 +324,16 @@ struct grpc_chttp2_transport {
/** window available for peer to send to us */
int64_t incoming_window;
+  /** Calculating what we should advertise as the incoming window:
+      we track the total flow control credit over the initial window size
+      across all streams (data that we want to receive right now, because it
+      has an outstanding read) and the total amount under the initial window
+      size across all streams (data that we have read early). We then adjust
+      incoming_window such that:
+        incoming_window = total_over + max(bdp - total_under, 0) */
+ int64_t stream_total_over_incoming_window;
+ int64_t stream_total_under_incoming_window;
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
@@ -634,6 +640,44 @@ typedef enum {
GRPC_CHTTP2_FLOW_CREDIT_COMMON(phase, dst_context, 0, dst_context, dst_var, \
amount)
+#define GRPC_CHTTP2_FLOW_STREAM_INCOMING_WINDOW_DELTA_PREUPDATE( \
+ phase, transport, dst_context) \
+ if (dst_context->incoming_window_delta < 0) { \
+ transport->stream_total_under_incoming_window += \
+ dst_context->incoming_window_delta; \
+ } else if (dst_context->incoming_window_delta > 0) { \
+ transport->stream_total_over_incoming_window -= \
+ dst_context->incoming_window_delta; \
+ }
+
+#define GRPC_CHTTP2_FLOW_STREAM_INCOMING_WINDOW_DELTA_POSTUPDATE( \
+ phase, transport, dst_context) \
+ if (dst_context->incoming_window_delta < 0) { \
+ transport->stream_total_under_incoming_window -= \
+ dst_context->incoming_window_delta; \
+ } else if (dst_context->incoming_window_delta > 0) { \
+ transport->stream_total_over_incoming_window += \
+ dst_context->incoming_window_delta; \
+ }
+
+#define GRPC_CHTTP2_FLOW_DEBIT_STREAM_INCOMING_WINDOW_DELTA( \
+ phase, transport, dst_context, amount) \
+ GRPC_CHTTP2_FLOW_STREAM_INCOMING_WINDOW_DELTA_PREUPDATE(phase, transport, \
+ dst_context); \
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM(phase, transport, dst_context, \
+ incoming_window_delta, amount); \
+ GRPC_CHTTP2_FLOW_STREAM_INCOMING_WINDOW_DELTA_POSTUPDATE(phase, transport, \
+ dst_context);
+
+#define GRPC_CHTTP2_FLOW_CREDIT_STREAM_INCOMING_WINDOW_DELTA( \
+ phase, transport, dst_context, amount) \
+ GRPC_CHTTP2_FLOW_STREAM_INCOMING_WINDOW_DELTA_PREUPDATE(phase, transport, \
+ dst_context); \
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM(phase, transport, dst_context, \
+ incoming_window_delta, amount); \
+ GRPC_CHTTP2_FLOW_STREAM_INCOMING_WINDOW_DELTA_POSTUPDATE(phase, transport, \
+ dst_context);
+
#define GRPC_CHTTP2_FLOW_DEBIT_COMMON(phase, transport, id, dst_context, \
dst_var, amount) \
do { \
@@ -752,4 +796,6 @@ void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error);
+uint32_t grpc_chttp2_target_incoming_window(grpc_chttp2_transport *t);
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
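Taken together with grpc_chttp2_target_incoming_window() (added to writing.c below), the two new counters work out as follows; illustrative numbers only, assuming the sent INITIAL_WINDOW_SIZE setting is 65535:

    total_over  = 100000  /* credit above initial window: outstanding reads */
    total_under = 20000   /* data already read early, below initial window */
    target      = min(2^31 - 1, 100000 + max(65535 - 20000, 0)) = 145535

The writing path then initiates a transport window update once t->incoming_window falls to 3/4 of this target when a write is already going out, or to 1/2 of it when a write would have to be started just for the update.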
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 24bd93067b..7ed00522c3 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -376,15 +376,6 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
return err;
}
- uint32_t target_incoming_window = GPR_MAX(
- t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
- 1024);
- GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", t, incoming_window,
- incoming_frame_size);
- if (t->incoming_window <= target_incoming_window / 2) {
- grpc_chttp2_initiate_write(exec_ctx, t, false, "flow_control");
- }
-
if (s != NULL) {
if (incoming_frame_size >
s->incoming_window_delta +
@@ -402,8 +393,8 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
return err;
}
- GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", t, s, incoming_window_delta,
- incoming_frame_size);
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM_INCOMING_WINDOW_DELTA("parse", t, s,
+ incoming_frame_size);
if ((int64_t)t->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] +
(int64_t)s->incoming_window_delta - (int64_t)s->announce_window <=
@@ -417,6 +408,13 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
s->received_bytes += incoming_frame_size;
}
+ uint32_t target_incoming_window = grpc_chttp2_target_incoming_window(t);
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", t, incoming_window,
+ incoming_frame_size);
+ if (t->incoming_window <= target_incoming_window / 2) {
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "flow_control");
+ }
+
return GRPC_ERROR_NONE;
}
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index 05e6f59947..2b9d93cae7 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -152,6 +152,17 @@ static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
return true;
}
+uint32_t grpc_chttp2_target_incoming_window(grpc_chttp2_transport *t) {
+  return (uint32_t)GPR_MIN(
+      (int64_t)((1u << 31) - 1),
+ t->stream_total_over_incoming_window +
+ (int64_t)GPR_MAX(
+ t->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] -
+ t->stream_total_under_incoming_window,
+ 0));
+}
+
bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
@@ -310,13 +321,12 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
- uint32_t target_incoming_window = GPR_MAX(
- t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
- 1024);
+ uint32_t target_incoming_window = grpc_chttp2_target_incoming_window(t);
uint32_t threshold_to_send_transport_window_update =
t->outbuf.count > 0 ? 3 * target_incoming_window / 4
: target_incoming_window / 2;
- if (t->incoming_window <= threshold_to_send_transport_window_update) {
+ if (t->incoming_window <= threshold_to_send_transport_window_update &&
+ t->incoming_window != target_incoming_window) {
maybe_initiate_ping(exec_ctx, t,
GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE);
uint32_t announced = (uint32_t)GPR_CLAMP(
diff --git a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c
index 477cf07f45..b6e9e845df 100644
--- a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c
+++ b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c
@@ -39,6 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include "src/core/ext/transport/cronet/transport/cronet_transport.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/transport_impl.h"
@@ -54,16 +55,14 @@ extern grpc_transport_vtable grpc_cronet_vtable;
GRPCAPI grpc_channel *grpc_cronet_secure_channel_create(
void *engine, const char *target, const grpc_channel_args *args,
void *reserved) {
- cronet_transport *ct = gpr_malloc(sizeof(cronet_transport));
- ct->base.vtable = &grpc_cronet_vtable;
- ct->engine = engine;
- ct->host = gpr_malloc(strlen(target) + 1);
- strcpy(ct->host, target);
gpr_log(GPR_DEBUG,
"grpc_create_cronet_transport: stream_engine = %p, target=%s", engine,
- ct->host);
+ target);
+
+ grpc_transport *ct =
+ grpc_create_cronet_transport(engine, target, args, reserved);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
return grpc_channel_create(&exec_ctx, target, args,
- GRPC_CLIENT_DIRECT_CHANNEL, (grpc_transport *)ct);
+ GRPC_CLIENT_DIRECT_CHANNEL, ct);
}
diff --git a/src/core/ext/transport/cronet/transport/cronet_api_dummy.c b/src/core/ext/transport/cronet/transport/cronet_api_dummy.c
index da6c0b4fbc..0dc6a5152f 100644
--- a/src/core/ext/transport/cronet/transport/cronet_api_dummy.c
+++ b/src/core/ext/transport/cronet/transport/cronet_api_dummy.c
@@ -80,4 +80,16 @@ void bidirectional_stream_cancel(bidirectional_stream* stream) {
GPR_ASSERT(0);
}
+void bidirectional_stream_disable_auto_flush(bidirectional_stream* stream,
+ bool disable_auto_flush) {
+ GPR_ASSERT(0);
+}
+
+void bidirectional_stream_delay_request_headers_until_flush(
+ bidirectional_stream* stream, bool delay_headers_until_flush) {
+ GPR_ASSERT(0);
+}
+
+void bidirectional_stream_flush(bidirectional_stream* stream) { GPR_ASSERT(0); }
+
#endif /* GRPC_COMPILE_WITH_CRONET */
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index d755b1f147..01a03533da 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -88,7 +88,7 @@ enum e_op_id {
/* Cronet callbacks. See cronet_c_for_grpc.h for documentation for each. */
-static void on_request_headers_sent(bidirectional_stream *);
+static void on_stream_ready(bidirectional_stream *);
static void on_response_headers_received(
bidirectional_stream *, const bidirectional_stream_header_array *,
const char *);
@@ -100,7 +100,7 @@ static void on_succeeded(bidirectional_stream *);
static void on_failed(bidirectional_stream *, int);
static void on_canceled(bidirectional_stream *);
static bidirectional_stream_callback cronet_callbacks = {
- on_request_headers_sent,
+ on_stream_ready,
on_response_headers_received,
on_read_completed,
on_write_completed,
@@ -114,6 +114,7 @@ struct grpc_cronet_transport {
grpc_transport base; /* must be first element in this structure */
stream_engine *engine;
char *host;
+ bool use_packet_coalescing;
};
typedef struct grpc_cronet_transport grpc_cronet_transport;
@@ -152,6 +153,9 @@ struct op_state {
bool state_callback_received[OP_NUM_OPS];
bool fail_state;
bool flush_read;
+ bool flush_cronet_when_ready;
+ bool pending_write_for_trailer;
+ bool unprocessed_send_message;
grpc_error *cancel_error;
/* data structure for storing data coming from server */
struct read_state rs;
@@ -175,7 +179,7 @@ struct op_storage {
struct stream_obj {
struct op_and_state *oas;
grpc_transport_stream_op *curr_op;
- grpc_cronet_transport curr_ct;
+ grpc_cronet_transport *curr_ct;
grpc_stream *curr_gs;
bidirectional_stream *cbs;
bidirectional_stream_header_array header_array;
@@ -274,6 +278,9 @@ static void add_to_storage(struct stream_obj *s, grpc_transport_stream_op *op) {
new_op->next = storage->head;
storage->head = new_op;
storage->num_pending_ops++;
+ if (op->send_message) {
+ s->state.unprocessed_send_message = true;
+ }
CRONET_LOG(GPR_DEBUG, "adding new op %p. %d in the queue.", new_op,
storage->num_pending_ops);
gpr_mu_unlock(&s->mu);
@@ -406,9 +413,10 @@ static void on_succeeded(bidirectional_stream *stream) {
/*
Cronet callback
*/
-static void on_request_headers_sent(bidirectional_stream *stream) {
- CRONET_LOG(GPR_DEBUG, "W: on_request_headers_sent(%p)", stream);
+static void on_stream_ready(bidirectional_stream *stream) {
+ CRONET_LOG(GPR_DEBUG, "W: on_stream_ready(%p)", stream);
stream_obj *s = (stream_obj *)stream->annotation;
+ grpc_cronet_transport *t = (grpc_cronet_transport *)s->curr_ct;
gpr_mu_lock(&s->mu);
s->state.state_op_done[OP_SEND_INITIAL_METADATA] = true;
s->state.state_callback_received[OP_SEND_INITIAL_METADATA] = true;
@@ -417,6 +425,14 @@ static void on_request_headers_sent(bidirectional_stream *stream) {
gpr_free(s->header_array.headers);
s->header_array.headers = NULL;
}
+  /* Send the initial metadata on the wire if there are no SEND_MESSAGE or
+   * SEND_TRAILING_METADATA ops pending */
+ if (t->use_packet_coalescing) {
+ if (s->state.flush_cronet_when_ready) {
+ CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_flush (%p)", s->cbs);
+ bidirectional_stream_flush(stream);
+ }
+ }
gpr_mu_unlock(&s->mu);
execute_from_storage(s);
}
@@ -528,6 +544,7 @@ static void on_response_trailers_received(
CRONET_LOG(GPR_DEBUG, "R: on_response_trailers_received(%p,%p)", stream,
trailers);
stream_obj *s = (stream_obj *)stream->annotation;
+ grpc_cronet_transport *t = (grpc_cronet_transport *)s->curr_ct;
gpr_mu_lock(&s->mu);
memset(&s->state.rs.trailing_metadata, 0,
sizeof(s->state.rs.trailing_metadata));
@@ -558,6 +575,10 @@ static void on_response_trailers_received(
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs);
s->state.state_callback_received[OP_SEND_MESSAGE] = false;
bidirectional_stream_write(s->cbs, "", 0, true);
+ if (t->use_packet_coalescing) {
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_flush (%p)", s->cbs);
+ bidirectional_stream_flush(s->cbs);
+ }
s->state.state_op_done[OP_SEND_TRAILING_METADATA] = true;
gpr_mu_unlock(&s->mu);
@@ -607,7 +628,7 @@ static void convert_metadata_to_cronet_headers(
curr = curr->next;
num_headers_available++;
}
- /* Allocate enough memory. It is freed in the on_request_headers_sent callback
+ /* Allocate enough memory. It is freed in the on_stream_ready callback
*/
bidirectional_stream_header *headers =
(bidirectional_stream_header *)gpr_malloc(
@@ -687,8 +708,10 @@ static bool header_has_authority(grpc_linked_mdelem *head) {
executed. This is the heart of the state machine.
*/
static bool op_can_be_run(grpc_transport_stream_op *curr_op,
- struct op_state *stream_state,
- struct op_state *op_state, enum e_op_id op_id) {
+ struct stream_obj *s, struct op_state *op_state,
+ enum e_op_id op_id) {
+ struct op_state *stream_state = &s->state;
+ grpc_cronet_transport *t = s->curr_ct;
bool result = true;
/* When call is canceled, every op can be run, except under following
conditions
@@ -755,12 +778,14 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op,
else if (!stream_state->state_callback_received[OP_SEND_INITIAL_METADATA])
result = false;
/* we haven't sent message yet */
- else if (curr_op->send_message &&
+ else if (stream_state->unprocessed_send_message &&
!stream_state->state_op_done[OP_SEND_MESSAGE])
result = false;
/* we haven't got on_write_completed for the send yet */
else if (stream_state->state_op_done[OP_SEND_MESSAGE] &&
- !stream_state->state_callback_received[OP_SEND_MESSAGE])
+ !stream_state->state_callback_received[OP_SEND_MESSAGE] &&
+ !(t->use_packet_coalescing &&
+ stream_state->pending_write_for_trailer))
result = false;
} else if (op_id == OP_CANCEL_ERROR) {
/* already executed */
@@ -833,24 +858,28 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
struct op_and_state *oas) {
grpc_transport_stream_op *stream_op = &oas->op;
struct stream_obj *s = oas->s;
+ grpc_cronet_transport *t = (grpc_cronet_transport *)s->curr_ct;
struct op_state *stream_state = &s->state;
enum e_op_result result = NO_ACTION_POSSIBLE;
if (stream_op->send_initial_metadata &&
- op_can_be_run(stream_op, stream_state, &oas->state,
- OP_SEND_INITIAL_METADATA)) {
+ op_can_be_run(stream_op, s, &oas->state, OP_SEND_INITIAL_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_INITIAL_METADATA", oas);
/* Start new cronet stream. It is destroyed in on_succeeded, on_canceled,
* on_failed */
GPR_ASSERT(s->cbs == NULL);
GPR_ASSERT(!stream_state->state_op_done[OP_SEND_INITIAL_METADATA]);
- s->cbs = bidirectional_stream_create(s->curr_ct.engine, s->curr_gs,
- &cronet_callbacks);
+ s->cbs =
+ bidirectional_stream_create(t->engine, s->curr_gs, &cronet_callbacks);
CRONET_LOG(GPR_DEBUG, "%p = bidirectional_stream_create()", s->cbs);
+ if (t->use_packet_coalescing) {
+ bidirectional_stream_disable_auto_flush(s->cbs, true);
+ bidirectional_stream_delay_request_headers_until_flush(s->cbs, true);
+ }
char *url = NULL;
const char *method = "POST";
s->header_array.headers = NULL;
convert_metadata_to_cronet_headers(
- stream_op->send_initial_metadata->list.head, s->curr_ct.host, &url,
+ stream_op->send_initial_metadata->list.head, t->host, &url,
&s->header_array.headers, &s->header_array.count, &method);
s->header_array.capacity = s->header_array.count;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_start(%p, %s)", s->cbs, url);
@@ -862,30 +891,16 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
gpr_free((void *)s->header_array.headers[header_index].value);
}
stream_state->state_op_done[OP_SEND_INITIAL_METADATA] = true;
- result = ACTION_TAKEN_WITH_CALLBACK;
- } else if (stream_op->recv_initial_metadata &&
- op_can_be_run(stream_op, stream_state, &oas->state,
- OP_RECV_INITIAL_METADATA)) {
- CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas);
- if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
- grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
- GRPC_ERROR_NONE);
- } else if (stream_state->state_callback_received[OP_FAILED]) {
- grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
- GRPC_ERROR_NONE);
- } else {
- grpc_chttp2_incoming_metadata_buffer_publish(
- exec_ctx, &oas->s->state.rs.initial_metadata,
- stream_op->recv_initial_metadata);
- grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
- GRPC_ERROR_NONE);
+ if (t->use_packet_coalescing) {
+ if (!stream_op->send_message && !stream_op->send_trailing_metadata) {
+ s->state.flush_cronet_when_ready = true;
+ }
}
- stream_state->state_op_done[OP_RECV_INITIAL_METADATA] = true;
- result = ACTION_TAKEN_NO_CALLBACK;
+ result = ACTION_TAKEN_WITH_CALLBACK;
} else if (stream_op->send_message &&
- op_can_be_run(stream_op, stream_state, &oas->state,
- OP_SEND_MESSAGE)) {
+ op_can_be_run(stream_op, s, &oas->state, OP_SEND_MESSAGE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_MESSAGE", oas);
+ stream_state->unprocessed_send_message = false;
if (stream_state->state_callback_received[OP_FAILED]) {
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
@@ -916,16 +931,63 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
bidirectional_stream_write(s->cbs, stream_state->ws.write_buffer,
(int)write_buffer_size, false);
- result = ACTION_TAKEN_WITH_CALLBACK;
+ if (t->use_packet_coalescing) {
+ if (!stream_op->send_trailing_metadata) {
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_flush (%p)", s->cbs);
+ bidirectional_stream_flush(s->cbs);
+ result = ACTION_TAKEN_WITH_CALLBACK;
+ } else {
+ stream_state->pending_write_for_trailer = true;
+ result = ACTION_TAKEN_NO_CALLBACK;
+ }
+ } else {
+ result = ACTION_TAKEN_WITH_CALLBACK;
+ }
} else {
result = NO_ACTION_POSSIBLE;
}
}
stream_state->state_op_done[OP_SEND_MESSAGE] = true;
oas->state.state_op_done[OP_SEND_MESSAGE] = true;
+ } else if (stream_op->send_trailing_metadata &&
+ op_can_be_run(stream_op, s, &oas->state,
+ OP_SEND_TRAILING_METADATA)) {
+ CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_TRAILING_METADATA", oas);
+ if (stream_state->state_callback_received[OP_FAILED]) {
+ result = NO_ACTION_POSSIBLE;
+ CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
+ } else {
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs);
+ stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
+ bidirectional_stream_write(s->cbs, "", 0, true);
+ if (t->use_packet_coalescing) {
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_flush (%p)", s->cbs);
+ bidirectional_stream_flush(s->cbs);
+ }
+ result = ACTION_TAKEN_WITH_CALLBACK;
+ }
+ stream_state->state_op_done[OP_SEND_TRAILING_METADATA] = true;
+ } else if (stream_op->recv_initial_metadata &&
+ op_can_be_run(stream_op, s, &oas->state,
+ OP_RECV_INITIAL_METADATA)) {
+ CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas);
+ if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
+ grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
+ } else if (stream_state->state_callback_received[OP_FAILED]) {
+ grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
+ } else {
+ grpc_chttp2_incoming_metadata_buffer_publish(
+ exec_ctx, &oas->s->state.rs.initial_metadata,
+ stream_op->recv_initial_metadata);
+ grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
+ }
+ stream_state->state_op_done[OP_RECV_INITIAL_METADATA] = true;
+ result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_op->recv_message &&
- op_can_be_run(stream_op, stream_state, &oas->state,
- OP_RECV_MESSAGE)) {
+ op_can_be_run(stream_op, s, &oas->state, OP_RECV_MESSAGE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
CRONET_LOG(GPR_DEBUG, "Stream is cancelled.");
@@ -980,6 +1042,16 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
oas->state.state_op_done[OP_RECV_MESSAGE] = true;
+
+      /* Extra read to trigger the on_succeeded callback */
+ stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
+ stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
+ stream_state->rs.received_bytes = 0;
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
+ stream_state->state_op_done[OP_READ_REQ_MADE] =
+ true; /* Indicates that at least one read request has been made */
+ bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
+ stream_state->rs.remaining_bytes);
result = ACTION_TAKEN_NO_CALLBACK;
}
} else if (stream_state->rs.remaining_bytes == 0) {
@@ -1027,7 +1099,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
result = ACTION_TAKEN_NO_CALLBACK;
}
} else if (stream_op->recv_trailing_metadata &&
- op_can_be_run(stream_op, stream_state, &oas->state,
+ op_can_be_run(stream_op, s, &oas->state,
OP_RECV_TRAILING_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_TRAILING_METADATA", oas);
if (oas->s->state.rs.trailing_metadata_valid) {
@@ -1038,23 +1110,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
}
stream_state->state_op_done[OP_RECV_TRAILING_METADATA] = true;
result = ACTION_TAKEN_NO_CALLBACK;
- } else if (stream_op->send_trailing_metadata &&
- op_can_be_run(stream_op, stream_state, &oas->state,
- OP_SEND_TRAILING_METADATA)) {
- CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_TRAILING_METADATA", oas);
- if (stream_state->state_callback_received[OP_FAILED]) {
- result = NO_ACTION_POSSIBLE;
- CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
- } else {
- CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs);
- stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
- bidirectional_stream_write(s->cbs, "", 0, true);
- result = ACTION_TAKEN_WITH_CALLBACK;
- }
- stream_state->state_op_done[OP_SEND_TRAILING_METADATA] = true;
} else if (stream_op->cancel_error &&
- op_can_be_run(stream_op, stream_state, &oas->state,
- OP_CANCEL_ERROR)) {
+ op_can_be_run(stream_op, s, &oas->state, OP_CANCEL_ERROR)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_CANCEL_ERROR", oas);
CRONET_LOG(GPR_DEBUG, "W: bidirectional_stream_cancel(%p)", s->cbs);
if (s->cbs) {
@@ -1068,8 +1125,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->cancel_error = GRPC_ERROR_REF(stream_op->cancel_error);
}
} else if (stream_op->on_complete &&
- op_can_be_run(stream_op, stream_state, &oas->state,
- OP_ON_COMPLETE)) {
+ op_can_be_run(stream_op, s, &oas->state, OP_ON_COMPLETE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
grpc_closure_sched(exec_ctx, stream_op->on_complete,
@@ -1133,6 +1189,12 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
sizeof(s->state.state_callback_received));
s->state.fail_state = s->state.flush_read = false;
s->state.cancel_error = NULL;
+ s->state.flush_cronet_when_ready = s->state.pending_write_for_trailer = false;
+ s->state.unprocessed_send_message = false;
+
+ s->curr_gs = gs;
+ s->curr_ct = (grpc_cronet_transport *)gt;
+
gpr_mu_init(&s->mu);
return 0;
}
@@ -1148,8 +1210,6 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_transport_stream_op *op) {
CRONET_LOG(GPR_DEBUG, "perform_stream_op");
stream_obj *s = (stream_obj *)gs;
- s->curr_gs = gs;
- memcpy(&s->curr_ct, gt, sizeof(grpc_cronet_transport));
add_to_storage(s, op);
if (op->send_initial_metadata &&
header_has_authority(op->send_initial_metadata->list.head)) {
@@ -1197,14 +1257,58 @@ static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
static void perform_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_transport_op *op) {}
-const grpc_transport_vtable grpc_cronet_vtable = {sizeof(stream_obj),
- "cronet_http",
- init_stream,
- set_pollset_do_nothing,
- set_pollset_set_do_nothing,
- perform_stream_op,
- perform_op,
- destroy_stream,
- destroy_transport,
- get_peer,
- get_endpoint};
+static const grpc_transport_vtable grpc_cronet_vtable = {
+ sizeof(stream_obj),
+ "cronet_http",
+ init_stream,
+ set_pollset_do_nothing,
+ set_pollset_set_do_nothing,
+ perform_stream_op,
+ perform_op,
+ destroy_stream,
+ destroy_transport,
+ get_peer,
+ get_endpoint};
+
+grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
+ const grpc_channel_args *args,
+ void *reserved) {
+ grpc_cronet_transport *ct = gpr_malloc(sizeof(grpc_cronet_transport));
+ if (!ct) {
+ goto error;
+ }
+ ct->base.vtable = &grpc_cronet_vtable;
+ ct->engine = engine;
+ ct->host = gpr_malloc(strlen(target) + 1);
+ if (!ct->host) {
+ goto error;
+ }
+ strcpy(ct->host, target);
+
+ ct->use_packet_coalescing = true;
+ if (args) {
+ for (size_t i = 0; i < args->num_args; i++) {
+ if (0 ==
+ strcmp(args->args[i].key, GRPC_ARG_USE_CRONET_PACKET_COALESCING)) {
+ if (args->args[i].type != GRPC_ARG_INTEGER) {
+ gpr_log(GPR_ERROR, "%s ignored: it must be an integer",
+ GRPC_ARG_USE_CRONET_PACKET_COALESCING);
+ } else {
+ ct->use_packet_coalescing = (args->args[i].value.integer != 0);
+ }
+ }
+ }
+ }
+
+ return &ct->base;
+
+error:
+ if (ct) {
+ if (ct->host) {
+ gpr_free(ct->host);
+ }
+ gpr_free(ct);
+ }
+
+ return NULL;
+}
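The new factory honors GRPC_ARG_USE_CRONET_PACKET_COALESCING (an integer channel arg, on by default). A hedged sketch of turning coalescing off at channel-creation time; engine is assumed to be a stream_engine obtained from Cronet, and the target is a placeholder:

    grpc_arg coalescing_arg;
    coalescing_arg.type = GRPC_ARG_INTEGER;
    coalescing_arg.key = GRPC_ARG_USE_CRONET_PACKET_COALESCING;
    coalescing_arg.value.integer = 0; /* any non-zero value enables it */
    grpc_channel_args channel_args = {1, &coalescing_arg};
    grpc_channel *channel = grpc_cronet_secure_channel_create(
        engine, "grpc.example.com", &channel_args, NULL);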
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.h b/src/core/ext/transport/cronet/transport/cronet_transport.h
new file mode 100644
index 0000000000..169ce31fd7
--- /dev/null
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_TRANSPORT_CRONET_TRANSPORT_CRONET_TRANSPORT_H
+#define GRPC_CORE_EXT_TRANSPORT_CRONET_TRANSPORT_CRONET_TRANSPORT_H
+
+#include "src/core/lib/transport/transport.h"
+
+grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
+ const grpc_channel_args *args,
+ void *reserved);
+
+#endif /* GRPC_CORE_EXT_TRANSPORT_CRONET_TRANSPORT_CRONET_TRANSPORT_H */
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index ec973d4e7f..3fb2a60ac7 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -173,7 +173,6 @@ grpc_error *grpc_call_stack_init(
grpc_slice path, gpr_timespec start_time, gpr_timespec deadline,
grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
- grpc_call_element_args args;
size_t count = channel_stack->count;
grpc_call_element *call_elems;
char *user_data;
@@ -188,13 +187,15 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
- args.start_time = start_time;
+ const grpc_call_element_args args = {
+ .start_time = start_time,
+ .call_stack = call_stack,
+ .server_transport_data = transport_server_data,
+ .context = context,
+ .path = path,
+ .deadline = deadline,
+ };
for (i = 0; i < count; i++) {
- args.call_stack = call_stack;
- args.server_transport_data = transport_server_data;
- args.context = context;
- args.path = path;
- args.deadline = deadline;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 1cf07d43c2..1e943dc2e5 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -131,7 +131,7 @@ typedef struct {
argument. */
grpc_error *(*init_call_elem)(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args);
+ const grpc_call_element_args *args);
void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent);
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index c860d60d88..aa41014a21 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -105,7 +105,6 @@ static grpc_error *process_send_initial_metadata(
static grpc_error *process_send_initial_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata) {
- grpc_error *error;
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
/* Parse incoming request for compression. If any, it'll be available
@@ -144,10 +143,13 @@ static grpc_error *process_send_initial_metadata(
calld->has_compression_algorithm = 1; /* GPR_TRUE */
}
+ grpc_error *error = GRPC_ERROR_NONE;
/* hint compression algorithm */
- error = grpc_metadata_batch_add_tail(
- exec_ctx, initial_metadata, &calld->compression_algorithm_storage,
- grpc_compression_encoding_mdelem(calld->compression_algorithm));
+ if (calld->compression_algorithm != GRPC_COMPRESS_NONE) {
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, initial_metadata, &calld->compression_algorithm_storage,
+ grpc_compression_encoding_mdelem(calld->compression_algorithm));
+ }
if (error != GRPC_ERROR_NONE) return error;
@@ -272,7 +274,7 @@ static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index 068c61c92a..29796f7ca7 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -83,7 +83,7 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
int r = grpc_transport_init_stream(
diff --git a/src/core/lib/channel/context.h b/src/core/lib/channel/context.h
index 6c931ad28a..2c1174ce7a 100644
--- a/src/core/lib/channel/context.h
+++ b/src/core/lib/channel/context.h
@@ -50,6 +50,9 @@ typedef enum {
/// Reserved for traffic_class_context.
GRPC_CONTEXT_TRAFFIC,
+ /// Costs for Load Reporting.
+ GRPC_CONTEXT_LR_COST,
+
GRPC_CONTEXT_COUNT
} grpc_context_index;
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index bc9a2effc2..f9668be0fa 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -52,9 +52,6 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = arg;
grpc_deadline_state* deadline_state = elem->call_data;
- gpr_mu_lock(&deadline_state->timer_mu);
- deadline_state->timer_pending = false;
- gpr_mu_unlock(&deadline_state->timer_mu);
if (error != GRPC_ERROR_CANCELLED) {
grpc_call_element_signal_error(
exec_ctx, elem,
@@ -66,53 +63,64 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
}
// Starts the deadline timer.
-static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
- gpr_timespec deadline) {
- grpc_deadline_state* deadline_state = elem->call_data;
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- // Note: We do not start the timer if there is already a timer
- // pending. This should be okay, because this is only called from two
- // functions exported by this module: grpc_deadline_state_start(), which
- // starts the initial timer, and grpc_deadline_state_reset(), which
- // cancels any pre-existing timer before starting a new one. In
- // particular, we want to ensure that if grpc_deadline_state_start()
- // winds up trying to start the timer after grpc_deadline_state_reset()
- // has already done so, we ignore the value from the former.
- if (!deadline_state->timer_pending &&
- gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
- // Take a reference to the call stack, to be owned by the timer.
- GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
- deadline_state->timer_pending = true;
- grpc_closure_init(&deadline_state->timer_callback, timer_callback, elem,
- grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &deadline_state->timer, deadline,
- &deadline_state->timer_callback,
- gpr_now(GPR_CLOCK_MONOTONIC));
- }
-}
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
+ deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
+ if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
+ return;
+ }
grpc_deadline_state* deadline_state = elem->call_data;
- gpr_mu_lock(&deadline_state->timer_mu);
- start_timer_if_needed_locked(exec_ctx, elem, deadline);
- gpr_mu_unlock(&deadline_state->timer_mu);
+ grpc_deadline_timer_state cur_state;
+ grpc_closure* closure = NULL;
+retry:
+ cur_state =
+ (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
+ switch (cur_state) {
+ case GRPC_DEADLINE_STATE_PENDING:
+      // Note: We do not start the timer if there is already a timer pending.
+ return;
+ case GRPC_DEADLINE_STATE_FINISHED:
+ if (gpr_atm_rel_cas(&deadline_state->timer_state,
+ GRPC_DEADLINE_STATE_FINISHED,
+ GRPC_DEADLINE_STATE_PENDING)) {
+ // If we've already created and destroyed a timer, we always create a
+ // new closure: we have no other guarantee that the inlined closure is
+ // not in use (it may hold a pending call to timer_callback)
+ closure = grpc_closure_create(timer_callback, elem,
+ grpc_schedule_on_exec_ctx);
+ } else {
+ goto retry;
+ }
+ break;
+ case GRPC_DEADLINE_STATE_INITIAL:
+ if (gpr_atm_rel_cas(&deadline_state->timer_state,
+ GRPC_DEADLINE_STATE_INITIAL,
+ GRPC_DEADLINE_STATE_PENDING)) {
+ closure =
+ grpc_closure_init(&deadline_state->timer_callback, timer_callback,
+ elem, grpc_schedule_on_exec_ctx);
+ } else {
+ goto retry;
+ }
+ break;
+ }
+ GPR_ASSERT(closure);
+ GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
+ grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
+ gpr_now(GPR_CLOCK_MONOTONIC));
}
// Cancels the deadline timer.
-static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
- grpc_deadline_state* deadline_state) {
- if (deadline_state->timer_pending) {
- grpc_timer_cancel(exec_ctx, &deadline_state->timer);
- deadline_state->timer_pending = false;
- }
-}
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
- gpr_mu_lock(&deadline_state->timer_mu);
- cancel_timer_if_needed_locked(exec_ctx, deadline_state);
- gpr_mu_unlock(&deadline_state->timer_mu);
+ if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
+ GRPC_DEADLINE_STATE_FINISHED)) {
+ grpc_timer_cancel(exec_ctx, &deadline_state->timer);
+ } else {
+    // The timer was either in GRPC_DEADLINE_STATE_INITIAL (nothing to cancel)
+    // or in GRPC_DEADLINE_STATE_FINISHED (again, nothing to cancel).
+ }
}
// Callback run when the call is complete.
@@ -120,8 +128,8 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_deadline_state* deadline_state = arg;
cancel_timer_if_needed(exec_ctx, deadline_state);
// Invoke the next callback.
- deadline_state->next_on_complete->cb(
- exec_ctx, deadline_state->next_on_complete->cb_arg, error);
+ grpc_closure_run(exec_ctx, deadline_state->next_on_complete,
+ GRPC_ERROR_REF(error));
}
// Inject our own on_complete callback into op.
@@ -138,14 +146,12 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_deadline_state* deadline_state = elem->call_data;
memset(deadline_state, 0, sizeof(*deadline_state));
deadline_state->call_stack = call_stack;
- gpr_mu_init(&deadline_state->timer_mu);
}
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem) {
grpc_deadline_state* deadline_state = elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
- gpr_mu_destroy(&deadline_state->timer_mu);
}
// Callback and associated state for starting the timer after call stack
@@ -187,10 +193,8 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
- gpr_mu_lock(&deadline_state->timer_mu);
- cancel_timer_if_needed_locked(exec_ctx, deadline_state);
- start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
- gpr_mu_unlock(&deadline_state->timer_mu);
+ cancel_timer_if_needed(exec_ctx, deadline_state);
+ start_timer_if_needed(exec_ctx, elem, new_deadline);
}
void grpc_deadline_state_client_start_transport_stream_op(
@@ -244,7 +248,7 @@ typedef struct server_call_data {
// Constructor for call_data. Used for both client and server filters.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
- grpc_call_element_args* args) {
+ const grpc_call_element_args* args) {
// Note: size of call data is different between client and server.
memset(elem->call_data, 0, elem->filter->sizeof_call_data);
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
diff --git a/src/core/lib/channel/deadline_filter.h b/src/core/lib/channel/deadline_filter.h
index bd2b84f79e..94717f6bc7 100644
--- a/src/core/lib/channel/deadline_filter.h
+++ b/src/core/lib/channel/deadline_filter.h
@@ -35,16 +35,18 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/timer.h"
+typedef enum grpc_deadline_timer_state {
+ GRPC_DEADLINE_STATE_INITIAL,
+ GRPC_DEADLINE_STATE_PENDING,
+ GRPC_DEADLINE_STATE_FINISHED
+} grpc_deadline_timer_state;
+
// State used for filters that enforce call deadlines.
// Must be the first field in the filter's call_data.
typedef struct grpc_deadline_state {
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
- // Guards access to timer_pending and timer.
- gpr_mu timer_mu;
- // True if the timer callback is currently pending.
- bool timer_pending;
- // The deadline timer.
+ gpr_atm timer_state;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when the call is complete.
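The deadline filter thus trades its timer mutex for a single atomic three-state machine. A minimal sketch of the protocol these hunks rely on, with a hypothetical start path added for symmetry (start_timer_if_needed itself is not shown in this part of the diff; gpr_atm_acq_cas and gpr_atm_rel_cas are the real gRPC atomics, the sketch_* wrappers are illustrative):

    /* Sketch: only the INITIAL -> PENDING transition arms the timer, and only
       the PENDING -> FINISHED transition cancels it, so no lock is needed. */
    static void sketch_start_timer(grpc_exec_ctx *exec_ctx,
                                   grpc_deadline_state *ds,
                                   gpr_timespec deadline) {
      if (gpr_atm_acq_cas(&ds->timer_state, GRPC_DEADLINE_STATE_INITIAL,
                          GRPC_DEADLINE_STATE_PENDING)) {
        grpc_timer_init(exec_ctx, &ds->timer, deadline, &ds->timer_callback,
                        gpr_now(GPR_CLOCK_MONOTONIC));
      }
    }

    static void sketch_cancel_timer(grpc_exec_ctx *exec_ctx,
                                    grpc_deadline_state *ds) {
      if (gpr_atm_rel_cas(&ds->timer_state, GRPC_DEADLINE_STATE_PENDING,
                          GRPC_DEADLINE_STATE_FINISHED)) {
        grpc_timer_cancel(exec_ctx, &ds->timer);
      }
    }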
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 5bed2d041d..82c361c7ef 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -92,6 +92,10 @@ struct grpc_handshake_manager {
void* user_data;
// Handshaker args.
grpc_handshaker_args args;
+  // Links to the previous and next managers in the list of all pending
+  // handshakes. Used on the server side only.
+ grpc_handshake_manager* prev;
+ grpc_handshake_manager* next;
};
grpc_handshake_manager* grpc_handshake_manager_create() {
@@ -102,6 +106,39 @@ grpc_handshake_manager* grpc_handshake_manager_create() {
return mgr;
}
+void grpc_handshake_manager_pending_list_add(grpc_handshake_manager** head,
+ grpc_handshake_manager* mgr) {
+ GPR_ASSERT(mgr->prev == NULL);
+ GPR_ASSERT(mgr->next == NULL);
+ mgr->next = *head;
+ if (*head) {
+ (*head)->prev = mgr;
+ }
+ *head = mgr;
+}
+
+void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head,
+ grpc_handshake_manager* mgr) {
+ if (mgr->next != NULL) {
+ mgr->next->prev = mgr->prev;
+ }
+ if (mgr->prev != NULL) {
+ mgr->prev->next = mgr->next;
+ } else {
+ GPR_ASSERT(*head == mgr);
+ *head = mgr->next;
+ }
+}
+
+void grpc_handshake_manager_pending_list_shutdown_all(
+ grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why) {
+ while (head != NULL) {
+ grpc_handshake_manager_shutdown(exec_ctx, head, GRPC_ERROR_REF(why));
+ head = head->next;
+ }
+ GRPC_ERROR_UNREF(why);
+}
+
static bool is_power_of_2(size_t n) { return (n & (n - 1)) == 0; }
void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h
index a8e3692add..5f97c3fc73 100644
--- a/src/core/lib/channel/handshaker.h
+++ b/src/core/lib/channel/handshaker.h
@@ -163,4 +163,20 @@ void grpc_handshake_manager_do_handshake(
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data);
+/// Add \a mgr to the server-side list of all pending handshake managers;
+/// the list starts at \a *head.
+// Not thread-safe. The caller needs to synchronize.
+void grpc_handshake_manager_pending_list_add(grpc_handshake_manager** head,
+ grpc_handshake_manager* mgr);
+
+/// Remove \a mgr from the server-side list of all pending handshake managers.
+// Not thread-safe. The caller needs to synchronize.
+void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head,
+ grpc_handshake_manager* mgr);
+
+/// Shut down all pending handshake managers on the server side.
+// Not thread-safe. The caller needs to synchronize.
+void grpc_handshake_manager_pending_list_shutdown_all(
+ grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why);
+
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */
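Because all three pending-list helpers are documented as not thread-safe, the server is expected to wrap them in its own lock. A hypothetical usage sketch (server_state, its mutex, and the sketch_* functions are illustrative names, not part of this patch):

    typedef struct {
      gpr_mu mu;
      grpc_handshake_manager *pending; /* head of the pending list */
    } server_state;

    static void sketch_on_accept(server_state *s, grpc_handshake_manager *mgr) {
      gpr_mu_lock(&s->mu);
      grpc_handshake_manager_pending_list_add(&s->pending, mgr);
      gpr_mu_unlock(&s->mu);
    }

    static void sketch_on_shutdown(grpc_exec_ctx *exec_ctx, server_state *s) {
      gpr_mu_lock(&s->mu);
      /* shutdown_all takes ownership of the error it is given */
      grpc_handshake_manager_pending_list_shutdown_all(
          exec_ctx, s->pending, GRPC_ERROR_CREATE("Server shutdown"));
      gpr_mu_unlock(&s->mu);
    }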
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index 49a2a980e0..c031533dd8 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -386,7 +386,7 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
calld->on_done_recv_initial_metadata = NULL;
calld->on_done_recv_trailing_metadata = NULL;
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index 3f992977c0..ce519f9c92 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -198,14 +198,17 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_STR_KEY, ":path"));
}
- if (b->idx.named.host != NULL) {
+ if (b->idx.named.host != NULL && b->idx.named.authority == NULL) {
+ grpc_linked_mdelem *el = b->idx.named.host;
+ grpc_mdelem md = GRPC_MDELEM_REF(el->md);
+ grpc_metadata_batch_remove(exec_ctx, b, el);
add_error(
error_name, &error,
- grpc_metadata_batch_substitute(
- exec_ctx, b, b->idx.named.host,
- grpc_mdelem_from_slices(
- exec_ctx, GRPC_MDSTR_AUTHORITY,
- grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.host->md)))));
+ grpc_metadata_batch_add_head(
+ exec_ctx, b, el, grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_AUTHORITY,
+ grpc_slice_ref_internal(GRPC_MDVALUE(md)))));
+ GRPC_MDELEM_UNREF(exec_ctx, md);
}
if (b->idx.named.authority == NULL) {
@@ -249,12 +252,11 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
- calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
- err);
+ grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true;
}
- calld->on_complete->cb(exec_ctx, calld->on_complete->cb_arg, err);
+ grpc_closure_run(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err));
}
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
@@ -265,8 +267,7 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
/* do nothing. This is probably a GET request, and payload will be returned
in hs_on_complete callback. */
} else {
- calld->recv_message_ready->cb(exec_ctx, calld->recv_message_ready->cb_arg,
- err);
+ grpc_closure_run(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
}
}
@@ -340,7 +341,7 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
/* initialize members */
diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c
index 5e22860cfb..22938c64b5 100644
--- a/src/core/lib/channel/message_size_filter.c
+++ b/src/core/lib/channel/message_size_filter.c
@@ -166,7 +166,7 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
// Constructor for call_data.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
- grpc_call_element_args* args) {
+ const grpc_call_element_args* args) {
channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index fb2108987b..6d7aa43b81 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -93,8 +93,9 @@ void grpc_httpcli_context_init(grpc_httpcli_context *context) {
context->pollset_set = grpc_pollset_set_create();
}
-void grpc_httpcli_context_destroy(grpc_httpcli_context *context) {
- grpc_pollset_set_destroy(context->pollset_set);
+void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_httpcli_context *context) {
+ grpc_pollset_set_destroy(exec_ctx, context->pollset_set);
}
static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h
index 11e03b44df..8ae03ee78f 100644
--- a/src/core/lib/http/httpcli.h
+++ b/src/core/lib/http/httpcli.h
@@ -83,7 +83,8 @@ typedef struct grpc_httpcli_request {
typedef struct grpc_http_response grpc_httpcli_response;
void grpc_httpcli_context_init(grpc_httpcli_context *context);
-void grpc_httpcli_context_destroy(grpc_httpcli_context *context);
+void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_httpcli_context *context);
/* Asynchronously perform a HTTP GET.
'context' specifies the http context under which to do the get
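This signature change is one instance of a pattern applied throughout the patch (pollset_set_destroy, grpc_jwt_verifier_destroy, and others below): destructors now take the caller's grpc_exec_ctx instead of spinning up their own, so teardown closures drain in one place. A hedged sketch of the calling convention:

    /* Sketch: the caller owns the exec_ctx and flushes it once at the end. */
    static void sketch_teardown(grpc_httpcli_context *context) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_httpcli_context_destroy(&exec_ctx, context);
      grpc_exec_ctx_finish(&exec_ctx); /* closures queued above run here */
    }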
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index ffacdac393..2613512acb 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -137,8 +137,8 @@ typedef enum {
} grpc_error_times;
/// The following "special" errors can be propagated without allocating memory.
-/// They are always even so that other code (particularly combiner locks) can
-/// safely use the lower bit for themselves.
+/// They are always even so that other code (particularly combiner locks,
+/// polling engines) can safely use the lower bit for themselves.
#define GRPC_ERROR_NONE ((grpc_error *)NULL)
#define GRPC_ERROR_OOM ((grpc_error *)2)
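That even-alignment guarantee is exactly what later hunks exploit: FD_SHUTDOWN_BIT in ev_epoll_linux.c and pack_received_status in surface/call.c both stash a boolean in the low bit of an error pointer. A minimal sketch of the tagging trick (the sketch_* helpers are illustrative):

    #include <stdbool.h>
    #include <grpc/support/atm.h>

    /* Sketch: all grpc_error values, including the special even sentinels,
       have a zero low bit, so a flag can ride in that bit. */
    static gpr_atm sketch_pack(grpc_error *err, bool flag) {
      return (gpr_atm)err | (flag ? 1 : 0);
    }
    static grpc_error *sketch_unpack_error(gpr_atm atm) {
      return (grpc_error *)(atm & ~(gpr_atm)1);
    }
    static bool sketch_unpack_flag(gpr_atm atm) { return (atm & 1) != 0; }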
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index 51842fc208..11208b9ad1 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -68,7 +68,7 @@ static int grpc_polling_trace = 0; /* Disabled by default */
gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
}
-/* Uncomment the following enable extra checks on poll_object operations */
+/* Uncomment the following to enable extra checks on poll_object operations */
/* #define PO_DEBUG */
static int grpc_wakeup_signal = -1;
@@ -140,24 +140,61 @@ struct grpc_fd {
Ref/Unref by two to avoid altering the orphaned bit */
gpr_atm refst;
- /* Indicates that the fd is shutdown and that any pending read/write closures
- should fail */
- bool shutdown;
- grpc_error *shutdown_error; /* reason for shutdown: set iff shutdown==true */
+  /* Internally stores data of type (grpc_error *). If the FD is shutdown, this
+     contains the reason for the shutdown (i.e. a pointer to grpc_error) ORed
+     with FD_SHUTDOWN_BIT. Since allocations are word-aligned, the lower bit of
+     a (grpc_error *) address is guaranteed to be zero, even for the special
+     values such as GRPC_ERROR_NONE and GRPC_ERROR_OOM.
- /* The fd is either closed or we relinquished control of it. In either cases,
- this indicates that the 'fd' on this structure is no longer valid */
+ Once an fd is shutdown, any pending or future read/write closures on the
+ fd should fail */
+ gpr_atm shutdown_error;
+
+ /* The fd is either closed or we relinquished control of it. In either
+ cases, this indicates that the 'fd' on this structure is no longer
+ valid */
bool orphaned;
- /* TODO: sreek - Move this to a lockfree implementation */
- grpc_closure *read_closure;
- grpc_closure *write_closure;
+ /* Closures to call when the fd is readable or writable respectively. These
+ fields contain one of the following values:
+ CLOSURE_READY : The fd has an I/O event of interest but there is no
+ closure yet to execute
+
+ CLOSURE_NOT_READY : The fd has no I/O event of interest
+
+ closure ptr : The closure to be executed when the fd has an I/O
+ event of interest
+
+ shutdown_error | FD_SHUTDOWN_BIT :
+ 'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
+ This indicates that the fd is shutdown. Since all
+ memory allocations are word-aligned, the lower two
+ bits of the shutdown_error pointer are always 0. So
+ it is safe to OR these with FD_SHUTDOWN_BIT
+
+ Valid state transitions:
+
+ <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
+ | | ^ | ^ | |
+ | | | | | | |
+ | +--------------4----------+ 6 +---------2---------------+ |
+ | | |
+ | v |
+ +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+
+
+ For 1, 4 : See set_ready() function
+ For 2, 3 : See notify_on() function
+ For 5,6,7: See set_shutdown() function */
+ gpr_atm read_closure;
+ gpr_atm write_closure;
struct grpc_fd *freelist_next;
grpc_closure *on_done_closure;
- /* The pollset that last noticed that the fd is readable */
- grpc_pollset *read_notifier_pollset;
+ /* The pollset that last noticed that the fd is readable. The actual type
+ * stored in this is (grpc_pollset *) */
+ gpr_atm read_notifier_pollset;
grpc_iomgr_object iomgr_object;
};
@@ -180,8 +217,10 @@ static void fd_unref(grpc_fd *fd);
static void fd_global_init(void);
static void fd_global_shutdown(void);
-#define CLOSURE_NOT_READY ((grpc_closure *)0)
-#define CLOSURE_READY ((grpc_closure *)1)
+#define CLOSURE_NOT_READY ((gpr_atm)0)
+#define CLOSURE_READY ((gpr_atm)2)
+
+#define FD_SHUTDOWN_BIT 1
/*******************************************************************************
* Polling island Declarations
@@ -908,7 +947,11 @@ static void unref_by(grpc_fd *fd, int n) {
fd->freelist_next = fd_freelist;
fd_freelist = fd;
grpc_iomgr_unregister_object(&fd->iomgr_object);
- if (fd->shutdown) GRPC_ERROR_UNREF(fd->shutdown_error);
+
+ grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
+  /* Clear the least significant bit if it is set (in case the fd was shutdown) */
+ err = (grpc_error *)((intptr_t)err & ~FD_SHUTDOWN_BIT);
+ GRPC_ERROR_UNREF(err);
gpr_mu_unlock(&fd_freelist_mu);
} else {
@@ -972,13 +1015,14 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
- new_fd->shutdown = false;
+ gpr_atm_no_barrier_store(&new_fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE);
new_fd->orphaned = false;
- new_fd->read_closure = CLOSURE_NOT_READY;
- new_fd->write_closure = CLOSURE_NOT_READY;
+ gpr_atm_no_barrier_store(&new_fd->read_closure, CLOSURE_NOT_READY);
+ gpr_atm_no_barrier_store(&new_fd->write_closure, CLOSURE_NOT_READY);
+ gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
+
new_fd->freelist_next = NULL;
new_fd->on_done_closure = NULL;
- new_fd->read_notifier_pollset = NULL;
gpr_mu_unlock(&new_fd->po.mu);
@@ -1060,101 +1104,206 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
GRPC_ERROR_UNREF(error);
}
-static grpc_error *fd_shutdown_error(grpc_fd *fd) {
- if (!fd->shutdown) {
- return GRPC_ERROR_NONE;
- } else {
- return GRPC_ERROR_CREATE_REFERENCING("FD shutdown", &fd->shutdown_error, 1);
+static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
+ grpc_closure *closure) {
+ while (true) {
+ /* Fast-path: CLOSURE_NOT_READY -> <closure>.
+ The 'release' cas here matches the 'acquire' load in set_ready and
+ set_shutdown ensuring that the closure (scheduled by set_ready or
+ set_shutdown) happens-after the I/O event on the fd */
+ if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
+ return; /* Fast-path successful. Return */
+ }
+
+ /* Slowpath. The 'acquire' load matches the 'release' cas in set_ready and
+ set_shutdown */
+ gpr_atm curr = gpr_atm_acq_load(state);
+ switch (curr) {
+ case CLOSURE_NOT_READY: {
+ break; /* retry */
+ }
+
+ case CLOSURE_READY: {
+ /* Change the state to CLOSURE_NOT_READY. Schedule the closure if
+ successful. If not, the state most likely transitioned to shutdown.
+ We should retry.
+
+           This can be a no-barrier cas since the state is being transitioned to
+           CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
+           closure when transitioning out of CLOSURE_NOT_READY state (i.e. there
+           is no other code that needs to 'happen-after' this) */
+ if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+ return; /* Slow-path successful. Return */
+ }
+
+ break; /* retry */
+ }
+
+ default: {
+        /* 'curr' is either a closure or the fd is shutdown (in which case
+           'curr' contains a pointer to the shutdown error). If the fd is
+           shutdown, schedule the closure with the shutdown error */
+ if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
+ grpc_closure_sched(
+ exec_ctx, closure,
+ GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
+ return;
+ }
+
+        /* There is already a closure! This indicates a bug in the code */
+ gpr_log(GPR_ERROR,
+ "notify_on called with a previous callback still pending");
+ abort();
+ }
+ }
}
+
+ GPR_UNREACHABLE_CODE(return );
}
-static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure **st, grpc_closure *closure) {
- if (fd->shutdown) {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
- } else if (*st == CLOSURE_NOT_READY) {
- /* not ready ==> switch to a waiting state by setting the closure */
- *st = closure;
- } else if (*st == CLOSURE_READY) {
- /* already ready ==> queue the closure to run immediately */
- *st = CLOSURE_NOT_READY;
- grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd));
- } else {
- /* upcallptr was set to a different closure. This is an error! */
- gpr_log(GPR_ERROR,
- "User called a notify_on function with a previous callback still "
- "pending");
- abort();
+static void set_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
+ grpc_error *shutdown_err) {
+  /* Try the fast-path first (i.e. expect the current value to be
+     CLOSURE_NOT_READY) */
+ gpr_atm curr = CLOSURE_NOT_READY;
+ gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
+
+ while (true) {
+ /* The 'release' cas here matches the 'acquire' load in notify_on to ensure
+ that the closure it schedules 'happens-after' the set_shutdown is called
+ on the fd */
+ if (gpr_atm_rel_cas(state, curr, new_state)) {
+ return; /* Fast-path successful. Return */
+ }
+
+ /* Fallback to slowpath. This 'acquire' load matches the 'release' cas in
+ notify_on and set_ready */
+ curr = gpr_atm_acq_load(state);
+ switch (curr) {
+ case CLOSURE_READY: {
+ break; /* retry */
+ }
+
+ case CLOSURE_NOT_READY: {
+ break; /* retry */
+ }
+
+ default: {
+ /* 'curr' is either a closure or the fd is already shutdown */
+
+ /* If fd is already shutdown, we are done */
+ if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ return;
+ }
+
+ /* Fd is not shutdown. Schedule the closure and move the state to
+ shutdown state. The 'release' cas here matches the 'acquire' load in
+ notify_on to ensure that the closure it schedules 'happens-after'
+ the set_shutdown is called on the fd */
+ if (gpr_atm_rel_cas(state, curr, new_state)) {
+ grpc_closure_sched(
+ exec_ctx, (grpc_closure *)curr,
+ GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
+ return;
+ }
+
+ /* 'curr' was a closure but now changed to a different state. We will
+ have to retry */
+ break;
+ }
+ }
}
+
+ GPR_UNREACHABLE_CODE(return );
}
-/* returns 1 if state becomes not ready */
-static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure **st) {
- if (*st == CLOSURE_READY) {
- /* duplicate ready ==> ignore */
- return 0;
- } else if (*st == CLOSURE_NOT_READY) {
- /* not ready, and not waiting ==> flag ready */
- *st = CLOSURE_READY;
- return 0;
- } else {
- /* waiting ==> queue closure */
- grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd));
- *st = CLOSURE_NOT_READY;
- return 1;
+static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state) {
+  /* Try an optimistic case first (i.e. assume the current state is
+     CLOSURE_NOT_READY).
+
+ This 'release' cas matches the 'acquire' load in notify_on ensuring that
+ any closure (scheduled by notify_on) 'happens-after' the return from
+ epoll_pwait */
+ if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
+ return; /* early out */
+ }
+
+ /* The 'acquire' load here matches the 'release' cas in notify_on and
+ set_shutdown */
+ gpr_atm curr = gpr_atm_acq_load(state);
+ switch (curr) {
+ case CLOSURE_READY: {
+ /* Already ready. We are done here */
+ break;
+ }
+
+ case CLOSURE_NOT_READY: {
+ /* The state was not CLOSURE_NOT_READY when we checked initially at the
+ beginning of this function but now it is CLOSURE_NOT_READY again.
+ This is only possible if the state transitioned out of
+ CLOSURE_NOT_READY to either CLOSURE_READY or <some closure> and then
+ back to CLOSURE_NOT_READY again (i.e after we entered this function,
+ the fd became "ready" and the necessary actions were already done).
+ So there is no need to make the state CLOSURE_READY now */
+ break;
+ }
+
+ default: {
+ /* 'curr' is either a closure or the fd is shutdown */
+ if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ /* The fd is shutdown. Do nothing */
+ } else if (gpr_atm_no_barrier_cas(state, curr, CLOSURE_NOT_READY)) {
+        /* The cas above was no-barrier since the state is being transitioned to
+           CLOSURE_NOT_READY; notify_on and set_shutdown do not schedule any
+           closures when transitioning out of CLOSURE_NOT_READY state (i.e.
+           there is no other code that needs to 'happen-after' this) */
+
+ grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
+ }
+      /* else the state changed again (only possible via a racing set_ready or
+         set_shutdown). In both cases the closure would already have been
+         scheduled for execution, so we are done here */
+ break;
+ }
}
}
static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
grpc_fd *fd) {
- grpc_pollset *notifier = NULL;
-
- gpr_mu_lock(&fd->po.mu);
- notifier = fd->read_notifier_pollset;
- gpr_mu_unlock(&fd->po.mu);
-
- return notifier;
+ gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
+ return (grpc_pollset *)notifier;
}
static bool fd_is_shutdown(grpc_fd *fd) {
- gpr_mu_lock(&fd->po.mu);
- const bool r = fd->shutdown;
- gpr_mu_unlock(&fd->po.mu);
- return r;
+ grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
+ return (((intptr_t)err & FD_SHUTDOWN_BIT) > 0);
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- gpr_mu_lock(&fd->po.mu);
- /* Do the actual shutdown only once */
- if (!fd->shutdown) {
- fd->shutdown = true;
- fd->shutdown_error = why;
-
+ /* Store the shutdown error ORed with FD_SHUTDOWN_BIT in fd->shutdown_error */
+ if (gpr_atm_rel_cas(&fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE,
+ (gpr_atm)why | FD_SHUTDOWN_BIT)) {
shutdown(fd->fd, SHUT_RDWR);
- /* Flush any pending read and write closures. Since fd->shutdown is 'true'
- at this point, the closures would be called with 'success = false' */
- set_ready_locked(exec_ctx, fd, &fd->read_closure);
- set_ready_locked(exec_ctx, fd, &fd->write_closure);
+
+ set_shutdown(exec_ctx, fd, &fd->read_closure, why);
+ set_shutdown(exec_ctx, fd, &fd->write_closure, why);
} else {
+ /* Shutdown already called */
GRPC_ERROR_UNREF(why);
}
- gpr_mu_unlock(&fd->po.mu);
}
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- gpr_mu_lock(&fd->po.mu);
- notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
- gpr_mu_unlock(&fd->po.mu);
+ notify_on(exec_ctx, fd, &fd->read_closure, closure);
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- gpr_mu_lock(&fd->po.mu);
- notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
- gpr_mu_unlock(&fd->po.mu);
+ notify_on(exec_ctx, fd, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
@@ -1343,18 +1492,19 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
- /* Need the fd->po.mu since we might be racing with fd_notify_on_read */
- gpr_mu_lock(&fd->po.mu);
- set_ready_locked(exec_ctx, fd, &fd->read_closure);
- fd->read_notifier_pollset = notifier;
- gpr_mu_unlock(&fd->po.mu);
+ set_ready(exec_ctx, fd, &fd->read_closure);
+
+  /* Note: it is possible that fd_become_readable might be called twice with
+     different 'notifier's when an fd becomes readable and is in two epoll
+     sets (this can happen briefly during polling island merges). In such cases
+     it does not really matter which notifier is set as the
+     read_notifier_pollset (they would both point to the same polling island
+     anyway) */
+ /* Use release store to match with acquire load in fd_get_read_notifier */
+ gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- /* Need the fd->po.mu since we might be racing with fd_notify_on_write */
- gpr_mu_lock(&fd->po.mu);
- set_ready_locked(exec_ctx, fd, &fd->write_closure);
- gpr_mu_unlock(&fd->po.mu);
+ set_ready(exec_ctx, fd, &fd->write_closure);
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
@@ -1405,16 +1555,6 @@ static void pollset_destroy(grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->po.mu);
}
-static void pollset_reset(grpc_pollset *pollset) {
- GPR_ASSERT(pollset->shutting_down);
- GPR_ASSERT(!pollset_has_workers(pollset));
- pollset->shutting_down = false;
- pollset->finish_shutdown_called = false;
- pollset->kicked_without_pollers = false;
- pollset->shutdown_done = NULL;
- GPR_ASSERT(pollset->po.pi == NULL);
-}
-
static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
polling_island *pi) {
if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
@@ -1747,7 +1887,7 @@ retry:
(void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
(void *)bag);
/* No need to lock 'pi_new' here since this is a new polling island
- * and no one has a reference to it yet */
+ and no one has a reference to it yet */
polling_island_remove_all_fds_locked(pi_new, true, &error);
/* Ref and unref so that the polling island gets deleted during unref
@@ -1852,13 +1992,12 @@ static grpc_pollset_set *pollset_set_create(void) {
return pss;
}
-static void pollset_set_destroy(grpc_pollset_set *pss) {
+static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pss) {
gpr_mu_destroy(&pss->po.mu);
if (pss->po.pi != NULL) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- PI_UNREF(&exec_ctx, pss->po.pi, "pss_destroy");
- grpc_exec_ctx_finish(&exec_ctx);
+ PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy");
}
gpr_free(pss);
@@ -1958,7 +2097,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
- .pollset_reset = pollset_reset,
.pollset_destroy = pollset_destroy,
.pollset_work = pollset_work,
.pollset_kick = pollset_kick,
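The notify_on / set_ready / set_shutdown trio above gets its correctness purely from release-CAS / acquire-load pairing. A compressed sketch of the hand-off, deliberately omitting the retry loop and shutdown scheduling that the real functions perform:

    /* Sketch: the poller publishes readiness; if a closure was already
       registered, the acquire load makes that registration visible and the
       closure is scheduled exactly once. */
    static void sketch_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state) {
      if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) return;
      gpr_atm curr = gpr_atm_acq_load(state);
      if (curr != CLOSURE_READY && curr != CLOSURE_NOT_READY &&
          (curr & FD_SHUTDOWN_BIT) == 0 &&
          gpr_atm_no_barrier_cas(state, curr, CLOSURE_NOT_READY)) {
        grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
      }
    }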
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index ca12932219..c03fadaebb 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -149,7 +149,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
static bool fd_is_orphaned(grpc_fd *fd);
/* Reference counting for fds */
-/*#define GRPC_FD_REF_COUNT_DEBUG*/
+//#define GRPC_FD_REF_COUNT_DEBUG
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
@@ -191,6 +191,7 @@ struct grpc_pollset {
int kicked_without_pollers;
grpc_closure *shutdown_done;
grpc_closure_list idle_jobs;
+ int pollset_set_count;
/* all polled fds */
size_t fd_count;
size_t fd_capacity;
@@ -228,7 +229,7 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p,
/* Return 1 if the pollset has active threads in pollset_work (pollset must
* be locked) */
-static int pollset_has_workers(grpc_pollset *pollset);
+static bool pollset_has_workers(grpc_pollset *pollset);
/*******************************************************************************
* pollset_set definitions
@@ -282,8 +283,8 @@ cv_fd_table g_cvfds;
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
- gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ (int)gpr_atm_no_barrier_load(&fd->refst),
+ (int)gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
@@ -297,8 +298,8 @@ static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
int line) {
gpr_atm old;
gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
- gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ (int)gpr_atm_no_barrier_load(&fd->refst),
+ (int)gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
gpr_atm old;
@@ -658,10 +659,18 @@ static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->next->prev = worker->prev;
}
-static int pollset_has_workers(grpc_pollset *p) {
+static bool pollset_has_workers(grpc_pollset *p) {
return p->root_worker.next != &p->root_worker;
}
+static bool pollset_in_pollset_sets(grpc_pollset *p) {
+ return p->pollset_set_count;
+}
+
+static bool pollset_has_observers(grpc_pollset *p) {
+ return pollset_has_workers(p) || pollset_in_pollset_sets(p);
+}
+
static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
if (pollset_has_workers(p)) {
grpc_pollset_worker *w = p->root_worker.next;
@@ -800,6 +809,7 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollset->fd_count = 0;
pollset->fd_capacity = 0;
pollset->fds = NULL;
+ pollset->pollset_set_count = 0;
}
static void pollset_destroy(grpc_pollset *pollset) {
@@ -815,16 +825,6 @@ static void pollset_destroy(grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->mu);
}
-static void pollset_reset(grpc_pollset *pollset) {
- GPR_ASSERT(pollset->shutting_down);
- GPR_ASSERT(!pollset_has_workers(pollset));
- GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
- GPR_ASSERT(pollset->fd_count == 0);
- pollset->shutting_down = 0;
- pollset->called_shutdown = 0;
- pollset->kicked_without_pollers = 0;
-}
-
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
@@ -1071,7 +1071,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (pollset->shutting_down) {
if (pollset_has_workers(pollset)) {
pollset_kick(pollset, NULL);
- } else if (!pollset->called_shutdown) {
+ } else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
finish_shutdown(exec_ctx, pollset);
@@ -1103,7 +1103,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (!pollset_has_workers(pollset)) {
grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
}
- if (!pollset->called_shutdown && !pollset_has_workers(pollset)) {
+ if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
finish_shutdown(exec_ctx, pollset);
}
@@ -1137,12 +1137,27 @@ static grpc_pollset_set *pollset_set_create(void) {
return pollset_set;
}
-static void pollset_set_destroy(grpc_pollset_set *pollset_set) {
+static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set) {
size_t i;
gpr_mu_destroy(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
}
+ for (i = 0; i < pollset_set->pollset_count; i++) {
+ grpc_pollset *pollset = pollset_set->pollsets[i];
+ gpr_mu_lock(&pollset->mu);
+ pollset->pollset_set_count--;
+ /* check shutdown */
+ if (pollset->shutting_down && !pollset->called_shutdown &&
+ !pollset_has_observers(pollset)) {
+ pollset->called_shutdown = 1;
+ gpr_mu_unlock(&pollset->mu);
+ finish_shutdown(exec_ctx, pollset);
+ } else {
+ gpr_mu_unlock(&pollset->mu);
+ }
+ }
gpr_free(pollset_set->pollsets);
gpr_free(pollset_set->pollset_sets);
gpr_free(pollset_set->fds);
@@ -1153,6 +1168,9 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set,
grpc_pollset *pollset) {
size_t i, j;
+ gpr_mu_lock(&pollset->mu);
+ pollset->pollset_set_count++;
+ gpr_mu_unlock(&pollset->mu);
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
pollset_set->pollset_capacity =
@@ -1188,6 +1206,17 @@ static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
}
}
gpr_mu_unlock(&pollset_set->mu);
+ gpr_mu_lock(&pollset->mu);
+ pollset->pollset_set_count--;
+ /* check shutdown */
+ if (pollset->shutting_down && !pollset->called_shutdown &&
+ !pollset_has_observers(pollset)) {
+ pollset->called_shutdown = 1;
+ gpr_mu_unlock(&pollset->mu);
+ finish_shutdown(exec_ctx, pollset);
+ } else {
+ gpr_mu_unlock(&pollset->mu);
+ }
}
static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
@@ -1514,7 +1543,6 @@ static const grpc_event_engine_vtable vtable = {
.pollset_init = pollset_init,
.pollset_shutdown = pollset_shutdown,
- .pollset_reset = pollset_reset,
.pollset_destroy = pollset_destroy,
.pollset_work = pollset_work,
.pollset_kick = pollset_kick,
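The same three-condition shutdown check now appears at the end of pollset_work, in pollset_shutdown, and twice in the pollset_set paths. A sketch of how it could be factored (finish_shutdown is this file's existing static; the helper name is illustrative):

    /* Sketch: a pollset may finish shutting down only when nothing can still
       observe it: no active worker and no containing pollset_set. Expects
       pollset->mu held on entry; always releases it. */
    static void sketch_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
                                             grpc_pollset *pollset) {
      if (pollset->shutting_down && !pollset->called_shutdown &&
          !pollset_has_observers(pollset)) {
        pollset->called_shutdown = 1;
        gpr_mu_unlock(&pollset->mu);
        finish_shutdown(exec_ctx, pollset);
      } else {
        gpr_mu_unlock(&pollset->mu);
      }
    }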
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 5bb55631d6..b5be5504b9 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -191,10 +191,6 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
g_event_engine->pollset_shutdown(exec_ctx, pollset, closure);
}
-void grpc_pollset_reset(grpc_pollset *pollset) {
- g_event_engine->pollset_reset(pollset);
-}
-
void grpc_pollset_destroy(grpc_pollset *pollset) {
g_event_engine->pollset_destroy(pollset);
}
@@ -219,8 +215,9 @@ grpc_pollset_set *grpc_pollset_set_create(void) {
return g_event_engine->pollset_set_create();
}
-void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
- g_event_engine->pollset_set_destroy(pollset_set);
+void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set) {
+ g_event_engine->pollset_set_destroy(exec_ctx, pollset_set);
}
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index a589efdeec..1a9e5c115a 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -64,7 +64,6 @@ typedef struct grpc_event_engine_vtable {
void (*pollset_init)(grpc_pollset *pollset, gpr_mu **mu);
void (*pollset_shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure);
- void (*pollset_reset)(grpc_pollset *pollset);
void (*pollset_destroy)(grpc_pollset *pollset);
grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker, gpr_timespec now,
@@ -75,7 +74,8 @@ typedef struct grpc_event_engine_vtable {
struct grpc_fd *fd);
grpc_pollset_set *(*pollset_set_create)(void);
- void (*pollset_set_destroy)(grpc_pollset_set *pollset_set);
+ void (*pollset_set_destroy)(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set);
void (*pollset_set_add_pollset)(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.c
index 1601a39002..4104bf927a 100644
--- a/src/core/lib/iomgr/network_status_tracker.c
+++ b/src/core/lib/iomgr/network_status_tracker.c
@@ -31,95 +31,18 @@
*
*/
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
#include "src/core/lib/iomgr/endpoint.h"
-typedef struct endpoint_ll_node {
- grpc_endpoint *ep;
- struct endpoint_ll_node *next;
-} endpoint_ll_node;
-
-static endpoint_ll_node *head = NULL;
-static gpr_mu g_endpoint_mutex;
-
-void grpc_network_status_shutdown(void) {
- if (head != NULL) {
- gpr_log(GPR_ERROR,
- "Memory leaked as not all network endpoints were shut down");
- }
- gpr_mu_destroy(&g_endpoint_mutex);
-}
+void grpc_network_status_shutdown(void) {}
void grpc_network_status_init(void) {
- gpr_mu_init(&g_endpoint_mutex);
// TODO(makarandd): Install callback with OS to monitor network status.
}
-void grpc_destroy_network_status_monitor() {
- for (endpoint_ll_node *curr = head; curr != NULL;) {
- endpoint_ll_node *next = curr->next;
- gpr_free(curr);
- curr = next;
- }
- gpr_mu_destroy(&g_endpoint_mutex);
-}
-
-void grpc_network_status_register_endpoint(grpc_endpoint *ep) {
- gpr_mu_lock(&g_endpoint_mutex);
- if (head == NULL) {
- head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node));
- head->ep = ep;
- head->next = NULL;
- } else {
- endpoint_ll_node *prev_head = head;
- head = (endpoint_ll_node *)gpr_malloc(sizeof(endpoint_ll_node));
- head->ep = ep;
- head->next = prev_head;
- }
- gpr_mu_unlock(&g_endpoint_mutex);
-}
+void grpc_destroy_network_status_monitor() {}
-void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) {
- gpr_mu_lock(&g_endpoint_mutex);
- GPR_ASSERT(head);
- bool found = false;
- endpoint_ll_node *prev = head;
- // if we're unregistering the head, just move head to the next
- if (ep == head->ep) {
- head = head->next;
- gpr_free(prev);
- found = true;
- } else {
- for (endpoint_ll_node *curr = head->next; curr != NULL; curr = curr->next) {
- if (ep == curr->ep) {
- prev->next = curr->next;
- gpr_free(curr);
- found = true;
- break;
- }
- prev = curr;
- }
- }
- gpr_mu_unlock(&g_endpoint_mutex);
- GPR_ASSERT(found);
-}
+void grpc_network_status_register_endpoint(grpc_endpoint *ep) { (void)ep; }
-// Walk the linked-list from head and execute shutdown. It is possible that
-// other threads might be in the process of shutdown as well, but that has
-// no side effect since endpoint shutdown is idempotent.
-void grpc_network_status_shutdown_all_endpoints() {
- gpr_mu_lock(&g_endpoint_mutex);
- if (head == NULL) {
- gpr_mu_unlock(&g_endpoint_mutex);
- return;
- }
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) { (void)ep; }
- for (endpoint_ll_node *curr = head; curr != NULL; curr = curr->next) {
- curr->ep->vtable->shutdown(&exec_ctx, curr->ep,
- GRPC_ERROR_CREATE("Network unavailable"));
- }
- gpr_mu_unlock(&g_endpoint_mutex);
- grpc_exec_ctx_finish(&exec_ctx);
-}
+void grpc_network_status_shutdown_all_endpoints() {}
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index 8d9edc8406..c4b62a2790 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -58,9 +58,6 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu);
* pollset's mutex must be held */
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure);
-/** Reset the pollset to its initial state (perhaps with some cached objects);
- * must have been previously shutdown */
-void grpc_pollset_reset(grpc_pollset *pollset);
void grpc_pollset_destroy(grpc_pollset *pollset);
/* Do some work on a pollset.
diff --git a/src/core/lib/iomgr/pollset_set.h b/src/core/lib/iomgr/pollset_set.h
index 34bb728c41..d11801d63b 100644
--- a/src/core/lib/iomgr/pollset_set.h
+++ b/src/core/lib/iomgr/pollset_set.h
@@ -44,7 +44,8 @@
typedef struct grpc_pollset_set grpc_pollset_set;
grpc_pollset_set *grpc_pollset_set_create(void);
-void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set);
+void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set);
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
diff --git a/src/core/lib/iomgr/pollset_set_uv.c b/src/core/lib/iomgr/pollset_set_uv.c
index e5ef8b29e0..836cfee4ef 100644
--- a/src/core/lib/iomgr/pollset_set_uv.c
+++ b/src/core/lib/iomgr/pollset_set_uv.c
@@ -41,7 +41,8 @@ grpc_pollset_set* grpc_pollset_set_create(void) {
return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
}
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set) {}
void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* pollset_set,
diff --git a/src/core/lib/iomgr/pollset_set_windows.c b/src/core/lib/iomgr/pollset_set_windows.c
index 645650db9b..ae18c8a3ce 100644
--- a/src/core/lib/iomgr/pollset_set_windows.c
+++ b/src/core/lib/iomgr/pollset_set_windows.c
@@ -42,7 +42,8 @@ grpc_pollset_set* grpc_pollset_set_create(void) {
return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
}
-void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set) {}
void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* pollset_set,
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
index ed3edeee94..5847771108 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -97,11 +97,6 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
}
}
-void grpc_pollset_reset(grpc_pollset *pollset) {
- GPR_ASSERT(pollset->shutting_down);
- pollset->shutting_down = 0;
-}
-
static void timer_run_cb(uv_timer_t *timer) {}
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index 2a45e708df..9be73a7ce8 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -117,16 +117,6 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
void grpc_pollset_destroy(grpc_pollset *pollset) {}
-void grpc_pollset_reset(grpc_pollset *pollset) {
- GPR_ASSERT(pollset->shutting_down);
- GPR_ASSERT(
- !has_workers(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET));
- pollset->shutting_down = 0;
- pollset->is_iocp_worker = 0;
- pollset->kicked_without_pollers = 0;
- pollset->on_shutdown = NULL;
-}
-
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
gpr_timespec now, gpr_timespec deadline) {
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
index 9b5f3209f0..79ff910738 100644
--- a/src/core/lib/iomgr/resolve_address_uv.c
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -113,14 +113,15 @@ static grpc_error *try_split_host_port(const char *name,
/* parse name, splitting it into host and port parts */
grpc_error *error;
gpr_split_host_port(name, host, port);
- if (host == NULL) {
+ if (*host == NULL) {
char *msg;
gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
error = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return error;
}
- if (port == NULL) {
+ if (*port == NULL) {
+ // TODO(murgatroid99): add tests for this case
if (default_port == NULL) {
char *msg;
gpr_asprintf(&msg, "no port in name '%s'", name);
diff --git a/src/core/lib/iomgr/socket_utils_windows.c b/src/core/lib/iomgr/socket_utils_windows.c
index 628ad4a45b..5bbe8fa34c 100644
--- a/src/core/lib/iomgr/socket_utils_windows.c
+++ b/src/core/lib/iomgr/socket_utils_windows.c
@@ -41,8 +41,12 @@
#include <grpc/support/log.h>
const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+#ifdef GPR_WIN_INET_NTOP
+ return inet_ntop(af, src, dst, size);
+#else
/* Windows InetNtopA wants a mutable ip pointer */
return InetNtopA(af, (void *)src, dst, size);
+#endif /* GPR_WIN_INET_NTOP */
}
#endif /* GRPC_WINDOWS_SOCKETUTILS */
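A small usage sketch of the wrapper (illustrative; it assumes GPR_WIN_INET_NTOP is defined on toolchains whose Windows SDK ships the standard inet_ntop):

    /* Sketch: format the IPv4 loopback address through the portable wrapper.
       inet_ntop-style APIs return dst on success and NULL on failure. */
    static void sketch_format_loopback(void) {
      char buf[46]; /* large enough for any IPv6 literal */
      struct in_addr addr;
      addr.s_addr = htonl(INADDR_LOOPBACK);
      GPR_ASSERT(grpc_inet_ntop(AF_INET, &addr, buf, sizeof(buf)) == buf);
    }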
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
index 5225a5402b..3de0795187 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -46,6 +46,8 @@
#include "src/core/lib/iomgr/tcp_uv.h"
#include "src/core/lib/iomgr/timer.h"
+extern int grpc_tcp_trace;
+
typedef struct grpc_uv_tcp_connect {
uv_connect_t connect_req;
grpc_timer alarm;
@@ -70,6 +72,12 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
grpc_error *error) {
int done;
grpc_uv_tcp_connect *connect = acp;
+ if (grpc_tcp_trace) {
+ const char *str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
+ connect->addr_name, str);
+ grpc_error_free_string(str);
+ }
if (error == GRPC_ERROR_NONE) {
/* error == NONE implies that the timer ran out, and wasn't cancelled. If
it was cancelled, then the handler that cancelled it also should close
@@ -145,6 +153,12 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect;
+
+ if (grpc_tcp_trace) {
+ gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
+ connect->addr_name);
+ }
+
// TODO(murgatroid99): figure out what the return value here means
uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
(const struct sockaddr *)resolved_addr->addr,
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index 5fb398c50b..5541c62068 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -79,8 +79,6 @@ typedef struct {
grpc_pollset *pollset;
} grpc_tcp;
-static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); }
-
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp);
@@ -119,6 +117,13 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
#endif
+static void uv_close_callback(uv_handle_t *handle) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_tcp *tcp = handle->data;
+ TCP_UNREF(&exec_ctx, tcp, "destroy");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
uv_buf_t *buf) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -313,7 +318,6 @@ static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
- TCP_UNREF(exec_ctx, tcp, "destroy");
}
static char *uv_get_peer(grpc_endpoint *ep) {
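Moving TCP_UNREF out of uv_destroy and into uv_close_callback fixes a potential use-after-free: libuv may keep touching the handle until the close callback fires, so the last reference must not drop at the uv_close call site. The general libuv pattern, sketched with illustrative types:

    #include <stdlib.h>
    #include <uv.h>

    typedef struct sketch_wrapper { /* stand-in for grpc_tcp */
      uv_tcp_t *handle;
      int refcount;
    } sketch_wrapper;

    static void sketch_close_cb(uv_handle_t *handle) {
      sketch_wrapper *w = handle->data;
      if (--w->refcount == 0) { /* the real code uses TCP_UNREF here */
        free(w->handle);
        free(w);
      }
    }

    static void sketch_destroy(sketch_wrapper *w) {
      w->handle->data = w;
      /* No unref here: the handle stays live until sketch_close_cb runs. */
      uv_close((uv_handle_t *)w->handle, sketch_close_cb);
    }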
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index 40c8351472..6d638bcbaa 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -121,12 +121,6 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
g_initialized = false;
}
-/* This is a cheap, but good enough, pointer hash for sharding the tasks: */
-static size_t shard_idx(const grpc_timer *info) {
- size_t x = (size_t)info;
- return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) & (NUM_SHARDS - 1);
-}
-
static double ts_to_dbl(gpr_timespec ts) {
return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
}
@@ -181,30 +175,30 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_timespec deadline, grpc_closure *closure,
gpr_timespec now) {
int is_first_timer = 0;
- shard_type *shard = &g_shards[shard_idx(timer)];
+ shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
GPR_ASSERT(deadline.clock_type == g_clock_type);
GPR_ASSERT(now.clock_type == g_clock_type);
timer->closure = closure;
timer->deadline = deadline;
- timer->triggered = 0;
if (!g_initialized) {
- timer->triggered = 1;
+ timer->pending = false;
grpc_closure_sched(
exec_ctx, timer->closure,
GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
return;
}
+ gpr_mu_lock(&shard->mu);
+ timer->pending = true;
if (gpr_time_cmp(deadline, now) <= 0) {
- timer->triggered = 1;
+ timer->pending = false;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
+ gpr_mu_unlock(&shard->mu);
+ /* early out */
return;
}
- /* TODO(ctiller): check deadline expired */
-
- gpr_mu_lock(&shard->mu);
grpc_time_averaged_stats_add_sample(&shard->stats,
ts_to_dbl(gpr_time_sub(deadline, now)));
if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
@@ -247,11 +241,11 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
return;
}
- shard_type *shard = &g_shards[shard_idx(timer)];
+ shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
gpr_mu_lock(&shard->mu);
- if (!timer->triggered) {
+ if (timer->pending) {
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
- timer->triggered = 1;
+ timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
} else {
@@ -302,7 +296,7 @@ static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
}
timer = grpc_timer_heap_top(&shard->heap);
if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
- timer->triggered = 1;
+ timer->pending = false;
grpc_timer_heap_pop(&shard->heap);
return timer;
}
diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h
index 9d901c7e68..1608dce9fb 100644
--- a/src/core/lib/iomgr/timer_generic.h
+++ b/src/core/lib/iomgr/timer_generic.h
@@ -40,7 +40,7 @@
struct grpc_timer {
gpr_timespec deadline;
uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
- int triggered;
+ bool pending;
struct grpc_timer *next;
struct grpc_timer *prev;
grpc_closure *closure;
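GPR_HASH_POINTER centralizes the pointer-shard hash that was previously duplicated here and in support/cpu_posix.c (see the later hunk). Judging from the removed shard_idx above, the macro can be assumed to compute something like:

    /* Sketch reconstructed from the removed shard_idx(): XOR three shifted
       copies of the address, then reduce into [0, range). */
    #define SKETCH_HASH_POINTER(x, range)                                   \
      (((((size_t)(x)) >> 4) ^ (((size_t)(x)) >> 9) ^ (((size_t)(x)) >> 14)) \
       % (range))

    /* Usage mirroring the patch:
       shard_type *shard = &g_shards[SKETCH_HASH_POINTER(timer, NUM_SHARDS)]; */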
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
index fa2cdee964..f28a14405d 100644
--- a/src/core/lib/iomgr/timer_uv.c
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -53,8 +53,8 @@ static void stop_uv_timer(uv_timer_t *handle) {
void run_expired_timer(uv_timer_t *handle) {
grpc_timer *timer = (grpc_timer *)handle->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GPR_ASSERT(!timer->triggered);
- timer->triggered = 1;
+ GPR_ASSERT(timer->pending);
+ timer->pending = 0;
grpc_closure_sched(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
stop_uv_timer(handle);
grpc_exec_ctx_finish(&exec_ctx);
@@ -67,11 +67,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
uv_timer_t *uv_timer;
timer->closure = closure;
if (gpr_time_cmp(deadline, now) <= 0) {
- timer->triggered = 1;
+ timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
return;
}
- timer->triggered = 0;
+ timer->pending = 1;
timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
uv_timer = gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), uv_timer);
@@ -81,8 +81,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
}
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
- if (!timer->triggered) {
- timer->triggered = 1;
+ if (timer->pending) {
+ timer->pending = 0;
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
stop_uv_timer((uv_timer_t *)timer->uv_timer);
}
diff --git a/src/core/lib/iomgr/timer_uv.h b/src/core/lib/iomgr/timer_uv.h
index 13cf8bd4fa..9870cd4a5c 100644
--- a/src/core/lib/iomgr/timer_uv.h
+++ b/src/core/lib/iomgr/timer_uv.h
@@ -41,7 +41,7 @@ struct grpc_timer {
/* This is actually a uv_timer_t*, but we want to keep platform-specific
types out of headers */
void *uv_timer;
- int triggered;
+ int pending;
};
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index a098741b70..ecd26de9fa 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -154,7 +154,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
}
gpr_mu_unlock(g_polling_mu);
- grpc_httpcli_context_destroy(&context);
+ grpc_httpcli_context_destroy(exec_ctx, &context);
grpc_closure_init(&destroy_closure, destroy_pollset,
grpc_polling_entity_pollset(&detector.pollent),
grpc_schedule_on_exec_ctx);
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c
index 2270be8f44..f128177e8c 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c
@@ -898,10 +898,10 @@ grpc_jwt_verifier *grpc_jwt_verifier_create(
return v;
}
-void grpc_jwt_verifier_destroy(grpc_jwt_verifier *v) {
+void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_verifier *v) {
size_t i;
if (v == NULL) return;
- grpc_httpcli_context_destroy(&v->http_ctx);
+ grpc_httpcli_context_destroy(exec_ctx, &v->http_ctx);
if (v->mappings != NULL) {
for (i = 0; i < v->num_mappings; i++) {
gpr_free(v->mappings[i].email_domain);
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h
index 4fa320a415..5c3d2a7788 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.h
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h
@@ -109,7 +109,8 @@ grpc_jwt_verifier *grpc_jwt_verifier_create(
size_t num_mappings);
/*The verifier must not be destroyed if there are still outstanding callbacks.*/
-void grpc_jwt_verifier_destroy(grpc_jwt_verifier *verifier);
+void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_jwt_verifier *verifier);
/* User provided callback that will be called when the verification of the JWT
is done (maybe in another thread).
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index 1b0e43a1e4..c0f260f938 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -124,7 +124,7 @@ static void oauth2_token_fetcher_destruct(grpc_exec_ctx *exec_ctx,
(grpc_oauth2_token_fetcher_credentials *)creds;
grpc_credentials_md_store_unref(exec_ctx, c->access_token_md);
gpr_mu_destroy(&c->mu);
- grpc_httpcli_context_destroy(&c->httpcli_context);
+ grpc_httpcli_context_destroy(exec_ctx, &c->httpcli_context);
}
grpc_credentials_status
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index b9bbe1b304..a23082a866 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -302,7 +302,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
memset(calld, 0, sizeof(*calld));
return GRPC_ERROR_NONE;
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index 36e81d6501..14619d97ca 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -197,7 +197,7 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
diff --git a/src/core/lib/support/cpu_posix.c b/src/core/lib/support/cpu_posix.c
index 667bde7cad..245f12f06d 100644
--- a/src/core/lib/support/cpu_posix.c
+++ b/src/core/lib/support/cpu_posix.c
@@ -41,6 +41,7 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
static __thread char magic_thread_local;
@@ -60,18 +61,12 @@ unsigned gpr_cpu_num_cores(void) {
return (unsigned)ncpus;
}
-/* This is a cheap, but good enough, pointer hash for sharding things: */
-static size_t shard_ptr(const void *info) {
- size_t x = (size_t)info;
- return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) % gpr_cpu_num_cores();
-}
-
unsigned gpr_cpu_current_cpu(void) {
/* NOTE: there's no way I know to return the actual cpu index portably...
most code that's using this is using it to shard across work queues though,
so here we use thread identity instead to achieve a similar though not
identical effect */
- return (unsigned)shard_ptr(&magic_thread_local);
+ return (unsigned)GPR_HASH_POINTER(&magic_thread_local, gpr_cpu_num_cores());
}
#endif /* GPR_CPU_POSIX */
diff --git a/src/core/lib/support/sync_posix.c b/src/core/lib/support/sync_posix.c
index de0f0484b5..16e7d6e12a 100644
--- a/src/core/lib/support/sync_posix.c
+++ b/src/core/lib/support/sync_posix.c
@@ -42,8 +42,10 @@
#include <time.h>
#include "src/core/lib/profiling/timers.h"
-#ifdef GPR_MU_COUNTERS
-gpr_atm grpc_mu_locks = 0;
+#ifdef GPR_LOW_LEVEL_COUNTERS
+gpr_atm gpr_mu_locks = 0;
+gpr_atm gpr_counter_atm_cas = 0;
+gpr_atm gpr_counter_atm_add = 0;
#endif
void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
@@ -51,8 +53,8 @@ void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
void gpr_mu_destroy(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
void gpr_mu_lock(gpr_mu* mu) {
-#ifdef GPR_MU_COUNTERS
- gpr_atm_no_barrier_fetch_add(&grpc_mu_locks, 1);
+#ifdef GPR_LOW_LEVEL_COUNTERS
+ GPR_ATM_INC_COUNTER(gpr_mu_locks);
#endif
GPR_TIMER_BEGIN("gpr_mu_lock", 0);
GPR_ASSERT(pthread_mutex_lock(mu) == 0);
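The old GPR_MU_COUNTERS switch grows into a general GPR_LOW_LEVEL_COUNTERS facility with separate counters for mutex locks, atomic CAS, and atomic add traffic. The GPR_ATM_INC_COUNTER macro itself is defined elsewhere in the tree and not shown in this diff; a hedged guess at its shape:

    /* Sketch, assuming a relaxed fetch-add is all the counters need. */
    #ifdef GPR_LOW_LEVEL_COUNTERS
    #define SKETCH_ATM_INC_COUNTER(counter) \
      gpr_atm_no_barrier_fetch_add(&(counter), (gpr_atm)1)
    #endif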
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 3352e427cd..af32c5b378 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -101,7 +101,18 @@ typedef struct {
grpc_error *error;
} received_status;
-#define MAX_ERRORS_PER_BATCH 3
+static gpr_atm pack_received_status(received_status r) {
+ return r.is_set ? (1 | (gpr_atm)r.error) : 0;
+}
+
+static received_status unpack_received_status(gpr_atm atm) {
+ return (atm & 1) == 0
+ ? (received_status){.is_set = false, .error = GRPC_ERROR_NONE}
+ : (received_status){.is_set = true,
+ .error = (grpc_error *)(atm & ~(gpr_atm)1)};
+}
+
+#define MAX_ERRORS_PER_BATCH 4
typedef struct batch_control {
grpc_call *call;
@@ -142,8 +153,6 @@ struct grpc_call {
bool destroy_called;
/** flag indicating that cancellation is inherited */
bool cancellation_is_inherited;
- /** bitmask of live batches */
- uint8_t used_batches;
/** which ops are in-flight */
bool sent_initial_metadata;
bool sending_message;
@@ -165,8 +174,8 @@ struct grpc_call {
Element 0 is initial metadata, element 1 is trailing metadata. */
grpc_metadata_array *buffered_metadata[2];
- /* Received call statuses from various sources */
- received_status status[STATUS_SOURCE_COUNT];
+ /* Packed received call statuses from various sources */
+ gpr_atm status[STATUS_SOURCE_COUNT];
/* Call data useful for reporting. Only valid after the call has
* completed */
@@ -245,7 +254,7 @@ static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl);
static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl);
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
- grpc_error *error);
+ grpc_error *error, bool has_cancelled);
static void add_init_error(grpc_error **composite, grpc_error *new) {
if (new == GRPC_ERROR_NONE) return;
@@ -446,7 +455,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- GRPC_ERROR_UNREF(c->status[i].error);
+ GRPC_ERROR_UNREF(
+ unpack_received_status(gpr_atm_no_barrier_load(&c->status[i])).error);
}
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c);
@@ -481,7 +491,10 @@ void grpc_call_destroy(grpc_call *c) {
c->destroy_called = 1;
cancel = !c->received_final_op;
gpr_mu_unlock(&c->mu);
- if (cancel) grpc_call_cancel(c, NULL);
+ if (cancel) {
+ cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE,
+ GRPC_ERROR_CANCELLED);
+ }
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_destroy", 0);
@@ -490,8 +503,11 @@ void grpc_call_destroy(grpc_call *c) {
grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
GPR_ASSERT(!reserved);
- return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled",
- NULL);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ cancel_with_error(&exec_ctx, call, STATUS_FROM_API_OVERRIDE,
+ GRPC_ERROR_CANCELLED);
+ grpc_exec_ctx_finish(&exec_ctx);
+ return GRPC_CALL_OK;
}
static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
@@ -608,13 +624,12 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
*/
static bool get_final_status_from(
- grpc_call *call, status_source from_source, bool allow_ok_status,
+ grpc_call *call, grpc_error *error, bool allow_ok_status,
void (*set_value)(grpc_status_code code, void *user_data),
void *set_value_user_data, grpc_slice *details) {
grpc_status_code code;
const char *msg = NULL;
- grpc_error_get_status(call->status[from_source].error, call->send_deadline,
- &code, &msg, NULL);
+ grpc_error_get_status(error, call->send_deadline, &code, &msg, NULL);
if (code == GRPC_STATUS_OK && !allow_ok_status) {
return false;
}
@@ -632,12 +647,15 @@ static void get_final_status(grpc_call *call,
void *user_data),
void *set_value_user_data, grpc_slice *details) {
int i;
+ received_status status[STATUS_SOURCE_COUNT];
+ for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
+ status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i]));
+ }
if (grpc_call_error_trace) {
gpr_log(GPR_DEBUG, "get_final_status %s", call->is_client ? "CLI" : "SVR");
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set) {
- gpr_log(GPR_DEBUG, " %d: %s", i,
- grpc_error_string(call->status[i].error));
+ if (status[i].is_set) {
+ gpr_log(GPR_DEBUG, " %d: %s", i, grpc_error_string(status[i].error));
}
}
}
@@ -647,9 +665,9 @@ static void get_final_status(grpc_call *call,
/* search for the best status we can present: ideally the error we use has a
clearly defined grpc-status, and we'll prefer that. */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set &&
- grpc_error_has_clear_grpc_status(call->status[i].error)) {
- if (get_final_status_from(call, (status_source)i, allow_ok_status != 0,
+ if (status[i].is_set &&
+ grpc_error_has_clear_grpc_status(status[i].error)) {
+ if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details)) {
return;
}
@@ -657,8 +675,8 @@ static void get_final_status(grpc_call *call,
}
/* If no clearly defined status exists, search for 'anything' */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
- if (call->status[i].is_set) {
- if (get_final_status_from(call, (status_source)i, allow_ok_status != 0,
+ if (status[i].is_set) {
+ if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details)) {
return;
}
@@ -675,12 +693,13 @@ static void get_final_status(grpc_call *call,
static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
status_source source, grpc_error *error) {
- if (call->status[source].is_set) {
+ if (!gpr_atm_rel_cas(&call->status[source],
+ pack_received_status((received_status){
+ .is_set = false, .error = GRPC_ERROR_NONE}),
+ pack_received_status((received_status){
+ .is_set = true, .error = error}))) {
GRPC_ERROR_UNREF(error);
- return;
}
- call->status[source].is_set = true;
- call->status[source].error = error;
}
/*******************************************************************************
@@ -991,25 +1010,48 @@ static bool are_initial_metadata_flags_valid(uint32_t flags, bool is_client) {
return !(flags & invalid_positions);
}
-static batch_control *allocate_batch_control(grpc_call *call) {
- size_t i;
- for (i = 0; i < MAX_CONCURRENT_BATCHES; i++) {
- if ((call->used_batches & (1 << i)) == 0) {
- call->used_batches = (uint8_t)(call->used_batches | (uint8_t)(1 << i));
- return &call->active_batches[i];
- }
+static int batch_slot_for_op(grpc_op_type type) {
+ switch (type) {
+ case GRPC_OP_SEND_INITIAL_METADATA:
+ return 0;
+ case GRPC_OP_SEND_MESSAGE:
+ return 1;
+ case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
+ case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ return 2;
+ case GRPC_OP_RECV_INITIAL_METADATA:
+ return 3;
+ case GRPC_OP_RECV_MESSAGE:
+ return 4;
+ case GRPC_OP_RECV_CLOSE_ON_SERVER:
+ case GRPC_OP_RECV_STATUS_ON_CLIENT:
+ return 5;
+ }
+ GPR_UNREACHABLE_CODE(return 123456789);
+}
+
+static batch_control *allocate_batch_control(grpc_call *call,
+ const grpc_op *ops,
+ size_t num_ops) {
+ int slot = batch_slot_for_op(ops[0].op);
+ for (size_t i = 1; i < num_ops; i++) {
+ int op_slot = batch_slot_for_op(ops[i].op);
+ slot = GPR_MIN(slot, op_slot);
}
- return NULL;
+ batch_control *bctl = &call->active_batches[slot];
+ if (bctl->call != NULL) {
+ return NULL;
+ }
+ memset(bctl, 0, sizeof(*bctl));
+ bctl->call = call;
+ return bctl;
}
static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_cq_completion *storage) {
batch_control *bctl = user_data;
grpc_call *call = bctl->call;
- gpr_mu_lock(&call->mu);
- call->used_batches = (uint8_t)(
- call->used_batches & ~(uint8_t)(1 << (bctl - call->active_batches)));
- gpr_mu_unlock(&call->mu);
+ bctl->call = NULL;
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
}
@@ -1092,12 +1134,8 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
+ bctl->call = NULL;
grpc_closure_run(exec_ctx, bctl->notify_tag, error);
- gpr_mu_lock(&call->mu);
- bctl->call->used_batches =
- (uint8_t)(bctl->call->used_batches &
- ~(uint8_t)(1 << (bctl - bctl->call->active_batches)));
- gpr_mu_unlock(&call->mu);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@@ -1185,6 +1223,11 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_call *call = bctl->call;
gpr_mu_lock(&bctl->call->mu);
if (error != GRPC_ERROR_NONE) {
+ if (call->receiving_stream != NULL) {
+ grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+ call->receiving_stream = NULL;
+ }
+ add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), true);
cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE,
GRPC_ERROR_REF(error));
}
@@ -1251,10 +1294,10 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
}
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
- grpc_error *error) {
+ grpc_error *error, bool has_cancelled) {
if (error == GRPC_ERROR_NONE) return;
int idx = (int)gpr_atm_no_barrier_fetch_add(&bctl->num_errors, 1);
- if (idx == 0) {
+ if (idx == 0 && !has_cancelled) {
cancel_with_error(exec_ctx, bctl->call, STATUS_FROM_CORE,
GRPC_ERROR_REF(error));
}
@@ -1268,7 +1311,7 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&call->mu);
- add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error));
+ add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
@@ -1305,10 +1348,15 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
batch_control *bctl = bctlp;
- add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error));
+ add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
finish_batch_step(exec_ctx, bctl);
}
+static void free_no_op_completion(grpc_exec_ctx *exec_ctx, void *p,
+ grpc_cq_completion *completion) {
+ gpr_free(completion);
+}
+
static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_call *call, const grpc_op *ops,
size_t nops, void *notify_tag,
@@ -1323,32 +1371,34 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_metadata compression_md;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
-
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
- /* TODO(ctiller): this feels like it could be made lock-free */
- gpr_mu_lock(&call->mu);
- bctl = allocate_batch_control(call);
- memset(bctl, 0, sizeof(*bctl));
- bctl->call = call;
- bctl->notify_tag = notify_tag;
- bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
-
- grpc_transport_stream_op *stream_op = &bctl->op;
- memset(stream_op, 0, sizeof(*stream_op));
- stream_op->covered_by_poller = true;
-
if (nops == 0) {
- GRPC_CALL_INTERNAL_REF(call, "completion");
if (!is_notify_tag_closure) {
grpc_cq_begin_op(call->cq, notify_tag);
+ grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
+ free_no_op_completion, NULL,
+ gpr_malloc(sizeof(grpc_cq_completion)));
+ } else {
+ grpc_closure_sched(exec_ctx, notify_tag, GRPC_ERROR_NONE);
}
- gpr_mu_unlock(&call->mu);
- post_batch_completion(exec_ctx, bctl);
error = GRPC_CALL_OK;
goto done;
}
+ /* TODO(ctiller): this feels like it could be made lock-free */
+ bctl = allocate_batch_control(call, ops, nops);
+ if (bctl == NULL) {
+ return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
+ }
+ bctl->notify_tag = notify_tag;
+ bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
+
+ gpr_mu_lock(&call->mu);
+ grpc_transport_stream_op *stream_op = &bctl->op;
+ memset(stream_op, 0, sizeof(*stream_op));
+ stream_op->covered_by_poller = true;
+
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
op = &ops[i];
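
Note: three related changes to batch and status handling in call.c. First,
each per-source received status becomes a single packed gpr_atm: a
grpc_error* is at least 2-byte aligned, so bit 0 is free to carry the
is_set flag, and set_status_from_error becomes one release CAS against the
empty encoding (0), so the first writer per source wins and later errors
are simply unreffed. Second, the used_batches bitmask is gone: a batch is
keyed to a fixed slot derived from its lowest-numbered op (sends in slots
0-2, receives in 3-5), so GRPC_CALL_ERROR_TOO_MANY_OPERATIONS now means "a
batch of this kind is already in flight", and completion releases the slot
by nulling bctl->call with no call lock held. Third, zero-op batches no
longer build a batch_control at all; they complete immediately through a
heap-allocated no-op completion. (MAX_ERRORS_PER_BATCH grows to 4 since
receiving_stream_ready now records its error on the batch as well.) A
self-contained sketch of the low-bit tagging; names here are illustrative,
not from the tree:

    #include <stdbool.h>
    #include <stdint.h>

    typedef intptr_t atm_word; /* stand-in for gpr_atm */

    /* Pack an aligned pointer plus an is_set flag into one word:
       bit 0 is the flag, the remaining bits are the pointer. */
    static atm_word pack_tagged(void *p, bool is_set) {
      return is_set ? ((atm_word)p | 1) : 0;
    }

    /* Invert pack_tagged: mask bit 0 back out to recover the pointer. */
    static void *unpack_tagged(atm_word w, bool *is_set) {
      *is_set = (w & 1) != 0;
      return (void *)(w & ~(atm_word)1);
    }

With this encoding, "set exactly once" needs no mutex: a compare-and-swap
from 0 to the packed value either publishes the status or tells the caller
another source already recorded one.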
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index b70343ddf1..7d4d0db28d 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -110,6 +110,7 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
/* Set a context pointer.
No thread safety guarantees are made wrt this value. */
+/* TODO(#9731): add exec_ctx to destroy */
void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
void *value, void (*destroy)(void *value));
/* Get a context pointer. */
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 429dbad7c7..d6acd392c1 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -128,7 +128,8 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
}
channel->default_authority = grpc_mdelem_from_slices(
exec_ctx, GRPC_MDSTR_AUTHORITY,
- grpc_slice_from_copied_string(args->args[i].value.string));
+ grpc_slice_intern(
+ grpc_slice_from_static_string(args->args[i].value.string)));
}
} else if (0 ==
strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
@@ -144,7 +145,8 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
} else {
channel->default_authority = grpc_mdelem_from_slices(
exec_ctx, GRPC_MDSTR_AUTHORITY,
- grpc_slice_from_copied_string(args->args[i].value.string));
+ grpc_slice_intern(
+ grpc_slice_from_static_string(args->args[i].value.string)));
}
}
} else if (0 == strcmp(args->args[i].key,
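
Note: the default :authority mdelem now interns its value instead of making
a fresh refcounted copy per channel. grpc_slice_from_static_string wraps
the channel-arg bytes without copying, and grpc_slice_intern is assumed
here to return the canonical refcounted slice for those bytes (copying
into the intern table on first sight), so identical authorities share one
slice and metadata comparisons reduce to pointer equality. Fragment
illustrating the pattern (authority_str is a hypothetical stand-in for the
channel-arg string):

    const char *authority_str = "example.test"; /* hypothetical value */
    /* No copy here; the slice borrows authority_str's bytes... */
    grpc_slice borrowed = grpc_slice_from_static_string(authority_str);
    /* ...and interning yields the shared canonical slice for them. */
    grpc_slice canonical = grpc_slice_intern(borrowed);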
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 1830842d00..b80656aa8d 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -96,9 +96,6 @@ struct grpc_completion_queue {
#define POLLSET_FROM_CQ(cq) ((grpc_pollset *)(cq + 1))
#define CQ_FROM_POLLSET(ps) (((grpc_completion_queue *)ps) - 1)
-static gpr_mu g_freelist_mu;
-static grpc_completion_queue *g_freelist;
-
int grpc_cq_pluck_trace;
int grpc_cq_event_timeout_trace;
@@ -113,21 +110,6 @@ int grpc_cq_event_timeout_trace;
static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cc,
grpc_error *error);
-void grpc_cq_global_init(void) { gpr_mu_init(&g_freelist_mu); }
-
-void grpc_cq_global_shutdown(void) {
- gpr_mu_destroy(&g_freelist_mu);
- while (g_freelist) {
- grpc_completion_queue *next = g_freelist->next_free;
- grpc_pollset_destroy(POLLSET_FROM_CQ(g_freelist));
-#ifndef NDEBUG
- gpr_free(g_freelist->outstanding_tags);
-#endif
- gpr_free(g_freelist);
- g_freelist = next;
- }
-}
-
grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
grpc_completion_queue *cc;
GPR_ASSERT(!reserved);
@@ -136,22 +118,12 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
GRPC_API_TRACE("grpc_completion_queue_create(reserved=%p)", 1, (reserved));
- gpr_mu_lock(&g_freelist_mu);
- if (g_freelist == NULL) {
- gpr_mu_unlock(&g_freelist_mu);
-
- cc = gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
- grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu);
+ cc = gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
+ grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu);
#ifndef NDEBUG
- cc->outstanding_tags = NULL;
- cc->outstanding_tag_capacity = 0;
+ cc->outstanding_tags = NULL;
+ cc->outstanding_tag_capacity = 0;
#endif
- } else {
- cc = g_freelist;
- g_freelist = g_freelist->next_free;
- gpr_mu_unlock(&g_freelist_mu);
- /* pollset already initialized */
- }
/* Initial ref is dropped by grpc_completion_queue_shutdown */
gpr_ref_init(&cc->pending_events, 1);
@@ -203,11 +175,11 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
#endif
if (gpr_unref(&cc->owning_refs)) {
GPR_ASSERT(cc->completed_head.next == (uintptr_t)&cc->completed_head);
- grpc_pollset_reset(POLLSET_FROM_CQ(cc));
- gpr_mu_lock(&g_freelist_mu);
- cc->next_free = g_freelist;
- g_freelist = cc;
- gpr_mu_unlock(&g_freelist_mu);
+ grpc_pollset_destroy(POLLSET_FROM_CQ(cc));
+#ifndef NDEBUG
+ gpr_free(cc->outstanding_tags);
+#endif
+ gpr_free(cc);
}
}
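
Note: the global completion-queue freelist is gone. Queues are now simply
malloc'd in grpc_completion_queue_create and, once the owning refcount
drops to zero, their pollset is destroyed and the memory freed, instead of
being parked on g_freelist behind g_freelist_mu. Condensed from this
patch, the lifecycle is now just:

    /* Create: allocate queue + trailing pollset storage in one block. */
    grpc_completion_queue *cc =
        gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
    grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu);

    /* Last unref: tear the pollset down and release the block. */
    grpc_pollset_destroy(POLLSET_FROM_CQ(cc));
    gpr_free(cc);

This also drops this use of grpc_pollset_reset and lets
grpc_cq_global_init/grpc_cq_global_shutdown disappear from
completion_queue.h and from grpc_init/grpc_shutdown below.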
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index c1cafba5f2..5d73dd7216 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -99,7 +99,4 @@ bool grpc_cq_is_non_listening_server_cq(grpc_completion_queue *cc);
void grpc_cq_mark_server_cq(grpc_completion_queue *cc);
int grpc_cq_is_server_cq(grpc_completion_queue *cc);
-void grpc_cq_global_init(void);
-void grpc_cq_global_shutdown(void);
-
#endif /* GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_H */
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index b338ac4c48..91bd014a0e 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -209,7 +209,6 @@ void grpc_init(void) {
grpc_iomgr_init();
grpc_executor_init();
gpr_timers_global_init();
- grpc_cq_global_init();
grpc_handshaker_factory_registry_init();
grpc_security_init();
for (i = 0; i < g_number_of_plugins; i++) {
@@ -236,7 +235,6 @@ void grpc_shutdown(void) {
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
grpc_executor_shutdown(&exec_ctx);
- grpc_cq_global_shutdown();
grpc_iomgr_shutdown(&exec_ctx);
gpr_timers_global_destroy();
grpc_tracer_shutdown();
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index 48de0e1d5b..49bc4c114b 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -122,7 +122,7 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
gpr_atm_no_barrier_store(&calld->filled_metadata, 0);
return GRPC_ERROR_NONE;
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 6ab1c0d94d..fdb230b3e2 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -540,7 +540,8 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
grpc_schedule_on_exec_ctx);
- grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, error);
+ grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+ GRPC_ERROR_REF(error));
return;
}
@@ -879,7 +880,7 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_call_element_args *args) {
+ const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
memset(calld, 0, sizeof(call_data));
@@ -1198,7 +1199,9 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
crm->server_registered_method = rm;
crm->flags = rm->flags;
crm->has_host = has_host;
- crm->host = host;
+ if (has_host) {
+ crm->host = host;
+ }
crm->method = method;
}
GPR_ASSERT(slots <= UINT32_MAX);
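
Note: two small correctness fixes in server.c. publish_new_rpc was handing
a borrowed error straight to grpc_closure_sched, which consumes a ref, so
it now passes GRPC_ERROR_REF(error); and registered-method setup no longer
copies host into crm->host when has_host is false, avoiding a read of an
uninitialized slice. The error-ownership rule in sketch form:

    /* grpc_closure_sched takes ownership of one ref on the error it is
       given. A caller that only borrowed `error` (or still needs it)
       must therefore pass a fresh ref: */
    grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
                       GRPC_ERROR_REF(error));
    /* ... `error` remains owned by, and is later unreffed by, its
       original owner. */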
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index 8fc5bf3e9a..afe1f6164d 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -62,7 +62,7 @@ const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state init_state,
const char *name) {
- tracker->current_state = init_state;
+ gpr_atm_no_barrier_store(&tracker->current_state_atm, init_state);
tracker->current_error = GRPC_ERROR_NONE;
tracker->watchers = NULL;
tracker->name = gpr_strdup(name);
@@ -89,15 +89,30 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
}
grpc_connectivity_state grpc_connectivity_state_check(
+ grpc_connectivity_state_tracker *tracker) {
+ grpc_connectivity_state cur =
+ (grpc_connectivity_state)gpr_atm_no_barrier_load(
+ &tracker->current_state_atm);
+ if (grpc_connectivity_state_trace) {
+ gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
+ grpc_connectivity_state_name(cur));
+ }
+ return cur;
+}
+
+grpc_connectivity_state grpc_connectivity_state_get(
grpc_connectivity_state_tracker *tracker, grpc_error **error) {
+ grpc_connectivity_state cur =
+ (grpc_connectivity_state)gpr_atm_no_barrier_load(
+ &tracker->current_state_atm);
if (grpc_connectivity_state_trace) {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
- grpc_connectivity_state_name(tracker->current_state));
+ grpc_connectivity_state_name(cur));
}
if (error != NULL) {
*error = GRPC_ERROR_REF(tracker->current_error);
}
- return tracker->current_state;
+ return cur;
}
bool grpc_connectivity_state_has_watchers(
@@ -108,6 +123,9 @@ bool grpc_connectivity_state_has_watchers(
bool grpc_connectivity_state_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state *current, grpc_closure *notify) {
+ grpc_connectivity_state cur =
+ (grpc_connectivity_state)gpr_atm_no_barrier_load(
+ &tracker->current_state_atm);
if (grpc_connectivity_state_trace) {
if (current == NULL) {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
@@ -115,7 +133,7 @@ bool grpc_connectivity_state_notify_on_state_change(
} else {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
tracker->name, grpc_connectivity_state_name(*current),
- grpc_connectivity_state_name(tracker->current_state), notify);
+ grpc_connectivity_state_name(cur), notify);
}
}
if (current == NULL) {
@@ -138,8 +156,8 @@ bool grpc_connectivity_state_notify_on_state_change(
}
return false;
} else {
- if (tracker->current_state != *current) {
- *current = tracker->current_state;
+ if (cur != *current) {
+ *current = cur;
grpc_closure_sched(exec_ctx, notify,
GRPC_ERROR_REF(tracker->current_error));
} else {
@@ -149,7 +167,7 @@ bool grpc_connectivity_state_notify_on_state_change(
w->next = tracker->watchers;
tracker->watchers = w;
}
- return tracker->current_state == GRPC_CHANNEL_IDLE;
+ return cur == GRPC_CHANNEL_IDLE;
}
}
@@ -157,11 +175,14 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state state,
grpc_error *error, const char *reason) {
+ grpc_connectivity_state cur =
+ (grpc_connectivity_state)gpr_atm_no_barrier_load(
+ &tracker->current_state_atm);
grpc_connectivity_state_watcher *w;
if (grpc_connectivity_state_trace) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
- tracker->name, grpc_connectivity_state_name(tracker->current_state),
+ tracker->name, grpc_connectivity_state_name(cur),
grpc_connectivity_state_name(state), reason, error, error_string);
}
switch (state) {
@@ -178,13 +199,13 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
}
GRPC_ERROR_UNREF(tracker->current_error);
tracker->current_error = error;
- if (tracker->current_state == state) {
+ if (cur == state) {
return;
}
- GPR_ASSERT(tracker->current_state != GRPC_CHANNEL_SHUTDOWN);
- tracker->current_state = state;
+ GPR_ASSERT(cur != GRPC_CHANNEL_SHUTDOWN);
+ gpr_atm_no_barrier_store(&tracker->current_state_atm, state);
while ((w = tracker->watchers) != NULL) {
- *w->current = tracker->current_state;
+ *w->current = state;
tracker->watchers = w->next;
if (grpc_connectivity_state_trace) {
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
diff --git a/src/core/lib/transport/connectivity_state.h b/src/core/lib/transport/connectivity_state.h
index 769c675b79..c9604c34dd 100644
--- a/src/core/lib/transport/connectivity_state.h
+++ b/src/core/lib/transport/connectivity_state.h
@@ -47,8 +47,8 @@ typedef struct grpc_connectivity_state_watcher {
} grpc_connectivity_state_watcher;
typedef struct {
- /** current connectivity state */
- grpc_connectivity_state current_state;
+ /** current grpc_connectivity_state */
+ gpr_atm current_state_atm;
/** error associated with state */
grpc_error *current_error;
/** all our watchers */
@@ -59,6 +59,7 @@ typedef struct {
extern int grpc_connectivity_state_trace;
+/** enum --> string conversion */
const char *grpc_connectivity_state_name(grpc_connectivity_state state);
void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
@@ -68,22 +69,31 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state_tracker *tracker);
/** Set connectivity state; not thread safe; access must be serialized with an
- * external lock */
+ * external lock */
void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state state,
grpc_error *associated_error,
const char *reason);
+/** Return true if this connectivity state has watchers.
+ Access must be serialized with an external lock. */
bool grpc_connectivity_state_has_watchers(
grpc_connectivity_state_tracker *tracker);
+/** Return the last seen connectivity state. No need to synchronize access. */
grpc_connectivity_state grpc_connectivity_state_check(
- grpc_connectivity_state_tracker *tracker, grpc_error **current_error);
+ grpc_connectivity_state_tracker *tracker);
+
+/** Return the last seen connectivity state, and the associated error.
+ Access must be serialized with an external lock. */
+grpc_connectivity_state grpc_connectivity_state_get(
+ grpc_connectivity_state_tracker *tracker, grpc_error **error);
/** Return 1 if the channel should start connecting, 0 otherwise.
If current==NULL cancel notify if it is already queued (success==0 in that
- case) */
+ case).
+ Access must be serialized with an external lock. */
bool grpc_connectivity_state_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state *current, grpc_closure *notify);
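
Note: the header now states each function's locking contract explicitly;
the practical split is grpc_connectivity_state_check (lock-free snapshot,
state only) versus the new grpc_connectivity_state_get (state plus a ref
on the associated error, external lock required). Usage sketch, where
tracker is a grpc_connectivity_state_tracker* and mu is a hypothetical
lock standing in for whatever serializes the tracker's writers:

    /* Cheap racy read: fine for logging or polling the state. */
    grpc_connectivity_state s = grpc_connectivity_state_check(tracker);

    /* Consistent state+error read: must hold the serializing lock, and
       the caller owns the returned error ref. */
    gpr_mu_lock(&mu);
    grpc_error *err = GRPC_ERROR_NONE;
    s = grpc_connectivity_state_get(tracker, &err);
    gpr_mu_unlock(&mu);
    GRPC_ERROR_UNREF(err);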
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 750711befd..c13ba230b3 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -55,64 +55,63 @@ static uint8_t g_bytes[] = {
112, 101, 103, 114, 112, 99, 45, 105, 110, 116, 101, 114, 110, 97, 108,
45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114, 101, 113, 117, 101,
115, 116, 117, 115, 101, 114, 45, 97, 103, 101, 110, 116, 104, 111, 115,
- 116, 108, 98, 45, 116, 111, 107, 101, 110, 108, 98, 45, 99, 111, 115,
- 116, 45, 98, 105, 110, 103, 114, 112, 99, 45, 116, 105, 109, 101, 111,
- 117, 116, 103, 114, 112, 99, 45, 116, 114, 97, 99, 105, 110, 103, 45,
- 98, 105, 110, 103, 114, 112, 99, 45, 115, 116, 97, 116, 115, 45, 98,
- 105, 110, 103, 114, 112, 99, 46, 119, 97, 105, 116, 95, 102, 111, 114,
- 95, 114, 101, 97, 100, 121, 103, 114, 112, 99, 46, 116, 105, 109, 101,
- 111, 117, 116, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 113,
- 117, 101, 115, 116, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98, 121,
- 116, 101, 115, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 115,
- 112, 111, 110, 115, 101, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98,
- 121, 116, 101, 115, 47, 103, 114, 112, 99, 46, 108, 98, 46, 118, 49,
- 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99, 101, 114, 47, 66,
- 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 48, 49, 50, 105, 100,
- 101, 110, 116, 105, 116, 121, 103, 122, 105, 112, 100, 101, 102, 108, 97,
- 116, 101, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, 105,
- 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 80, 79, 83, 84,
- 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104, 116, 116, 112, 115,
- 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47, 47, 105, 110, 100,
- 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50, 48, 54, 51, 48,
- 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101, 112, 116, 45, 99,
- 104, 97, 114, 115, 101, 116, 97, 99, 99, 101, 112, 116, 45, 101, 110,
- 99, 111, 100, 105, 110, 103, 103, 122, 105, 112, 44, 32, 100, 101, 102,
- 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, 103,
- 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, 103,
- 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, 45,
- 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, 111,
- 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, 117,
- 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, 104,
- 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, 110,
- 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, 111,
- 110, 116, 101, 110, 116, 45, 101, 110, 99, 111, 100, 105, 110, 103, 99,
- 111, 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101,
- 99, 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99,
- 111, 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110,
- 99, 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111,
- 111, 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112,
- 101, 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105,
- 102, 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102,
- 105, 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110,
- 101, 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101,
- 105, 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115,
- 105, 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105,
- 101, 100, 108, 105, 110, 107, 108, 111, 99, 97, 116, 105, 111, 110, 109,
- 97, 120, 45, 102, 111, 114, 119, 97, 114, 100, 115, 112, 114, 111, 120,
- 121, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 112,
- 114, 111, 120, 121, 45, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116,
- 105, 111, 110, 114, 97, 110, 103, 101, 114, 101, 102, 101, 114, 101, 114,
- 114, 101, 102, 114, 101, 115, 104, 114, 101, 116, 114, 121, 45, 97, 102,
- 116, 101, 114, 115, 101, 114, 118, 101, 114, 115, 101, 116, 45, 99, 111,
- 111, 107, 105, 101, 115, 116, 114, 105, 99, 116, 45, 116, 114, 97, 110,
- 115, 112, 111, 114, 116, 45, 115, 101, 99, 117, 114, 105, 116, 121, 116,
- 114, 97, 110, 115, 102, 101, 114, 45, 101, 110, 99, 111, 100, 105, 110,
- 103, 118, 97, 114, 121, 118, 105, 97, 119, 119, 119, 45, 97, 117, 116,
- 104, 101, 110, 116, 105, 99, 97, 116, 101, 105, 100, 101, 110, 116, 105,
- 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, 105, 100, 101, 110, 116,
- 105, 116, 121, 44, 103, 122, 105, 112, 100, 101, 102, 108, 97, 116, 101,
- 44, 103, 122, 105, 112, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100,
- 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112};
+ 116, 108, 98, 45, 116, 111, 107, 101, 110, 103, 114, 112, 99, 45, 116,
+ 105, 109, 101, 111, 117, 116, 103, 114, 112, 99, 45, 116, 114, 97, 99,
+ 105, 110, 103, 45, 98, 105, 110, 103, 114, 112, 99, 45, 115, 116, 97,
+ 116, 115, 45, 98, 105, 110, 103, 114, 112, 99, 46, 119, 97, 105, 116,
+ 95, 102, 111, 114, 95, 114, 101, 97, 100, 121, 103, 114, 112, 99, 46,
+ 116, 105, 109, 101, 111, 117, 116, 103, 114, 112, 99, 46, 109, 97, 120,
+ 95, 114, 101, 113, 117, 101, 115, 116, 95, 109, 101, 115, 115, 97, 103,
+ 101, 95, 98, 121, 116, 101, 115, 103, 114, 112, 99, 46, 109, 97, 120,
+ 95, 114, 101, 115, 112, 111, 110, 115, 101, 95, 109, 101, 115, 115, 97,
+ 103, 101, 95, 98, 121, 116, 101, 115, 47, 103, 114, 112, 99, 46, 108,
+ 98, 46, 118, 49, 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99,
+ 101, 114, 47, 66, 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 48,
+ 49, 50, 105, 100, 101, 110, 116, 105, 116, 121, 103, 122, 105, 112, 100,
+ 101, 102, 108, 97, 116, 101, 116, 114, 97, 105, 108, 101, 114, 115, 97,
+ 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99,
+ 80, 79, 83, 84, 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104,
+ 116, 116, 112, 115, 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47,
+ 47, 105, 110, 100, 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50,
+ 48, 54, 51, 48, 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101,
+ 112, 116, 45, 99, 104, 97, 114, 115, 101, 116, 97, 99, 99, 101, 112,
+ 116, 45, 101, 110, 99, 111, 100, 105, 110, 103, 103, 122, 105, 112, 44,
+ 32, 100, 101, 102, 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45,
+ 108, 97, 110, 103, 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45,
+ 114, 97, 110, 103, 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99,
+ 101, 115, 115, 45, 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108,
+ 111, 119, 45, 111, 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108,
+ 111, 119, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110,
+ 99, 97, 99, 104, 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111,
+ 110, 116, 101, 110, 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105,
+ 111, 110, 99, 111, 110, 116, 101, 110, 116, 45, 101, 110, 99, 111, 100,
+ 105, 110, 103, 99, 111, 110, 116, 101, 110, 116, 45, 108, 97, 110, 103,
+ 117, 97, 103, 101, 99, 111, 110, 116, 101, 110, 116, 45, 108, 101, 110,
+ 103, 116, 104, 99, 111, 110, 116, 101, 110, 116, 45, 108, 111, 99, 97,
+ 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, 45, 114, 97, 110,
+ 103, 101, 99, 111, 111, 107, 105, 101, 100, 97, 116, 101, 101, 116, 97,
+ 103, 101, 120, 112, 101, 99, 116, 101, 120, 112, 105, 114, 101, 115, 102,
+ 114, 111, 109, 105, 102, 45, 109, 97, 116, 99, 104, 105, 102, 45, 109,
+ 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, 110, 99, 101, 105, 102,
+ 45, 110, 111, 110, 101, 45, 109, 97, 116, 99, 104, 105, 102, 45, 114,
+ 97, 110, 103, 101, 105, 102, 45, 117, 110, 109, 111, 100, 105, 102, 105,
+ 101, 100, 45, 115, 105, 110, 99, 101, 108, 97, 115, 116, 45, 109, 111,
+ 100, 105, 102, 105, 101, 100, 108, 105, 110, 107, 108, 111, 99, 97, 116,
+ 105, 111, 110, 109, 97, 120, 45, 102, 111, 114, 119, 97, 114, 100, 115,
+ 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99,
+ 97, 116, 101, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, 111, 114,
+ 105, 122, 97, 116, 105, 111, 110, 114, 97, 110, 103, 101, 114, 101, 102,
+ 101, 114, 101, 114, 114, 101, 102, 114, 101, 115, 104, 114, 101, 116, 114,
+ 121, 45, 97, 102, 116, 101, 114, 115, 101, 114, 118, 101, 114, 115, 101,
+ 116, 45, 99, 111, 111, 107, 105, 101, 115, 116, 114, 105, 99, 116, 45,
+ 116, 114, 97, 110, 115, 112, 111, 114, 116, 45, 115, 101, 99, 117, 114,
+ 105, 116, 121, 116, 114, 97, 110, 115, 102, 101, 114, 45, 101, 110, 99,
+ 111, 100, 105, 110, 103, 118, 97, 114, 121, 118, 105, 97, 119, 119, 119,
+ 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 105, 100,
+ 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, 105,
+ 100, 101, 110, 116, 105, 116, 121, 44, 103, 122, 105, 112, 100, 101, 102,
+ 108, 97, 116, 101, 44, 103, 122, 105, 112, 105, 100, 101, 110, 116, 105,
+ 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112};
static void static_ref(void *unused) {}
static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {}
@@ -221,7 +220,6 @@ grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
};
const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
@@ -258,188 +256,186 @@ const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
{.refcount = &grpc_static_metadata_refcounts[15],
.data.refcounted = {g_bytes + 166, 8}},
{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 174, 11}},
+ .data.refcounted = {g_bytes + 174, 12}},
{.refcount = &grpc_static_metadata_refcounts[17],
- .data.refcounted = {g_bytes + 185, 12}},
+ .data.refcounted = {g_bytes + 186, 16}},
{.refcount = &grpc_static_metadata_refcounts[18],
- .data.refcounted = {g_bytes + 197, 16}},
+ .data.refcounted = {g_bytes + 202, 14}},
{.refcount = &grpc_static_metadata_refcounts[19],
- .data.refcounted = {g_bytes + 213, 14}},
+ .data.refcounted = {g_bytes + 216, 0}},
{.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}},
+ .data.refcounted = {g_bytes + 216, 19}},
{.refcount = &grpc_static_metadata_refcounts[21],
- .data.refcounted = {g_bytes + 227, 19}},
+ .data.refcounted = {g_bytes + 235, 12}},
{.refcount = &grpc_static_metadata_refcounts[22],
- .data.refcounted = {g_bytes + 246, 12}},
+ .data.refcounted = {g_bytes + 247, 30}},
{.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 258, 30}},
+ .data.refcounted = {g_bytes + 277, 31}},
{.refcount = &grpc_static_metadata_refcounts[24],
- .data.refcounted = {g_bytes + 288, 31}},
+ .data.refcounted = {g_bytes + 308, 36}},
{.refcount = &grpc_static_metadata_refcounts[25],
- .data.refcounted = {g_bytes + 319, 36}},
+ .data.refcounted = {g_bytes + 344, 1}},
{.refcount = &grpc_static_metadata_refcounts[26],
- .data.refcounted = {g_bytes + 355, 1}},
+ .data.refcounted = {g_bytes + 345, 1}},
{.refcount = &grpc_static_metadata_refcounts[27],
- .data.refcounted = {g_bytes + 356, 1}},
+ .data.refcounted = {g_bytes + 346, 1}},
{.refcount = &grpc_static_metadata_refcounts[28],
- .data.refcounted = {g_bytes + 357, 1}},
+ .data.refcounted = {g_bytes + 347, 8}},
{.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 358, 8}},
+ .data.refcounted = {g_bytes + 355, 4}},
{.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 366, 4}},
+ .data.refcounted = {g_bytes + 359, 7}},
{.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 370, 7}},
+ .data.refcounted = {g_bytes + 366, 8}},
{.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 377, 8}},
+ .data.refcounted = {g_bytes + 374, 16}},
{.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 385, 16}},
+ .data.refcounted = {g_bytes + 390, 4}},
{.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 401, 4}},
+ .data.refcounted = {g_bytes + 394, 3}},
{.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 405, 3}},
+ .data.refcounted = {g_bytes + 397, 3}},
{.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 408, 3}},
+ .data.refcounted = {g_bytes + 400, 4}},
{.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 411, 4}},
+ .data.refcounted = {g_bytes + 404, 5}},
{.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 415, 5}},
+ .data.refcounted = {g_bytes + 409, 4}},
{.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 420, 4}},
+ .data.refcounted = {g_bytes + 413, 3}},
{.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 424, 3}},
+ .data.refcounted = {g_bytes + 416, 3}},
{.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 427, 3}},
+ .data.refcounted = {g_bytes + 419, 1}},
{.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 430, 1}},
+ .data.refcounted = {g_bytes + 420, 11}},
{.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 431, 11}},
+ .data.refcounted = {g_bytes + 431, 3}},
{.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 442, 3}},
+ .data.refcounted = {g_bytes + 434, 3}},
{.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 445, 3}},
+ .data.refcounted = {g_bytes + 437, 3}},
{.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 448, 3}},
+ .data.refcounted = {g_bytes + 440, 3}},
{.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 451, 3}},
+ .data.refcounted = {g_bytes + 443, 3}},
{.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 454, 3}},
+ .data.refcounted = {g_bytes + 446, 14}},
{.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 457, 14}},
+ .data.refcounted = {g_bytes + 460, 15}},
{.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 471, 15}},
+ .data.refcounted = {g_bytes + 475, 13}},
{.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 486, 13}},
+ .data.refcounted = {g_bytes + 488, 15}},
{.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 499, 15}},
+ .data.refcounted = {g_bytes + 503, 13}},
{.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 514, 13}},
+ .data.refcounted = {g_bytes + 516, 6}},
{.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 527, 6}},
+ .data.refcounted = {g_bytes + 522, 27}},
{.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 533, 27}},
+ .data.refcounted = {g_bytes + 549, 3}},
{.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 560, 3}},
+ .data.refcounted = {g_bytes + 552, 5}},
{.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 563, 5}},
+ .data.refcounted = {g_bytes + 557, 13}},
{.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 568, 13}},
+ .data.refcounted = {g_bytes + 570, 13}},
{.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 581, 13}},
+ .data.refcounted = {g_bytes + 583, 19}},
{.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 594, 19}},
+ .data.refcounted = {g_bytes + 602, 16}},
{.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 613, 16}},
+ .data.refcounted = {g_bytes + 618, 16}},
{.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 629, 16}},
+ .data.refcounted = {g_bytes + 634, 14}},
{.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 645, 14}},
+ .data.refcounted = {g_bytes + 648, 16}},
{.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 659, 16}},
+ .data.refcounted = {g_bytes + 664, 13}},
{.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 675, 13}},
+ .data.refcounted = {g_bytes + 677, 6}},
{.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 688, 6}},
+ .data.refcounted = {g_bytes + 683, 4}},
{.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 694, 4}},
+ .data.refcounted = {g_bytes + 687, 4}},
{.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 698, 4}},
+ .data.refcounted = {g_bytes + 691, 6}},
{.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 702, 6}},
+ .data.refcounted = {g_bytes + 697, 7}},
{.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 708, 7}},
+ .data.refcounted = {g_bytes + 704, 4}},
{.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 715, 4}},
+ .data.refcounted = {g_bytes + 708, 8}},
{.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 719, 8}},
+ .data.refcounted = {g_bytes + 716, 17}},
{.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 727, 17}},
+ .data.refcounted = {g_bytes + 733, 13}},
{.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 744, 13}},
+ .data.refcounted = {g_bytes + 746, 8}},
{.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 757, 8}},
+ .data.refcounted = {g_bytes + 754, 19}},
{.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 765, 19}},
+ .data.refcounted = {g_bytes + 773, 13}},
{.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 784, 13}},
+ .data.refcounted = {g_bytes + 786, 4}},
{.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 797, 4}},
+ .data.refcounted = {g_bytes + 790, 8}},
{.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 801, 8}},
+ .data.refcounted = {g_bytes + 798, 12}},
{.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 809, 12}},
+ .data.refcounted = {g_bytes + 810, 18}},
{.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 821, 18}},
+ .data.refcounted = {g_bytes + 828, 19}},
{.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 839, 19}},
+ .data.refcounted = {g_bytes + 847, 5}},
{.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 858, 5}},
+ .data.refcounted = {g_bytes + 852, 7}},
{.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 863, 7}},
+ .data.refcounted = {g_bytes + 859, 7}},
{.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 870, 7}},
+ .data.refcounted = {g_bytes + 866, 11}},
{.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 877, 11}},
+ .data.refcounted = {g_bytes + 877, 6}},
{.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 888, 6}},
+ .data.refcounted = {g_bytes + 883, 10}},
{.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 894, 10}},
+ .data.refcounted = {g_bytes + 893, 25}},
{.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 904, 25}},
+ .data.refcounted = {g_bytes + 918, 17}},
{.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 929, 17}},
+ .data.refcounted = {g_bytes + 935, 4}},
{.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 946, 4}},
+ .data.refcounted = {g_bytes + 939, 3}},
{.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 950, 3}},
+ .data.refcounted = {g_bytes + 942, 16}},
{.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 953, 16}},
+ .data.refcounted = {g_bytes + 958, 16}},
{.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 969, 16}},
+ .data.refcounted = {g_bytes + 974, 13}},
{.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 985, 13}},
+ .data.refcounted = {g_bytes + 987, 12}},
{.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 998, 12}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1010, 21}},
+ .data.refcounted = {g_bytes + 999, 21}},
};
uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8};
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8};
static const int8_t elems_r[] = {
- 10, 8, -3, 0, 9, 21, -76, 22, 0, 10, -7, 20, 0, 19, 18, 17,
- 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, -49, -50, 16, -52, -53, -54, -54, -55, -56, -57, 0, 38, 37, 36, 35,
- 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19,
- 18, 17, 16, 15, 14, 13, 12, 15, 14, 13, 12, 11, 10, 9, 8, 0};
+ 10, 8, -3, 0, 9, 21, -75, 22, 0, 10, -7, 20, 0, 19, 18, 17,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ -48, -49, 16, -51, -52, -53, -54, -54, -55, -56, -57, 0, 37, 36, 35, 34,
+ 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18,
+ 17, 16, 15, 14, 13, 12, 11, 14, 13, 12, 11, 10, 9, 8, 0};
static uint32_t elems_phash(uint32_t i) {
- i -= 42;
- uint32_t x = i % 96;
- uint32_t y = i / 96;
+ i -= 41;
+ uint32_t x = i % 95;
+ uint32_t y = i / 95;
uint32_t h = x;
if (y < GPR_ARRAY_SIZE(elems_r)) {
uint32_t delta = (uint32_t)elems_r[y];
@@ -449,30 +445,29 @@ static uint32_t elems_phash(uint32_t i) {
}
static const uint16_t elem_keys[] = {
- 1009, 1010, 1011, 240, 241, 242, 243, 244, 138, 139, 42, 43,
- 429, 430, 431, 911, 912, 913, 712, 713, 1098, 522, 714, 1294,
- 1392, 1490, 1588, 4822, 4920, 4951, 5116, 5214, 5312, 1111, 5410, 5508,
- 5606, 5704, 5802, 5900, 5998, 6096, 6194, 6292, 6390, 6488, 6586, 6684,
- 6782, 6880, 6978, 7076, 7174, 7272, 7370, 7468, 7566, 7664, 7762, 7860,
- 7958, 8056, 8154, 8252, 8350, 1074, 1075, 1076, 1077, 8448, 8546, 8644,
- 8742, 8840, 8938, 9036, 9134, 314, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 132, 231, 232, 0, 0, 0, 0, 0,
+ 998, 999, 1000, 237, 238, 239, 240, 241, 136, 137, 41, 42,
+ 424, 425, 426, 901, 902, 903, 704, 705, 1086, 516, 706, 1280,
+ 1377, 1474, 4675, 4772, 4803, 4966, 5063, 5160, 5257, 1099, 5354, 5451,
+ 5548, 5645, 5742, 5839, 5936, 6033, 6130, 6227, 6324, 6421, 6518, 6615,
+ 6712, 6809, 6906, 7003, 7100, 7197, 7294, 7391, 7488, 7585, 7682, 7779,
+ 7876, 7973, 8070, 8167, 8264, 1063, 1064, 1065, 1066, 8361, 8458, 8555,
+ 8652, 8749, 8846, 8943, 310, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 130, 228, 229, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0};
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const uint8_t elem_idxs[] = {
- 74, 77, 75, 19, 20, 21, 22, 23, 15, 16, 17, 18, 11, 12, 13,
- 3, 4, 5, 0, 1, 41, 6, 2, 70, 48, 55, 56, 24, 25, 26,
- 27, 28, 29, 7, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
- 42, 43, 44, 45, 46, 47, 49, 50, 51, 52, 53, 54, 57, 58, 59,
- 60, 61, 62, 63, 64, 76, 78, 79, 80, 65, 66, 67, 68, 69, 71,
- 72, 73, 14, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 8, 9, 10};
+ 73, 76, 74, 19, 20, 21, 22, 23, 15, 16, 17, 18, 11, 12, 13,
+ 3, 4, 5, 0, 1, 41, 6, 2, 69, 48, 55, 24, 25, 26, 27,
+ 28, 29, 30, 7, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42,
+ 43, 44, 45, 46, 47, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 75, 77, 78, 79, 65, 66, 67, 68, 70, 71,
+ 72, 14, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 8, 9, 10};
grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
if (a == -1 || b == -1) return GRPC_MDNULL;
- uint32_t k = (uint32_t)(a * 98 + b);
+ uint32_t k = (uint32_t)(a * 97 + b);
uint32_t h = elems_phash(k);
return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k
? GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[elem_idxs[h]],
@@ -483,328 +478,324 @@ grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
{{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[26],
- .data.refcounted = {g_bytes + 355, 1}}},
+ {.refcount = &grpc_static_metadata_refcounts[25],
+ .data.refcounted = {g_bytes + 344, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[27],
- .data.refcounted = {g_bytes + 356, 1}}},
+ {.refcount = &grpc_static_metadata_refcounts[26],
+ .data.refcounted = {g_bytes + 345, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[7],
.data.refcounted = {g_bytes + 50, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[27],
+ .data.refcounted = {g_bytes + 346, 1}}},
+ {{.refcount = &grpc_static_metadata_refcounts[9],
+ .data.refcounted = {g_bytes + 77, 13}},
{.refcount = &grpc_static_metadata_refcounts[28],
- .data.refcounted = {g_bytes + 357, 1}}},
+ .data.refcounted = {g_bytes + 347, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[9],
.data.refcounted = {g_bytes + 77, 13}},
{.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 358, 8}}},
+ .data.refcounted = {g_bytes + 355, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[9],
.data.refcounted = {g_bytes + 77, 13}},
{.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 366, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 370, 7}}},
+ .data.refcounted = {g_bytes + 359, 7}}},
{{.refcount = &grpc_static_metadata_refcounts[5],
.data.refcounted = {g_bytes + 36, 2}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 377, 8}}},
+ {.refcount = &grpc_static_metadata_refcounts[31],
+ .data.refcounted = {g_bytes + 366, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[11],
.data.refcounted = {g_bytes + 110, 12}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 385, 16}}},
+ {.refcount = &grpc_static_metadata_refcounts[32],
+ .data.refcounted = {g_bytes + 374, 16}}},
{{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 401, 4}}},
+ {.refcount = &grpc_static_metadata_refcounts[33],
+ .data.refcounted = {g_bytes + 390, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 405, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[34],
+ .data.refcounted = {g_bytes + 394, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[35],
+ .data.refcounted = {g_bytes + 397, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[4],
+ .data.refcounted = {g_bytes + 29, 7}},
{.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 408, 3}}},
+ .data.refcounted = {g_bytes + 400, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[4],
.data.refcounted = {g_bytes + 29, 7}},
{.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 411, 4}}},
+ .data.refcounted = {g_bytes + 404, 5}}},
{{.refcount = &grpc_static_metadata_refcounts[4],
.data.refcounted = {g_bytes + 29, 7}},
{.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 415, 5}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 420, 4}}},
+ .data.refcounted = {g_bytes + 409, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[3],
.data.refcounted = {g_bytes + 19, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 424, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[39],
+ .data.refcounted = {g_bytes + 413, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[1],
.data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 427, 3}}},
+ {.refcount = &grpc_static_metadata_refcounts[40],
+ .data.refcounted = {g_bytes + 416, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[0],
.data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 430, 1}}},
+ {.refcount = &grpc_static_metadata_refcounts[41],
+ .data.refcounted = {g_bytes + 419, 1}}},
{{.refcount = &grpc_static_metadata_refcounts[0],
.data.refcounted = {g_bytes + 0, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[42],
+ .data.refcounted = {g_bytes + 420, 11}}},
+ {{.refcount = &grpc_static_metadata_refcounts[2],
+ .data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 431, 11}}},
+ .data.refcounted = {g_bytes + 431, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 442, 3}}},
+ .data.refcounted = {g_bytes + 434, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 445, 3}}},
+ .data.refcounted = {g_bytes + 437, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 448, 3}}},
+ .data.refcounted = {g_bytes + 440, 3}}},
{{.refcount = &grpc_static_metadata_refcounts[2],
.data.refcounted = {g_bytes + 12, 7}},
{.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 451, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 454, 3}}},
+ .data.refcounted = {g_bytes + 443, 3}}},
+ {{.refcount = &grpc_static_metadata_refcounts[48],
+ .data.refcounted = {g_bytes + 446, 14}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 457, 14}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 471, 15}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 471, 15}},
- {.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 486, 13}}},
+ .data.refcounted = {g_bytes + 460, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[49],
+ .data.refcounted = {g_bytes + 460, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[50],
+ .data.refcounted = {g_bytes + 475, 13}}},
+ {{.refcount = &grpc_static_metadata_refcounts[51],
+ .data.refcounted = {g_bytes + 488, 15}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 499, 15}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 503, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 514, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 516, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 527, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 522, 27}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 533, 27}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 549, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 560, 3}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 552, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 563, 5}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 557, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 568, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 570, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 581, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 583, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 594, 19}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 602, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 613, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 618, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 629, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 634, 14}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 645, 14}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 648, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 659, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 675, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 664, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[11],
.data.refcounted = {g_bytes + 110, 12}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[65],
+ .data.refcounted = {g_bytes + 677, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 688, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 683, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 694, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 687, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 698, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 691, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 702, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 697, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 708, 7}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 715, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 704, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[14],
.data.refcounted = {g_bytes + 162, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[71],
+ .data.refcounted = {g_bytes + 708, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 719, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 716, 17}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 727, 17}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 733, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 744, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 746, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 757, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 754, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 765, 19}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 784, 13}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 773, 13}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[15],
.data.refcounted = {g_bytes + 166, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 174, 11}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[77],
+ .data.refcounted = {g_bytes + 786, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 797, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 790, 8}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 801, 8}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 798, 12}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 809, 12}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 810, 18}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 821, 18}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 828, 19}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 839, 19}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 847, 5}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 858, 5}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 852, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 863, 7}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 859, 7}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 870, 7}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 866, 11}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 877, 11}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 877, 6}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 888, 6}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 883, 10}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 894, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 893, 25}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 904, 25}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 929, 17}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 918, 17}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[13],
.data.refcounted = {g_bytes + 152, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
+ {{.refcount = &grpc_static_metadata_refcounts[90],
+ .data.refcounted = {g_bytes + 935, 4}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 946, 4}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 939, 3}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 950, 3}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 953, 16}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 227, 0}}},
+ .data.refcounted = {g_bytes + 942, 16}},
+ {.refcount = &grpc_static_metadata_refcounts[19],
+ .data.refcounted = {g_bytes + 216, 0}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 358, 8}}},
+ {.refcount = &grpc_static_metadata_refcounts[28],
+ .data.refcounted = {g_bytes + 347, 8}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 370, 7}}},
+ {.refcount = &grpc_static_metadata_refcounts[30],
+ .data.refcounted = {g_bytes + 359, 7}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 969, 16}}},
+ {.refcount = &grpc_static_metadata_refcounts[93],
+ .data.refcounted = {g_bytes + 958, 16}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 366, 4}}},
+ {.refcount = &grpc_static_metadata_refcounts[29],
+ .data.refcounted = {g_bytes + 355, 4}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 985, 13}}},
+ {.refcount = &grpc_static_metadata_refcounts[94],
+ .data.refcounted = {g_bytes + 974, 13}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 998, 12}}},
+ {.refcount = &grpc_static_metadata_refcounts[95],
+ .data.refcounted = {g_bytes + 987, 12}}},
{{.refcount = &grpc_static_metadata_refcounts[10],
.data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1010, 21}}},
+ {.refcount = &grpc_static_metadata_refcounts[96],
+ .data.refcounted = {g_bytes + 999, 21}}},
};
-const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 74, 75, 76,
- 77, 78, 79, 80};
+const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 73, 74, 75,
+ 76, 77, 78, 79};
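
Note on the regenerated table above: `grpc_static_accept_encoding_metadata[bitset]` maps a compression-algorithm bitset to an index into `grpc_static_mdelem_table`, selecting the pre-built "grpc-accept-encoding" element; the entries shift from 74..80 to 73..79 here because the "lb-cost-bin" element was removed from the table. A minimal sketch of the lookup follows, assuming bit 0 = identity, bit 1 = deflate, bit 2 = gzip as in this tree's compression enums; `accept_encoding_mdelem_for` is a hypothetical helper, not repository code.

#include <stdint.h>
#include "src/core/lib/transport/static_metadata.h"

/* Hypothetical helper: map a compression-algorithm bitset to its pre-built
   "grpc-accept-encoding" metadata element. In practice callers always
   advertise identity, so entry 0 of the table is never consulted. */
static grpc_mdelem accept_encoding_mdelem_for(uint32_t algorithm_bitset) {
  uint8_t idx = grpc_static_accept_encoding_metadata[algorithm_bitset & 7];
  return GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[idx],
                          GRPC_MDELEM_STORAGE_STATIC);
}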
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 7649ccd5d2..f9600ee2e4 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -44,7 +44,7 @@
#include "src/core/lib/transport/metadata.h"
-#define GRPC_STATIC_MDSTR_COUNT 98
+#define GRPC_STATIC_MDSTR_COUNT 97
extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
/* ":path" */
#define GRPC_MDSTR_PATH (grpc_static_slice_table[0])
@@ -78,174 +78,172 @@ extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
#define GRPC_MDSTR_HOST (grpc_static_slice_table[14])
/* "lb-token" */
#define GRPC_MDSTR_LB_TOKEN (grpc_static_slice_table[15])
-/* "lb-cost-bin" */
-#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[16])
/* "grpc-timeout" */
-#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[17])
+#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[16])
/* "grpc-tracing-bin" */
-#define GRPC_MDSTR_GRPC_TRACING_BIN (grpc_static_slice_table[18])
+#define GRPC_MDSTR_GRPC_TRACING_BIN (grpc_static_slice_table[17])
/* "grpc-stats-bin" */
-#define GRPC_MDSTR_GRPC_STATS_BIN (grpc_static_slice_table[19])
+#define GRPC_MDSTR_GRPC_STATS_BIN (grpc_static_slice_table[18])
/* "" */
-#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[20])
+#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[19])
/* "grpc.wait_for_ready" */
-#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[21])
+#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[20])
/* "grpc.timeout" */
-#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[22])
+#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[21])
/* "grpc.max_request_message_bytes" */
#define GRPC_MDSTR_GRPC_DOT_MAX_REQUEST_MESSAGE_BYTES \
- (grpc_static_slice_table[23])
+ (grpc_static_slice_table[22])
/* "grpc.max_response_message_bytes" */
#define GRPC_MDSTR_GRPC_DOT_MAX_RESPONSE_MESSAGE_BYTES \
- (grpc_static_slice_table[24])
+ (grpc_static_slice_table[23])
/* "/grpc.lb.v1.LoadBalancer/BalanceLoad" */
#define GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD \
- (grpc_static_slice_table[25])
+ (grpc_static_slice_table[24])
/* "0" */
-#define GRPC_MDSTR_0 (grpc_static_slice_table[26])
+#define GRPC_MDSTR_0 (grpc_static_slice_table[25])
/* "1" */
-#define GRPC_MDSTR_1 (grpc_static_slice_table[27])
+#define GRPC_MDSTR_1 (grpc_static_slice_table[26])
/* "2" */
-#define GRPC_MDSTR_2 (grpc_static_slice_table[28])
+#define GRPC_MDSTR_2 (grpc_static_slice_table[27])
/* "identity" */
-#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[29])
+#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[28])
/* "gzip" */
-#define GRPC_MDSTR_GZIP (grpc_static_slice_table[30])
+#define GRPC_MDSTR_GZIP (grpc_static_slice_table[29])
/* "deflate" */
-#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[31])
+#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[30])
/* "trailers" */
-#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[32])
+#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[31])
/* "application/grpc" */
-#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[33])
+#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[32])
/* "POST" */
-#define GRPC_MDSTR_POST (grpc_static_slice_table[34])
+#define GRPC_MDSTR_POST (grpc_static_slice_table[33])
/* "200" */
-#define GRPC_MDSTR_200 (grpc_static_slice_table[35])
+#define GRPC_MDSTR_200 (grpc_static_slice_table[34])
/* "404" */
-#define GRPC_MDSTR_404 (grpc_static_slice_table[36])
+#define GRPC_MDSTR_404 (grpc_static_slice_table[35])
/* "http" */
-#define GRPC_MDSTR_HTTP (grpc_static_slice_table[37])
+#define GRPC_MDSTR_HTTP (grpc_static_slice_table[36])
/* "https" */
-#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[38])
+#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[37])
/* "grpc" */
-#define GRPC_MDSTR_GRPC (grpc_static_slice_table[39])
+#define GRPC_MDSTR_GRPC (grpc_static_slice_table[38])
/* "GET" */
-#define GRPC_MDSTR_GET (grpc_static_slice_table[40])
+#define GRPC_MDSTR_GET (grpc_static_slice_table[39])
/* "PUT" */
-#define GRPC_MDSTR_PUT (grpc_static_slice_table[41])
+#define GRPC_MDSTR_PUT (grpc_static_slice_table[40])
/* "/" */
-#define GRPC_MDSTR_SLASH (grpc_static_slice_table[42])
+#define GRPC_MDSTR_SLASH (grpc_static_slice_table[41])
/* "/index.html" */
-#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[43])
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[42])
/* "204" */
-#define GRPC_MDSTR_204 (grpc_static_slice_table[44])
+#define GRPC_MDSTR_204 (grpc_static_slice_table[43])
/* "206" */
-#define GRPC_MDSTR_206 (grpc_static_slice_table[45])
+#define GRPC_MDSTR_206 (grpc_static_slice_table[44])
/* "304" */
-#define GRPC_MDSTR_304 (grpc_static_slice_table[46])
+#define GRPC_MDSTR_304 (grpc_static_slice_table[45])
/* "400" */
-#define GRPC_MDSTR_400 (grpc_static_slice_table[47])
+#define GRPC_MDSTR_400 (grpc_static_slice_table[46])
/* "500" */
-#define GRPC_MDSTR_500 (grpc_static_slice_table[48])
+#define GRPC_MDSTR_500 (grpc_static_slice_table[47])
/* "accept-charset" */
-#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[49])
+#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[48])
/* "accept-encoding" */
-#define GRPC_MDSTR_ACCEPT_ENCODING (grpc_static_slice_table[50])
+#define GRPC_MDSTR_ACCEPT_ENCODING (grpc_static_slice_table[49])
/* "gzip, deflate" */
-#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[51])
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[50])
/* "accept-language" */
-#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[52])
+#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[51])
/* "accept-ranges" */
-#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[53])
+#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[52])
/* "accept" */
-#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[54])
+#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[53])
/* "access-control-allow-origin" */
-#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[55])
+#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[54])
/* "age" */
-#define GRPC_MDSTR_AGE (grpc_static_slice_table[56])
+#define GRPC_MDSTR_AGE (grpc_static_slice_table[55])
/* "allow" */
-#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[57])
+#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[56])
/* "authorization" */
-#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[58])
+#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[57])
/* "cache-control" */
-#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[59])
+#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[58])
/* "content-disposition" */
-#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[60])
+#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[59])
/* "content-encoding" */
-#define GRPC_MDSTR_CONTENT_ENCODING (grpc_static_slice_table[61])
+#define GRPC_MDSTR_CONTENT_ENCODING (grpc_static_slice_table[60])
/* "content-language" */
-#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[62])
+#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[61])
/* "content-length" */
-#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[63])
+#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[62])
/* "content-location" */
-#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[64])
+#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[63])
/* "content-range" */
-#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[65])
+#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[64])
/* "cookie" */
-#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[66])
+#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[65])
/* "date" */
-#define GRPC_MDSTR_DATE (grpc_static_slice_table[67])
+#define GRPC_MDSTR_DATE (grpc_static_slice_table[66])
/* "etag" */
-#define GRPC_MDSTR_ETAG (grpc_static_slice_table[68])
+#define GRPC_MDSTR_ETAG (grpc_static_slice_table[67])
/* "expect" */
-#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[69])
+#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[68])
/* "expires" */
-#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[70])
+#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[69])
/* "from" */
-#define GRPC_MDSTR_FROM (grpc_static_slice_table[71])
+#define GRPC_MDSTR_FROM (grpc_static_slice_table[70])
/* "if-match" */
-#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[72])
+#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[71])
/* "if-modified-since" */
-#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[73])
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[72])
/* "if-none-match" */
-#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[74])
+#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[73])
/* "if-range" */
-#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[75])
+#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[74])
/* "if-unmodified-since" */
-#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[76])
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[75])
/* "last-modified" */
-#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[77])
+#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[76])
/* "link" */
-#define GRPC_MDSTR_LINK (grpc_static_slice_table[78])
+#define GRPC_MDSTR_LINK (grpc_static_slice_table[77])
/* "location" */
-#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[79])
+#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[78])
/* "max-forwards" */
-#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[80])
+#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[79])
/* "proxy-authenticate" */
-#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[81])
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[80])
/* "proxy-authorization" */
-#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[82])
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[81])
/* "range" */
-#define GRPC_MDSTR_RANGE (grpc_static_slice_table[83])
+#define GRPC_MDSTR_RANGE (grpc_static_slice_table[82])
/* "referer" */
-#define GRPC_MDSTR_REFERER (grpc_static_slice_table[84])
+#define GRPC_MDSTR_REFERER (grpc_static_slice_table[83])
/* "refresh" */
-#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[85])
+#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[84])
/* "retry-after" */
-#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[86])
+#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[85])
/* "server" */
-#define GRPC_MDSTR_SERVER (grpc_static_slice_table[87])
+#define GRPC_MDSTR_SERVER (grpc_static_slice_table[86])
/* "set-cookie" */
-#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[88])
+#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[87])
/* "strict-transport-security" */
-#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[89])
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[88])
/* "transfer-encoding" */
-#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[90])
+#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[89])
/* "vary" */
-#define GRPC_MDSTR_VARY (grpc_static_slice_table[91])
+#define GRPC_MDSTR_VARY (grpc_static_slice_table[90])
/* "via" */
-#define GRPC_MDSTR_VIA (grpc_static_slice_table[92])
+#define GRPC_MDSTR_VIA (grpc_static_slice_table[91])
/* "www-authenticate" */
-#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[93])
+#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[92])
/* "identity,deflate" */
-#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[94])
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[93])
/* "identity,gzip" */
-#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[95])
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[94])
/* "deflate,gzip" */
-#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[96])
+#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[95])
/* "identity,deflate,gzip" */
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (grpc_static_slice_table[97])
+ (grpc_static_slice_table[96])
extern const grpc_slice_refcount_vtable grpc_static_metadata_vtable;
extern grpc_slice_refcount
@@ -257,7 +255,7 @@ extern grpc_slice_refcount
#define GRPC_STATIC_METADATA_INDEX(static_slice) \
((int)((static_slice).refcount - grpc_static_metadata_refcounts))
-#define GRPC_STATIC_MDELEM_COUNT 81
+#define GRPC_STATIC_MDELEM_COUNT 80
extern grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
/* "grpc-status": "0" */
@@ -428,81 +426,78 @@ extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
/* "lb-token": "" */
#define GRPC_MDELEM_LB_TOKEN_EMPTY \
(GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[55], GRPC_MDELEM_STORAGE_STATIC))
-/* "lb-cost-bin": "" */
-#define GRPC_MDELEM_LB_COST_BIN_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[56], GRPC_MDELEM_STORAGE_STATIC))
/* "link": "" */
#define GRPC_MDELEM_LINK_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[57], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[56], GRPC_MDELEM_STORAGE_STATIC))
/* "location": "" */
#define GRPC_MDELEM_LOCATION_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[58], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[57], GRPC_MDELEM_STORAGE_STATIC))
/* "max-forwards": "" */
#define GRPC_MDELEM_MAX_FORWARDS_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[59], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[58], GRPC_MDELEM_STORAGE_STATIC))
/* "proxy-authenticate": "" */
#define GRPC_MDELEM_PROXY_AUTHENTICATE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[60], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[59], GRPC_MDELEM_STORAGE_STATIC))
/* "proxy-authorization": "" */
#define GRPC_MDELEM_PROXY_AUTHORIZATION_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[61], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[60], GRPC_MDELEM_STORAGE_STATIC))
/* "range": "" */
#define GRPC_MDELEM_RANGE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[62], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[61], GRPC_MDELEM_STORAGE_STATIC))
/* "referer": "" */
#define GRPC_MDELEM_REFERER_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[63], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[62], GRPC_MDELEM_STORAGE_STATIC))
/* "refresh": "" */
#define GRPC_MDELEM_REFRESH_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[64], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[63], GRPC_MDELEM_STORAGE_STATIC))
/* "retry-after": "" */
#define GRPC_MDELEM_RETRY_AFTER_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[65], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[64], GRPC_MDELEM_STORAGE_STATIC))
/* "server": "" */
#define GRPC_MDELEM_SERVER_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[66], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[65], GRPC_MDELEM_STORAGE_STATIC))
/* "set-cookie": "" */
#define GRPC_MDELEM_SET_COOKIE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[67], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[66], GRPC_MDELEM_STORAGE_STATIC))
/* "strict-transport-security": "" */
#define GRPC_MDELEM_STRICT_TRANSPORT_SECURITY_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[68], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[67], GRPC_MDELEM_STORAGE_STATIC))
/* "transfer-encoding": "" */
#define GRPC_MDELEM_TRANSFER_ENCODING_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[69], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[68], GRPC_MDELEM_STORAGE_STATIC))
/* "user-agent": "" */
#define GRPC_MDELEM_USER_AGENT_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[70], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[69], GRPC_MDELEM_STORAGE_STATIC))
/* "vary": "" */
#define GRPC_MDELEM_VARY_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[71], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[70], GRPC_MDELEM_STORAGE_STATIC))
/* "via": "" */
#define GRPC_MDELEM_VIA_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[72], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[71], GRPC_MDELEM_STORAGE_STATIC))
/* "www-authenticate": "" */
#define GRPC_MDELEM_WWW_AUTHENTICATE_EMPTY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[73], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[72], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[74], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[73], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "deflate" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[75], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[74], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity,deflate" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[76], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[75], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[77], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[76], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity,gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[78], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[77], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "deflate,gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_DEFLATE_COMMA_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[79], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[78], GRPC_MDELEM_STORAGE_STATIC))
/* "grpc-accept-encoding": "identity,deflate,gzip" */
#define GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[80], GRPC_MDELEM_STORAGE_STATIC))
+ (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[79], GRPC_MDELEM_STORAGE_STATIC))
grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b);
typedef enum {
@@ -522,7 +517,6 @@ typedef enum {
GRPC_BATCH_USER_AGENT,
GRPC_BATCH_HOST,
GRPC_BATCH_LB_TOKEN,
- GRPC_BATCH_LB_COST_BIN,
GRPC_BATCH_CALLOUTS_COUNT
} grpc_metadata_batch_callouts_index;
@@ -545,7 +539,6 @@ typedef union {
struct grpc_linked_mdelem *user_agent;
struct grpc_linked_mdelem *host;
struct grpc_linked_mdelem *lb_token;
- struct grpc_linked_mdelem *lb_cost_bin;
} named;
} grpc_metadata_batch_callouts;
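
The `grpc_metadata_batch_callouts_index` enum and the `named` union above must stay in one-to-one correspondence, which is why `GRPC_BATCH_LB_COST_BIN` and the `lb_cost_bin` pointer are deleted together; the union gives constant-time access to well-known headers linked into a batch. A sketch of that access pattern, assuming the batch exposes this struct as an `idx` member (as metadata_batch.h does in this tree); `batch_has_lb_token` is a hypothetical helper.

#include <stdbool.h>

/* Sketch only: each `named` pointer is non-NULL iff that well-known header
   is currently linked into the batch; the lb_cost_bin slot no longer exists
   after this change. */
static bool batch_has_lb_token(const grpc_metadata_batch *batch) {
  return batch->idx.named.lb_token != NULL;
}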
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 9a0abe1ca4..e56bf2780a 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -167,9 +167,9 @@ typedef struct grpc_transport_stream_op {
/***************************************************************************
* remaining fields are initialized and used at the discretion of the
- * transport implementation */
+ * current handler of the op */
- grpc_transport_private_op_data transport_private;
+ grpc_transport_private_op_data handler_private;
} grpc_transport_stream_op;
/** Transport op: a set of operations to perform on a transport as a whole */
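
The rename from `transport_private` to `handler_private` widens the contract: the scratch area belongs to whichever stack element currently handles the op, not only to the transport. A sketch of the intended usage, a filter intercepting `on_complete`; the `args[]`/`closure` members of `grpc_transport_private_op_data` and the `grpc_closure_init()`/`grpc_schedule_on_exec_ctx` API are assumed from this tree, and `on_complete_intercepted` is a hypothetical callback.

/* While this filter is the current handler of the op it may use
   handler_private freely; ownership of the field passes along with the op
   when it is handed further down the stack. */
static void intercept_on_complete(grpc_call_element *elem,
                                  grpc_transport_stream_op *op) {
  op->handler_private.args[0] = elem;            /* who intercepted */
  op->handler_private.args[1] = op->on_complete; /* original continuation */
  grpc_closure_init(&op->handler_private.closure, on_complete_intercepted,
                    op, grpc_schedule_on_exec_ctx);
  op->on_complete = &op->handler_private.closure;
}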
diff --git a/src/core/plugin_registry/grpc_cronet_plugin_registry.c b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
index d339ed327f..c97f47b397 100644
--- a/src/core/plugin_registry/grpc_cronet_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
@@ -37,10 +37,14 @@ extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
extern void grpc_client_channel_init(void);
extern void grpc_client_channel_shutdown(void);
+extern void grpc_load_reporting_plugin_init(void);
+extern void grpc_load_reporting_plugin_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_chttp2_plugin_init,
grpc_chttp2_plugin_shutdown);
grpc_register_plugin(grpc_client_channel_init,
grpc_client_channel_shutdown);
+ grpc_register_plugin(grpc_load_reporting_plugin_init,
+ grpc_load_reporting_plugin_shutdown);
}
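
This registry change wires load reporting into the Cronet build the same way every built-in plugin is wired: an extern init/shutdown pair handed to `grpc_register_plugin`. A sketch of that contract follows; the `my_plugin_*` names are hypothetical, while `grpc_register_plugin()` is the real declaration from <grpc/grpc.h>.

#include <grpc/grpc.h>

static void my_plugin_init(void) { /* acquire global state, register filters */ }
static void my_plugin_shutdown(void) { /* release whatever init acquired */ }

/* Registration must happen before grpc_init(): init hooks fire during
   grpc_init() and shutdown hooks during grpc_shutdown(). */
void my_register_plugins(void) {
  grpc_register_plugin(my_plugin_init, my_plugin_shutdown);
}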