Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/client_channel/client_channel.c  147
-rw-r--r--  src/core/lib/iomgr/udp_server.c                 16
-rw-r--r--  src/core/lib/iomgr/udp_server.h                 12
3 files changed, 61 insertions(+), 114 deletions(-)
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index f424f83335..4de510cbf6 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -71,7 +71,8 @@
  */
 
 typedef enum {
-  WAIT_FOR_READY_UNSET,
+  /* zero so it can be default initialized */
+  WAIT_FOR_READY_UNSET = 0,
   WAIT_FOR_READY_FALSE,
   WAIT_FOR_READY_TRUE
 } wait_for_ready_value;
@@ -633,7 +634,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
 #define CANCELLED_CALL ((grpc_subchannel_call *)1)
 
 typedef enum {
-  GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
+  /* zero so that it can be default-initialized */
+  GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING = 0,
   GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
 } subchannel_creation_phase;
 
@@ -655,7 +657,6 @@ typedef struct client_channel_call_data {
   gpr_timespec call_start_time;
   gpr_timespec deadline;
   method_parameters *method_params;
-  grpc_closure read_service_config;
   grpc_error *cancel_error;
@@ -728,6 +729,47 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
   gpr_free(ops);
 }
 
+// Sets calld->method_params.
+// If the method params specify a timeout, populates
+// *per_method_deadline and returns true.
+static bool set_call_method_params_from_service_config_locked(
+    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+    gpr_timespec *per_method_deadline) {
+  channel_data *chand = elem->channel_data;
+  call_data *calld = elem->call_data;
+  if (chand->method_params_table != NULL) {
+    calld->method_params = grpc_method_config_table_get(
+        exec_ctx, chand->method_params_table, calld->path);
+    if (calld->method_params != NULL) {
+      method_parameters_ref(calld->method_params);
+      if (gpr_time_cmp(calld->method_params->timeout,
+                       gpr_time_0(GPR_TIMESPAN)) != 0) {
+        *per_method_deadline =
+            gpr_time_add(calld->call_start_time, calld->method_params->timeout);
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+static void apply_final_configuration_locked(grpc_exec_ctx *exec_ctx,
+                                             grpc_call_element *elem) {
+  /* apply service-config level configuration to the call (now that we're
+   * certain it exists) */
+  call_data *calld = elem->call_data;
+  gpr_timespec per_method_deadline;
+  if (set_call_method_params_from_service_config_locked(exec_ctx, elem,
+                                                        &per_method_deadline)) {
+    // If the deadline from the service config is shorter than the one
+    // from the client API, reset the deadline timer.
+    if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+      calld->deadline = per_method_deadline;
+      grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+    }
+  }
+}
+
 static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                     grpc_error *error) {
   grpc_call_element *elem = arg;
@@ -855,6 +897,7 @@ static bool pick_subchannel_locked(
   }
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   if (chand->lb_policy != NULL) {
+    apply_final_configuration_locked(exec_ctx, elem);
     grpc_lb_policy *lb_policy = chand->lb_policy;
     GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
     // If the application explicitly set wait_for_ready, use that.
@@ -1065,114 +1108,18 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
   GPR_TIMER_END("cc_start_transport_stream_op", 0);
 }
 
-// Sets calld->method_params.
-// If the method params specify a timeout, populates
-// *per_method_deadline and returns true.
-static bool set_call_method_params_from_service_config_locked(
-    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
-    gpr_timespec *per_method_deadline) {
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
-  if (chand->method_params_table != NULL) {
-    calld->method_params = grpc_method_config_table_get(
-        exec_ctx, chand->method_params_table, calld->path);
-    if (calld->method_params != NULL) {
-      method_parameters_ref(calld->method_params);
-      if (gpr_time_cmp(calld->method_params->timeout,
-                       gpr_time_0(GPR_TIMESPAN)) != 0) {
-        *per_method_deadline =
-            gpr_time_add(calld->call_start_time, calld->method_params->timeout);
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-// Gets data from the service config. Invoked when the resolver returns
-// its initial result.
-static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                       grpc_error *error) {
-  grpc_call_element *elem = arg;
-  call_data *calld = elem->call_data;
-  // If this is an error, there's no point in looking at the service config.
-  if (error == GRPC_ERROR_NONE) {
-    gpr_timespec per_method_deadline;
-    if (set_call_method_params_from_service_config_locked(
-            exec_ctx, elem, &per_method_deadline)) {
-      // If the deadline from the service config is shorter than the one
-      // from the client API, reset the deadline timer.
-      if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
-        calld->deadline = per_method_deadline;
-        grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
-      }
-    }
-  }
-  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
-}
-
-static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx,
-                                               void *arg,
-                                               grpc_error *error_ignored) {
-  grpc_call_element *elem = arg;
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
-  // If the resolver has already returned results, then we can access
-  // the service config parameters immediately. Otherwise, we need to
-  // defer that work until the resolver returns an initial result.
-  if (chand->lb_policy != NULL) {
-    // We already have a resolver result, so check for service config.
-    gpr_timespec per_method_deadline;
-    if (set_call_method_params_from_service_config_locked(
-            exec_ctx, elem, &per_method_deadline)) {
-      calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
-    }
-  } else {
-    // We don't yet have a resolver result, so register a callback to
-    // get the service config data once the resolver returns.
-    // Take a reference to the call stack to be owned by the callback.
-    GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
-    grpc_closure_init(&calld->read_service_config, read_service_config_locked,
-                      elem, grpc_combiner_scheduler(chand->combiner, false));
-    grpc_closure_list_append(&chand->waiting_for_config_closures,
-                             &calld->read_service_config, GRPC_ERROR_NONE);
-  }
-  // Start the deadline timer with the current deadline value. If we
-  // do not yet have service config data, then the timer may be reset
-  // later.
-  grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
-  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
-                        "initial_read_service_config");
-}
-
 /* Constructor for call_data */
 static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem,
                                      const grpc_call_element_args *args) {
-  channel_data *chand = elem->channel_data;
   call_data *calld = elem->call_data;
   // Initialize data members.
   grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
   calld->path = grpc_slice_ref_internal(args->path);
   calld->call_start_time = args->start_time;
   calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
-  calld->method_params = NULL;
-  calld->cancel_error = GRPC_ERROR_NONE;
-  gpr_atm_rel_store(&calld->subchannel_call, 0);
-  calld->connected_subchannel = NULL;
-  calld->waiting_ops = NULL;
-  calld->waiting_ops_count = 0;
-  calld->waiting_ops_capacity = 0;
-  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
   calld->owning_call = args->call_stack;
-  calld->pollent = NULL;
-  GRPC_CALL_STACK_REF(calld->owning_call, "initial_read_service_config");
-  grpc_closure_sched(
-      exec_ctx,
-      grpc_closure_init(&calld->read_service_config,
-                        initial_read_service_config_locked, elem,
-                        grpc_combiner_scheduler(chand->combiner, false)),
-      GRPC_ERROR_NONE);
+  grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
   return GRPC_ERROR_NONE;
 }
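
Note how the two `= 0` enum changes above pair with the constructor cleanup at the end of this file's diff: once every "unset" state is the zero value, a zero-initialized call element needs no per-field assignments, and the service-config lookup can move from a scheduled closure into apply_final_configuration_locked, invoked from pick_subchannel_locked once an LB policy (and therefore a resolver result) exists. Below is a minimal standalone sketch of the zero-default pattern; the call_data struct and the use of calloc are illustrative stand-ins, not the real gRPC definitions.

#include <assert.h>
#include <stdlib.h>

typedef enum {
  /* zero so it can be default-initialized */
  WAIT_FOR_READY_UNSET = 0,
  WAIT_FOR_READY_FALSE,
  WAIT_FOR_READY_TRUE
} wait_for_ready_value;

typedef struct {
  wait_for_ready_value wait_for_ready; /* illustrative fields only */
  void *method_params;                 /* NULL means "not looked up yet" */
} call_data;

int main(void) {
  /* calloc stands in for the zeroed allocation that hands out call
   * elements: zeroed memory already encodes every "unset" state, so the
   * constructor no longer assigns each field by hand. */
  call_data *calld = calloc(1, sizeof(*calld));
  assert(calld->wait_for_ready == WAIT_FOR_READY_UNSET);
  assert(calld->method_params == NULL);
  free(calld);
  return 0;
}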
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index c833d86866..28f2bd9359 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -109,8 +109,8 @@ struct grpc_udp_server {
   grpc_pollset **pollsets;
   /* number of pollsets in the pollsets array */
   size_t pollset_count;
-  /* The parent grpc server */
-  grpc_server *grpc_server;
+  /* opaque object to pass to callbacks */
+  void *user_data;
 };
 
 grpc_udp_server *grpc_udp_server_create(void) {
@@ -178,7 +178,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
       /* Call the orphan_cb to signal that the FD is about to be closed and
        * should no longer be used. */
       GPR_ASSERT(sp->orphan_cb);
-      sp->orphan_cb(exec_ctx, sp->emfd);
+      sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data);
       grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
                      "udp_listener_shutdown");
@@ -204,7 +204,7 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
   if (s->active_ports) {
     for (sp = s->head; sp; sp = sp->next) {
       GPR_ASSERT(sp->orphan_cb);
-      sp->orphan_cb(exec_ctx, sp->emfd);
+      sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data);
       grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                                                "Server destroyed"));
     }
@@ -299,7 +299,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
     /* Tell the registered callback that data is available to read. */
     GPR_ASSERT(sp->read_cb);
-    sp->read_cb(exec_ctx, sp->emfd, sp->server->grpc_server);
+    sp->read_cb(exec_ctx, sp->emfd, sp->server->user_data);
     /* Re-arm the notification event so we get another chance to read. */
     grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
@@ -322,7 +322,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
     /* Tell the registered callback that the socket is writeable. */
     GPR_ASSERT(sp->write_cb);
-    sp->write_cb(exec_ctx, sp->emfd);
+    sp->write_cb(exec_ctx, sp->emfd, sp->server->user_data);
     /* Re-arm the notification event so we get another chance to write. */
     grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
@@ -464,13 +464,13 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
 
 void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
                            grpc_pollset **pollsets, size_t pollset_count,
-                           grpc_server *server) {
+                           void *user_data) {
   size_t i;
   gpr_mu_lock(&s->mu);
   grpc_udp_listener *sp;
   GPR_ASSERT(s->active_ports == 0);
   s->pollsets = pollsets;
-  s->grpc_server = server;
+  s->user_data = user_data;
 
   sp = s->head;
   while (sp != NULL) {
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index ed63fa7d81..90842a47f0 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -47,23 +47,23 @@ typedef struct grpc_udp_server grpc_udp_server;
 
 /* Called when data is available to read from the socket. */
 typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
-                                        struct grpc_server *server);
+                                        void *user_data);
 
 /* Called when the socket is writeable. */
-typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx,
-                                         grpc_fd *emfd);
+typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
+                                         void *user_data);
 
 /* Called when the grpc_fd is about to be orphaned (and the FD closed). */
 typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx,
-                                          grpc_fd *emfd);
+                                          grpc_fd *emfd, void *user_data);
 
 /* Create a server, initially not bound to any ports */
 grpc_udp_server *grpc_udp_server_create(void);
 
-/* Start listening to bound ports */
+/* Start listening to bound ports. user_data is passed to callbacks. */
 void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server,
                            grpc_pollset **pollsets, size_t pollset_count,
-                           struct grpc_server *server);
+                           void *user_data);
 
 int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index);
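
With struct grpc_server * generalized to void *user_data, any embedder state can be threaded through the server to its callbacks. The following is a hedged sketch of a caller written against the declarations above: my_transport, the callback bodies, and start_udp are hypothetical, and the port setup where the read/write/orphan callbacks get registered is deliberately elided.

#include "src/core/lib/iomgr/udp_server.h"

/* Hypothetical embedder state, passed as the user_data cookie. */
typedef struct {
  int packets_seen; /* illustrative counter */
} my_transport;

/* Matches grpc_udp_server_read_cb: user_data is whatever the embedder
 * handed to grpc_udp_server_start(). */
static void on_udp_read(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                        void *user_data) {
  my_transport *t = user_data;
  t->packets_seen++; /* ... then read from the fd wrapped by emfd ... */
}

/* Matches grpc_udp_server_orphan_cb: last chance to drop per-fd state. */
static void on_udp_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                          void *user_data) {
  /* release anything the my_transport instance holds for this emfd */
}

/* Hypothetical wiring: one transport object becomes the cookie that every
 * callback on this server receives. */
static void start_udp(grpc_exec_ctx *exec_ctx, grpc_pollset **pollsets,
                      size_t pollset_count, my_transport *t) {
  grpc_udp_server *s = grpc_udp_server_create();
  /* ports would be added here, registering on_udp_read/on_udp_orphan
   * (and a write callback) for each one -- elided */
  grpc_udp_server_start(exec_ctx, s, pollsets, pollset_count, t);
}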