Diffstat (limited to 'src/core')
-rw-r--r--  src/core/ext/client_channel/client_channel.c | 147
-rw-r--r--  src/core/ext/client_channel/parse_address.c | 5
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 8
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_ping.c | 2
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.c | 130
-rw-r--r--  src/core/lib/iomgr/error.c | 40
-rw-r--r--  src/core/lib/iomgr/error_internal.h | 15
-rw-r--r--  src/core/lib/iomgr/pollset_uv.c | 22
-rw-r--r--  src/core/lib/iomgr/resolve_address_uv.c | 52
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.c | 1
-rw-r--r--  src/core/lib/iomgr/udp_server.c | 16
-rw-r--r--  src/core/lib/iomgr/udp_server.h | 12
12 files changed, 253 insertions, 197 deletions
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index ba842c7916..960d00e815 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -71,7 +71,8 @@
*/
typedef enum {
- WAIT_FOR_READY_UNSET,
+ /* zero so it can be default initialized */
+ WAIT_FOR_READY_UNSET = 0,
WAIT_FOR_READY_FALSE,
WAIT_FOR_READY_TRUE
} wait_for_ready_value;
@@ -631,7 +632,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
typedef enum {
- GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
+ /* zero so that it can be default-initialized */
+ GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING = 0,
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} subchannel_creation_phase;
@@ -653,7 +655,6 @@ typedef struct client_channel_call_data {
gpr_timespec call_start_time;
gpr_timespec deadline;
method_parameters *method_params;
- grpc_closure read_service_config;
grpc_error *cancel_error;
@@ -727,6 +728,47 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
gpr_free(ops);
}
+// Sets calld->method_params.
+// If the method params specify a timeout, populates
+// *per_method_deadline and returns true.
+static bool set_call_method_params_from_service_config_locked(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ gpr_timespec *per_method_deadline) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ if (chand->method_params_table != NULL) {
+ calld->method_params = grpc_method_config_table_get(
+ exec_ctx, chand->method_params_table, calld->path);
+ if (calld->method_params != NULL) {
+ method_parameters_ref(calld->method_params);
+ if (gpr_time_cmp(calld->method_params->timeout,
+ gpr_time_0(GPR_TIMESPAN)) != 0) {
+ *per_method_deadline =
+ gpr_time_add(calld->call_start_time, calld->method_params->timeout);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static void apply_final_configuration_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ /* apply service-config level configuration to the call (now that we're
+ * certain it exists) */
+ call_data *calld = elem->call_data;
+ gpr_timespec per_method_deadline;
+ if (set_call_method_params_from_service_config_locked(exec_ctx, elem,
+ &per_method_deadline)) {
+ // If the deadline from the service config is shorter than the one
+ // from the client API, reset the deadline timer.
+ if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->deadline = per_method_deadline;
+ grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+ }
+ }
+}
+
static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
@@ -857,6 +899,7 @@ static bool pick_subchannel_locked(
}
GPR_ASSERT(error == GRPC_ERROR_NONE);
if (chand->lb_policy != NULL) {
+ apply_final_configuration_locked(exec_ctx, elem);
grpc_lb_policy *lb_policy = chand->lb_policy;
GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
// If the application explicitly set wait_for_ready, use that.
@@ -1071,115 +1114,19 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
-// Sets calld->method_params.
-// If the method params specify a timeout, populates
-// *per_method_deadline and returns true.
-static bool set_call_method_params_from_service_config_locked(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- gpr_timespec *per_method_deadline) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- if (chand->method_params_table != NULL) {
- calld->method_params = grpc_method_config_table_get(
- exec_ctx, chand->method_params_table, calld->path);
- if (calld->method_params != NULL) {
- method_parameters_ref(calld->method_params);
- if (gpr_time_cmp(calld->method_params->timeout,
- gpr_time_0(GPR_TIMESPAN)) != 0) {
- *per_method_deadline =
- gpr_time_add(calld->call_start_time, calld->method_params->timeout);
- return true;
- }
- }
- }
- return false;
-}
-
-// Gets data from the service config. Invoked when the resolver returns
-// its initial result.
-static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = arg;
- call_data *calld = elem->call_data;
- // If this is an error, there's no point in looking at the service config.
- if (error == GRPC_ERROR_NONE) {
- gpr_timespec per_method_deadline;
- if (set_call_method_params_from_service_config_locked(
- exec_ctx, elem, &per_method_deadline)) {
- // If the deadline from the service config is shorter than the one
- // from the client API, reset the deadline timer.
- if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
- calld->deadline = per_method_deadline;
- grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
- }
- }
- }
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
-}
-
-static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error_ignored) {
- grpc_call_element *elem = arg;
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- // If the resolver has already returned results, then we can access
- // the service config parameters immediately. Otherwise, we need to
- // defer that work until the resolver returns an initial result.
- if (chand->lb_policy != NULL) {
- // We already have a resolver result, so check for service config.
- gpr_timespec per_method_deadline;
- if (set_call_method_params_from_service_config_locked(
- exec_ctx, elem, &per_method_deadline)) {
- calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
- }
- } else {
- // We don't yet have a resolver result, so register a callback to
- // get the service config data once the resolver returns.
- // Take a reference to the call stack to be owned by the callback.
- GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
- grpc_closure_init(&calld->read_service_config, read_service_config_locked,
- elem, grpc_combiner_scheduler(chand->combiner, false));
- grpc_closure_list_append(&chand->waiting_for_config_closures,
- &calld->read_service_config, GRPC_ERROR_NONE);
- }
- // Start the deadline timer with the current deadline value. If we
- // do not yet have service config data, then the timer may be reset
- // later.
- grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
- "initial_read_service_config");
-}
-
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
// Initialize data members.
grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
- calld->method_params = NULL;
- calld->cancel_error = GRPC_ERROR_NONE;
- gpr_atm_rel_store(&calld->subchannel_call, 0);
- calld->connected_subchannel = NULL;
- calld->waiting_ops = NULL;
- calld->waiting_ops_count = 0;
- calld->waiting_ops_capacity = 0;
- calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
calld->owning_call = args->call_stack;
- calld->pollent = NULL;
calld->arena = args->arena;
- GRPC_CALL_STACK_REF(calld->owning_call, "initial_read_service_config");
- grpc_closure_sched(
- exec_ctx,
- grpc_closure_init(&calld->read_service_config,
- initial_read_service_config_locked, elem,
- grpc_combiner_scheduler(chand->combiner, false)),
- GRPC_ERROR_NONE);
+ grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
return GRPC_ERROR_NONE;
}
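The relocated service-config logic boils down to a deadline-clamping rule: the effective deadline is the earlier of the client-supplied deadline and call start plus the per-method timeout. A minimal stand-alone sketch of that rule (the helper name and free-standing form are assumptions, not part of the patch):

#include <grpc/support/time.h>

/* Sketch: pick the earlier of the client deadline and start + per-method timeout. */
static gpr_timespec effective_deadline(gpr_timespec start,
                                       gpr_timespec client_deadline,
                                       gpr_timespec method_timeout) {
  if (gpr_time_cmp(method_timeout, gpr_time_0(GPR_TIMESPAN)) == 0) {
    return client_deadline; /* no per-method timeout configured */
  }
  gpr_timespec per_method = gpr_time_add(start, method_timeout);
  return gpr_time_cmp(per_method, client_deadline) < 0 ? per_method
                                                       : client_deadline;
}

For example, a call started at T with a 30-second client deadline and a 10-second service-config timeout ends up with an effective deadline of T + 10s.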
diff --git a/src/core/ext/client_channel/parse_address.c b/src/core/ext/client_channel/parse_address.c
index 8ae15fc72b..cd1b2cd80c 100644
--- a/src/core/ext/client_channel/parse_address.c
+++ b/src/core/ext/client_channel/parse_address.c
@@ -128,6 +128,7 @@ int parse_ipv6(grpc_uri *uri, grpc_resolved_address *resolved_addr) {
GPR_ASSERT(host_end >= host);
char host_without_scope[INET6_ADDRSTRLEN];
size_t host_without_scope_len = (size_t)(host_end - host);
+ uint32_t sin6_scope_id = 0;
strncpy(host_without_scope, host, host_without_scope_len);
host_without_scope[host_without_scope_len] = '\0';
if (inet_pton(AF_INET6, host_without_scope, &in6->sin6_addr) == 0) {
@@ -136,10 +137,12 @@ int parse_ipv6(grpc_uri *uri, grpc_resolved_address *resolved_addr) {
}
if (gpr_parse_bytes_to_uint32(host_end + 1,
strlen(host) - host_without_scope_len - 1,
- &in6->sin6_scope_id) == 0) {
+ &sin6_scope_id) == 0) {
gpr_log(GPR_ERROR, "invalid ipv6 scope id: '%s'", host_end + 1);
goto done;
}
+ // Handle "sin6_scope_id" being type "u_long". See grpc issue #10027.
+ in6->sin6_scope_id = sin6_scope_id;
} else {
if (inet_pton(AF_INET6, host, &in6->sin6_addr) == 0) {
gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host);
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 114b14e884..082078c72f 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -511,6 +511,10 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_error *error) {
if (!t->closed) {
+ if (!grpc_error_has_clear_grpc_status(error)) {
+ error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAVAILABLE);
+ }
if (t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE) {
if (t->close_transport_on_writes_finished == NULL) {
t->close_transport_on_writes_finished =
@@ -520,10 +524,6 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_error_add_child(t->close_transport_on_writes_finished, error);
return;
}
- if (!grpc_error_has_clear_grpc_status(error)) {
- error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAVAILABLE);
- }
t->closed = 1;
connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index f487533c41..9b4b1a7b84 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -91,7 +91,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_ping_parser *p = parser;
while (p->byte != 8 && cur != end) {
- p->opaque_8bytes |= (((uint64_t)*cur) << (8 * p->byte));
+ p->opaque_8bytes |= (((uint64_t)*cur) << (56 - 8 * p->byte));
cur++;
p->byte++;
}
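The shift change switches the ping payload from least-significant-byte-first to most-significant-byte-first assembly, i.e. network byte order. A stand-alone sketch of the corrected accumulation (illustration only, not part of the patch):

#include <stdint.h>

/* Sketch: accumulate the 8 opaque ping bytes in network byte order. */
static uint64_t assemble_ping_payload(const uint8_t bytes[8]) {
  uint64_t value = 0;
  for (int i = 0; i < 8; i++) {
    value |= ((uint64_t)bytes[i]) << (56 - 8 * i);
  }
  return value; /* e.g. {0,0,0,0,0,0,0,1} decodes to 1 */
}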
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index 01a03533da..fabfaf8a27 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -54,6 +54,7 @@
#include "third_party/objective_c/Cronet/bidirectional_stream_c.h"
#define GRPC_HEADER_SIZE_IN_BYTES 5
+#define GRPC_FLUSH_READ_SIZE 4096
#define CRONET_LOG(...) \
do { \
@@ -151,11 +152,17 @@ struct write_state {
struct op_state {
bool state_op_done[OP_NUM_OPS];
bool state_callback_received[OP_NUM_OPS];
+ /* A non-zero gRPC status code has been seen */
bool fail_state;
+ /* Transport is discarding all buffered messages */
bool flush_read;
bool flush_cronet_when_ready;
bool pending_write_for_trailer;
- bool unprocessed_send_message;
+ bool pending_send_message;
+ /* User requested RECV_TRAILING_METADATA */
+ bool pending_recv_trailing_metadata;
+ /* A bidirectional read requested from Cronet has not yet completed */
+ bool pending_read_from_cronet;
grpc_error *cancel_error;
/* data structure for storing data coming from server */
struct read_state rs;
@@ -248,11 +255,35 @@ static const char *op_id_string(enum e_op_id i) {
return "UNKNOWN";
}
-static void free_read_buffer(stream_obj *s) {
+static void null_and_maybe_free_read_buffer(stream_obj *s) {
if (s->state.rs.read_buffer &&
s->state.rs.read_buffer != s->state.rs.grpc_header_bytes) {
gpr_free(s->state.rs.read_buffer);
- s->state.rs.read_buffer = NULL;
+ }
+ s->state.rs.read_buffer = NULL;
+}
+
+static void maybe_flush_read(stream_obj *s) {
+ /* To enter the flush-read state (discarding all messages buffered in the
+ * transport layer), two conditions must be satisfied: 1) a non-zero grpc
+ * status has been received, and 2) an op requesting the status code
+ * (RECV_TRAILING_METADATA) has been issued by the user. (See
+ * doc/status_ordering.md) */
+ /* Whenever either of the two conditions changes, we check whether we
+ * should enter the flush-read state. */
+ if (s->state.pending_recv_trailing_metadata && s->state.fail_state) {
+ if (!s->state.flush_read) {
+ CRONET_LOG(GPR_DEBUG, "%p: Flush read", s);
+ s->state.flush_read = true;
+ null_and_maybe_free_read_buffer(s);
+ s->state.rs.read_buffer = gpr_malloc(GRPC_FLUSH_READ_SIZE);
+ if (!s->state.pending_read_from_cronet) {
+ CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
+ bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
+ GRPC_FLUSH_READ_SIZE);
+ s->state.pending_read_from_cronet = true;
+ }
+ }
}
}
@@ -279,7 +310,11 @@ static void add_to_storage(struct stream_obj *s, grpc_transport_stream_op *op) {
storage->head = new_op;
storage->num_pending_ops++;
if (op->send_message) {
- s->state.unprocessed_send_message = true;
+ s->state.pending_send_message = true;
+ }
+ if (op->recv_trailing_metadata) {
+ s->state.pending_recv_trailing_metadata = true;
+ maybe_flush_read(s);
}
CRONET_LOG(GPR_DEBUG, "adding new op %p. %d in the queue.", new_op,
storage->num_pending_ops);
@@ -367,7 +402,7 @@ static void on_failed(bidirectional_stream *stream, int net_error) {
gpr_free(s->state.ws.write_buffer);
s->state.ws.write_buffer = NULL;
}
- free_read_buffer(s);
+ null_and_maybe_free_read_buffer(s);
gpr_mu_unlock(&s->mu);
execute_from_storage(s);
}
@@ -390,7 +425,7 @@ static void on_canceled(bidirectional_stream *stream) {
gpr_free(s->state.ws.write_buffer);
s->state.ws.write_buffer = NULL;
}
- free_read_buffer(s);
+ null_and_maybe_free_read_buffer(s);
gpr_mu_unlock(&s->mu);
execute_from_storage(s);
}
@@ -405,7 +440,7 @@ static void on_succeeded(bidirectional_stream *stream) {
bidirectional_stream_destroy(s->cbs);
s->state.state_callback_received[OP_SUCCEEDED] = true;
s->cbs = NULL;
- free_read_buffer(s);
+ null_and_maybe_free_read_buffer(s);
gpr_mu_unlock(&s->mu);
execute_from_storage(s);
}
@@ -473,6 +508,7 @@ static void on_response_headers_received(
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
s->state.rs.remaining_bytes);
+ s->state.pending_read_from_cronet = true;
}
gpr_mu_unlock(&s->mu);
grpc_exec_ctx_finish(&exec_ctx);
@@ -504,10 +540,13 @@ static void on_read_completed(bidirectional_stream *stream, char *data,
CRONET_LOG(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream, data,
count);
gpr_mu_lock(&s->mu);
+ s->state.pending_read_from_cronet = false;
s->state.state_callback_received[OP_RECV_MESSAGE] = true;
if (count > 0 && s->state.flush_read) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
- bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, 4096);
+ bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
+ GRPC_FLUSH_READ_SIZE);
+ s->state.pending_read_from_cronet = true;
gpr_mu_unlock(&s->mu);
} else if (count > 0) {
s->state.rs.received_bytes += count;
@@ -518,16 +557,14 @@ static void on_read_completed(bidirectional_stream *stream, char *data,
bidirectional_stream_read(
s->cbs, s->state.rs.read_buffer + s->state.rs.received_bytes,
s->state.rs.remaining_bytes);
+ s->state.pending_read_from_cronet = true;
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
execute_from_storage(s);
}
} else {
- if (s->state.flush_read) {
- gpr_free(s->state.rs.read_buffer);
- s->state.rs.read_buffer = NULL;
- }
+ null_and_maybe_free_read_buffer(s);
s->state.rs.read_stream_closed = true;
gpr_mu_unlock(&s->mu);
execute_from_storage(s);
@@ -564,6 +601,7 @@ static void on_response_trailers_received(
if (0 == strcmp(trailers->headers[i].key, "grpc-status") &&
0 != strcmp(trailers->headers[i].value, "0")) {
s->state.fail_state = true;
+ maybe_flush_read(s);
}
}
s->state.state_callback_received[OP_RECV_TRAILING_METADATA] = true;
@@ -778,7 +816,7 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op,
else if (!stream_state->state_callback_received[OP_SEND_INITIAL_METADATA])
result = false;
/* we haven't sent message yet */
- else if (stream_state->unprocessed_send_message &&
+ else if (stream_state->pending_send_message &&
!stream_state->state_op_done[OP_SEND_MESSAGE])
result = false;
/* we haven't got on_write_completed for the send yet */
@@ -900,7 +938,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
} else if (stream_op->send_message &&
op_can_be_run(stream_op, s, &oas->state, OP_SEND_MESSAGE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_MESSAGE", oas);
- stream_state->unprocessed_send_message = false;
+ stream_state->pending_send_message = false;
if (stream_state->state_callback_received[OP_FAILED]) {
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed.");
@@ -1009,6 +1047,13 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->state_op_done[OP_RECV_MESSAGE] = true;
oas->state.state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
+ } else if (stream_state->flush_read) {
+ CRONET_LOG(GPR_DEBUG, "flush read");
+ grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+ GRPC_ERROR_NONE);
+ stream_state->state_op_done[OP_RECV_MESSAGE] = true;
+ oas->state.state_op_done[OP_RECV_MESSAGE] = true;
+ result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->rs.length_field_received == false) {
if (stream_state->rs.received_bytes == GRPC_HEADER_SIZE_IN_BYTES &&
stream_state->rs.remaining_bytes == 0) {
@@ -1029,6 +1074,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
true; /* Indicates that at least one read request has been made */
bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
stream_state->rs.remaining_bytes);
+ stream_state->pending_read_from_cronet = true;
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
stream_state->rs.remaining_bytes = 0;
@@ -1047,11 +1093,13 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.received_bytes = 0;
+ stream_state->rs.length_field_received = false;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
stream_state->state_op_done[OP_READ_REQ_MADE] =
true; /* Indicates that at least one read request has been made */
bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
stream_state->rs.remaining_bytes);
+ stream_state->pending_read_from_cronet = true;
result = ACTION_TAKEN_NO_CALLBACK;
}
} else if (stream_state->rs.remaining_bytes == 0) {
@@ -1064,6 +1112,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
true; /* Indicates that at least one read request has been made */
bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
stream_state->rs.remaining_bytes);
+ stream_state->pending_read_from_cronet = true;
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
result = NO_ACTION_POSSIBLE;
@@ -1075,7 +1124,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
uint8_t *dst_p = GRPC_SLICE_START_PTR(read_data_slice);
memcpy(dst_p, stream_state->rs.read_buffer,
(size_t)stream_state->rs.length_field);
- free_read_buffer(s);
+ null_and_maybe_free_read_buffer(s);
grpc_slice_buffer_init(&stream_state->rs.read_slice_buffer);
grpc_slice_buffer_add(&stream_state->rs.read_slice_buffer,
read_data_slice);
@@ -1096,6 +1145,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
stream_state->rs.remaining_bytes);
+ stream_state->pending_read_from_cronet = true;
result = ACTION_TAKEN_NO_CALLBACK;
}
} else if (stream_op->recv_trailing_metadata &&
@@ -1153,15 +1203,6 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
make a note */
if (stream_op->recv_message)
stream_state->state_op_done[OP_RECV_MESSAGE_AND_ON_COMPLETE] = true;
- } else if (stream_state->fail_state && !stream_state->flush_read) {
- CRONET_LOG(GPR_DEBUG, "running: %p flush read", oas);
- if (stream_state->rs.read_buffer &&
- stream_state->rs.read_buffer != stream_state->rs.grpc_header_bytes) {
- gpr_free(stream_state->rs.read_buffer);
- stream_state->rs.read_buffer = NULL;
- }
- stream_state->rs.read_buffer = gpr_malloc(4096);
- stream_state->flush_read = true;
} else {
result = NO_ACTION_POSSIBLE;
}
@@ -1190,7 +1231,9 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->state.fail_state = s->state.flush_read = false;
s->state.cancel_error = NULL;
s->state.flush_cronet_when_ready = s->state.pending_write_for_trailer = false;
- s->state.unprocessed_send_message = false;
+ s->state.pending_send_message = false;
+ s->state.pending_recv_trailing_metadata = false;
+ s->state.pending_read_from_cronet = false;
s->curr_gs = gs;
s->curr_ct = (grpc_cronet_transport *)gt;
@@ -1209,37 +1252,30 @@ static void set_pollset_set_do_nothing(grpc_exec_ctx *exec_ctx,
static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_transport_stream_op *op) {
CRONET_LOG(GPR_DEBUG, "perform_stream_op");
- stream_obj *s = (stream_obj *)gs;
- add_to_storage(s, op);
if (op->send_initial_metadata &&
header_has_authority(op->send_initial_metadata->list.head)) {
/* Cronet does not support :authority header field. We cancel the call when
- this field is present in metadata */
- bidirectional_stream_header_array header_array;
- bidirectional_stream_header *header;
- bidirectional_stream cbs;
- CRONET_LOG(GPR_DEBUG,
- ":authority header is provided but not supported;"
- " cancel operations");
- /* Notify application that operation is cancelled by forging trailers */
- header_array.count = 1;
- header_array.capacity = 1;
- header_array.headers = gpr_malloc(sizeof(bidirectional_stream_header));
- header = (bidirectional_stream_header *)header_array.headers;
- header->key = "grpc-status";
- header->value = "1"; /* Return status GRPC_STATUS_CANCELLED */
- cbs.annotation = (void *)s;
- s->state.state_op_done[OP_CANCEL_ERROR] = true;
- on_response_trailers_received(&cbs, &header_array);
- gpr_free(header_array.headers);
- } else {
- execute_from_storage(s);
+ this field is present in metadata */
+ if (op->recv_initial_metadata_ready) {
+ grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready,
+ GRPC_ERROR_CANCELLED);
+ }
+ if (op->recv_message_ready) {
+ grpc_closure_sched(exec_ctx, op->recv_message_ready,
+ GRPC_ERROR_CANCELLED);
+ }
+ grpc_closure_sched(exec_ctx, op->on_complete, GRPC_ERROR_CANCELLED);
+ return;
}
+ stream_obj *s = (stream_obj *)gs;
+ add_to_storage(s, op);
+ execute_from_storage(s);
}
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, void *and_free_memory) {
stream_obj *s = (stream_obj *)gs;
+ null_and_maybe_free_read_buffer(s);
GRPC_ERROR_UNREF(s->state.cancel_error);
}
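Restating the flush-read gate described above as a stand-alone predicate (a sketch only; the real check lives in maybe_flush_read): buffered reads are discarded only once a failing grpc-status has been seen and the user has requested trailing metadata.

#include <stdbool.h>

/* Sketch of the gate: both conditions must hold, and we only enter once. */
static bool should_enter_flush_read(bool fail_state,
                                    bool pending_recv_trailing_metadata,
                                    bool already_flushing) {
  return fail_state && pending_recv_trailing_metadata && !already_flushing;
}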
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 7cdbe30198..1127fff756 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -140,14 +140,16 @@ grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line,
const char *func) {
if (grpc_error_is_special(err)) return err;
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
- err->refs.count, err->refs.count + 1, file, line, func);
- gpr_ref(&err->refs);
+ gpr_atm_no_barrier_load(&err->atomics.refs.count),
+ gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line,
+ func);
+ gpr_ref(&err->atomics.refs);
return err;
}
#else
grpc_error *grpc_error_ref(grpc_error *err) {
if (grpc_error_is_special(err)) return err;
- gpr_ref(&err->refs);
+ gpr_ref(&err->atomics.refs);
return err;
}
#endif
@@ -182,7 +184,7 @@ static void error_destroy(grpc_error *err) {
GPR_ASSERT(!grpc_error_is_special(err));
unref_errs(err);
unref_strs(err);
- gpr_free((void *)gpr_atm_acq_load(&err->error_string));
+ gpr_free((void *)gpr_atm_acq_load(&err->atomics.error_string));
gpr_free(err);
}
@@ -191,15 +193,17 @@ void grpc_error_unref(grpc_error *err, const char *file, int line,
const char *func) {
if (grpc_error_is_special(err)) return;
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d %s]", err,
- err->refs.count, err->refs.count - 1, file, line, func);
- if (gpr_unref(&err->refs)) {
+ gpr_atm_no_barrier_load(&err->atomics.refs.count),
+ gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line,
+ func);
+ if (gpr_unref(&err->atomics.refs)) {
error_destroy(err);
}
}
#else
void grpc_error_unref(grpc_error *err) {
if (grpc_error_is_special(err)) return;
- if (gpr_unref(&err->refs)) {
+ if (gpr_unref(&err->atomics.refs)) {
error_destroy(err);
}
}
@@ -328,8 +332,8 @@ grpc_error *grpc_error_create(const char *file, int line, const char *desc,
internal_set_time(&err, GRPC_ERROR_TIME_CREATED, gpr_now(GPR_CLOCK_REALTIME));
- gpr_atm_no_barrier_store(&err->error_string, 0);
- gpr_ref_init(&err->refs, 1);
+ gpr_atm_no_barrier_store(&err->atomics.error_string, 0);
+ gpr_ref_init(&err->atomics.refs, 1);
GPR_TIMER_END("grpc_error_create", 0);
return err;
}
@@ -369,7 +373,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
grpc_slice_from_static_string("cancelled"));
internal_set_int(&out, GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED);
}
- } else if (gpr_ref_is_unique(&in->refs)) {
+ } else if (gpr_ref_is_unique(&in->atomics.refs)) {
out = in;
} else {
uint8_t new_arena_capacity = in->arena_capacity;
@@ -382,10 +386,14 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
#endif
- memcpy(out, in, sizeof(*in) + in->arena_size * sizeof(intptr_t));
+ // bulk memcpy of the rest of the struct.
+ size_t skip = sizeof(&out->atomics);
+ memcpy((void *)((uintptr_t)out + skip), (void *)((uintptr_t)in + skip),
+ sizeof(*in) + (in->arena_size * sizeof(intptr_t)) - skip);
+ // manually set the atomics and the new capacity
+ gpr_atm_no_barrier_store(&out->atomics.error_string, 0);
+ gpr_ref_init(&out->atomics.refs, 1);
out->arena_capacity = new_arena_capacity;
- gpr_atm_no_barrier_store(&out->error_string, 0);
- gpr_ref_init(&out->refs, 1);
ref_strs(out);
ref_errs(out);
GRPC_ERROR_UNREF(in);
@@ -692,7 +700,7 @@ const char *grpc_error_string(grpc_error *err) {
if (err == GRPC_ERROR_OOM) return oom_error_string;
if (err == GRPC_ERROR_CANCELLED) return cancelled_error_string;
- void *p = (void *)gpr_atm_acq_load(&err->error_string);
+ void *p = (void *)gpr_atm_acq_load(&err->atomics.error_string);
if (p != NULL) {
GPR_TIMER_END("grpc_error_string", 0);
return p;
@@ -712,9 +720,9 @@ const char *grpc_error_string(grpc_error *err) {
char *out = finish_kvs(&kvs);
- if (!gpr_atm_rel_cas(&err->error_string, 0, (gpr_atm)out)) {
+ if (!gpr_atm_rel_cas(&err->atomics.error_string, 0, (gpr_atm)out)) {
gpr_free(out);
- out = (char *)gpr_atm_no_barrier_load(&err->error_string);
+ out = (char *)gpr_atm_no_barrier_load(&err->atomics.error_string);
}
GPR_TIMER_END("grpc_error_string", 0);
diff --git a/src/core/lib/iomgr/error_internal.h b/src/core/lib/iomgr/error_internal.h
index fb4814e41f..7f204df1b2 100644
--- a/src/core/lib/iomgr/error_internal.h
+++ b/src/core/lib/iomgr/error_internal.h
@@ -46,14 +46,25 @@ struct grpc_linked_error {
uint8_t next;
};
+// c core representation of an error. See error.h for high level description of
+// this object.
struct grpc_error {
- gpr_refcount refs;
+ // All atomics in grpc_error must be stored in this nested struct. The rest of
+ // the object is memcpy-ed in bulk in copy_error_and_unref.
+ struct atomics {
+ gpr_refcount refs;
+ gpr_atm error_string;
+ } atomics;
+ // These arrays index into the dynamic arena at the bottom of the struct.
+ // UINT8_MAX is used as a sentinel value.
uint8_t ints[GRPC_ERROR_INT_MAX];
uint8_t strs[GRPC_ERROR_STR_MAX];
uint8_t times[GRPC_ERROR_TIME_MAX];
+ // The child errors are stored in the arena, but are effectively a linked list
+ // structure, since they are contained within grpc_linked_error objects.
uint8_t first_err;
uint8_t last_err;
- gpr_atm error_string;
+ // The arena is dynamically reallocated with a grow factor of 1.5.
uint8_t arena_size;
uint8_t arena_capacity;
intptr_t arena[0];
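The point of the nested struct is layout: with the atomics first, copy_error_and_unref can bulk-copy everything after them and then re-initialize only the atomic fields on the new object. A minimal sketch of that trick with stand-in field types (this is not the gRPC struct):

#include <stddef.h>
#include <string.h>

struct err_like {
  struct { long refs; long error_string; } atomics; /* stand-ins for gpr types */
  unsigned char rest[64];                           /* non-atomic payload */
};

static void copy_skipping_atomics(struct err_like *dst,
                                  const struct err_like *src) {
  size_t skip = offsetof(struct err_like, rest);    /* bytes left untouched */
  memcpy((char *)dst + skip, (const char *)src + skip, sizeof(*src) - skip);
  dst->atomics.refs = 1;                            /* fresh refcount for the copy */
  dst->atomics.error_string = 0;                    /* string cache rebuilt lazily */
}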
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
index af33949c69..a2f81bcd78 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -39,6 +39,7 @@
#include <string.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
@@ -61,25 +62,30 @@ gpr_mu grpc_polling_mu;
immediately in the next loop iteration.
Note: In the future, if there is a bug that involves missing wakeups in the
future, try adding a uv_async_t to kick the loop differently */
-uv_timer_t dummy_uv_handle;
+uv_timer_t *dummy_uv_handle;
size_t grpc_pollset_size() { return sizeof(grpc_pollset); }
void dummy_timer_cb(uv_timer_t *handle) {}
+void dummy_handle_close_cb(uv_handle_t *handle) { gpr_free(handle); }
+
void grpc_pollset_global_init(void) {
gpr_mu_init(&grpc_polling_mu);
- uv_timer_init(uv_default_loop(), &dummy_uv_handle);
+ dummy_uv_handle = gpr_malloc(sizeof(uv_timer_t));
+ uv_timer_init(uv_default_loop(), dummy_uv_handle);
grpc_pollset_work_run_loop = 1;
}
-static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; }
-
void grpc_pollset_global_shutdown(void) {
gpr_mu_destroy(&grpc_polling_mu);
- uv_close((uv_handle_t *)&dummy_uv_handle, timer_close_cb);
+ uv_close((uv_handle_t *)dummy_uv_handle, dummy_handle_close_cb);
}
+static void timer_run_cb(uv_timer_t *timer) {}
+
+static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; }
+
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
*mu = &grpc_polling_mu;
uv_timer_init(uv_default_loop(), &pollset->timer);
@@ -95,7 +101,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
} else {
// kick the loop once
- uv_timer_start(&dummy_uv_handle, dummy_timer_cb, 0, 0);
+ uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
}
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
}
@@ -111,8 +117,6 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
}
}
-static void timer_run_cb(uv_timer_t *timer) {}
-
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
gpr_timespec now, gpr_timespec deadline) {
@@ -145,7 +149,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
- uv_timer_start(&dummy_uv_handle, dummy_timer_cb, 0, 0);
+ uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
return GRPC_ERROR_NONE;
}
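The dummy timer now follows the standard libuv lifetime rule: a handle passed to uv_close must remain valid until its close callback runs, so the handle is heap-allocated and freed only there. A stand-alone sketch of the idiom (plain libuv, not gRPC code):

#include <stdlib.h>
#include <uv.h>

static void on_closed(uv_handle_t *handle) { free(handle); }

int main(void) {
  uv_timer_t *timer = malloc(sizeof(*timer));
  uv_timer_init(uv_default_loop(), timer);    /* register with the loop */
  uv_close((uv_handle_t *)timer, on_closed);  /* freed once libuv is done with it */
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}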
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
index 79ff910738..4d715be94c 100644
--- a/src/core/lib/iomgr/resolve_address_uv.c
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -40,6 +40,7 @@
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
@@ -54,8 +55,36 @@ typedef struct request {
grpc_closure *on_done;
grpc_resolved_addresses **addresses;
struct addrinfo *hints;
+ char *host;
+ char *port;
} request;
+static int retry_named_port_failure(int status, request *r,
+ uv_getaddrinfo_cb getaddrinfo_cb) {
+ if (status != 0) {
+ // This loop is copied from resolve_address_posix.c
+ char *svc[][2] = {{"http", "80"}, {"https", "443"}};
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
+ if (strcmp(r->port, svc[i][0]) == 0) {
+ int retry_status;
+ uv_getaddrinfo_t *req = gpr_malloc(sizeof(uv_getaddrinfo_t));
+ req->data = r;
+ retry_status = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_cb,
+ r->host, svc[i][1], r->hints);
+ if (retry_status < 0 || getaddrinfo_cb == NULL) {
+ // The callback will not be called
+ gpr_free(req);
+ }
+ return retry_status;
+ }
+ }
+ }
+ /* If this function calls uv_getaddrinfo, it will return that function's
+ return value. That function only returns numbers <=0, so we can safely
+ return 1 to indicate that we never retried */
+ return 1;
+}
+
static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result,
grpc_resolved_addresses **addresses) {
struct addrinfo *resp;
@@ -97,13 +126,21 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
request *r = (request *)req->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_error *error;
+ int retry_status;
+
+ gpr_free(req);
+ retry_status = retry_named_port_failure(status, r, getaddrinfo_callback);
+ if (retry_status == 0) {
+ // The request is being retried. Nothing should be done here
+ return;
+ }
+ /* Either no retry was attempted, or the retry failed. Either way, the
+ original error probably has more interesting information */
error = handle_addrinfo_result(status, res, r->addresses);
grpc_closure_sched(&exec_ctx, r->on_done, error);
grpc_exec_ctx_finish(&exec_ctx);
-
gpr_free(r->hints);
gpr_free(r);
- gpr_free(req);
uv_freeaddrinfo(res);
}
@@ -143,6 +180,7 @@ static grpc_error *blocking_resolve_address_impl(
uv_getaddrinfo_t req;
int s;
grpc_error *err;
+ int retry_status;
req.addrinfo = NULL;
@@ -158,6 +196,12 @@ static grpc_error *blocking_resolve_address_impl(
hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */
s = uv_getaddrinfo(uv_default_loop(), &req, NULL, host, port, &hints);
+ request r = {
+ .addresses = addresses, .hints = &hints, .host = host, .port = port};
+ retry_status = retry_named_port_failure(s, &r, NULL);
+ if (retry_status <= 0) {
+ s = retry_status;
+ }
err = handle_addrinfo_result(s, req.addrinfo, addresses);
done:
@@ -200,6 +244,8 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
r = gpr_malloc(sizeof(request));
r->on_done = on_done;
r->addresses = addrs;
+ r->host = host;
+ r->port = port;
req = gpr_malloc(sizeof(uv_getaddrinfo_t));
req->data = r;
@@ -222,6 +268,8 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
gpr_free(r);
gpr_free(req);
gpr_free(hints);
+ gpr_free(host);
+ gpr_free(port);
}
}
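The retry table mirrors resolve_address_posix.c: if getaddrinfo rejects a named service, the lookup is retried with the well-known numeric port. A trivial sketch of that mapping (the helper name is invented here):

#include <string.h>

static const char *numeric_port_for(const char *service) {
  static const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
  for (size_t i = 0; i < sizeof(svc) / sizeof(svc[0]); i++) {
    if (strcmp(service, svc[i][0]) == 0) return svc[i][1];
  }
  return NULL; /* not a service name we know how to retry */
}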
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
index ae66577caf..618483d9cb 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -76,7 +76,6 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name, str);
- grpc_error_free_string(str);
}
if (error == GRPC_ERROR_NONE) {
/* error == NONE implies that the timer ran out, and wasn't cancelled. If
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index d1bcd89af1..71e295770a 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -109,8 +109,8 @@ struct grpc_udp_server {
grpc_pollset **pollsets;
/* number of pollsets in the pollsets array */
size_t pollset_count;
- /* The parent grpc server */
- grpc_server *grpc_server;
+ /* opaque object to pass to callbacks */
+ void *user_data;
};
grpc_udp_server *grpc_udp_server_create(void) {
@@ -178,7 +178,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
/* Call the orphan_cb to signal that the FD is about to be closed and
* should no longer be used. */
GPR_ASSERT(sp->orphan_cb);
- sp->orphan_cb(exec_ctx, sp->emfd);
+ sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data);
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
"udp_listener_shutdown");
@@ -204,7 +204,7 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
if (s->active_ports) {
for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
- sp->orphan_cb(exec_ctx, sp->emfd);
+ sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data);
grpc_fd_shutdown(exec_ctx, sp->emfd,
GRPC_ERROR_CREATE("Server destroyed"));
}
@@ -299,7 +299,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
/* Tell the registered callback that data is available to read. */
GPR_ASSERT(sp->read_cb);
- sp->read_cb(exec_ctx, sp->emfd, sp->server->grpc_server);
+ sp->read_cb(exec_ctx, sp->emfd, sp->server->user_data);
/* Re-arm the notification event so we get another chance to read. */
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
@@ -322,7 +322,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
/* Tell the registered callback that the socket is writeable. */
GPR_ASSERT(sp->write_cb);
- sp->write_cb(exec_ctx, sp->emfd);
+ sp->write_cb(exec_ctx, sp->emfd, sp->server->user_data);
/* Re-arm the notification event so we get another chance to write. */
grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
@@ -464,13 +464,13 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
grpc_pollset **pollsets, size_t pollset_count,
- grpc_server *server) {
+ void *user_data) {
size_t i;
gpr_mu_lock(&s->mu);
grpc_udp_listener *sp;
GPR_ASSERT(s->active_ports == 0);
s->pollsets = pollsets;
- s->grpc_server = server;
+ s->user_data = user_data;
sp = s->head;
while (sp != NULL) {
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index ed63fa7d81..90842a47f0 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -47,23 +47,23 @@ typedef struct grpc_udp_server grpc_udp_server;
/* Called when data is available to read from the socket. */
typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
- struct grpc_server *server);
+ void *user_data);
/* Called when the socket is writeable. */
-typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx,
- grpc_fd *emfd);
+typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
+ void *user_data);
/* Called when the grpc_fd is about to be orphaned (and the FD closed). */
typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx,
- grpc_fd *emfd);
+ grpc_fd *emfd, void *user_data);
/* Create a server, initially not bound to any ports */
grpc_udp_server *grpc_udp_server_create(void);
-/* Start listening to bound ports */
+/* Start listening to bound ports. user_data is passed to callbacks. */
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server,
grpc_pollset **pollsets, size_t pollset_count,
- struct grpc_server *server);
+ void *user_data);
int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index);
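A hypothetical caller sketch (the names below are invented, not part of the patch) showing the generalized callbacks: whatever pointer is handed to grpc_udp_server_start comes back verbatim as user_data.

typedef struct { int packets_seen; } my_udp_state;

static void my_read_cb(grpc_exec_ctx *exec_ctx, grpc_fd *emfd, void *user_data) {
  my_udp_state *st = user_data; /* the pointer passed to grpc_udp_server_start */
  st->packets_seen++;
  (void)exec_ctx;
  (void)emfd;
}

/* later: grpc_udp_server_start(exec_ctx, server, pollsets, pollset_count, &state); */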