Diffstat (limited to 'src/core/lib')
-rw-r--r-- src/core/lib/channel/channel_args.c | 7
-rw-r--r-- src/core/lib/channel/channel_args.h | 2
-rw-r--r-- src/core/lib/channel/channel_stack.c | 11
-rw-r--r-- src/core/lib/channel/channel_stack.h | 11
-rw-r--r-- src/core/lib/channel/compress_filter.c | 36
-rw-r--r-- src/core/lib/channel/connected_channel.c | 8
-rw-r--r-- src/core/lib/channel/deadline_filter.c | 44
-rw-r--r-- src/core/lib/channel/deadline_filter.h | 6
-rw-r--r-- src/core/lib/channel/http_client_filter.c | 123
-rw-r--r-- src/core/lib/channel/http_server_filter.c | 82
-rw-r--r-- src/core/lib/channel/max_age_filter.c | 386
-rw-r--r-- src/core/lib/channel/max_age_filter.h | 39
-rw-r--r-- src/core/lib/channel/message_size_filter.c | 29
-rw-r--r-- src/core/lib/http/httpcli_security_connector.c | 6
-rw-r--r-- src/core/lib/iomgr/endpoint_pair.h | 5
-rw-r--r-- src/core/lib/iomgr/endpoint_pair_posix.c | 17
-rw-r--r-- src/core/lib/iomgr/endpoint_pair_uv.c | 5
-rw-r--r-- src/core/lib/iomgr/endpoint_pair_windows.c | 15
-rw-r--r-- src/core/lib/iomgr/error.c | 35
-rw-r--r-- src/core/lib/iomgr/ev_epoll_linux.c | 240
-rw-r--r-- src/core/lib/iomgr/ev_poll_posix.c | 6
-rw-r--r-- src/core/lib/iomgr/ev_posix.c | 6
-rw-r--r-- src/core/lib/iomgr/ev_posix.h | 3
-rw-r--r-- src/core/lib/iomgr/lockfree_event.c | 238
-rw-r--r-- src/core/lib/iomgr/lockfree_event.h | 54
-rw-r--r-- src/core/lib/iomgr/pollset.h | 4
-rw-r--r-- src/core/lib/iomgr/pollset_windows.c | 4
-rw-r--r-- src/core/lib/iomgr/port.h | 4
-rw-r--r-- src/core/lib/iomgr/resource_quota.c | 9
-rw-r--r-- src/core/lib/iomgr/resource_quota.h | 2
-rw-r--r-- src/core/lib/iomgr/tcp_client.h | 4
-rw-r--r-- src/core/lib/iomgr/tcp_client_posix.c | 24
-rw-r--r-- src/core/lib/iomgr/tcp_client_windows.c | 21
-rw-r--r-- src/core/lib/iomgr/tcp_posix.c | 114
-rw-r--r-- src/core/lib/iomgr/tcp_posix.h | 7
-rw-r--r-- src/core/lib/iomgr/tcp_server_posix.c | 22
-rw-r--r-- src/core/lib/iomgr/tcp_server_utils_posix.h | 3
-rw-r--r-- src/core/lib/iomgr/tcp_server_windows.c | 25
-rw-r--r-- src/core/lib/iomgr/tcp_windows.c | 14
-rw-r--r-- src/core/lib/iomgr/tcp_windows.h | 4
-rw-r--r-- src/core/lib/iomgr/timer.h | 3
-rw-r--r-- src/core/lib/iomgr/timer_generic.c | 320
-rw-r--r-- src/core/lib/iomgr/timer_generic.h | 2
-rw-r--r-- src/core/lib/iomgr/timer_heap.c | 16
-rw-r--r-- src/core/lib/security/credentials/credentials.h | 2
-rw-r--r-- src/core/lib/security/transport/client_auth_filter.c | 38
-rw-r--r-- src/core/lib/security/transport/security_connector.c | 49
-rw-r--r-- src/core/lib/security/transport/security_handshaker.c | 9
-rw-r--r-- src/core/lib/security/transport/server_auth_filter.c | 26
-rw-r--r-- src/core/lib/slice/slice_buffer.c | 30
-rw-r--r-- src/core/lib/support/cpu_linux.c | 5
-rw-r--r-- src/core/lib/support/wrap_memcpy.c | 2
-rw-r--r-- src/core/lib/surface/call.c | 308
-rw-r--r-- src/core/lib/surface/channel.c | 15
-rw-r--r-- src/core/lib/surface/completion_queue.c | 5
-rw-r--r-- src/core/lib/surface/init.c | 4
-rw-r--r-- src/core/lib/surface/init_secure.c | 2
-rw-r--r-- src/core/lib/surface/lame_client.c | 20
-rw-r--r-- src/core/lib/surface/server.c | 44
-rw-r--r-- src/core/lib/transport/transport.c | 34
-rw-r--r-- src/core/lib/transport/transport.h | 121
-rw-r--r-- src/core/lib/transport/transport_impl.h | 3
-rw-r--r-- src/core/lib/transport/transport_op_string.c | 34
63 files changed, 1443 insertions, 1324 deletions
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index 1a099ac437..a6d124c719 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -346,3 +346,10 @@ int grpc_channel_arg_get_integer(grpc_arg *arg, grpc_integer_options options) {
}
return arg->value.integer;
}
+
+bool grpc_channel_args_want_minimal_stack(const grpc_channel_args *args) {
+ const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_MINIMAL_STACK);
+ if (arg == NULL) return false;
+ if (arg->type == GRPC_ARG_INTEGER && arg->value.integer == 0) return false;
+ return true;
+}
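The new predicate treats any value other than an explicit integer 0 as opting in to the minimal stack. A usage sketch, assuming GRPC_ARG_MINIMAL_STACK is the arg key defined alongside the other GRPC_ARG_* macros (the surrounding code is illustrative, not part of this commit):

/* Sketch: build args that request the minimal stack, then query them. */
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
arg.key = GRPC_ARG_MINIMAL_STACK;
arg.value.integer = 1;
grpc_channel_args args = {1, &arg};
bool minimal = grpc_channel_args_want_minimal_stack(&args); /* true */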
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index 5c7d31f8bb..158cda5b21 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -113,6 +113,8 @@ grpc_channel_args *grpc_channel_args_set_socket_mutator(
const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
const char *name);
+bool grpc_channel_args_want_minimal_stack(const grpc_channel_args *args);
+
typedef struct grpc_integer_options {
int default_value; // Return this if value is outside of expected bounds.
int min_value;
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 6d53b0576e..94382980eb 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -246,9 +246,9 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
}
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
grpc_call_element *next_elem = elem + 1;
- next_elem->filter->start_transport_stream_op(exec_ctx, next_elem, op);
+ next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op);
}
char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
@@ -284,7 +284,8 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
- grpc_transport_stream_op *op = grpc_make_transport_stream_op(NULL);
- op->cancel_error = error;
- elem->filter->start_transport_stream_op(exec_ctx, elem, op);
+ grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(NULL);
+ op->cancel_stream = true;
+ op->payload->cancel_stream.cancel_error = error;
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
}
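The rename above is the pattern that recurs through the rest of this diff: grpc_transport_stream_op becomes grpc_transport_stream_op_batch, the presence of each sub-operation turns into a bool flag on the batch, and the sub-operation's arguments move under op->payload. A minimal pass-through filter method under the new layout (a sketch; only the type and field names are taken from this diff):

static void example_start_transport_stream_op_batch(
    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
    grpc_transport_stream_op_batch *op) {
  if (op->send_message) { /* presence flag, no longer a NULL-pointer check */
    grpc_byte_stream *msg = op->payload->send_message.send_message;
    gpr_log(GPR_INFO, "forwarding %d payload bytes", (int)msg->length);
  }
  grpc_call_next_op(exec_ctx, elem, op); /* hand the whole batch down */
}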
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 80e3603e8d..fdbcbdb018 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -112,9 +112,9 @@ typedef struct {
typedef struct {
/* Called to eg. send/receive data on a call.
See grpc_call_next_op on how to call the next element in the stack */
- void (*start_transport_stream_op)(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op);
+ void (*start_transport_stream_op_batch)(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op);
/* Called to handle channel level operations - e.g. new calls, or transport
closure.
See grpc_channel_next_op on how to call the next element in the stack */
@@ -281,7 +281,7 @@ void grpc_call_stack_ignore_set_pollset_or_pollset_set(
grpc_polling_entity *pollent);
/* Call the next operation in a call stack */
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op *op);
+ grpc_transport_stream_op_batch *op);
/* Call the next operation (depending on call directionality) in a channel
stack */
void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
@@ -300,7 +300,8 @@ grpc_channel_stack *grpc_channel_stack_from_top_element(
grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
- grpc_call_element *elem, grpc_transport_stream_op *op);
+ grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op);
void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
grpc_call_element *cur_elem,
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index 02dc479f3a..4625cba0d2 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -62,7 +62,7 @@ typedef struct call_data {
/** If true, contents of \a compression_algorithm are authoritative */
int has_compression_algorithm;
- grpc_transport_stream_op *send_op;
+ grpc_transport_stream_op_batch *send_op;
uint32_t send_length;
uint32_t send_flags;
grpc_slice incoming_slice;
@@ -210,7 +210,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
calld->send_flags);
- calld->send_op->send_message = &calld->replacement_stream.base;
+ calld->send_op->payload->send_message.send_message =
+ &calld->replacement_stream.base;
calld->post_send = calld->send_op->on_complete;
calld->send_op->on_complete = &calld->send_done;
@@ -231,9 +232,9 @@ static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
static void continue_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
- while (grpc_byte_stream_next(exec_ctx, calld->send_op->send_message,
- &calld->incoming_slice, ~(size_t)0,
- &calld->got_slice)) {
+ while (grpc_byte_stream_next(
+ exec_ctx, calld->send_op->payload->send_message.send_message,
+ &calld->incoming_slice, ~(size_t)0, &calld->got_slice)) {
grpc_slice_buffer_add(&calld->slices, calld->incoming_slice);
if (calld->send_length == calld->slices.length) {
finish_send_message(exec_ctx, elem);
@@ -242,33 +243,34 @@ static void continue_send_message(grpc_exec_ctx *exec_ctx,
}
}
-static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+static void compress_start_transport_stream_op_batch(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
call_data *calld = elem->call_data;
- GPR_TIMER_BEGIN("compress_start_transport_stream_op", 0);
+ GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
if (op->send_initial_metadata) {
grpc_error *error = process_send_initial_metadata(
- exec_ctx, elem, op->send_initial_metadata);
+ exec_ctx, elem,
+ op->payload->send_initial_metadata.send_initial_metadata);
if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
return;
}
}
- if (op->send_message != NULL &&
- !skip_compression(elem, op->send_message->flags)) {
+ if (op->send_message &&
+ !skip_compression(elem, op->payload->send_message.send_message->flags)) {
calld->send_op = op;
- calld->send_length = op->send_message->length;
- calld->send_flags = op->send_message->flags;
+ calld->send_length = op->payload->send_message.send_message->length;
+ calld->send_flags = op->payload->send_message.send_message->flags;
continue_send_message(exec_ctx, elem);
} else {
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
}
- GPR_TIMER_END("compress_start_transport_stream_op", 0);
+ GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
@@ -337,7 +339,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_compress_filter = {
- compress_start_transport_stream_op,
+ compress_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index 75c68a5534..22caf24373 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -62,9 +62,9 @@ typedef struct connected_channel_call_data { void *unused; } call_data;
/* Intercept a call operation and either push it directly up or translate it
into transport stream operations */
-static void con_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+static void con_start_transport_stream_op_batch(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
@@ -142,7 +142,7 @@ static void con_get_channel_info(grpc_exec_ctx *exec_ctx,
const grpc_channel_info *channel_info) {}
const grpc_channel_filter grpc_connected_filter = {
- con_start_transport_stream_op,
+ con_start_transport_stream_op_batch,
con_start_transport_op,
sizeof(call_data),
init_call_elem,
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index ca701ed457..fda099b021 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -134,7 +134,7 @@ static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
// Inject our own on_complete callback into op.
static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
- grpc_transport_stream_op* op) {
+ grpc_transport_stream_op_batch* op) {
deadline_state->next_on_complete = op->on_complete;
grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state,
grpc_schedule_on_exec_ctx);
@@ -196,16 +196,16 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
start_timer_if_needed(exec_ctx, elem, new_deadline);
}
-void grpc_deadline_state_client_start_transport_stream_op(
+void grpc_deadline_state_client_start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- grpc_transport_stream_op* op) {
+ grpc_transport_stream_op_batch* op) {
grpc_deadline_state* deadline_state = elem->call_data;
- if (op->cancel_error != GRPC_ERROR_NONE) {
+ if (op->cancel_stream) {
cancel_timer_if_needed(exec_ctx, deadline_state);
} else {
// Make sure we know when the call is complete, so that we can cancel
// the timer.
- if (op->recv_trailing_metadata != NULL) {
+ if (op->recv_trailing_metadata) {
inject_on_complete_cb(deadline_state, op);
}
}
@@ -261,10 +261,11 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
}
// Method for starting a call op for client filter.
-static void client_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
- grpc_transport_stream_op* op) {
- grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
+static void client_start_transport_stream_op_batch(
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
+ grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
+ op);
// Chain to next filter.
grpc_call_next_op(exec_ctx, elem, op);
}
@@ -282,30 +283,33 @@ static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
}
// Method for starting a call op for server filter.
-static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
- grpc_transport_stream_op* op) {
+static void server_start_transport_stream_op_batch(
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
server_call_data* calld = elem->call_data;
- if (op->cancel_error != GRPC_ERROR_NONE) {
+ if (op->cancel_stream) {
cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
} else {
// If we're receiving initial metadata, we need to get the deadline
// from the recv_initial_metadata_ready callback. So we inject our
// own callback into that hook.
- if (op->recv_initial_metadata_ready != NULL) {
- calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
- calld->recv_initial_metadata = op->recv_initial_metadata;
+ if (op->recv_initial_metadata) {
+ calld->next_recv_initial_metadata_ready =
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready;
+ calld->recv_initial_metadata =
+ op->payload->recv_initial_metadata.recv_initial_metadata;
grpc_closure_init(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
- op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready;
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready =
+ &calld->recv_initial_metadata_ready;
}
// Make sure we know when the call is complete, so that we can cancel
// the timer.
// Note that we trigger this on recv_trailing_metadata, even though
// the client never sends trailing metadata, because this is the
// hook that tells us when the call is complete on the server side.
- if (op->recv_trailing_metadata != NULL) {
+ if (op->recv_trailing_metadata) {
inject_on_complete_cb(&calld->base.deadline_state, op);
}
}
@@ -314,7 +318,7 @@ static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
}
const grpc_channel_filter grpc_client_deadline_filter = {
- client_start_transport_stream_op,
+ client_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(base_call_data),
init_call_elem,
@@ -329,7 +333,7 @@ const grpc_channel_filter grpc_client_deadline_filter = {
};
const grpc_channel_filter grpc_server_deadline_filter = {
- server_start_transport_stream_op,
+ server_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(server_call_data),
init_call_elem,
diff --git a/src/core/lib/channel/deadline_filter.h b/src/core/lib/channel/deadline_filter.h
index 72cd5cb929..d8db9a9f97 100644
--- a/src/core/lib/channel/deadline_filter.h
+++ b/src/core/lib/channel/deadline_filter.h
@@ -83,15 +83,15 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
gpr_timespec new_deadline);
-// To be called from the client-side filter's start_transport_stream_op()
+// To be called from the client-side filter's start_transport_stream_op_batch()
// method. Ensures that the deadline timer is cancelled when the call
// is completed.
//
// Note: It is the caller's responsibility to chain to the next filter if
// necessary after this function returns.
-void grpc_deadline_state_client_start_transport_stream_op(
+void grpc_deadline_state_client_start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- grpc_transport_stream_op* op);
+ grpc_transport_stream_op_batch* op);
// Deadline filters for direct client channels and server channels.
// Note: Deadlines for non-direct client channels are handled by the
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index 17af2013d9..4e47c5c658 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -63,7 +63,7 @@ typedef struct call_data {
uint8_t *payload_bytes;
/* Vars to read data off of send_message */
- grpc_transport_stream_op send_op;
+ grpc_transport_stream_op_batch *send_op;
uint32_t send_length;
uint32_t send_flags;
grpc_slice incoming_slice;
@@ -219,9 +219,9 @@ static void continue_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = elem->call_data;
uint8_t *wrptr = calld->payload_bytes;
- while (grpc_byte_stream_next(exec_ctx, calld->send_op.send_message,
- &calld->incoming_slice, ~(size_t)0,
- &calld->got_slice)) {
+ while (grpc_byte_stream_next(
+ exec_ctx, calld->send_op->payload->send_message.send_message,
+ &calld->incoming_slice, ~(size_t)0, &calld->got_slice)) {
memcpy(wrptr, GRPC_SLICE_START_PTR(calld->incoming_slice),
GRPC_SLICE_LENGTH(calld->incoming_slice));
wrptr += GRPC_SLICE_LENGTH(calld->incoming_slice);
@@ -242,10 +242,11 @@ static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
/* Pass down the original send_message op that was blocked.*/
grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
calld->send_flags);
- calld->send_op.send_message = &calld->replacement_stream.base;
- calld->post_send = calld->send_op.on_complete;
- calld->send_op.on_complete = &calld->send_done;
- grpc_call_next_op(exec_ctx, elem, &calld->send_op);
+ calld->send_op->payload->send_message.send_message =
+ &calld->replacement_stream.base;
+ calld->post_send = calld->send_op->on_complete;
+ calld->send_op->on_complete = &calld->send_done;
+ grpc_call_next_op(exec_ctx, elem, calld->send_op);
} else {
continue_send_message(exec_ctx, elem);
}
@@ -253,29 +254,30 @@ static void got_slice(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
static grpc_error *hc_mutate_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
grpc_error *error;
- if (op->send_initial_metadata != NULL) {
+ if (op->send_initial_metadata) {
/* Decide which HTTP VERB to use. We use GET if the request is marked
cacheable, and the operation contains both initial metadata and send
message, and the payload is below the size threshold, and all the data
for this request is immediately available. */
grpc_mdelem method = GRPC_MDELEM_METHOD_POST;
- if ((op->send_initial_metadata_flags &
+ if (op->send_message &&
+ (op->payload->send_initial_metadata.send_initial_metadata_flags &
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) &&
- op->send_message != NULL &&
- op->send_message->length < channeld->max_payload_size_for_get) {
+ op->payload->send_message.send_message->length <
+ channeld->max_payload_size_for_get) {
method = GRPC_MDELEM_METHOD_GET;
/* The following write to calld->send_message_blocked isn't racy with
reads in hc_start_transport_op (which deals with SEND_MESSAGE ops) because
being here means ops->send_message is not NULL, which is primarily
guarding the read there. */
calld->send_message_blocked = true;
- } else if (op->send_initial_metadata_flags &
+ } else if (op->payload->send_initial_metadata.send_initial_metadata_flags &
GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) {
method = GRPC_MDELEM_METHOD_PUT;
}
@@ -283,12 +285,13 @@ static grpc_error *hc_mutate_op(grpc_exec_ctx *exec_ctx,
/* Attempt to read the data from send_message and create a header field. */
if (grpc_mdelem_eq(method, GRPC_MDELEM_METHOD_GET)) {
/* allocate memory to hold the entire payload */
- calld->payload_bytes = gpr_malloc(op->send_message->length);
+ calld->payload_bytes =
+ gpr_malloc(op->payload->send_message.send_message->length);
/* read slices of send_message and copy into payload_bytes */
- calld->send_op = *op;
- calld->send_length = op->send_message->length;
- calld->send_flags = op->send_message->flags;
+ calld->send_op = op;
+ calld->send_length = op->payload->send_message.send_message->length;
+ calld->send_flags = op->payload->send_message.send_message->flags;
continue_send_message(exec_ctx, elem);
if (calld->send_message_blocked == false) {
@@ -299,13 +302,15 @@ static grpc_error *hc_mutate_op(grpc_exec_ctx *exec_ctx,
const unsigned char k_query_separator = '?';
grpc_slice path_slice =
- GRPC_MDVALUE(op->send_initial_metadata->idx.named.path->md);
+ GRPC_MDVALUE(op->payload->send_initial_metadata
+ .send_initial_metadata->idx.named.path->md);
/* sum up individual component's lengths and allocate enough memory to
* hold combined path+query */
size_t estimated_len = GRPC_SLICE_LENGTH(path_slice);
estimated_len++; /* for the '?' */
estimated_len += grpc_base64_estimate_encoded_size(
- op->send_message->length, k_url_safe, k_multi_line);
+ op->payload->send_message.send_message->length, k_url_safe,
+ k_multi_line);
estimated_len += 1; /* for the trailing 0 */
grpc_slice path_with_query_slice = grpc_slice_malloc(estimated_len);
@@ -320,8 +325,8 @@ static grpc_error *hc_mutate_op(grpc_exec_ctx *exec_ctx,
write_ptr++; /* for the '?' */
grpc_base64_encode_core((char *)write_ptr, calld->payload_bytes,
- op->send_message->length, k_url_safe,
- k_multi_line);
+ op->payload->send_message.send_message->length,
+ k_url_safe, k_multi_line);
/* remove trailing unused memory and add trailing 0 to terminate string
*/
@@ -335,14 +340,15 @@ static grpc_error *hc_mutate_op(grpc_exec_ctx *exec_ctx,
/* substitute previous path with the new path+query */
grpc_mdelem mdelem_path_and_query = grpc_mdelem_from_slices(
exec_ctx, GRPC_MDSTR_PATH, path_with_query_slice);
- grpc_metadata_batch *b = op->send_initial_metadata;
+ grpc_metadata_batch *b =
+ op->payload->send_initial_metadata.send_initial_metadata;
error = grpc_metadata_batch_substitute(exec_ctx, b, b->idx.named.path,
mdelem_path_and_query);
if (error != GRPC_ERROR_NONE) return error;
calld->on_complete = op->on_complete;
op->on_complete = &calld->hc_on_complete;
- op->send_message = NULL;
+ op->send_message = false;
grpc_slice_unref_internal(exec_ctx, path_with_query_slice);
} else {
/* Not all data is available. Fall back to POST. */
@@ -353,47 +359,60 @@ static grpc_error *hc_mutate_op(grpc_exec_ctx *exec_ctx,
}
}
- remove_if_present(exec_ctx, op->send_initial_metadata, GRPC_BATCH_METHOD);
- remove_if_present(exec_ctx, op->send_initial_metadata, GRPC_BATCH_SCHEME);
- remove_if_present(exec_ctx, op->send_initial_metadata, GRPC_BATCH_TE);
- remove_if_present(exec_ctx, op->send_initial_metadata,
+ remove_if_present(exec_ctx,
+ op->payload->send_initial_metadata.send_initial_metadata,
+ GRPC_BATCH_METHOD);
+ remove_if_present(exec_ctx,
+ op->payload->send_initial_metadata.send_initial_metadata,
+ GRPC_BATCH_SCHEME);
+ remove_if_present(exec_ctx,
+ op->payload->send_initial_metadata.send_initial_metadata,
+ GRPC_BATCH_TE);
+ remove_if_present(exec_ctx,
+ op->payload->send_initial_metadata.send_initial_metadata,
GRPC_BATCH_CONTENT_TYPE);
- remove_if_present(exec_ctx, op->send_initial_metadata,
+ remove_if_present(exec_ctx,
+ op->payload->send_initial_metadata.send_initial_metadata,
GRPC_BATCH_USER_AGENT);
/* Send : prefixed headers, which have to be before any application
layer headers. */
- error = grpc_metadata_batch_add_head(exec_ctx, op->send_initial_metadata,
- &calld->method, method);
+ error = grpc_metadata_batch_add_head(
+ exec_ctx, op->payload->send_initial_metadata.send_initial_metadata,
+ &calld->method, method);
if (error != GRPC_ERROR_NONE) return error;
- error =
- grpc_metadata_batch_add_head(exec_ctx, op->send_initial_metadata,
- &calld->scheme, channeld->static_scheme);
+ error = grpc_metadata_batch_add_head(
+ exec_ctx, op->payload->send_initial_metadata.send_initial_metadata,
+ &calld->scheme, channeld->static_scheme);
if (error != GRPC_ERROR_NONE) return error;
- error = grpc_metadata_batch_add_tail(exec_ctx, op->send_initial_metadata,
- &calld->te_trailers,
- GRPC_MDELEM_TE_TRAILERS);
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, op->payload->send_initial_metadata.send_initial_metadata,
+ &calld->te_trailers, GRPC_MDELEM_TE_TRAILERS);
if (error != GRPC_ERROR_NONE) return error;
error = grpc_metadata_batch_add_tail(
- exec_ctx, op->send_initial_metadata, &calld->content_type,
- GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
+ exec_ctx, op->payload->send_initial_metadata.send_initial_metadata,
+ &calld->content_type, GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
if (error != GRPC_ERROR_NONE) return error;
- error = grpc_metadata_batch_add_tail(exec_ctx, op->send_initial_metadata,
- &calld->user_agent,
- GRPC_MDELEM_REF(channeld->user_agent));
+ error = grpc_metadata_batch_add_tail(
+ exec_ctx, op->payload->send_initial_metadata.send_initial_metadata,
+ &calld->user_agent, GRPC_MDELEM_REF(channeld->user_agent));
if (error != GRPC_ERROR_NONE) return error;
}
- if (op->recv_initial_metadata != NULL) {
+ if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
- calld->recv_initial_metadata = op->recv_initial_metadata;
- calld->on_done_recv_initial_metadata = op->recv_initial_metadata_ready;
- op->recv_initial_metadata_ready = &calld->hc_on_recv_initial_metadata;
+ calld->recv_initial_metadata =
+ op->payload->recv_initial_metadata.recv_initial_metadata;
+ calld->on_done_recv_initial_metadata =
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready;
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready =
+ &calld->hc_on_recv_initial_metadata;
}
- if (op->recv_trailing_metadata != NULL) {
+ if (op->recv_trailing_metadata) {
/* substitute our callback for the higher callback */
- calld->recv_trailing_metadata = op->recv_trailing_metadata;
+ calld->recv_trailing_metadata =
+ op->payload->recv_trailing_metadata.recv_trailing_metadata;
calld->on_done_recv_trailing_metadata = op->on_complete;
op->on_complete = &calld->hc_on_recv_trailing_metadata;
}
@@ -403,17 +422,17 @@ static grpc_error *hc_mutate_op(grpc_exec_ctx *exec_ctx,
static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
GPR_TIMER_BEGIN("hc_start_transport_op", 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_error *error = hc_mutate_op(exec_ctx, elem, op);
if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
} else {
call_data *calld = elem->call_data;
- if (op->send_message != NULL && calld->send_message_blocked) {
+ if (op->send_message && calld->send_message_blocked) {
/* Don't forward the op. send_message contains slices that aren't ready
- yet. The call will be forwarded by the op_complete of slice read call.
+ yet. The call will be forwarded by the op_complete of slice read call.
*/
} else {
grpc_call_next_op(exec_ctx, elem, op);
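The GET conversion above only fires when a single batch carries send_initial_metadata with the cacheable flag and a send_message shorter than max_payload_size_for_get. A sketch of a qualifying batch under the new layout (caller-side code assumed for illustration, not part of this commit):

/* Sketch: shape a batch that hc_mutate_op would convert to an HTTP GET. */
static void make_cacheable_get_batch(grpc_transport_stream_op_batch *op,
                                     grpc_metadata_batch *md,
                                     grpc_byte_stream *small_payload) {
  op->send_initial_metadata = true;
  op->payload->send_initial_metadata.send_initial_metadata = md;
  op->payload->send_initial_metadata.send_initial_metadata_flags =
      GRPC_INITIAL_METADATA_CACHEABLE_REQUEST;
  op->send_message = true; /* must ride in the same batch as the metadata */
  op->payload->send_message.send_message = small_payload;
}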
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index df0f95010b..c1e49ffacc 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -58,8 +58,7 @@ typedef struct call_data {
bool payload_bin_delivered;
grpc_metadata_batch *recv_initial_metadata;
- bool *recv_idempotent_request;
- bool *recv_cacheable_request;
+ uint32_t *recv_initial_metadata_flags;
/** Closure to call when finished with the hs_on_recv hook */
grpc_closure *on_done_recv;
/** Closure to call when we retrieve read message from the path URI
@@ -116,14 +115,21 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
if (b->idx.named.method != NULL) {
if (grpc_mdelem_eq(b->idx.named.method->md, GRPC_MDELEM_METHOD_POST)) {
- *calld->recv_idempotent_request = false;
- *calld->recv_cacheable_request = false;
+ *calld->recv_initial_metadata_flags &=
+ ~(GRPC_INITIAL_METADATA_CACHEABLE_REQUEST |
+ GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST);
} else if (grpc_mdelem_eq(b->idx.named.method->md,
GRPC_MDELEM_METHOD_PUT)) {
- *calld->recv_idempotent_request = true;
+ *calld->recv_initial_metadata_flags &=
+ ~GRPC_INITIAL_METADATA_CACHEABLE_REQUEST;
+ *calld->recv_initial_metadata_flags |=
+ GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST;
} else if (grpc_mdelem_eq(b->idx.named.method->md,
GRPC_MDELEM_METHOD_GET)) {
- *calld->recv_cacheable_request = true;
+ *calld->recv_initial_metadata_flags |=
+ GRPC_INITIAL_METADATA_CACHEABLE_REQUEST;
+ *calld->recv_initial_metadata_flags &=
+ ~GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST;
} else {
add_error(error_name, &error,
grpc_attach_md_to_error(
@@ -206,7 +212,8 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_error_set_str(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing header"),
GRPC_ERROR_STR_KEY, grpc_slice_from_static_string(":path")));
- } else if (*calld->recv_cacheable_request == true) {
+ } else if (*calld->recv_initial_metadata_flags &
+ GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) {
/* We have a cacheable request made with GET verb. The path contains the
* query parameter which is base64 encoded request payload. */
const char k_query_separator = '?';
@@ -311,45 +318,53 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
}
static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
- if (op->send_initial_metadata != NULL) {
+ if (op->send_initial_metadata) {
grpc_error *error = GRPC_ERROR_NONE;
static const char *error_name = "Failed sending initial metadata";
- add_error(error_name, &error, grpc_metadata_batch_add_head(
- exec_ctx, op->send_initial_metadata,
- &calld->status, GRPC_MDELEM_STATUS_200));
- add_error(error_name, &error,
- grpc_metadata_batch_add_tail(
- exec_ctx, op->send_initial_metadata, &calld->content_type,
- GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC));
+ add_error(
+ error_name, &error,
+ grpc_metadata_batch_add_head(
+ exec_ctx, op->payload->send_initial_metadata.send_initial_metadata,
+ &calld->status, GRPC_MDELEM_STATUS_200));
+ add_error(
+ error_name, &error,
+ grpc_metadata_batch_add_tail(
+ exec_ctx, op->payload->send_initial_metadata.send_initial_metadata,
+ &calld->content_type,
+ GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC));
add_error(error_name, &error,
- server_filter_outgoing_metadata(exec_ctx, elem,
- op->send_initial_metadata));
+ server_filter_outgoing_metadata(
+ exec_ctx, elem,
+ op->payload->send_initial_metadata.send_initial_metadata));
if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
return;
}
}
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
- GPR_ASSERT(op->recv_idempotent_request != NULL);
- GPR_ASSERT(op->recv_cacheable_request != NULL);
- calld->recv_initial_metadata = op->recv_initial_metadata;
- calld->recv_idempotent_request = op->recv_idempotent_request;
- calld->recv_cacheable_request = op->recv_cacheable_request;
- calld->on_done_recv = op->recv_initial_metadata_ready;
- op->recv_initial_metadata_ready = &calld->hs_on_recv;
+ GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags != NULL);
+ calld->recv_initial_metadata =
+ op->payload->recv_initial_metadata.recv_initial_metadata;
+ calld->recv_initial_metadata_flags =
+ op->payload->recv_initial_metadata.recv_flags;
+ calld->on_done_recv =
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready;
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready =
+ &calld->hs_on_recv;
}
if (op->recv_message) {
- calld->recv_message_ready = op->recv_message_ready;
- calld->pp_recv_message = op->recv_message;
- if (op->recv_message_ready) {
- op->recv_message_ready = &calld->hs_recv_message_ready;
+ calld->recv_message_ready = op->payload->recv_message.recv_message_ready;
+ calld->pp_recv_message = op->payload->recv_message.recv_message;
+ if (op->payload->recv_message.recv_message_ready) {
+ op->payload->recv_message.recv_message_ready =
+ &calld->hs_recv_message_ready;
}
if (op->on_complete) {
calld->on_complete = op->on_complete;
@@ -359,9 +374,10 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
if (op->send_trailing_metadata) {
grpc_error *error = server_filter_outgoing_metadata(
- exec_ctx, elem, op->send_trailing_metadata);
+ exec_ctx, elem,
+ op->payload->send_trailing_metadata.send_trailing_metadata);
if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
return;
}
}
@@ -369,7 +385,7 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
GPR_TIMER_BEGIN("hs_start_transport_op", 0);
hs_mutate_op(exec_ctx, elem, op);
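Folding the two bools (recv_idempotent_request, recv_cacheable_request) into a single uint32_t of GRPC_INITIAL_METADATA_* bits means downstream consumers now test bits rather than booleans. A small sketch (the helper name is illustrative):

static bool is_cacheable_request(uint32_t recv_initial_metadata_flags) {
  return (recv_initial_metadata_flags &
          GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) != 0;
}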
diff --git a/src/core/lib/channel/max_age_filter.c b/src/core/lib/channel/max_age_filter.c
deleted file mode 100644
index c25481486c..0000000000
--- a/src/core/lib/channel/max_age_filter.c
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- *
- * Copyright 2017, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/lib/channel/message_size_filter.h"
-
-#include <limits.h>
-#include <string.h>
-
-#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/transport/http2_errors.h"
-#include "src/core/lib/transport/service_config.h"
-
-#define DEFAULT_MAX_CONNECTION_AGE_MS INT_MAX
-#define DEFAULT_MAX_CONNECTION_AGE_GRACE_MS INT_MAX
-#define DEFAULT_MAX_CONNECTION_IDLE_MS INT_MAX
-
-typedef struct channel_data {
- /* We take a reference to the channel stack for the timer callback */
- grpc_channel_stack* channel_stack;
- /* Guards access to max_age_timer, max_age_timer_pending, max_age_grace_timer
- and max_age_grace_timer_pending */
- gpr_mu max_age_timer_mu;
- /* True if the max_age timer callback is currently pending */
- bool max_age_timer_pending;
- /* True if the max_age_grace timer callback is currently pending */
- bool max_age_grace_timer_pending;
- /* The timer for checking if the channel has reached its max age */
- grpc_timer max_age_timer;
- /* The timer for checking if the max-aged channel has used up the grace
- period */
- grpc_timer max_age_grace_timer;
- /* The timer for checking if the channel's idle duration reaches
- max_connection_idle */
- grpc_timer max_idle_timer;
- /* Allowed max time a channel may have no outstanding rpcs */
- gpr_timespec max_connection_idle;
- /* Allowed max time a channel may exist */
- gpr_timespec max_connection_age;
- /* Allowed grace period after the channel reaches its max age */
- gpr_timespec max_connection_age_grace;
- /* Closure to run when the channel's idle duration reaches max_connection_idle
- and the channel should be closed gracefully */
- grpc_closure close_max_idle_channel;
- /* Closure to run when the channel reaches its max age and should be closed
- gracefully */
- grpc_closure close_max_age_channel;
- /* Closure to run when the channel uses up its max age grace time and should be
- closed forcibly */
- grpc_closure force_close_max_age_channel;
- /* Closure to run when the init of the channel stack is done and the max_idle
- timer should be started */
- grpc_closure start_max_idle_timer_after_init;
- /* Closure to run when the init of the channel stack is done and the max_age timer
- should be started */
- grpc_closure start_max_age_timer_after_init;
- /* Closure to run when the goaway op is finished and the max_age_grace_timer should be started */
- grpc_closure start_max_age_grace_timer_after_goaway_op;
- /* Closure to run when the channel connectivity state changes */
- grpc_closure channel_connectivity_changed;
- /* Records the current connectivity state */
- grpc_connectivity_state connectivity_state;
- /* Number of active calls */
- gpr_atm call_count;
-} channel_data;
-
-/* Increase the number of active calls. Before the increment, if there are no
- calls, the max_idle_timer should be cancelled. */
-static void increase_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
- if (gpr_atm_full_fetch_add(&chand->call_count, 1) == 0) {
- grpc_timer_cancel(exec_ctx, &chand->max_idle_timer);
- }
-}
-
-/* Decrease the number of active calls. After the decrement, if there are no
- calls, the max_idle_timer should be started. */
-static void decrease_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
- if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) {
- GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_idle_timer");
- grpc_timer_init(
- exec_ctx, &chand->max_idle_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_idle),
- &chand->close_max_idle_channel, gpr_now(GPR_CLOCK_MONOTONIC));
- }
-}
-
-static void start_max_idle_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
- channel_data* chand = arg;
- /* Decrease call_count. If there are no active calls at this time,
- max_idle_timer will start here. If the number of active calls is not 0,
- max_idle_timer will start after all the active calls end. */
- decrease_call_count(exec_ctx, chand);
- GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
- "max_age start_max_idle_timer_after_init");
-}
-
-static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
- channel_data* chand = arg;
- gpr_mu_lock(&chand->max_age_timer_mu);
- chand->max_age_timer_pending = true;
- GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer");
- grpc_timer_init(
- exec_ctx, &chand->max_age_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_age),
- &chand->close_max_age_channel, gpr_now(GPR_CLOCK_MONOTONIC));
- gpr_mu_unlock(&chand->max_age_timer_mu);
- grpc_transport_op* op = grpc_make_transport_op(NULL);
- op->on_connectivity_state_change = &chand->channel_connectivity_changed,
- op->connectivity_state = &chand->connectivity_state;
- grpc_channel_next_op(exec_ctx,
- grpc_channel_stack_element(chand->channel_stack, 0), op);
- GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
- "max_age start_max_age_timer_after_init");
-}
-
-static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx,
- void* arg,
- grpc_error* error) {
- channel_data* chand = arg;
- gpr_mu_lock(&chand->max_age_timer_mu);
- chand->max_age_grace_timer_pending = true;
- GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
- grpc_timer_init(exec_ctx, &chand->max_age_grace_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- chand->max_connection_age_grace),
- &chand->force_close_max_age_channel,
- gpr_now(GPR_CLOCK_MONOTONIC));
- gpr_mu_unlock(&chand->max_age_timer_mu);
- GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
- "max_age start_max_age_grace_timer_after_goaway_op");
-}
-
-static void close_max_idle_channel(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
- channel_data* chand = arg;
- gpr_atm_no_barrier_fetch_add(&chand->call_count, 1);
- if (error == GRPC_ERROR_NONE) {
- grpc_transport_op* op = grpc_make_transport_op(NULL);
- op->goaway_error =
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("max_idle"),
- GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_NO_ERROR);
- grpc_channel_element* elem =
- grpc_channel_stack_element(chand->channel_stack, 0);
- elem->filter->start_transport_op(exec_ctx, elem, op);
- } else if (error != GRPC_ERROR_CANCELLED) {
- GRPC_LOG_IF_ERROR("close_max_idle_channel", error);
- }
- GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
- "max_age max_idle_timer");
-}
-
-static void close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
- channel_data* chand = arg;
- gpr_mu_lock(&chand->max_age_timer_mu);
- chand->max_age_timer_pending = false;
- gpr_mu_unlock(&chand->max_age_timer_mu);
- if (error == GRPC_ERROR_NONE) {
- GRPC_CHANNEL_STACK_REF(chand->channel_stack,
- "max_age start_max_age_grace_timer_after_goaway_op");
- grpc_transport_op* op = grpc_make_transport_op(
- &chand->start_max_age_grace_timer_after_goaway_op);
- op->goaway_error =
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("max_age"),
- GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_NO_ERROR);
- grpc_channel_element* elem =
- grpc_channel_stack_element(chand->channel_stack, 0);
- elem->filter->start_transport_op(exec_ctx, elem, op);
- } else if (error != GRPC_ERROR_CANCELLED) {
- GRPC_LOG_IF_ERROR("close_max_age_channel", error);
- }
- GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
- "max_age max_age_timer");
-}
-
-static void force_close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
- channel_data* chand = arg;
- gpr_mu_lock(&chand->max_age_timer_mu);
- chand->max_age_grace_timer_pending = false;
- gpr_mu_unlock(&chand->max_age_timer_mu);
- if (error == GRPC_ERROR_NONE) {
- grpc_transport_op* op = grpc_make_transport_op(NULL);
- op->disconnect_with_error =
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel reaches max age");
- grpc_channel_element* elem =
- grpc_channel_stack_element(chand->channel_stack, 0);
- elem->filter->start_transport_op(exec_ctx, elem, op);
- } else if (error != GRPC_ERROR_CANCELLED) {
- GRPC_LOG_IF_ERROR("force_close_max_age_channel", error);
- }
- GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
- "max_age max_age_grace_timer");
-}
-
-static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
- channel_data* chand = arg;
- if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
- grpc_transport_op* op = grpc_make_transport_op(NULL);
- op->on_connectivity_state_change = &chand->channel_connectivity_changed,
- op->connectivity_state = &chand->connectivity_state;
- grpc_channel_next_op(
- exec_ctx, grpc_channel_stack_element(chand->channel_stack, 0), op);
- } else {
- gpr_mu_lock(&chand->max_age_timer_mu);
- if (chand->max_age_timer_pending) {
- grpc_timer_cancel(exec_ctx, &chand->max_age_timer);
- chand->max_age_timer_pending = false;
- }
- if (chand->max_age_grace_timer_pending) {
- grpc_timer_cancel(exec_ctx, &chand->max_age_grace_timer);
- chand->max_age_grace_timer_pending = false;
- }
- gpr_mu_unlock(&chand->max_age_timer_mu);
- /* If there are no active calls, this increment will cancel
- max_idle_timer, and prevent max_idle_timer from being started in the
- future. */
- increase_call_count(exec_ctx, chand);
- }
-}
-
-/* Constructor for call_data. */
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
- const grpc_call_element_args* args) {
- channel_data* chand = elem->channel_data;
- increase_call_count(exec_ctx, chand);
- return GRPC_ERROR_NONE;
-}
-
-/* Destructor for call_data. */
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- const grpc_call_final_info* final_info,
- grpc_closure* ignored) {
- channel_data* chand = elem->channel_data;
- decrease_call_count(exec_ctx, chand);
-}
-
-/* Constructor for channel_data. */
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
- grpc_channel_element* elem,
- grpc_channel_element_args* args) {
- channel_data* chand = elem->channel_data;
- gpr_mu_init(&chand->max_age_timer_mu);
- chand->max_age_timer_pending = false;
- chand->max_age_grace_timer_pending = false;
- chand->channel_stack = args->channel_stack;
- chand->max_connection_age =
- DEFAULT_MAX_CONNECTION_AGE_MS == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_AGE_MS, GPR_TIMESPAN);
- chand->max_connection_age_grace =
- DEFAULT_MAX_CONNECTION_AGE_GRACE_MS == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_AGE_GRACE_MS,
- GPR_TIMESPAN);
- chand->max_connection_idle =
- DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_IDLE_MS, GPR_TIMESPAN);
- for (size_t i = 0; i < args->channel_args->num_args; ++i) {
- if (0 == strcmp(args->channel_args->args[i].key,
- GRPC_ARG_MAX_CONNECTION_AGE_MS)) {
- const int value = grpc_channel_arg_get_integer(
- &args->channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_CONNECTION_AGE_MS, 1, INT_MAX});
- chand->max_connection_age =
- value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
- } else if (0 == strcmp(args->channel_args->args[i].key,
- GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS)) {
- const int value = grpc_channel_arg_get_integer(
- &args->channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0,
- INT_MAX});
- chand->max_connection_age_grace =
- value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
- } else if (0 == strcmp(args->channel_args->args[i].key,
- GRPC_ARG_MAX_CONNECTION_IDLE_MS)) {
- const int value = grpc_channel_arg_get_integer(
- &args->channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX});
- chand->max_connection_idle =
- value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
- }
- }
- grpc_closure_init(&chand->close_max_idle_channel, close_max_idle_channel,
- chand, grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->close_max_age_channel, close_max_age_channel, chand,
- grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->force_close_max_age_channel,
- force_close_max_age_channel, chand,
- grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->start_max_idle_timer_after_init,
- start_max_idle_timer_after_init, chand,
- grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->start_max_age_timer_after_init,
- start_max_age_timer_after_init, chand,
- grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->start_max_age_grace_timer_after_goaway_op,
- start_max_age_grace_timer_after_goaway_op, chand,
- grpc_schedule_on_exec_ctx);
- grpc_closure_init(&chand->channel_connectivity_changed,
- channel_connectivity_changed, chand,
- grpc_schedule_on_exec_ctx);
-
- if (gpr_time_cmp(chand->max_connection_age, gpr_inf_future(GPR_TIMESPAN)) !=
- 0) {
- /* When the channel reaches its max age, we send down an op with
- goaway_error set. However, we can't send down any ops until after the
- channel stack is fully initialized. If we start the timer here, we have
- no guarantee that the timer won't pop before channel stack initialization
- is finished. To avoid that problem, we create a closure to start the
- timer, and we schedule that closure to be run after call stack
- initialization is done. */
- GRPC_CHANNEL_STACK_REF(chand->channel_stack,
- "max_age start_max_age_timer_after_init");
- grpc_closure_sched(exec_ctx, &chand->start_max_age_timer_after_init,
- GRPC_ERROR_NONE);
- }
-
- /* Initialize the number of calls as 1, so that the max_idle_timer will not
- start until start_max_idle_timer_after_init is invoked. */
- gpr_atm_rel_store(&chand->call_count, 1);
- if (gpr_time_cmp(chand->max_connection_idle, gpr_inf_future(GPR_TIMESPAN)) !=
- 0) {
- GRPC_CHANNEL_STACK_REF(chand->channel_stack,
- "max_age start_max_idle_timer_after_init");
- grpc_closure_sched(exec_ctx, &chand->start_max_idle_timer_after_init,
- GRPC_ERROR_NONE);
- }
- return GRPC_ERROR_NONE;
-}
-
-/* Destructor for channel_data. */
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
- grpc_channel_element* elem) {}
-
-const grpc_channel_filter grpc_max_age_filter = {
- grpc_call_next_op,
- grpc_channel_next_op,
- 0, /* sizeof_call_data */
- init_call_elem,
- grpc_call_stack_ignore_set_pollset_or_pollset_set,
- destroy_call_elem,
- sizeof(channel_data),
- init_channel_elem,
- destroy_channel_elem,
- grpc_call_next_get_peer,
- grpc_channel_next_get_info,
- "max_age"};
diff --git a/src/core/lib/channel/max_age_filter.h b/src/core/lib/channel/max_age_filter.h
deleted file mode 100644
index 93e357a88e..0000000000
--- a/src/core/lib/channel/max_age_filter.h
+++ /dev/null
@@ -1,39 +0,0 @@
-//
-// Copyright 2017, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#ifndef GRPC_CORE_LIB_CHANNEL_MAX_AGE_FILTER_H
-#define GRPC_CORE_LIB_CHANNEL_MAX_AGE_FILTER_H
-
-#include "src/core/lib/channel/channel_stack.h"
-
-extern const grpc_channel_filter grpc_max_age_filter;
-
-#endif /* GRPC_CORE_LIB_CHANNEL_MAX_AGE_FILTER_H */
diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c
index 63136650a5..57726c8476 100644
--- a/src/core/lib/channel/message_size_filter.c
+++ b/src/core/lib/channel/message_size_filter.c
@@ -132,21 +132,23 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
gpr_free(message_string);
}
// Invoke the next callback.
- grpc_closure_sched(exec_ctx, calld->next_recv_message_ready, error);
+ grpc_closure_run(exec_ctx, calld->next_recv_message_ready, error);
}
// Start transport stream op.
-static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
- grpc_transport_stream_op* op) {
+static void start_transport_stream_op_batch(
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
call_data* calld = elem->call_data;
// Check max send message size.
- if (op->send_message != NULL && calld->max_send_size >= 0 &&
- op->send_message->length > (size_t)calld->max_send_size) {
+ if (op->send_message && calld->max_send_size >= 0 &&
+ op->payload->send_message.send_message->length >
+ (size_t)calld->max_send_size) {
char* message_string;
gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
- op->send_message->length, calld->max_send_size);
- grpc_transport_stream_op_finish_with_failure(
+ op->payload->send_message.send_message->length,
+ calld->max_send_size);
+ grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
GRPC_ERROR_INT_GRPC_STATUS,
@@ -155,10 +157,11 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
return;
}
// Inject callback for receiving a message.
- if (op->recv_message_ready != NULL) {
- calld->next_recv_message_ready = op->recv_message_ready;
- calld->recv_message = op->recv_message;
- op->recv_message_ready = &calld->recv_message_ready;
+ if (op->recv_message) {
+ calld->next_recv_message_ready =
+ op->payload->recv_message.recv_message_ready;
+ calld->recv_message = op->payload->recv_message.recv_message;
+ op->payload->recv_message.recv_message_ready = &calld->recv_message_ready;
}
// Chain to the next filter.
grpc_call_next_op(exec_ctx, elem, op);
@@ -253,7 +256,7 @@ static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
}
const grpc_channel_filter grpc_message_size_filter = {
- start_transport_stream_op,
+ start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index fc338342e4..9eab1360a4 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -47,7 +47,7 @@
typedef struct {
grpc_channel_security_connector base;
- tsi_ssl_handshaker_factory *handshaker_factory;
+ tsi_ssl_client_handshaker_factory *handshaker_factory;
char *secure_peer_name;
} grpc_httpcli_ssl_channel_security_connector;
@@ -56,7 +56,7 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx,
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
if (c->handshaker_factory != NULL) {
- tsi_ssl_handshaker_factory_destroy(c->handshaker_factory);
+ tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
}
if (c->secure_peer_name != NULL) gpr_free(c->secure_peer_name);
gpr_free(sc);
@@ -69,7 +69,7 @@ static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx,
(grpc_httpcli_ssl_channel_security_connector *)sc;
tsi_handshaker *handshaker = NULL;
if (c->handshaker_factory != NULL) {
- tsi_result result = tsi_ssl_handshaker_factory_create_handshaker(
+ tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
c->handshaker_factory, c->secure_peer_name, &handshaker);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
diff --git a/src/core/lib/iomgr/endpoint_pair.h b/src/core/lib/iomgr/endpoint_pair.h
index f9de0c715e..6407a6ad3f 100644
--- a/src/core/lib/iomgr/endpoint_pair.h
+++ b/src/core/lib/iomgr/endpoint_pair.h
@@ -41,8 +41,7 @@ typedef struct {
grpc_endpoint *server;
} grpc_endpoint_pair;
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
- const char *name, grpc_resource_quota *resource_quota,
- size_t read_slice_size);
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
+ grpc_channel_args *args);
#endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H */
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.c b/src/core/lib/iomgr/endpoint_pair_posix.c
index b9ff969e81..5542a372d8 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.c
+++ b/src/core/lib/iomgr/endpoint_pair_posix.c
@@ -62,22 +62,25 @@ static void create_sockets(int sv[2]) {
GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]) == GRPC_ERROR_NONE);
}
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
- const char *name, grpc_resource_quota *resource_quota,
- size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
+ grpc_channel_args *args) {
int sv[2];
grpc_endpoint_pair p;
char *final_name;
create_sockets(sv);
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
gpr_asprintf(&final_name, "%s:client", name);
- p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), resource_quota,
- read_slice_size, "socketpair-server");
+  p.client = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[1], final_name), args,
+                             "socketpair-client");
gpr_free(final_name);
gpr_asprintf(&final_name, "%s:server", name);
- p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), resource_quota,
- read_slice_size, "socketpair-client");
+  p.server = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[0], final_name), args,
+                             "socketpair-server");
gpr_free(final_name);
+
+ grpc_exec_ctx_finish(&exec_ctx);
return p;
}
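With the resource quota and read-chunk sizing now carried by channel args, the caller's side of the new signature looks like this (a minimal sketch for a test setting; NULL args are assumed acceptable, since grpc_tcp_create falls back to a freshly created resource quota and its default chunk sizes when no args are supplied, per the tcp_posix.c hunk below):

grpc_endpoint_pair pair = grpc_iomgr_create_endpoint_pair("fixture", NULL);
/* pair.client and pair.server are connected TCP endpoints over a local
   socketpair; ownership of the underlying fds rests with the endpoints. */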
diff --git a/src/core/lib/iomgr/endpoint_pair_uv.c b/src/core/lib/iomgr/endpoint_pair_uv.c
index ff24894c6d..9718eb0523 100644
--- a/src/core/lib/iomgr/endpoint_pair_uv.c
+++ b/src/core/lib/iomgr/endpoint_pair_uv.c
@@ -41,9 +41,8 @@
#include "src/core/lib/iomgr/endpoint_pair.h"
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
- const char *name, grpc_resource_quota *resource_quota,
- size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
+ grpc_channel_args *args) {
grpc_endpoint_pair endpoint_pair;
// TODO(mlumish): implement this properly under libuv
GPR_ASSERT(false &&
diff --git a/src/core/lib/iomgr/endpoint_pair_windows.c b/src/core/lib/iomgr/endpoint_pair_windows.c
index 93f71b745c..25d6264dfb 100644
--- a/src/core/lib/iomgr/endpoint_pair_windows.c
+++ b/src/core/lib/iomgr/endpoint_pair_windows.c
@@ -83,15 +83,18 @@ static void create_sockets(SOCKET sv[2]) {
}
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
- const char *name, grpc_resource_quota *resource_quota,
- size_t read_slice_size) {
+ const char *name, grpc_channel_args *channel_args) {
SOCKET sv[2];
grpc_endpoint_pair p;
create_sockets(sv);
- p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"),
- resource_quota, "endpoint:server");
- p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"),
- resource_quota, "endpoint:client");
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  p.client = grpc_tcp_create(&exec_ctx,
+                             grpc_winsocket_create(sv[1], "endpoint:client"),
+                             channel_args, "endpoint:client");
+  p.server = grpc_tcp_create(&exec_ctx,
+                             grpc_winsocket_create(sv[0], "endpoint:server"),
+                             channel_args, "endpoint:server");
+ grpc_exec_ctx_finish(&exec_ctx);
return p;
}
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 1dbb64e8f3..fbbca6b493 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -212,7 +212,11 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
GPR_ASSERT(*err);
uint8_t slots = (uint8_t)(size / sizeof(intptr_t));
if ((*err)->arena_size + slots > (*err)->arena_capacity) {
- (*err)->arena_capacity = (uint8_t)(3 * (*err)->arena_capacity / 2);
+ (*err)->arena_capacity =
+ (uint8_t)GPR_MIN(UINT8_MAX - 1, (3 * (*err)->arena_capacity / 2));
+ if ((*err)->arena_size + slots > (*err)->arena_capacity) {
+ return UINT8_MAX;
+ }
*err = gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
}
@@ -223,10 +227,14 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
static void internal_set_int(grpc_error **err, grpc_error_ints which,
intptr_t value) {
- // GPR_ASSERT((*err)->ints[which] == UINT8_MAX); // TODO, enforce this
uint8_t slot = (*err)->ints[which];
if (slot == UINT8_MAX) {
slot = get_placement(err, sizeof(value));
+ if (slot == UINT8_MAX) {
+ gpr_log(GPR_ERROR, "Error %p is full, dropping int {\"%s\":%" PRIiPTR "}",
+ *err, error_int_name(which), value);
+ return;
+ }
}
(*err)->ints[which] = slot;
(*err)->arena[slot] = value;
@@ -234,10 +242,16 @@ static void internal_set_int(grpc_error **err, grpc_error_ints which,
static void internal_set_str(grpc_error **err, grpc_error_strs which,
grpc_slice value) {
- // GPR_ASSERT((*err)->strs[which] == UINT8_MAX); // TODO, enforce this
uint8_t slot = (*err)->strs[which];
if (slot == UINT8_MAX) {
slot = get_placement(err, sizeof(value));
+ if (slot == UINT8_MAX) {
+ const char *str = grpc_slice_to_c_string(value);
+ gpr_log(GPR_ERROR, "Error %p is full, dropping string {\"%s\":\"%s\"}",
+ *err, error_str_name(which), str);
+ gpr_free((void *)str);
+ return;
+ }
} else {
unref_slice(*(grpc_slice *)((*err)->arena + slot));
}
@@ -245,12 +259,19 @@ static void internal_set_str(grpc_error **err, grpc_error_strs which,
memcpy((*err)->arena + slot, &value, sizeof(value));
}
+static char *fmt_time(gpr_timespec tm);
static void internal_set_time(grpc_error **err, grpc_error_times which,
gpr_timespec value) {
- // GPR_ASSERT((*err)->times[which] == UINT8_MAX); // TODO, enforce this
uint8_t slot = (*err)->times[which];
if (slot == UINT8_MAX) {
slot = get_placement(err, sizeof(value));
+ if (slot == UINT8_MAX) {
+ const char *time_str = fmt_time(value);
+      gpr_log(GPR_ERROR, "Error %p is full, dropping {\"%s\":\"%s\"}", *err,
+ error_time_name(which), time_str);
+ gpr_free((void *)time_str);
+ return;
+ }
}
(*err)->times[which] = slot;
memcpy((*err)->arena + slot, &value, sizeof(value));
@@ -259,6 +280,12 @@ static void internal_set_time(grpc_error **err, grpc_error_times which,
static void internal_add_error(grpc_error **err, grpc_error *new) {
grpc_linked_error new_last = {new, UINT8_MAX};
uint8_t slot = get_placement(err, sizeof(grpc_linked_error));
+ if (slot == UINT8_MAX) {
+ gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err, new,
+ grpc_error_string(new));
+ GRPC_ERROR_UNREF(new);
+ return;
+ }
if ((*err)->first_err == UINT8_MAX) {
GPR_ASSERT((*err)->last_err == UINT8_MAX);
(*err)->last_err = slot;
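Since the arena index type is uint8_t, UINT8_MAX (255) does double duty as the "no slot" sentinel, which is why the capacity is clamped to UINT8_MAX - 1. A sketch of the growth rule (an illustrative helper, not library code):

/* Capacity grows by 3/2 per reallocation but saturates at 254,
   reserving 255 to mean "arena full". */
static uint8_t next_capacity(uint8_t capacity) {
  int grown = 3 * capacity / 2;
  return (uint8_t)(grown > UINT8_MAX - 1 ? UINT8_MAX - 1 : grown);
}
/* e.g. 8 -> 12 -> 18 -> 27 -> ... -> 254; once a requested slot no longer
   fits, get_placement returns UINT8_MAX and the setters above log and drop
   the attribute instead of corrupting the arena. */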
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index f6372c0f3f..4a0f91391f 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -56,6 +56,8 @@
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/lockfree_event.h"
+#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
@@ -140,52 +142,11 @@ struct grpc_fd {
Ref/Unref by two to avoid altering the orphaned bit */
gpr_atm refst;
- /* Internally stores data of type (grpc_error *). If the FD is shutdown, this
- contains reason for shutdown (i.e a pointer to grpc_error) ORed with
- FD_SHUTDOWN_BIT. Since address allocations are word-aligned, the lower bit
- of (grpc_error *) addresses is guaranteed to be zero. Even if the
- (grpc_error *), is of special types like GRPC_ERROR_NONE, GRPC_ERROR_OOM
- etc, the lower bit is guaranteed to be zero.
-
- Once an fd is shutdown, any pending or future read/write closures on the
- fd should fail */
- gpr_atm shutdown_error;
-
/* The fd is either closed or we relinquished control of it. In either
cases, this indicates that the 'fd' on this structure is no longer
valid */
bool orphaned;
- /* Closures to call when the fd is readable or writable respectively. These
- fields contain one of the following values:
- CLOSURE_READY : The fd has an I/O event of interest but there is no
- closure yet to execute
-
- CLOSURE_NOT_READY : The fd has no I/O event of interest
-
- closure ptr : The closure to be executed when the fd has an I/O
- event of interest
-
- shutdown_error | FD_SHUTDOWN_BIT :
- 'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
- This indicates that the fd is shutdown. Since all
- memory allocations are word-aligned, the lower two
- bits of the shutdown_error pointer are always 0. So
- it is safe to OR these with FD_SHUTDOWN_BIT
-
- Valid state transitions:
-
- <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
- | | ^ | ^ | |
- | | | | | | |
- | +--------------4----------+ 6 +---------2---------------+ |
- | | |
- | v |
- +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+
-
- For 1, 4 : See set_ready() function
- For 2, 3 : See notify_on() function
- For 5,6,7: See set_shutdown() function */
gpr_atm read_closure;
gpr_atm write_closure;
@@ -217,11 +178,6 @@ static void fd_unref(grpc_fd *fd);
static void fd_global_init(void);
static void fd_global_shutdown(void);
-#define CLOSURE_NOT_READY ((gpr_atm)0)
-#define CLOSURE_READY ((gpr_atm)2)
-
-#define FD_SHUTDOWN_BIT 1
-
/*******************************************************************************
* Polling island Declarations
*/
@@ -948,10 +904,8 @@ static void unref_by(grpc_fd *fd, int n) {
fd_freelist = fd;
grpc_iomgr_unregister_object(&fd->iomgr_object);
- grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
- /* Clear the least significant bit if it set (in case fd was shutdown) */
- err = (grpc_error *)((intptr_t)err & ~FD_SHUTDOWN_BIT);
- GRPC_ERROR_UNREF(err);
+ grpc_lfev_destroy(&fd->read_closure);
+ grpc_lfev_destroy(&fd->write_closure);
gpr_mu_unlock(&fd_freelist_mu);
} else {
@@ -1015,10 +969,9 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
new_fd->fd = fd;
- gpr_atm_no_barrier_store(&new_fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE);
new_fd->orphaned = false;
- gpr_atm_no_barrier_store(&new_fd->read_closure, CLOSURE_NOT_READY);
- gpr_atm_no_barrier_store(&new_fd->write_closure, CLOSURE_NOT_READY);
+ grpc_lfev_init(&new_fd->read_closure);
+ grpc_lfev_init(&new_fd->write_closure);
gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
new_fd->freelist_next = NULL;
@@ -1104,153 +1057,6 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
GRPC_ERROR_UNREF(error);
}
-static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
- grpc_closure *closure) {
- while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
- switch (curr) {
- case CLOSURE_NOT_READY: {
- /* CLOSURE_NOT_READY -> <closure>.
-
- We're guaranteed by API that there's an acquire barrier before here,
- so there's no need to double-dip and this can be a release-only.
-
- The release itself pairs with the acquire half of a set_ready full
- barrier. */
- if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
- return; /* Successful. Return */
- }
-
- break; /* retry */
- }
-
- case CLOSURE_READY: {
- /* Change the state to CLOSURE_NOT_READY. Schedule the closure if
- successful. If not, the state most likely transitioned to shutdown.
- We should retry.
-
- This can be a no-barrier cas since the state is being transitioned to
- CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
- closure when transitioning out of CLOSURE_NO_READY state (i.e there
- is no other code that needs to 'happen-after' this) */
- if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
- return; /* Successful. Return */
- }
-
- break; /* retry */
- }
-
- default: {
- /* 'curr' is either a closure or the fd is shutdown(in which case 'curr'
- contains a pointer to the shutdown-error). If the fd is shutdown,
- schedule the closure with the shutdown error */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
- grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
- grpc_closure_sched(exec_ctx, closure,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "FD Shutdown", &shutdown_err, 1));
- return;
- }
-
- /* There is already a closure!. This indicates a bug in the code */
- gpr_log(GPR_ERROR,
- "notify_on called with a previous callback still pending");
- abort();
- }
- }
- }
-
- GPR_UNREACHABLE_CODE(return );
-}
-
-static void set_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
- grpc_error *shutdown_err) {
- gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
-
- while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
- switch (curr) {
- case CLOSURE_READY:
- case CLOSURE_NOT_READY:
- /* Need a full barrier here so that the initial load in notify_on
- doesn't need a barrier */
- if (gpr_atm_full_cas(state, curr, new_state)) {
- return; /* early out */
- }
- break; /* retry */
-
- default: {
- /* 'curr' is either a closure or the fd is already shutdown */
-
- /* If fd is already shutdown, we are done */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
- return;
- }
-
- /* Fd is not shutdown. Schedule the closure and move the state to
- shutdown state.
- Needs an acquire to pair with setting the closure (and get a
- happens-after on that edge), and a release to pair with anything
- loading the shutdown state. */
- if (gpr_atm_full_cas(state, curr, new_state)) {
- grpc_closure_sched(exec_ctx, (grpc_closure *)curr,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "FD Shutdown", &shutdown_err, 1));
- return;
- }
-
- /* 'curr' was a closure but now changed to a different state. We will
- have to retry */
- break;
- }
- }
- }
-
- GPR_UNREACHABLE_CODE(return );
-}
-
-static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state) {
- while (true) {
- gpr_atm curr = gpr_atm_no_barrier_load(state);
-
- switch (curr) {
- case CLOSURE_READY: {
- /* Already ready. We are done here */
- return;
- }
-
- case CLOSURE_NOT_READY: {
- /* No barrier required as we're transitioning to a state that does not
- involve a closure */
- if (gpr_atm_no_barrier_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
- return; /* early out */
- }
- break; /* retry */
- }
-
- default: {
- /* 'curr' is either a closure or the fd is shutdown */
- if ((curr & FD_SHUTDOWN_BIT) > 0) {
- /* The fd is shutdown. Do nothing */
- return;
- }
- /* Full cas: acquire pairs with this cas' release in the event of a
- spurious set_ready; release pairs with this or the acquire in
- notify_on (or set_shutdown) */
- else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) {
- grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
- return;
- }
- /* else the state changed again (only possible by either a racing
- set_ready or set_shutdown functions. In both these cases, the closure
- would have been scheduled for execution. So we are done here */
- return;
- }
- }
- }
-}
-
static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
grpc_fd *fd) {
gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
@@ -1258,33 +1064,27 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
}
static bool fd_is_shutdown(grpc_fd *fd) {
- grpc_error *err = (grpc_error *)gpr_atm_acq_load(&fd->shutdown_error);
- return (((intptr_t)err & FD_SHUTDOWN_BIT) > 0);
+ return grpc_lfev_is_shutdown(&fd->read_closure);
}
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- /* Store the shutdown error ORed with FD_SHUTDOWN_BIT in fd->shutdown_error */
- if (gpr_atm_rel_cas(&fd->shutdown_error, (gpr_atm)GRPC_ERROR_NONE,
- (gpr_atm)why | FD_SHUTDOWN_BIT)) {
+ if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
+ GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
-
- set_shutdown(exec_ctx, fd, &fd->read_closure, why);
- set_shutdown(exec_ctx, fd, &fd->write_closure, why);
- } else {
- /* Shutdown already called */
- GRPC_ERROR_UNREF(why);
+ grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
}
+ GRPC_ERROR_UNREF(why);
}
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- notify_on(exec_ctx, fd, &fd->read_closure, closure);
+ grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
- notify_on(exec_ctx, fd, &fd->write_closure, closure);
+ grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
@@ -1332,7 +1132,7 @@ static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
if (gpr_atm_no_barrier_cas(&worker->is_kicked, (gpr_atm)0, (gpr_atm)1)) {
GRPC_POLLING_TRACE(
"pollset_worker_kick: Kicking worker: %p (thread id: %ld)",
- (void *)worker, worker->pt_id);
+ (void *)worker, (long int)worker->pt_id);
int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal);
if (err_num != 0) {
err = GRPC_OS_ERROR(err_num, "pthread_kill");
@@ -1467,13 +1267,14 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
return 0;
}
timeout = gpr_time_sub(deadline, now);
- return gpr_time_to_millis(gpr_time_add(
+ int millis = gpr_time_to_millis(gpr_time_add(
timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
+ return millis >= 1 ? millis : 1;
}
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
- set_ready(exec_ctx, fd, &fd->read_closure);
+ grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@@ -1485,7 +1286,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- set_ready(exec_ctx, fd, &fd->write_closure);
+ grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
@@ -1650,6 +1451,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
for (int i = 0; i < ep_rv; ++i) {
void *data_ptr = ep_ev[i].data.ptr;
if (data_ptr == &global_wakeup_fd) {
+ grpc_timer_consume_kick();
append_error(error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
} else if (data_ptr == &pi->workqueue_wakeup_fd) {
@@ -1714,7 +1516,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
worker.pt_id = pthread_self();
gpr_atm_no_barrier_store(&worker.is_kicked, (gpr_atm)0);
- *worker_hdl = &worker;
+ if (worker_hdl) *worker_hdl = &worker;
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
@@ -1792,7 +1594,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_mu_lock(&pollset->po.mu);
}
- *worker_hdl = NULL;
+ if (worker_hdl) *worker_hdl = NULL;
gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
gpr_tls_set(&g_current_thread_worker, (intptr_t)0);
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index ca6e855611..9834cdd197 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -52,6 +52,7 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
@@ -870,7 +871,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
gpr_timespec now, gpr_timespec deadline) {
grpc_pollset_worker worker;
- *worker_hdl = &worker;
+ if (worker_hdl) *worker_hdl = &worker;
grpc_error *error = GRPC_ERROR_NONE;
/* Avoid malloc for small number of elements. */
@@ -1006,6 +1007,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
+ grpc_timer_consume_kick();
work_combine_error(&error,
grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd));
}
@@ -1090,7 +1092,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_mu_lock(&pollset->mu);
}
}
- *worker_hdl = NULL;
+ if (worker_hdl) *worker_hdl = NULL;
GPR_TIMER_END("pollset_work", 0);
GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
return error;
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index b5be5504b9..13409a4de8 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -111,6 +111,12 @@ static void try_engine(const char *engine) {
}
}
+/* This should be used for testing purposes ONLY */
+void grpc_set_event_engine_test_only(
+ const grpc_event_engine_vtable *ev_engine) {
+ g_event_engine = ev_engine;
+}
+
/* Call this only after calling grpc_event_engine_init() */
const char *grpc_get_poll_strategy_name() { return g_poll_strategy_name; }
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 1a9e5c115a..becc4d359e 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -183,4 +183,7 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
extern grpc_poll_function_type grpc_poll_function;
+/* This should be used for testing purposes ONLY */
+void grpc_set_event_engine_test_only(const grpc_event_engine_vtable *);
+
#endif /* GRPC_CORE_LIB_IOMGR_EV_POSIX_H */
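A sketch of how a test might use the new hook; my_test_vtable is hypothetical, and the test is assumed to populate every member before installation, since the hook simply replaces the global engine pointer without synchronization:

static grpc_event_engine_vtable my_test_vtable; /* filled in by the test */

static void install_fake_engine(void) {
  /* Must run before any iomgr activity that touches the engine. */
  grpc_set_event_engine_test_only(&my_test_vtable);
}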
diff --git a/src/core/lib/iomgr/lockfree_event.c b/src/core/lib/iomgr/lockfree_event.c
new file mode 100644
index 0000000000..17e3bbf727
--- /dev/null
+++ b/src/core/lib/iomgr/lockfree_event.c
@@ -0,0 +1,238 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/lockfree_event.h"
+
+#include <grpc/support/log.h>
+
+/* 'state' holds the closure to call when the fd is readable or writable.
+ It can contain one of the following values:
+ CLOSURE_READY : The fd has an I/O event of interest but there is no
+ closure yet to execute
+
+ CLOSURE_NOT_READY : The fd has no I/O event of interest
+
+ closure ptr : The closure to be executed when the fd has an I/O
+ event of interest
+
+ shutdown_error | FD_SHUTDOWN_BIT :
+ 'shutdown_error' field ORed with FD_SHUTDOWN_BIT.
+ This indicates that the fd is shutdown. Since all
+ memory allocations are word-aligned, the lower two
+ bits of the shutdown_error pointer are always 0. So
+ it is safe to OR these with FD_SHUTDOWN_BIT
+
+ Valid state transitions:
+
+ <closure ptr> <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY
+ | | ^ | ^ | |
+ | | | | | | |
+ | +--------------4----------+ 6 +---------2---------------+ |
+ | | |
+ | v |
+ +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+
+
+ For 1, 4 : See grpc_lfev_set_ready() function
+ For 2, 3 : See grpc_lfev_notify_on() function
+ For 5,6,7: See grpc_lfev_set_shutdown() function */
+
+#define CLOSURE_NOT_READY ((gpr_atm)0)
+#define CLOSURE_READY ((gpr_atm)2)
+
+#define FD_SHUTDOWN_BIT ((gpr_atm)1)
+
+void grpc_lfev_init(gpr_atm *state) {
+ gpr_atm_no_barrier_store(state, CLOSURE_NOT_READY);
+}
+
+void grpc_lfev_destroy(gpr_atm *state) {
+ gpr_atm curr = gpr_atm_no_barrier_load(state);
+ if (curr & FD_SHUTDOWN_BIT) {
+ GRPC_ERROR_UNREF((grpc_error *)(curr & ~FD_SHUTDOWN_BIT));
+ } else {
+ GPR_ASSERT(curr == CLOSURE_NOT_READY || curr == CLOSURE_READY);
+ }
+}
+
+bool grpc_lfev_is_shutdown(gpr_atm *state) {
+ gpr_atm curr = gpr_atm_no_barrier_load(state);
+ return (curr & FD_SHUTDOWN_BIT) != 0;
+}
+
+void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
+ grpc_closure *closure) {
+ while (true) {
+ gpr_atm curr = gpr_atm_no_barrier_load(state);
+ switch (curr) {
+ case CLOSURE_NOT_READY: {
+ /* CLOSURE_NOT_READY -> <closure>.
+
+ We're guaranteed by API that there's an acquire barrier before here,
+ so there's no need to double-dip and this can be a release-only.
+
+ The release itself pairs with the acquire half of a set_ready full
+ barrier. */
+ if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) {
+ return; /* Successful. Return */
+ }
+
+ break; /* retry */
+ }
+
+ case CLOSURE_READY: {
+ /* Change the state to CLOSURE_NOT_READY. Schedule the closure if
+ successful. If not, the state most likely transitioned to shutdown.
+ We should retry.
+
+ This can be a no-barrier cas since the state is being transitioned to
+ CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any
+         closure when transitioning out of CLOSURE_NOT_READY state (i.e. there
+ is no other code that needs to 'happen-after' this) */
+ if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) {
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
+ return; /* Successful. Return */
+ }
+
+ break; /* retry */
+ }
+
+ default: {
+        /* 'curr' is either a closure or the fd is shutdown (in which case 'curr'
+ contains a pointer to the shutdown-error). If the fd is shutdown,
+ schedule the closure with the shutdown error */
+ if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
+ grpc_closure_sched(exec_ctx, closure,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "FD Shutdown", &shutdown_err, 1));
+ return;
+ }
+
+        /* There is already a closure! This indicates a bug in the code */
+ gpr_log(GPR_ERROR,
+ "notify_on called with a previous callback still pending");
+ abort();
+ }
+ }
+ }
+
+ GPR_UNREACHABLE_CODE(return );
+}
+
+bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
+ grpc_error *shutdown_err) {
+ gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
+
+ while (true) {
+ gpr_atm curr = gpr_atm_no_barrier_load(state);
+ switch (curr) {
+ case CLOSURE_READY:
+ case CLOSURE_NOT_READY:
+ /* Need a full barrier here so that the initial load in notify_on
+ doesn't need a barrier */
+ if (gpr_atm_full_cas(state, curr, new_state)) {
+ return true; /* early out */
+ }
+ break; /* retry */
+
+ default: {
+ /* 'curr' is either a closure or the fd is already shutdown */
+
+ /* If fd is already shutdown, we are done */
+ if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ GRPC_ERROR_UNREF(shutdown_err);
+ return false;
+ }
+
+ /* Fd is not shutdown. Schedule the closure and move the state to
+ shutdown state.
+ Needs an acquire to pair with setting the closure (and get a
+ happens-after on that edge), and a release to pair with anything
+ loading the shutdown state. */
+ if (gpr_atm_full_cas(state, curr, new_state)) {
+ grpc_closure_sched(exec_ctx, (grpc_closure *)curr,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "FD Shutdown", &shutdown_err, 1));
+ return true;
+ }
+
+ /* 'curr' was a closure but now changed to a different state. We will
+ have to retry */
+ break;
+ }
+ }
+ }
+
+ GPR_UNREACHABLE_CODE(return false);
+}
+
+void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state) {
+ while (true) {
+ gpr_atm curr = gpr_atm_no_barrier_load(state);
+
+ switch (curr) {
+ case CLOSURE_READY: {
+ /* Already ready. We are done here */
+ return;
+ }
+
+ case CLOSURE_NOT_READY: {
+ /* No barrier required as we're transitioning to a state that does not
+ involve a closure */
+ if (gpr_atm_no_barrier_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) {
+ return; /* early out */
+ }
+ break; /* retry */
+ }
+
+ default: {
+ /* 'curr' is either a closure or the fd is shutdown */
+ if ((curr & FD_SHUTDOWN_BIT) > 0) {
+ /* The fd is shutdown. Do nothing */
+ return;
+ }
+ /* Full cas: acquire pairs with this cas' release in the event of a
+ spurious set_ready; release pairs with this or the acquire in
+ notify_on (or set_shutdown) */
+ else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) {
+ grpc_closure_sched(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
+ return;
+ }
+        /* else the state changed again (only possible with a racing
+           set_ready or set_shutdown). In both cases the closure would
+           already have been scheduled for execution, so we are done here */
+ return;
+ }
+ }
+ }
+}
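Pulled together, the extracted API is driven like this (an illustrative sketch mirroring the fd code in ev_epoll_linux.c above; exec_ctx, my_closure, and why are assumed to be in scope):

gpr_atm ev;
grpc_lfev_init(&ev); /* state starts as CLOSURE_NOT_READY */

/* Consumer: register interest; my_closure runs immediately if the event
   is already ready (or with an error if already shutdown). */
grpc_lfev_notify_on(exec_ctx, &ev, my_closure);

/* Producer: an I/O event arrived; schedules my_closure if one is pending. */
grpc_lfev_set_ready(exec_ctx, &ev);

/* Teardown: only the first shutdown call returns true, so fd-level work
   (e.g. shutdown(fd, SHUT_RDWR)) can be hung off it exactly once. */
if (grpc_lfev_set_shutdown(exec_ctx, &ev, GRPC_ERROR_REF(why))) {
  /* one-time cleanup */
}
GRPC_ERROR_UNREF(why);

grpc_lfev_destroy(&ev); /* releases the stored shutdown error, if any */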
diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h
new file mode 100644
index 0000000000..1d9119204c
--- /dev/null
+++ b/src/core/lib/iomgr/lockfree_event.h
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H
+#define GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H
+
+/* Lock free event notification for file descriptors */
+
+#include <grpc/support/atm.h>
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+void grpc_lfev_init(gpr_atm *state);
+void grpc_lfev_destroy(gpr_atm *state);
+bool grpc_lfev_is_shutdown(gpr_atm *state);
+
+void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
+ grpc_closure *closure);
+/* Returns true on first successful shutdown */
+bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
+ grpc_error *shutdown_err);
+void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state);
+
+#endif /* GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H */
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index e19ce697b8..9bf3cdac89 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -75,6 +75,10 @@ void grpc_pollset_destroy(grpc_pollset *pollset);
and it is guaranteed that it will not be released by grpc_pollset_work
AFTER worker has been destroyed.
+   It's legal for worker to be NULL: in that case, this specific thread cannot
+   be woken directly with a kick, but may be woken indirectly (with a kick
+   aimed at the pollset as a whole).
+
Tries not to block past deadline.
May call grpc_closure_list_run on grpc_closure_list, without holding the
pollset
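A sketch of the newly legal call shape (pollset lock held per the existing contract; now and deadline as usual):

/* Passing NULL means this thread cannot be kicked individually; only a
   kick aimed at the whole pollset can wake it. */
grpc_error *err =
    grpc_pollset_work(exec_ctx, pollset, NULL, now, deadline);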
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index 17043c1ea1..04c6b71747 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -120,7 +120,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
gpr_timespec now, gpr_timespec deadline) {
grpc_pollset_worker worker;
- *worker_hdl = &worker;
+ if (worker_hdl) *worker_hdl = &worker;
int added_worker = 0;
worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
@@ -185,7 +185,7 @@ done:
remove_worker(&worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
}
gpr_cv_destroy(&worker.cv);
- *worker_hdl = NULL;
+ if (worker_hdl) *worker_hdl = NULL;
return GRPC_ERROR_NONE;
}
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
index 94a454c0b7..269dc35003 100644
--- a/src/core/lib/iomgr/port.h
+++ b/src/core/lib/iomgr/port.h
@@ -85,6 +85,10 @@
#define GRPC_LINUX_SOCKETUTILS 1
#endif
#endif
+#ifndef __GLIBC__
+#define GRPC_LINUX_EPOLL 1
+#define GRPC_LINUX_EVENTFD 1
+#endif
#ifndef GRPC_LINUX_EVENTFD
#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
#endif
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index 8dcd80d001..c3ee878651 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -142,6 +142,8 @@ struct grpc_resource_quota {
/* Amount of free memory in the resource quota */
int64_t free_pool;
+ gpr_atm last_size;
+
/* Has rq_step been scheduled to occur? */
bool step_scheduled;
/* Are we currently reclaiming memory */
@@ -581,6 +583,7 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) {
resource_quota->combiner = grpc_combiner_create(NULL);
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
+ gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
resource_quota->step_scheduled = false;
resource_quota->reclaiming = false;
gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0);
@@ -643,11 +646,17 @@ void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
rq_resize_args *a = gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
a->size = (int64_t)size;
+ gpr_atm_no_barrier_store(&resource_quota->last_size,
+ (gpr_atm)GPR_MIN((size_t)GPR_ATM_MAX, size));
grpc_closure_init(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
grpc_closure_sched(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
+size_t grpc_resource_quota_peek_size(grpc_resource_quota *resource_quota) {
+ return (size_t)gpr_atm_no_barrier_load(&resource_quota->last_size);
+}
+
/*******************************************************************************
* grpc_resource_user channel args api
*/
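A brief sketch of the new accessor's intended use (the caller code is hypothetical): last_size is kept with relaxed atomics, so the returned value is an instantaneous estimate, good enough for sizing heuristics such as the 1/16th-of-quota read cap added in tcp_posix.c below.

grpc_resource_quota *rq = grpc_resource_quota_create("example");
grpc_resource_quota_resize(rq, 64 * 1024 * 1024);
/* Relaxed load; may briefly trail a racing resize on another thread. */
size_t cap = grpc_resource_quota_peek_size(rq); /* 64 MiB here */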
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index b9f62cbf83..6f99be0d51 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -90,6 +90,8 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
double grpc_resource_quota_get_memory_pressure(
grpc_resource_quota *resource_quota);
+size_t grpc_resource_quota_peek_size(grpc_resource_quota *resource_quota);
+
typedef struct grpc_resource_user grpc_resource_user;
grpc_resource_user *grpc_resource_user_create(
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index 0485661316..bc367bdfa5 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -40,10 +40,6 @@
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resolve_address.h"
-/* Channel arg (integer) setting how large a slice to try and read from the wire
- each time recvmsg (or equivalent) is called */
-#define GRPC_ARG_TCP_READ_CHUNK_SIZE "grpc.experimental.tcp_read_chunk_size"
-
/* Asynchronously connect to an address (specified as (addr, len)), and call
cb with arg and the completed connection when done (or call cb with arg and
NULL on failure).
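The knob itself survives; only its home changes (tcp_posix.c below consumes it alongside the new GRPC_ARG_TCP_MIN/MAX_READ_CHUNK_SIZE variants). A hedged sketch of configuring it through ordinary channel args, with illustrative values:

grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
arg.key = GRPC_ARG_TCP_READ_CHUNK_SIZE;
arg.value.integer = 16384; /* initial read target; clamped to [min, max] */
grpc_channel_args args = {1, &arg};
/* Pass &args wherever channel args are accepted; grpc_tcp_create() picks
   the value up from the args threaded through to it. */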
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index a108b10da6..a2692707d9 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -137,29 +137,7 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
grpc_endpoint *grpc_tcp_client_create_from_fd(
grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
const char *addr_str) {
- size_t tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
- grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
- if (channel_args != NULL) {
- for (size_t i = 0; i < channel_args->num_args; i++) {
- if (0 ==
- strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
- grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
- 8 * 1024 * 1024};
- tcp_read_chunk_size = (size_t)grpc_channel_arg_get_integer(
- &channel_args->args[i], options);
- } else if (0 ==
- strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
- grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
- resource_quota = grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
- }
- }
- }
-
- grpc_endpoint *ep =
- grpc_tcp_create(fd, resource_quota, tcp_read_chunk_size, addr_str);
- grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
- return ep;
+ return grpc_tcp_create(exec_ctx, fd, channel_args, addr_str);
}
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index a356564766..d6baca50ba 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -63,7 +63,7 @@ typedef struct {
int refs;
grpc_closure on_connect;
grpc_endpoint **endpoint;
- grpc_resource_quota *resource_quota;
+ grpc_channel_args *channel_args;
} async_connect;
static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx,
@@ -72,7 +72,7 @@ static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx,
int done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
- grpc_resource_quota_unref_internal(exec_ctx, ac->resource_quota);
+ grpc_channel_args_destroy(exec_ctx, ac->channel_args);
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_name);
gpr_free(ac);
@@ -119,7 +119,8 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
if (!wsa_success) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
} else {
- *ep = grpc_tcp_create(socket, ac->resource_quota, ac->addr_name);
+ *ep =
+ grpc_tcp_create(exec_ctx, socket, ac->channel_args, ac->addr_name);
socket = NULL;
}
} else {
@@ -152,17 +153,6 @@ static void tcp_client_connect_impl(
grpc_winsocket_callback_info *info;
grpc_error *error = GRPC_ERROR_NONE;
- grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
- if (channel_args != NULL) {
- for (size_t i = 0; i < channel_args->num_args; i++) {
- if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
- grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
- resource_quota = grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
- }
- }
- }
-
*endpoint = NULL;
/* Use dualstack sockets where available. */
@@ -225,7 +215,7 @@ static void tcp_client_connect_impl(
ac->refs = 2;
ac->addr_name = grpc_sockaddr_to_uri(addr);
ac->endpoint = endpoint;
- ac->resource_quota = resource_quota;
+ ac->channel_args = grpc_channel_args_copy(channel_args);
grpc_closure_init(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
grpc_closure_init(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx);
@@ -247,7 +237,6 @@ failure:
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
- grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
grpc_closure_sched(exec_ctx, on_done, final_error);
}
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 4d7cf3ff51..5f4b38de2b 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -52,7 +52,9 @@
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/profiling/timers.h"
@@ -80,10 +82,14 @@ typedef struct {
int fd;
bool finished_edge;
msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */
- size_t slice_size;
+ double target_length;
+ double bytes_read_this_round;
gpr_refcount refcount;
gpr_atm shutdown_count;
+ int min_read_chunk_size;
+ int max_read_chunk_size;
+
/* garbage after the last read */
grpc_slice_buffer last_read_buffer;
@@ -108,6 +114,42 @@ typedef struct {
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
+static void add_to_estimate(grpc_tcp *tcp, size_t bytes) {
+ tcp->bytes_read_this_round += (double)bytes;
+}
+
+static void finish_estimate(grpc_tcp *tcp) {
+ /* If we read >80% of the target buffer in one read loop, increase the size
+ of the target buffer to either the amount read, or twice its previous
+ value */
+ if (tcp->bytes_read_this_round > tcp->target_length * 0.8) {
+ tcp->target_length =
+ GPR_MAX(2 * tcp->target_length, tcp->bytes_read_this_round);
+ } else {
+ tcp->target_length =
+ 0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round;
+ }
+ tcp->bytes_read_this_round = 0;
+}
+
+static size_t get_target_read_size(grpc_tcp *tcp) {
+ grpc_resource_quota *rq = grpc_resource_user_quota(tcp->resource_user);
+ double pressure = grpc_resource_quota_get_memory_pressure(rq);
+ double target =
+ tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0);
+ size_t sz = (((size_t)GPR_CLAMP(target, tcp->min_read_chunk_size,
+ tcp->max_read_chunk_size)) +
+ 255) &
+ ~(size_t)255;
+ /* don't use more than 1/16th of the overall resource quota for a single read
+ * alloc */
+ size_t rqmax = grpc_resource_quota_peek_size(rq);
+ if (sz > rqmax / 16 && rqmax > 1024) {
+ sz = rqmax / 16;
+ }
+ return sz;
+}
+
static grpc_error *tcp_annotate_error(grpc_error *src_error, grpc_tcp *tcp) {
return grpc_error_set_str(
grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
@@ -232,9 +274,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
/* NB: After calling call_read_cb a parallel call of the read handler may
* be running. */
if (errno == EAGAIN) {
- if (tcp->iov_size > 1) {
- tcp->iov_size /= 2;
- }
+ finish_estimate(tcp);
/* We've consumed the edge, request a new one */
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
@@ -253,14 +293,13 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
+ add_to_estimate(tcp, (size_t)read_bytes);
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
if ((size_t)read_bytes < tcp->incoming_buffer->length) {
grpc_slice_buffer_trim_end(
tcp->incoming_buffer,
tcp->incoming_buffer->length - (size_t)read_bytes,
&tcp->last_read_buffer);
- } else if (tcp->iov_size < MAX_READ_IOVEC) {
- ++tcp->iov_size;
}
GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
call_read_cb(exec_ctx, tcp, GRPC_ERROR_NONE);
@@ -285,11 +324,11 @@ static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
}
static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
- if (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
- grpc_resource_user_alloc_slices(
- exec_ctx, &tcp->slice_allocator, tcp->slice_size,
- (size_t)tcp->iov_size - tcp->incoming_buffer->count,
- tcp->incoming_buffer);
+ size_t target_read_size = get_target_read_size(tcp);
+ if (tcp->incoming_buffer->length < target_read_size &&
+ tcp->incoming_buffer->count < MAX_READ_IOVEC) {
+ grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
+ target_read_size, 1, tcp->incoming_buffer);
} else {
tcp_do_read(exec_ctx, tcp);
}
@@ -540,9 +579,50 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_get_peer,
tcp_get_fd};
-grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
- grpc_resource_quota *resource_quota,
- size_t slice_size, const char *peer_string) {
+#define MAX_CHUNK_SIZE 32 * 1024 * 1024
+
+grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
+ const grpc_channel_args *channel_args,
+ const char *peer_string) {
+ int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
+ int tcp_max_read_chunk_size = 4 * 1024 * 1024;
+ int tcp_min_read_chunk_size = 256;
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 ==
+ strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
+ grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+ MAX_CHUNK_SIZE};
+ tcp_read_chunk_size =
+ grpc_channel_arg_get_integer(&channel_args->args[i], options);
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE)) {
+ grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+ MAX_CHUNK_SIZE};
+ tcp_min_read_chunk_size =
+ grpc_channel_arg_get_integer(&channel_args->args[i], options);
+ } else if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE)) {
+ grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+ MAX_CHUNK_SIZE};
+ tcp_max_read_chunk_size =
+ grpc_channel_arg_get_integer(&channel_args->args[i], options);
+ } else if (0 ==
+ strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_ref_internal(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+
+ if (tcp_min_read_chunk_size > tcp_max_read_chunk_size) {
+ tcp_min_read_chunk_size = tcp_max_read_chunk_size;
+ }
+ tcp_read_chunk_size = GPR_CLAMP(tcp_read_chunk_size, tcp_min_read_chunk_size,
+ tcp_max_read_chunk_size);
+
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->peer_string = gpr_strdup(peer_string);
@@ -552,7 +632,10 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
tcp->release_fd_cb = NULL;
tcp->release_fd = NULL;
tcp->incoming_buffer = NULL;
- tcp->slice_size = slice_size;
+ tcp->target_length = (double)tcp_read_chunk_size;
+ tcp->min_read_chunk_size = tcp_min_read_chunk_size;
+ tcp->max_read_chunk_size = tcp_max_read_chunk_size;
+ tcp->bytes_read_this_round = 0;
tcp->iov_size = 1;
tcp->finished_edge = true;
/* paired with unref in grpc_tcp_destroy */
@@ -569,6 +652,7 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
&tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
/* Tell network status tracker about new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
return &tcp->base;
}
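The estimator added above is a small grow-fast/decay-slow control loop: a read that fills more than 80% of the target doubles the target (or jumps straight to the byte count, if larger), while smaller reads bleed it down through a 1% exponentially weighted moving average. A self-contained sketch of just that arithmetic, with hypothetical byte counts:

#include <stdio.h>

static double update_target(double target, double bytes_read) {
  if (bytes_read > target * 0.8) {
    return 2 * target > bytes_read ? 2 * target : bytes_read; /* grow fast */
  }
  return 0.99 * target + 0.01 * bytes_read; /* decay slowly */
}

int main(void) {
  double target = 8192; /* the former GRPC_TCP_DEFAULT_READ_SLICE_SIZE */
  target = update_target(target, 8000); /* big read: target -> 16384 */
  target = update_target(target, 100);  /* small read: target -> ~16221 */
  printf("final target: %.0f bytes\n", target);
  return 0;
}

The actual allocation size is then clamped to [min_read_chunk_size, max_read_chunk_size], rounded up to a 256-byte boundary, scaled down under memory pressure, and capped at 1/16th of the resource quota, per get_target_read_size above.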
diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h
index 1c0d13f96e..1ad5788331 100644
--- a/src/core/lib/iomgr/tcp_posix.h
+++ b/src/core/lib/iomgr/tcp_posix.h
@@ -47,14 +47,13 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/ev_posix.h"
-#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
-
extern int grpc_tcp_trace;
 /* Create a tcp endpoint given a file descriptor and its channel args.
Takes ownership of fd. */
-grpc_endpoint *grpc_tcp_create(grpc_fd *fd, grpc_resource_quota *resource_quota,
- size_t read_slice_size, const char *peer_string);
+grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ const grpc_channel_args *args,
+ const char *peer_string);
/* Return the tcp endpoint's fd, or -1 if this is not available. Does not
release the fd.
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index d6a017cf7f..e66ffc9b1c 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -59,6 +59,7 @@
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -90,7 +91,6 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
grpc_tcp_server *s = gpr_zalloc(sizeof(grpc_tcp_server));
s->so_reuseport = has_so_reuseport;
- s->resource_quota = grpc_resource_quota_create(NULL);
s->expand_wildcard_addrs = false;
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) {
@@ -98,27 +98,14 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
s->so_reuseport =
has_so_reuseport && (args->args[i].value.integer != 0);
} else {
- grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(GRPC_ARG_ALLOW_REUSEPORT
" must be an integer");
}
- } else if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
- if (args->args[i].type == GRPC_ARG_POINTER) {
- grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
- s->resource_quota =
- grpc_resource_quota_ref_internal(args->args[i].value.pointer.p);
- } else {
- grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
- gpr_free(s);
- return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
- }
} else if (0 == strcmp(GRPC_ARG_EXPAND_WILDCARD_ADDRS, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_INTEGER) {
s->expand_wildcard_addrs = (args->args[i].value.integer != 0);
} else {
- grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
GRPC_ARG_EXPAND_WILDCARD_ADDRS " must be an integer");
@@ -138,6 +125,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
s->head = NULL;
s->tail = NULL;
s->nports = 0;
+ s->channel_args = grpc_channel_args_copy(args);
gpr_atm_no_barrier_store(&s->next_pollset_to_assign, 0);
*server = s;
return GRPC_ERROR_NONE;
@@ -158,8 +146,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
s->head = sp->next;
gpr_free(sp);
}
-
- grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
+ grpc_channel_args_destroy(exec_ctx, s->channel_args);
gpr_free(s);
}
@@ -286,8 +273,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
sp->server->on_accept_cb(
exec_ctx, sp->server->on_accept_cb_arg,
- grpc_tcp_create(fdobj, sp->server->resource_quota,
- GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
+ grpc_tcp_create(exec_ctx, fdobj, sp->server->channel_args, addr_str),
read_notifier_pollset, acceptor);
gpr_free(name);
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix.h b/src/core/lib/iomgr/tcp_server_utils_posix.h
index f5dc8532f9..c15e2e1493 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix.h
+++ b/src/core/lib/iomgr/tcp_server_utils_posix.h
@@ -103,7 +103,8 @@ struct grpc_tcp_server {
/* next pollset to assign a channel to */
gpr_atm next_pollset_to_assign;
- grpc_resource_quota *resource_quota;
+ /* channel args for this server */
+ grpc_channel_args *channel_args;
};
/* If successful, add a listener to \a s for \a addr, set \a dsmode for the
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index 12ce7d3fdd..4c17f08918 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -46,6 +46,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/pollset_windows.h"
#include "src/core/lib/iomgr/resolve_address.h"
@@ -102,7 +103,7 @@ struct grpc_tcp_server {
/* shutdown callback */
grpc_closure *shutdown_complete;
- grpc_resource_quota *resource_quota;
+ grpc_channel_args *channel_args;
};
/* Public function. Allocates the proper data structures to hold a
@@ -112,21 +113,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
const grpc_channel_args *args,
grpc_tcp_server **server) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
- s->resource_quota = grpc_resource_quota_create(NULL);
- for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
- if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
- if (args->args[i].type == GRPC_ARG_POINTER) {
- grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
- s->resource_quota =
- grpc_resource_quota_ref_internal(args->args[i].value.pointer.p);
- } else {
- grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
- gpr_free(s);
- return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
- }
- }
- }
+ s->channel_args = grpc_channel_args_copy(args);
gpr_ref_init(&s->refs, 1);
gpr_mu_init(&s->mu);
s->active_ports = 0;
@@ -155,7 +142,7 @@ static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
grpc_winsocket_destroy(sp->socket);
gpr_free(sp);
}
- grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
+ grpc_channel_args_destroy(exec_ctx, s->channel_args);
gpr_free(s);
}
@@ -383,8 +370,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_free(utf8_message);
}
gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string);
- ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name),
- sp->server->resource_quota, peer_name_string);
+ ep = grpc_tcp_create(exec_ctx, grpc_winsocket_create(sock, fd_name),
+ sp->server->channel_args, peer_name_string);
gpr_free(fd_name);
gpr_free(peer_name_string);
} else {
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index 9134883226..f74aa68793 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -430,9 +430,19 @@ static grpc_endpoint_vtable vtable = {win_read,
win_get_peer,
win_get_fd};
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
- grpc_resource_quota *resource_quota,
+grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
+ grpc_channel_args *channel_args,
char *peer_string) {
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_ref_internal(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h
index 4402de1c38..abafdb22d2 100644
--- a/src/core/lib/iomgr/tcp_windows.h
+++ b/src/core/lib/iomgr/tcp_windows.h
@@ -50,8 +50,8 @@
/* Create a tcp endpoint given a winsock handle.
* Takes ownership of the handle.
*/
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
- grpc_resource_quota *resource_quota,
+grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
+ grpc_channel_args *channel_args,
char *peer_string);
grpc_error *grpc_tcp_prepare_socket(SOCKET sock);
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index d84a278b18..e0338f93c7 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -101,6 +101,9 @@ bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
void grpc_timer_list_init(gpr_timespec now);
void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx);
+/* Consume a kick issued by grpc_kick_poller */
+void grpc_timer_consume_kick(void);
+
/* the following must be implemented by each iomgr implementation */
void grpc_kick_poller(void);
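
For context on how these two calls pair up: grpc_kick_poller wakes a polling thread when a new earliest deadline is installed, and the woken thread calls grpc_timer_consume_kick to drop its thread-local cached minimum so the next timer check re-reads shared state instead of skipping. A hypothetical wakeup path, for illustration only (the real call sites live in the poller implementations, and the clock type must match the one passed to grpc_timer_list_init, assumed monotonic here):

    void poller_woken_by_kick(grpc_exec_ctx *exec_ctx) {
      grpc_timer_consume_kick(); /* invalidate the cached last-seen minimum */
      gpr_timespec next = gpr_inf_future(GPR_CLOCK_MONOTONIC);
      grpc_timer_check(exec_ctx, gpr_now(GPR_CLOCK_MONOTONIC), &next);
      /* ... re-arm polling with 'next' as the new wakeup deadline ... */
    }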
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index e53c801929..d8e6068431 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -37,9 +37,13 @@
#include "src/core/lib/iomgr/timer.h"
+#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
+#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/time_averaged_stats.h"
#include "src/core/lib/iomgr/timer_heap.h"
#include "src/core/lib/support/spinlock.h"
@@ -52,12 +56,15 @@
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1
+int grpc_timer_trace = 0;
+int grpc_timer_check_trace = 0;
+
typedef struct {
gpr_mu mu;
grpc_time_averaged_stats stats;
/* All and only timers with deadlines <= this will be in the heap. */
- gpr_timespec queue_deadline_cap;
- gpr_timespec min_deadline;
+ gpr_atm queue_deadline_cap;
+ gpr_atm min_deadline;
/* Index in the g_shard_queue */
uint32_t shard_queue_index;
/* This holds all timers with deadlines < queue_deadline_cap. Timers in this
@@ -67,38 +74,92 @@ typedef struct {
grpc_timer list;
} shard_type;
-/* Protects g_shard_queue */
-static gpr_mu g_mu;
-/* Allow only one run_some_expired_timers at once */
-static gpr_spinlock g_checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER;
+struct shared_mutables {
+ gpr_atm min_timer;
+ /* Allow only one run_some_expired_timers at once */
+ gpr_spinlock checker_mu;
+ bool initialized;
+ /* Protects g_shard_queue */
+ gpr_mu mu;
+} GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);
+
+static struct shared_mutables g_shared_mutables = {
+ .checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, .initialized = false,
+};
static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
-/* Protected by g_mu */
+/* Protected by g_shared_mutables.mu */
static shard_type *g_shard_queue[NUM_SHARDS];
-static bool g_initialized = false;
+static gpr_timespec g_start_time;
+
+GPR_TLS_DECL(g_last_seen_min_timer);
+
+static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
+ if (a > GPR_ATM_MAX - b) {
+ return GPR_ATM_MAX;
+ }
+ return a + b;
+}
+
+static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_atm now,
+ gpr_atm *next, grpc_error *error);
+
+static gpr_timespec dbl_to_ts(double d) {
+ gpr_timespec ts;
+ ts.tv_sec = (int64_t)d;
+ ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
+ ts.clock_type = GPR_TIMESPAN;
+ return ts;
+}
+
+static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
+ ts = gpr_time_sub(ts, g_start_time);
+ double x = GPR_MS_PER_SEC * (double)ts.tv_sec +
+ (double)ts.tv_nsec / GPR_NS_PER_MS +
+ (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC;
+ if (x < 0) return 0;
+ if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
+ return (gpr_atm)x;
+}
+
+static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
+ ts = gpr_time_sub(ts, g_start_time);
+ double x =
+ GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS;
+ if (x < 0) return 0;
+ if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
+ return (gpr_atm)x;
+}
-static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
- gpr_timespec *next, grpc_error *error);
+static gpr_timespec atm_to_timespec(gpr_atm x) {
+ return gpr_time_add(g_start_time, dbl_to_ts((double)x / 1000.0));
+}
-static gpr_timespec compute_min_deadline(shard_type *shard) {
+static gpr_atm compute_min_deadline(shard_type *shard) {
return grpc_timer_heap_is_empty(&shard->heap)
- ? shard->queue_deadline_cap
+ ? saturating_add(shard->queue_deadline_cap, 1)
: grpc_timer_heap_top(&shard->heap)->deadline;
}
void grpc_timer_list_init(gpr_timespec now) {
uint32_t i;
- g_initialized = true;
- gpr_mu_init(&g_mu);
+ g_shared_mutables.initialized = true;
+ gpr_mu_init(&g_shared_mutables.mu);
g_clock_type = now.clock_type;
+ g_start_time = now;
+ g_shared_mutables.min_timer = timespec_to_atm_round_down(now);
+ gpr_tls_init(&g_last_seen_min_timer);
+ gpr_tls_set(&g_last_seen_min_timer, 0);
+ grpc_register_tracer("timer", &grpc_timer_trace);
+ grpc_register_tracer("timer_check", &grpc_timer_check_trace);
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
gpr_mu_init(&shard->mu);
grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
0.5);
- shard->queue_deadline_cap = now;
+ shard->queue_deadline_cap = g_shared_mutables.min_timer;
shard->shard_queue_index = i;
grpc_timer_heap_init(&shard->heap);
shard->list.next = shard->list.prev = &shard->list;
@@ -110,29 +171,23 @@ void grpc_timer_list_init(gpr_timespec now) {
void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
int i;
run_some_expired_timers(
- exec_ctx, gpr_inf_future(g_clock_type), NULL,
+ exec_ctx, GPR_ATM_MAX, NULL,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
grpc_timer_heap_destroy(&shard->heap);
}
- gpr_mu_destroy(&g_mu);
- g_initialized = false;
+ gpr_mu_destroy(&g_shared_mutables.mu);
+ gpr_tls_destroy(&g_last_seen_min_timer);
+ g_shared_mutables.initialized = false;
}
static double ts_to_dbl(gpr_timespec ts) {
return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
}
-static gpr_timespec dbl_to_ts(double d) {
- gpr_timespec ts;
- ts.tv_sec = (int64_t)d;
- ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
- ts.clock_type = GPR_TIMESPAN;
- return ts;
-}
-
+/* links 'timer' in at the tail of the list headed by 'head' */
static void list_join(grpc_timer *head, grpc_timer *timer) {
timer->next = head;
timer->prev = head->prev;
@@ -158,15 +213,13 @@ static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
static void note_deadline_change(shard_type *shard) {
while (shard->shard_queue_index > 0 &&
- gpr_time_cmp(
- shard->min_deadline,
- g_shard_queue[shard->shard_queue_index - 1]->min_deadline) < 0) {
+ shard->min_deadline <
+ g_shard_queue[shard->shard_queue_index - 1]->min_deadline) {
swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
}
while (shard->shard_queue_index < NUM_SHARDS - 1 &&
- gpr_time_cmp(
- shard->min_deadline,
- g_shard_queue[shard->shard_queue_index + 1]->min_deadline) > 0) {
+ shard->min_deadline >
+ g_shard_queue[shard->shard_queue_index + 1]->min_deadline) {
swap_adjacent_shards_in_queue(shard->shard_queue_index);
}
}
@@ -179,9 +232,17 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
GPR_ASSERT(deadline.clock_type == g_clock_type);
GPR_ASSERT(now.clock_type == g_clock_type);
timer->closure = closure;
- timer->deadline = deadline;
+ timer->deadline = timespec_to_atm_round_up(deadline);
+
+ if (grpc_timer_trace) {
+ gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR
+ "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]",
+ timer, deadline.tv_sec, deadline.tv_nsec, timer->deadline,
+ now.tv_sec, now.tv_nsec, timespec_to_atm_round_down(now), closure,
+ closure->cb);
+ }
- if (!g_initialized) {
+ if (!g_shared_mutables.initialized) {
timer->pending = false;
grpc_closure_sched(exec_ctx, timer->closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -201,12 +262,18 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
grpc_time_averaged_stats_add_sample(&shard->stats,
ts_to_dbl(gpr_time_sub(deadline, now)));
- if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
+ if (timer->deadline < shard->queue_deadline_cap) {
is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
} else {
timer->heap_index = INVALID_HEAP_INDEX;
list_join(&shard->list, timer);
}
+ if (grpc_timer_trace) {
+ gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
+ " => is_first_timer=%s",
+ (int)(shard - g_shards), shard->queue_deadline_cap,
+ is_first_timer ? "true" : "false");
+ }
gpr_mu_unlock(&shard->mu);
/* Deadline may have decreased, we need to adjust the master queue. Note
@@ -221,28 +288,41 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
In that case, the timer will simply have to wait for the next
grpc_timer_check. */
if (is_first_timer) {
- gpr_mu_lock(&g_mu);
- if (gpr_time_cmp(deadline, shard->min_deadline) < 0) {
- gpr_timespec old_min_deadline = g_shard_queue[0]->min_deadline;
- shard->min_deadline = deadline;
+ gpr_mu_lock(&g_shared_mutables.mu);
+ if (grpc_timer_trace) {
+ gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
+ shard->min_deadline);
+ }
+ if (timer->deadline < shard->min_deadline) {
+ gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline;
+ shard->min_deadline = timer->deadline;
note_deadline_change(shard);
- if (shard->shard_queue_index == 0 &&
- gpr_time_cmp(deadline, old_min_deadline) < 0) {
+ if (shard->shard_queue_index == 0 && timer->deadline < old_min_deadline) {
+ gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, timer->deadline);
grpc_kick_poller();
}
}
- gpr_mu_unlock(&g_mu);
+ gpr_mu_unlock(&g_shared_mutables.mu);
}
}
+void grpc_timer_consume_kick(void) {
+ /* force re-evaluation of last seen min */
+ gpr_tls_set(&g_last_seen_min_timer, 0);
+}
+
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
- if (!g_initialized) {
+ if (!g_shared_mutables.initialized) {
/* must have already been cancelled, also the shard mutex is invalid */
return;
}
shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
gpr_mu_lock(&shard->mu);
+ if (grpc_timer_trace) {
+ gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
+ timer->pending ? "true" : "false");
+ }
if (timer->pending) {
grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
timer->pending = false;
@@ -260,7 +340,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
for timers that fall at or under it. Returns true if the queue is no
longer empty.
REQUIRES: shard->mu locked */
-static int refill_queue(shard_type *shard, gpr_timespec now) {
+static int refill_queue(shard_type *shard, gpr_atm now) {
/* Compute the new queue window width and bound by the limits: */
double computed_deadline_delta =
grpc_time_averaged_stats_update_average(&shard->stats) *
@@ -271,12 +351,22 @@ static int refill_queue(shard_type *shard, gpr_timespec now) {
grpc_timer *timer, *next;
/* Compute the new cap and put all timers under it into the queue: */
- shard->queue_deadline_cap = gpr_time_add(
- gpr_time_max(now, shard->queue_deadline_cap), dbl_to_ts(deadline_delta));
+ shard->queue_deadline_cap =
+ saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
+ (gpr_atm)(deadline_delta * 1000.0));
+
+ if (grpc_timer_check_trace) {
+ gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
+ (int)(shard - g_shards), shard->queue_deadline_cap);
+ }
for (timer = shard->list.next; timer != &shard->list; timer = next) {
next = timer->next;
- if (gpr_time_cmp(timer->deadline, shard->queue_deadline_cap) < 0) {
+ if (timer->deadline < shard->queue_deadline_cap) {
+ if (grpc_timer_check_trace) {
+ gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap",
+ timer->deadline);
+ }
list_remove(timer);
grpc_timer_heap_add(&shard->heap, timer);
}
@@ -287,15 +377,29 @@ static int refill_queue(shard_type *shard, gpr_timespec now) {
/* This pops the next non-cancelled timer with deadline <= now from the
queue, or returns NULL if there isn't one.
REQUIRES: shard->mu locked */
-static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
+static grpc_timer *pop_one(shard_type *shard, gpr_atm now) {
grpc_timer *timer;
for (;;) {
+ if (grpc_timer_check_trace) {
+ gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
+ (int)(shard - g_shards),
+ grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
+ }
if (grpc_timer_heap_is_empty(&shard->heap)) {
- if (gpr_time_cmp(now, shard->queue_deadline_cap) < 0) return NULL;
+ if (now < shard->queue_deadline_cap) return NULL;
if (!refill_queue(shard, now)) return NULL;
}
timer = grpc_timer_heap_top(&shard->heap);
- if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
+ if (grpc_timer_check_trace) {
+ gpr_log(GPR_DEBUG,
+ " .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
+ timer->deadline, now);
+ }
+ if (timer->deadline > now) return NULL;
+ if (grpc_timer_trace) {
+ gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late", timer,
+ now - timer->deadline);
+ }
timer->pending = false;
grpc_timer_heap_pop(&shard->heap);
return timer;
@@ -304,7 +408,7 @@ static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
/* REQUIRES: shard->mu unlocked */
static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
- gpr_timespec now, gpr_timespec *new_min_deadline,
+ gpr_atm now, gpr_atm *new_min_deadline,
grpc_error *error) {
size_t n = 0;
grpc_timer *timer;
@@ -318,17 +422,29 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
return n;
}
-static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
- gpr_timespec *next, grpc_error *error) {
+static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_atm now,
+ gpr_atm *next, grpc_error *error) {
size_t n = 0;
- /* TODO(ctiller): verify that there are any timers (atomically) here */
+ gpr_atm min_timer = gpr_atm_no_barrier_load(&g_shared_mutables.min_timer);
+ gpr_tls_set(&g_last_seen_min_timer, min_timer);
+ if (now < min_timer) {
+ if (next != NULL) *next = GPR_MIN(*next, min_timer);
+ return 0;
+ }
- if (gpr_spinlock_trylock(&g_checker_mu)) {
- gpr_mu_lock(&g_mu);
+ if (gpr_spinlock_trylock(&g_shared_mutables.checker_mu)) {
+ gpr_mu_lock(&g_shared_mutables.mu);
- while (gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
- gpr_timespec new_min_deadline;
+ if (grpc_timer_check_trace) {
+ gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR,
+ (int)(g_shard_queue[0] - g_shards),
+ g_shard_queue[0]->min_deadline);
+ }
+
+ while (g_shard_queue[0]->min_deadline < now ||
+ (now != GPR_ATM_MAX && g_shard_queue[0]->min_deadline == now)) {
+ gpr_atm new_min_deadline;
/* For efficiency, we pop as many available timers as we can from the
shard. This may violate perfect timer deadline ordering, but that
@@ -336,6 +452,14 @@ static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
n +=
pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline, error);
+ if (grpc_timer_check_trace) {
+ gpr_log(GPR_DEBUG, " .. popped --> %" PRIdPTR
+ ", shard[%d]->min_deadline %" PRIdPTR
+ " --> %" PRIdPTR ", now=%" PRIdPTR,
+ n, (int)(g_shard_queue[0] - g_shards),
+ g_shard_queue[0]->min_deadline, new_min_deadline, now);
+ }
+
/* A grpc_timer_init() on the shard could intervene here, adding a new
timer that is earlier than new_min_deadline. However,
grpc_timer_init() will block on the master_lock before it can call
@@ -346,23 +470,24 @@ static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
}
if (next) {
- *next = gpr_time_min(*next, g_shard_queue[0]->min_deadline);
+ *next = GPR_MIN(*next, g_shard_queue[0]->min_deadline);
}
- gpr_mu_unlock(&g_mu);
- gpr_spinlock_unlock(&g_checker_mu);
+ gpr_atm_no_barrier_store(&g_shared_mutables.min_timer,
+ g_shard_queue[0]->min_deadline);
+ gpr_mu_unlock(&g_shared_mutables.mu);
+ gpr_spinlock_unlock(&g_shared_mutables.checker_mu);
} else if (next != NULL) {
/* TODO(ctiller): this forces calling code to do a short poll, and
then retry the timer check (because this time through the timer list was
contended).
- We could reduce the cost here dramatically by keeping a count of how many
- currently active pollers got through the uncontended case above
+ We could reduce the cost here dramatically by keeping a count of how
+ many currently active pollers got through the uncontended case above
successfully, and waking up other pollers IFF that count drops to zero.
Once that count is in place, this entire else branch could disappear. */
- *next = gpr_time_min(
- *next, gpr_time_add(now, gpr_time_from_millis(1, GPR_TIMESPAN)));
+ *next = GPR_MIN(*next, now + 1);
}
GRPC_ERROR_UNREF(error);
@@ -372,12 +497,71 @@ static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
gpr_timespec *next) {
+ // prelude
GPR_ASSERT(now.clock_type == g_clock_type);
- return run_some_expired_timers(
- exec_ctx, now, next,
+ gpr_atm now_atm = timespec_to_atm_round_down(now);
+
+ /* fetch from a thread-local first: this avoids contention on a globally
+ mutable cacheline in the common case */
+ gpr_atm min_timer = gpr_tls_get(&g_last_seen_min_timer);
+ if (now_atm < min_timer) {
+ if (next != NULL) {
+ *next =
+ atm_to_timespec(GPR_MIN(timespec_to_atm_round_up(*next), min_timer));
+ }
+ if (grpc_timer_check_trace) {
+ gpr_log(GPR_DEBUG,
+ "TIMER CHECK SKIP: now_atm=%" PRIdPTR " min_timer=%" PRIdPTR,
+ now_atm, min_timer);
+ }
+ return 0;
+ }
+
+ grpc_error *shutdown_error =
gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0
? GRPC_ERROR_NONE
- : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system"));
+ : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system");
+
+ // tracing
+ if (grpc_timer_check_trace) {
+ char *next_str;
+ if (next == NULL) {
+ next_str = gpr_strdup("NULL");
+ } else {
+ gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
+ next->tv_nsec, timespec_to_atm_round_down(*next));
+ }
+ gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR
+ "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
+ now.tv_sec, now.tv_nsec, now_atm, next_str,
+ gpr_tls_get(&g_last_seen_min_timer),
+ gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
+ gpr_free(next_str);
+ }
+ // actual code
+ int r;
+ gpr_atm next_atm;
+ if (next == NULL) {
+ r = run_some_expired_timers(exec_ctx, now_atm, NULL, shutdown_error);
+ } else {
+ next_atm = timespec_to_atm_round_down(*next);
+ r = run_some_expired_timers(exec_ctx, now_atm, &next_atm, shutdown_error);
+ *next = atm_to_timespec(next_atm);
+ }
+ // tracing
+ if (grpc_timer_check_trace) {
+ char *next_str;
+ if (next == NULL) {
+ next_str = gpr_strdup("NULL");
+ } else {
+ gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
+ next->tv_nsec, next_atm);
+ }
+ gpr_log(GPR_DEBUG, "TIMER CHECK END: %d timers triggered; next=%s", r,
+ next_str);
+ gpr_free(next_str);
+ }
+ return r > 0;
}
#endif /* GRPC_TIMER_USE_GENERIC */
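
To summarize the representation change in this file: deadlines are no longer gpr_timespec values but gpr_atm millisecond offsets from g_start_time, so shard ordering and heap comparisons reduce to plain integer compares and the shared minimum fits in one atomic word. A worked example of the rounding pair, assuming the timer list was initialized at some time t0:

    /* t0 + 2.0004s is 2000.4ms after g_start_time:
         timespec_to_atm_round_down(t0 + 2.0004s) == 2000
           (used for 'now': a timer is never considered expired early)
         timespec_to_atm_round_up(t0 + 2.0004s)   == 2001
           (used for deadlines: a timer may fire up to 1ms late, never early)
       atm_to_timespec(2001) maps back to t0 + 2.001s when reporting *next. */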
diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h
index 1608dce9fb..c79a431aa0 100644
--- a/src/core/lib/iomgr/timer_generic.h
+++ b/src/core/lib/iomgr/timer_generic.h
@@ -38,7 +38,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
struct grpc_timer {
- gpr_timespec deadline;
+ gpr_atm deadline;
uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
bool pending;
struct grpc_timer *next;
diff --git a/src/core/lib/iomgr/timer_heap.c b/src/core/lib/iomgr/timer_heap.c
index f736d335e6..03ccfe023a 100644
--- a/src/core/lib/iomgr/timer_heap.c
+++ b/src/core/lib/iomgr/timer_heap.c
@@ -50,7 +50,7 @@
static void adjust_upwards(grpc_timer **first, uint32_t i, grpc_timer *t) {
while (i > 0) {
uint32_t parent = (uint32_t)(((int)i - 1) / 2);
- if (gpr_time_cmp(first[parent]->deadline, t->deadline) <= 0) break;
+ if (first[parent]->deadline <= t->deadline) break;
first[i] = first[parent];
first[i]->heap_index = i;
i = parent;
@@ -68,12 +68,12 @@ static void adjust_downwards(grpc_timer **first, uint32_t i, uint32_t length,
uint32_t left_child = 1u + 2u * i;
if (left_child >= length) break;
uint32_t right_child = left_child + 1;
- uint32_t next_i = right_child < length &&
- gpr_time_cmp(first[left_child]->deadline,
- first[right_child]->deadline) > 0
- ? right_child
- : left_child;
- if (gpr_time_cmp(t->deadline, first[next_i]->deadline) <= 0) break;
+ uint32_t next_i =
+ right_child < length &&
+ first[left_child]->deadline > first[right_child]->deadline
+ ? right_child
+ : left_child;
+ if (t->deadline <= first[next_i]->deadline) break;
first[i] = first[next_i];
first[i]->heap_index = i;
i = next_i;
@@ -97,7 +97,7 @@ static void maybe_shrink(grpc_timer_heap *heap) {
static void note_changed_priority(grpc_timer_heap *heap, grpc_timer *timer) {
uint32_t i = timer->heap_index;
uint32_t parent = (uint32_t)(((int)i - 1) / 2);
- if (gpr_time_cmp(heap->timers[parent]->deadline, timer->deadline) > 0) {
+ if (heap->timers[parent]->deadline > timer->deadline) {
adjust_upwards(heap->timers, i, timer);
} else {
adjust_downwards(heap->timers, i, heap->timer_count, timer);
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index 510b79552a..89b8e3c0b3 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -71,7 +71,7 @@ typedef enum {
#define GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS 60
-#define GRPC_COMPUTE_ENGINE_METADATA_HOST "metadata"
+#define GRPC_COMPUTE_ENGINE_METADATA_HOST "metadata.google.internal"
#define GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH \
"/computeMetadata/v1/instance/service-accounts/default/token"
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index 8f321b9911..f526653ffa 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -64,7 +64,7 @@ typedef struct {
pollset_set so that work can progress when this call wants work to progress
*/
grpc_polling_entity *pollent;
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch op;
uint8_t security_context_set;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
@@ -108,7 +108,7 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
const char *error_details) {
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data;
- grpc_transport_stream_op *op = &calld->op;
+ grpc_transport_stream_op_batch *op = &calld->op;
grpc_metadata_batch *mdb;
size_t i;
reset_auth_metadata_context(&calld->auth_md_context);
@@ -122,8 +122,8 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED);
} else {
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
- GPR_ASSERT(op->send_initial_metadata != NULL);
- mdb = op->send_initial_metadata;
+ GPR_ASSERT(op->send_initial_metadata);
+ mdb = op->payload->send_initial_metadata.send_initial_metadata;
for (i = 0; i < num_md; i++) {
add_error(&error,
grpc_metadata_batch_add_tail(
@@ -136,7 +136,7 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
if (error == GRPC_ERROR_NONE) {
grpc_call_next_op(exec_ctx, elem, op);
} else {
- grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
}
}
@@ -172,11 +172,13 @@ void build_auth_metadata_context(grpc_security_connector *sc,
static void send_security_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_client_security_context *ctx =
- (grpc_client_security_context *)op->context[GRPC_CONTEXT_SECURITY].value;
+ (grpc_client_security_context *)op->payload
+ ->context[GRPC_CONTEXT_SECURITY]
+ .value;
grpc_call_credentials *channel_call_creds =
chand->security_connector->request_metadata_creds;
int call_creds_has_md = (ctx != NULL) && (ctx->creds != NULL);
@@ -191,7 +193,7 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
calld->creds = grpc_composite_call_credentials_create(channel_call_creds,
ctx->creds, NULL);
if (calld->creds == NULL) {
- grpc_transport_stream_op_finish_with_failure(
+ grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -242,7 +244,7 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *user_data,
that is being sent or received. */
static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
GPR_TIMER_BEGIN("auth_start_transport_op", 0);
/* grab pointers to our data from the call element */
@@ -251,23 +253,25 @@ static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_linked_mdelem *l;
grpc_client_security_context *sec_ctx = NULL;
- if (calld->security_context_set == 0 && op->cancel_error == GRPC_ERROR_NONE) {
+ if (calld->security_context_set == 0 && !op->cancel_stream) {
calld->security_context_set = 1;
- GPR_ASSERT(op->context);
- if (op->context[GRPC_CONTEXT_SECURITY].value == NULL) {
- op->context[GRPC_CONTEXT_SECURITY].value =
+ GPR_ASSERT(op->payload->context != NULL);
+ if (op->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
+ op->payload->context[GRPC_CONTEXT_SECURITY].value =
grpc_client_security_context_create();
- op->context[GRPC_CONTEXT_SECURITY].destroy =
+ op->payload->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_client_security_context_destroy;
}
- sec_ctx = op->context[GRPC_CONTEXT_SECURITY].value;
+ sec_ctx = op->payload->context[GRPC_CONTEXT_SECURITY].value;
GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
sec_ctx->auth_context =
GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
}
- if (op->send_initial_metadata != NULL) {
- for (l = op->send_initial_metadata->list.head; l != NULL; l = l->next) {
+ if (op->send_initial_metadata) {
+ for (l = op->payload->send_initial_metadata.send_initial_metadata->list
+ .head;
+ l != NULL; l = l->next) {
grpc_mdelem md = l->md;
/* Pointer comparison is OK for md_elems created from the same context.
*/
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 2b51706161..dbe3263f92 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -448,14 +448,14 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create(
typedef struct {
grpc_channel_security_connector base;
- tsi_ssl_handshaker_factory *handshaker_factory;
+ tsi_ssl_client_handshaker_factory *handshaker_factory;
char *target_name;
char *overridden_target_name;
} grpc_ssl_channel_security_connector;
typedef struct {
grpc_server_security_connector base;
- tsi_ssl_handshaker_factory *handshaker_factory;
+ tsi_ssl_server_handshaker_factory *handshaker_factory;
} grpc_ssl_server_security_connector;
static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
@@ -464,7 +464,7 @@ static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
(grpc_ssl_channel_security_connector *)sc;
grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
if (c->handshaker_factory != NULL) {
- tsi_ssl_handshaker_factory_destroy(c->handshaker_factory);
+ tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
}
if (c->target_name != NULL) gpr_free(c->target_name);
if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name);
@@ -476,26 +476,11 @@ static void ssl_server_destroy(grpc_exec_ctx *exec_ctx,
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
if (c->handshaker_factory != NULL) {
- tsi_ssl_handshaker_factory_destroy(c->handshaker_factory);
+ tsi_ssl_server_handshaker_factory_destroy(c->handshaker_factory);
}
gpr_free(sc);
}
-static grpc_security_status ssl_create_handshaker(
- tsi_ssl_handshaker_factory *handshaker_factory, bool is_client,
- const char *peer_name, tsi_handshaker **handshaker) {
- tsi_result result = TSI_OK;
- if (handshaker_factory == NULL) return GRPC_SECURITY_ERROR;
- result = tsi_ssl_handshaker_factory_create_handshaker(
- handshaker_factory, is_client ? peer_name : NULL, handshaker);
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
- tsi_result_to_string(result));
- return GRPC_SECURITY_ERROR;
- }
- return GRPC_SECURITY_OK;
-}
-
static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
grpc_channel_security_connector *sc,
grpc_handshake_manager *handshake_mgr) {
@@ -503,11 +488,17 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
(grpc_ssl_channel_security_connector *)sc;
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
- ssl_create_handshaker(c->handshaker_factory, true /* is_client */,
- c->overridden_target_name != NULL
- ? c->overridden_target_name
- : c->target_name,
- &tsi_hs);
+ tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
+ c->handshaker_factory,
+ c->overridden_target_name != NULL ? c->overridden_target_name
+ : c->target_name,
+ &tsi_hs);
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
+ tsi_result_to_string(result));
+ return;
+ }
+
// Create handshakers.
grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create(
exec_ctx, tsi_hs, &sc->base));
@@ -520,8 +511,14 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
(grpc_ssl_server_security_connector *)sc;
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
- ssl_create_handshaker(c->handshaker_factory, false /* is_client */,
- NULL /* peer_name */, &tsi_hs);
+ tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
+ c->handshaker_factory, &tsi_hs);
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
+ tsi_result_to_string(result));
+ return;
+ }
+
// Create handshakers.
grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create(
exec_ctx, tsi_hs, &sc->base));
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index 2f39327670..509b4b556d 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -287,12 +287,11 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
if (num_left_overs > 0) {
/* Put the leftovers in our buffer (ownership transferred). */
if (has_left_overs_in_current_slice) {
- grpc_slice_buffer_add(
- &h->left_overs,
- grpc_slice_split_tail(&h->args->read_buffer->slices[i],
- consumed_slice_size));
+ grpc_slice tail = grpc_slice_split_tail(&h->args->read_buffer->slices[i],
+ consumed_slice_size);
+ grpc_slice_buffer_add(&h->left_overs, tail);
/* split_tail above increments refcount. */
- grpc_slice_unref_internal(exec_ctx, h->args->read_buffer->slices[i]);
+ grpc_slice_unref_internal(exec_ctx, tail);
}
grpc_slice_buffer_addn(
&h->left_overs, &h->args->read_buffer->slices[i + 1],
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index 3cf0632220..1aca76f9e8 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -49,7 +49,7 @@ typedef struct call_data {
up-call on transport_op, and remember to call our on_done_recv member after
handling it. */
grpc_closure auth_on_recv;
- grpc_transport_stream_op *transport_op;
+ grpc_transport_stream_op_batch *transport_op;
grpc_metadata_array md;
const grpc_metadata *consumed_md;
size_t num_consumed_md;
@@ -138,12 +138,11 @@ static void on_md_processing_done(
error_details = error_details != NULL
? error_details
: "Authentication metadata processing failed.";
- calld->transport_op->send_initial_metadata = NULL;
- if (calld->transport_op->send_message != NULL) {
- grpc_byte_stream_destroy(&exec_ctx, calld->transport_op->send_message);
- calld->transport_op->send_message = NULL;
+ if (calld->transport_op->send_message) {
+ grpc_byte_stream_destroy(
+ &exec_ctx, calld->transport_op->payload->send_message.send_message);
+ calld->transport_op->payload->send_message.send_message = NULL;
}
- calld->transport_op->send_trailing_metadata = NULL;
grpc_closure_sched(
&exec_ctx, calld->on_done_recv,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
@@ -171,14 +170,17 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
}
static void set_recv_ops_md_callbacks(grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
call_data *calld = elem->call_data;
- if (op->recv_initial_metadata != NULL) {
+ if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
- calld->recv_initial_metadata = op->recv_initial_metadata;
- calld->on_done_recv = op->recv_initial_metadata_ready;
- op->recv_initial_metadata_ready = &calld->auth_on_recv;
+ calld->recv_initial_metadata =
+ op->payload->recv_initial_metadata.recv_initial_metadata;
+ calld->on_done_recv =
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready;
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready =
+ &calld->auth_on_recv;
calld->transport_op = op;
}
}
@@ -190,7 +192,7 @@ static void set_recv_ops_md_callbacks(grpc_call_element *elem,
that is being sent or received. */
static void auth_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
set_recv_ops_md_callbacks(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
diff --git a/src/core/lib/slice/slice_buffer.c b/src/core/lib/slice/slice_buffer.c
index 9176dc8a42..c96b9c3b28 100644
--- a/src/core/lib/slice/slice_buffer.c
+++ b/src/core/lib/slice/slice_buffer.c
@@ -46,27 +46,29 @@
#define GROW(x) (3 * (x) / 2)
static void maybe_embiggen(grpc_slice_buffer *sb) {
- if (sb->base_slices != sb->slices) {
- memmove(sb->base_slices, sb->slices, sb->count * sizeof(grpc_slice));
- sb->slices = sb->base_slices;
- }
-
/* How far away from sb->base_slices is sb->slices pointer */
size_t slice_offset = (size_t)(sb->slices - sb->base_slices);
size_t slice_count = sb->count + slice_offset;
if (slice_count == sb->capacity) {
- sb->capacity = GROW(sb->capacity);
- GPR_ASSERT(sb->capacity > slice_count);
- if (sb->base_slices == sb->inlined) {
- sb->base_slices = gpr_malloc(sb->capacity * sizeof(grpc_slice));
- memcpy(sb->base_slices, sb->inlined, slice_count * sizeof(grpc_slice));
+ if (sb->base_slices != sb->slices) {
+ /* Make room by moving elements if there's still space unused */
+ memmove(sb->base_slices, sb->slices, sb->count * sizeof(grpc_slice));
+ sb->slices = sb->base_slices;
} else {
- sb->base_slices =
- gpr_realloc(sb->base_slices, sb->capacity * sizeof(grpc_slice));
- }
+ /* Allocate more memory if no more space is available */
+ sb->capacity = GROW(sb->capacity);
+ GPR_ASSERT(sb->capacity > slice_count);
+ if (sb->base_slices == sb->inlined) {
+ sb->base_slices = gpr_malloc(sb->capacity * sizeof(grpc_slice));
+ memcpy(sb->base_slices, sb->inlined, slice_count * sizeof(grpc_slice));
+ } else {
+ sb->base_slices =
+ gpr_realloc(sb->base_slices, sb->capacity * sizeof(grpc_slice));
+ }
- sb->slices = sb->base_slices + slice_offset;
+ sb->slices = sb->base_slices + slice_offset;
+ }
}
}
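
The net effect of the restructuring above: the compacting memmove used to run on every call, whereas now it runs only when the slice array is actually full, and the GROW() realloc happens only when there is no reclaimable gap at the front. A standalone toy illustrating the same policy (not gRPC code; int stands in for grpc_slice, error handling is elided, and cap is assumed >= 2 so GROW() makes progress):

    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      int *base;    /* start of the allocation */
      int *live;    /* first live element; may sit past 'base' after pops */
      size_t count; /* number of live elements */
      size_t cap;   /* total allocation size */
    } toy_buf;

    static void toy_push(toy_buf *b, int v) {
      size_t offset = (size_t)(b->live - b->base);
      if (b->count + offset == b->cap) { /* no tail space left */
        if (offset > 0) {
          /* reclaim the popped prefix before paying for a realloc */
          memmove(b->base, b->live, b->count * sizeof(int));
          b->live = b->base;
        } else {
          b->cap = 3 * b->cap / 2; /* same policy as GROW() */
          b->base = realloc(b->base, b->cap * sizeof(int));
          b->live = b->base;
        }
      }
      b->live[b->count++] = v;
    }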
diff --git a/src/core/lib/support/cpu_linux.c b/src/core/lib/support/cpu_linux.c
index d6f7e7d3da..1e50f59823 100644
--- a/src/core/lib/support/cpu_linux.c
+++ b/src/core/lib/support/cpu_linux.c
@@ -67,12 +67,17 @@ unsigned gpr_cpu_num_cores(void) {
}
unsigned gpr_cpu_current_cpu(void) {
+#ifdef __GLIBC__
int cpu = sched_getcpu();
if (cpu < 0) {
gpr_log(GPR_ERROR, "Error determining current CPU: %s\n", strerror(errno));
return 0;
}
return (unsigned)cpu;
+#else
+ // sched_getcpu() is undefined on musl
+ return 0;
+#endif
}
#endif /* GPR_CPU_LINUX */
diff --git a/src/core/lib/support/wrap_memcpy.c b/src/core/lib/support/wrap_memcpy.c
index 15c289f7b8..050cc6db5e 100644
--- a/src/core/lib/support/wrap_memcpy.c
+++ b/src/core/lib/support/wrap_memcpy.c
@@ -40,7 +40,7 @@
*/
#ifdef __linux__
-#ifdef __x86_64__
+#if defined(__x86_64__) && defined(__GNU_LIBRARY__)
__asm__(".symver memcpy,memcpy@GLIBC_2.2.5");
void *__wrap_memcpy(void *destination, const void *source, size_t num) {
return memcpy(destination, source, num);
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index a9317a4694..97d50a91be 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -117,37 +117,56 @@ static received_status unpack_received_status(gpr_atm atm) {
typedef struct batch_control {
grpc_call *call;
- grpc_cq_completion cq_completion;
+ /* Share memory for cq_completion and notify_tag as they are never needed
+ simultaneously. Each byte used in this data structure counts as six bytes
+ per call, so any savings we can make are worthwhile.
+
+ We use notify_tag to determine whether or not to send a notification to
+ the completion queue. Once we've made that determination, we can reuse
+ the memory for cq_completion. */
+ union {
+ grpc_cq_completion cq_completion;
+ struct {
+ /* Any given op indicates completion by either (a) calling a closure or
+ (b) sending a notification on the call's completion queue. If
+ \a is_closure is true, \a tag indicates a closure to be invoked;
+ otherwise, \a tag indicates the tag to be used in the notification to
+ be sent to the completion queue. */
+ void *tag;
+ bool is_closure;
+ } notify_tag;
+ } completion_data;
grpc_closure finish_batch;
- void *notify_tag;
gpr_refcount steps_to_complete;
grpc_error *errors[MAX_ERRORS_PER_BATCH];
gpr_atm num_errors;
- uint8_t send_initial_metadata;
- uint8_t send_message;
- uint8_t send_final_op;
- uint8_t recv_initial_metadata;
- uint8_t recv_message;
- uint8_t recv_final_op;
- uint8_t is_notify_tag_closure;
-
- /* TODO(ctiller): now that this is inlined, figure out how much of the above
- state can be eliminated */
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch op;
} batch_control;
+typedef struct {
+ gpr_mu child_list_mu;
+ grpc_call *first_child;
+} parent_call;
+
+typedef struct {
+ grpc_call *parent;
+ /** siblings: children of the same parent form a list, and this list is
+ protected under parent->mu */
+ grpc_call *sibling_next;
+ grpc_call *sibling_prev;
+} child_call;
+
struct grpc_call {
gpr_arena *arena;
grpc_completion_queue *cq;
grpc_polling_entity pollent;
grpc_channel *channel;
- grpc_call *parent;
- grpc_call *first_child;
gpr_timespec start_time;
- /* protects first_child, and child next/prev links */
- gpr_mu child_list_mu;
+ /* parent_call* */ gpr_atm parent_call_atm;
+ child_call *child_call;
/* client or server call */
bool is_client;
@@ -168,7 +187,8 @@ struct grpc_call {
/* have we received initial metadata */
bool has_initial_md_been_received;
- batch_control active_batches[MAX_CONCURRENT_BATCHES];
+ batch_control *active_batches[MAX_CONCURRENT_BATCHES];
+ grpc_transport_stream_op_batch_payload stream_op_payload;
/* first idx: is_receiving, second idx: is_trailing */
grpc_metadata_batch metadata_batch[2][2];
@@ -198,12 +218,6 @@ struct grpc_call {
int send_extra_metadata_count;
gpr_timespec send_deadline;
- /** siblings: children of the same parent form a list, and this list is
- protected under
- parent->mu */
- grpc_call *sibling_next;
- grpc_call *sibling_prev;
-
grpc_slice_buffer_stream sending_stream;
grpc_byte_stream *receiving_stream;
@@ -239,7 +253,7 @@ int grpc_call_error_trace = 0;
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op *op);
+ grpc_transport_stream_op_batch *op);
static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_status_code status,
const char *description);
@@ -268,6 +282,23 @@ static void add_init_error(grpc_error **composite, grpc_error *new) {
*composite = grpc_error_add_child(*composite, new);
}
+static parent_call *get_or_create_parent_call(grpc_call *call) {
+ parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
+ if (p == NULL) {
+ p = gpr_arena_alloc(call->arena, sizeof(*p));
+ gpr_mu_init(&p->child_list_mu);
+ if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) {
+ gpr_mu_destroy(&p->child_list_mu);
+ p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
+ }
+ }
+ return p;
+}
+
+static parent_call *get_parent_call(grpc_call *call) {
+ return (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
+}
+
grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
const grpc_call_create_args *args,
grpc_call **out_call) {
@@ -283,14 +314,13 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
sizeof(grpc_call) + channel_stack->call_stack_size);
call->arena = arena;
*out_call = call;
- gpr_mu_init(&call->child_list_mu);
call->channel = args->channel;
call->cq = args->cq;
- call->parent = args->parent_call;
call->start_time = gpr_now(GPR_CLOCK_MONOTONIC);
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
call->is_client = args->server_transport_data == NULL;
+ call->stream_op_payload.context = call->context;
grpc_slice path = grpc_empty_slice();
if (call->is_client) {
GPR_ASSERT(args->add_initial_metadata_count <
@@ -317,11 +347,17 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC);
if (args->parent_call != NULL) {
+ child_call *cc = call->child_call =
+ gpr_arena_alloc(arena, sizeof(child_call));
+ call->child_call->parent = args->parent_call;
+
GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
GPR_ASSERT(call->is_client);
GPR_ASSERT(!args->parent_call->is_client);
- gpr_mu_lock(&args->parent_call->child_list_mu);
+ parent_call *pc = get_or_create_parent_call(args->parent_call);
+
+ gpr_mu_lock(&pc->child_list_mu);
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
@@ -355,17 +391,17 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
}
}
- if (args->parent_call->first_child == NULL) {
- args->parent_call->first_child = call;
- call->sibling_next = call->sibling_prev = call;
+ if (pc->first_child == NULL) {
+ pc->first_child = call;
+ cc->sibling_next = cc->sibling_prev = call;
} else {
- call->sibling_next = args->parent_call->first_child;
- call->sibling_prev = args->parent_call->first_child->sibling_prev;
- call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
- call;
+ cc->sibling_next = pc->first_child;
+ cc->sibling_prev = pc->first_child->child_call->sibling_prev;
+ cc->sibling_next->child_call->sibling_prev =
+ cc->sibling_prev->child_call->sibling_next = call;
}
- gpr_mu_unlock(&args->parent_call->child_list_mu);
+ gpr_mu_unlock(&pc->child_list_mu);
}
call->send_deadline = send_deadline;
@@ -460,7 +496,10 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
if (c->receiving_stream != NULL) {
grpc_byte_stream_destroy(exec_ctx, c->receiving_stream);
}
- gpr_mu_destroy(&c->child_list_mu);
+ parent_call *pc = get_parent_call(c);
+ if (pc != NULL) {
+ gpr_mu_destroy(&pc->child_list_mu);
+ }
for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
GRPC_MDELEM_UNREF(exec_ctx, c->send_extra_metadata[ii].md);
}
@@ -490,31 +529,31 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
}
void grpc_call_destroy(grpc_call *c) {
- int cancel;
- grpc_call *parent = c->parent;
+ child_call *cc = c->child_call;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_destroy", 0);
GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
- if (parent) {
- gpr_mu_lock(&parent->child_list_mu);
- if (c == parent->first_child) {
- parent->first_child = c->sibling_next;
- if (c == parent->first_child) {
- parent->first_child = NULL;
+ if (cc) {
+ parent_call *pc = get_parent_call(cc->parent);
+ gpr_mu_lock(&pc->child_list_mu);
+ if (c == pc->first_child) {
+ pc->first_child = cc->sibling_next;
+ if (c == pc->first_child) {
+ pc->first_child = NULL;
}
}
- c->sibling_prev->sibling_next = c->sibling_next;
- c->sibling_next->sibling_prev = c->sibling_prev;
- gpr_mu_unlock(&parent->child_list_mu);
- GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child");
+ cc->sibling_prev->child_call->sibling_next = cc->sibling_next;
+ cc->sibling_next->child_call->sibling_prev = cc->sibling_prev;
+ gpr_mu_unlock(&pc->child_list_mu);
+ GRPC_CALL_INTERNAL_UNREF(&exec_ctx, cc->parent, "child");
}
GPR_ASSERT(!c->destroy_called);
c->destroy_called = 1;
- cancel = gpr_atm_acq_load(&c->any_ops_sent_atm) &&
- !gpr_atm_acq_load(&c->received_final_op_atm);
+ bool cancel = gpr_atm_acq_load(&c->any_ops_sent_atm) != 0 &&
+ gpr_atm_acq_load(&c->received_final_op_atm) == 0;
if (cancel) {
cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE,
GRPC_ERROR_CANCELLED);
@@ -535,13 +574,12 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
}
static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
grpc_call_element *elem;
GPR_TIMER_BEGIN("execute_op", 0);
elem = CALL_ELEM_FROM_CALL(call, 0);
- op->context = call->context;
- elem->filter->start_transport_stream_op(exec_ctx, elem, op);
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
GPR_TIMER_END("execute_op", 0);
}
@@ -594,9 +632,10 @@ static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_error *error) {
GRPC_CALL_INTERNAL_REF(c, "termination");
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
- grpc_transport_stream_op *op = grpc_make_transport_stream_op(
+ grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(
grpc_closure_create(done_termination, c, grpc_schedule_on_exec_ctx));
- op->cancel_error = error;
+ op->cancel_stream = true;
+ op->payload->cancel_stream.cancel_error = error;
execute_op(exec_ctx, c, op);
}
@@ -1025,16 +1064,17 @@ static batch_control *allocate_batch_control(grpc_call *call,
const grpc_op *ops,
size_t num_ops) {
int slot = batch_slot_for_op(ops[0].op);
- for (size_t i = 1; i < num_ops; i++) {
- int op_slot = batch_slot_for_op(ops[i].op);
- slot = GPR_MIN(slot, op_slot);
+ batch_control **pslot = &call->active_batches[slot];
+ if (*pslot == NULL) {
+ *pslot = gpr_arena_alloc(call->arena, sizeof(batch_control));
}
- batch_control *bctl = &call->active_batches[slot];
+ batch_control *bctl = *pslot;
if (bctl->call != NULL) {
return NULL;
}
memset(bctl, 0, sizeof(*bctl));
bctl->call = call;
+ bctl->op.payload = &call->stream_op_payload;
return bctl;
}
@@ -1069,46 +1109,49 @@ static grpc_error *consolidate_batch_errors(batch_control *bctl) {
static void post_batch_completion(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
- grpc_call *child_call;
grpc_call *next_child_call;
grpc_call *call = bctl->call;
grpc_error *error = consolidate_batch_errors(bctl);
- if (bctl->send_initial_metadata) {
+ if (bctl->op.send_initial_metadata) {
grpc_metadata_batch_destroy(
exec_ctx,
&call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]);
}
- if (bctl->send_message) {
+ if (bctl->op.send_message) {
call->sending_message = false;
}
- if (bctl->send_final_op) {
+ if (bctl->op.send_trailing_metadata) {
grpc_metadata_batch_destroy(
exec_ctx,
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
}
- if (bctl->recv_final_op) {
+ if (bctl->op.recv_trailing_metadata) {
grpc_metadata_batch *md =
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
recv_trailing_filter(exec_ctx, call, md);
/* propagate cancellation to any interested children */
gpr_atm_rel_store(&call->received_final_op_atm, 1);
- gpr_mu_lock(&call->child_list_mu);
- child_call = call->first_child;
- if (child_call != NULL) {
- do {
- next_child_call = child_call->sibling_next;
- if (child_call->cancellation_is_inherited) {
- GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
- cancel_with_error(exec_ctx, child_call, STATUS_FROM_API_OVERRIDE,
- GRPC_ERROR_CANCELLED);
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
- }
- child_call = next_child_call;
- } while (child_call != call->first_child);
+ parent_call *pc = get_parent_call(call);
+ if (pc != NULL) {
+ grpc_call *child;
+ gpr_mu_lock(&pc->child_list_mu);
+ child = pc->first_child;
+ if (child != NULL) {
+ do {
+ next_child_call = child->child_call->sibling_next;
+ if (child->cancellation_is_inherited) {
+ GRPC_CALL_INTERNAL_REF(child, "propagate_cancel");
+ cancel_with_error(exec_ctx, child, STATUS_FROM_API_OVERRIDE,
+ GRPC_ERROR_CANCELLED);
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, child, "propagate_cancel");
+ }
+ child = next_child_call;
+ } while (child != pc->first_child);
+ }
+ gpr_mu_unlock(&pc->child_list_mu);
}
- gpr_mu_unlock(&call->child_list_mu);
if (call->is_client) {
get_final_status(call, set_status_value_directly,
@@ -1123,15 +1166,16 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
error = GRPC_ERROR_NONE;
}
- if (bctl->is_notify_tag_closure) {
+ if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */
bctl->call = NULL;
- grpc_closure_run(exec_ctx, bctl->notify_tag, error);
+ grpc_closure_run(exec_ctx, bctl->completion_data.notify_tag.tag, error);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
- grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, error,
- finish_batch_completion, bctl, &bctl->cq_completion);
+ grpc_cq_end_op(
+ exec_ctx, bctl->call->cq, bctl->completion_data.notify_tag.tag, error,
+ finish_batch_completion, bctl, &bctl->completion_data.cq_completion);
}
}
@@ -1374,11 +1418,13 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (bctl == NULL) {
return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
}
- bctl->notify_tag = notify_tag;
- bctl->is_notify_tag_closure = (uint8_t)(is_notify_tag_closure != 0);
+ bctl->completion_data.notify_tag.tag = notify_tag;
+ bctl->completion_data.notify_tag.is_closure =
+ (uint8_t)(is_notify_tag_closure != 0);
- grpc_transport_stream_op *stream_op = &bctl->op;
- memset(stream_op, 0, sizeof(*stream_op));
+ grpc_transport_stream_op_batch *stream_op = &bctl->op;
+ grpc_transport_stream_op_batch_payload *stream_op_payload =
+ &call->stream_op_payload;
stream_op->covered_by_poller = true;
/* rewrite batch ops into a transport op */
@@ -1432,8 +1478,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
- bctl->send_initial_metadata = 1;
- call->sent_initial_metadata = 1;
+ stream_op->send_initial_metadata = true;
+ call->sent_initial_metadata = true;
if (!prepare_application_metadata(
exec_ctx, call, (int)op->data.send_initial_metadata.count,
op->data.send_initial_metadata.metadata, 0, call->is_client,
@@ -1443,9 +1489,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
}
/* TODO(ctiller): just make these the same variable? */
call->metadata_batch[0][0].deadline = call->send_deadline;
- stream_op->send_initial_metadata =
+ stream_op_payload->send_initial_metadata.send_initial_metadata =
&call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
- stream_op->send_initial_metadata_flags = op->flags;
+ stream_op_payload->send_initial_metadata.send_initial_metadata_flags =
+ op->flags;
break;
case GRPC_OP_SEND_MESSAGE:
if (!are_write_flags_valid(op->flags)) {
@@ -1460,8 +1507,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
- bctl->send_message = 1;
- call->sending_message = 1;
+ stream_op->send_message = true;
+ call->sending_message = true;
grpc_slice_buffer_stream_init(
&call->sending_stream,
&op->data.send_message.send_message->data.raw.slice_buffer,
@@ -1473,7 +1520,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
GRPC_COMPRESS_NONE) {
call->sending_stream.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
- stream_op->send_message = &call->sending_stream.base;
+ stream_op_payload->send_message.send_message =
+ &call->sending_stream.base;
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
/* Flag validation: currently allow no flags */
@@ -1489,9 +1537,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
- bctl->send_final_op = 1;
- call->sent_final_op = 1;
- stream_op->send_trailing_metadata =
+ stream_op->send_trailing_metadata = true;
+ call->sent_final_op = true;
+ stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_SEND_STATUS_FROM_SERVER:
@@ -1513,8 +1561,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
- bctl->send_final_op = 1;
- call->sent_final_op = 1;
+ stream_op->send_trailing_metadata = true;
+ call->sent_final_op = true;
GPR_ASSERT(call->send_extra_metadata_count == 0);
call->send_extra_metadata_count = 1;
call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
@@ -1553,7 +1601,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
- stream_op->send_trailing_metadata =
+ stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
case GRPC_OP_RECV_INITIAL_METADATA:
@@ -1570,16 +1618,16 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
from server.c. In that case, it's coming from accept_stream, and in
that case we're not necessarily covered by a poller. */
stream_op->covered_by_poller = call->is_client;
- call->received_initial_metadata = 1;
+ call->received_initial_metadata = true;
call->buffered_metadata[0] =
op->data.recv_initial_metadata.recv_initial_metadata;
grpc_closure_init(&call->receiving_initial_metadata_ready,
receiving_initial_metadata_ready, bctl,
grpc_schedule_on_exec_ctx);
- bctl->recv_initial_metadata = 1;
- stream_op->recv_initial_metadata =
+ stream_op->recv_initial_metadata = true;
+ stream_op_payload->recv_initial_metadata.recv_initial_metadata =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
- stream_op->recv_initial_metadata_ready =
+ stream_op_payload->recv_initial_metadata.recv_initial_metadata_ready =
&call->receiving_initial_metadata_ready;
num_completion_callbacks_needed++;
break;
@@ -1593,13 +1641,14 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
- call->receiving_message = 1;
- bctl->recv_message = 1;
+ call->receiving_message = true;
+ stream_op->recv_message = true;
call->receiving_buffer = op->data.recv_message.recv_message;
- stream_op->recv_message = &call->receiving_stream;
+ stream_op_payload->recv_message.recv_message = &call->receiving_stream;
grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready,
bctl, grpc_schedule_on_exec_ctx);
- stream_op->recv_message_ready = &call->receiving_stream_ready;
+ stream_op_payload->recv_message.recv_message_ready =
+ &call->receiving_stream_ready;
num_completion_callbacks_needed++;
break;
case GRPC_OP_RECV_STATUS_ON_CLIENT:
@@ -1616,16 +1665,17 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
- call->requested_final_op = 1;
+ call->requested_final_op = true;
call->buffered_metadata[1] =
op->data.recv_status_on_client.trailing_metadata;
call->final_op.client.status = op->data.recv_status_on_client.status;
call->final_op.client.status_details =
op->data.recv_status_on_client.status_details;
- bctl->recv_final_op = 1;
- stream_op->recv_trailing_metadata =
+ stream_op->recv_trailing_metadata = true;
+ stream_op->collect_stats = true;
+ stream_op_payload->recv_trailing_metadata.recv_trailing_metadata =
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
- stream_op->collect_stats =
+ stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
@@ -1642,13 +1692,14 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
- call->requested_final_op = 1;
+ call->requested_final_op = true;
call->final_op.server.cancelled =
op->data.recv_close_on_server.cancelled;
- bctl->recv_final_op = 1;
- stream_op->recv_trailing_metadata =
+ stream_op->recv_trailing_metadata = true;
+ stream_op->collect_stats = true;
+ stream_op_payload->recv_trailing_metadata.recv_trailing_metadata =
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
- stream_op->collect_stats =
+ stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
}
@@ -1660,7 +1711,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
}
gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
- stream_op->context = call->context;
grpc_closure_init(&bctl->finish_batch, finish_batch, bctl,
grpc_schedule_on_exec_ctx);
stream_op->on_complete = &bctl->finish_batch;
@@ -1674,26 +1724,26 @@ done:
done_with_error:
/* reverse any mutations that occurred */
- if (bctl->send_initial_metadata) {
- call->sent_initial_metadata = 0;
+ if (stream_op->send_initial_metadata) {
+ call->sent_initial_metadata = false;
grpc_metadata_batch_clear(exec_ctx, &call->metadata_batch[0][0]);
}
- if (bctl->send_message) {
- call->sending_message = 0;
+ if (stream_op->send_message) {
+ call->sending_message = false;
grpc_byte_stream_destroy(exec_ctx, &call->sending_stream.base);
}
- if (bctl->send_final_op) {
- call->sent_final_op = 0;
+ if (stream_op->send_trailing_metadata) {
+ call->sent_final_op = false;
grpc_metadata_batch_clear(exec_ctx, &call->metadata_batch[0][1]);
}
- if (bctl->recv_initial_metadata) {
- call->received_initial_metadata = 0;
+ if (stream_op->recv_initial_metadata) {
+ call->received_initial_metadata = false;
}
- if (bctl->recv_message) {
- call->receiving_message = 0;
+ if (stream_op->recv_message) {
+ call->receiving_message = false;
}
- if (bctl->recv_final_op) {
- call->requested_final_op = 0;
+ if (stream_op->recv_trailing_metadata) {
+ call->requested_final_op = false;
}
goto done;
}
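
The batch conversion above follows one idiom throughout call_start_batch: each
GRPC_OP_* case arms a control bit on the batch and parks its pointers on the
shared payload, which is why the done_with_error rollback can consult the batch
itself instead of the old duplicated bctl fields. A minimal sketch of that
idiom, reusing names from the diff (the surrounding declarations are assumed):

    /* arm an op: one bool on the batch, the data on the shared payload */
    stream_op->recv_initial_metadata = true;
    stream_op_payload->recv_initial_metadata.recv_initial_metadata =
        &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
    stream_op_payload->recv_initial_metadata.recv_initial_metadata_ready =
        &call->receiving_initial_metadata_ready;

    /* undo on error: the same bool records that the op was armed */
    if (stream_op->recv_initial_metadata) {
      call->received_initial_metadata = false;
    }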
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index b4bfb92042..b3ba826bbc 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -150,17 +150,20 @@ grpc_channel *grpc_channel_create_with_builder(
} else if (0 == strcmp(args->args[i].key,
GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) {
channel->compression_options.default_level.is_set = true;
- GPR_ASSERT(args->args[i].value.integer >= 0 &&
- args->args[i].value.integer < GRPC_COMPRESS_LEVEL_COUNT);
channel->compression_options.default_level.level =
- (grpc_compression_level)args->args[i].value.integer;
+ (grpc_compression_level)grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){GRPC_COMPRESS_LEVEL_NONE,
+ GRPC_COMPRESS_LEVEL_NONE,
+ GRPC_COMPRESS_LEVEL_COUNT - 1});
} else if (0 == strcmp(args->args[i].key,
GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
channel->compression_options.default_algorithm.is_set = true;
- GPR_ASSERT(args->args[i].value.integer >= 0 &&
- args->args[i].value.integer < GRPC_COMPRESS_ALGORITHMS_COUNT);
channel->compression_options.default_algorithm.algorithm =
- (grpc_compression_algorithm)args->args[i].value.integer;
+ (grpc_compression_algorithm)grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){GRPC_COMPRESS_NONE, GRPC_COMPRESS_NONE,
+ GRPC_COMPRESS_ALGORITHMS_COUNT - 1});
} else if (0 ==
strcmp(args->args[i].key,
GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
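
The two compression defaults above switch from hard GPR_ASSERTs on untrusted
channel-arg values to grpc_channel_arg_get_integer, which clamps the value to a
{default, min, max} triple instead of aborting the process. A sketch of the
same pattern for an arbitrary integer arg; the helper and key are illustrative,
not part of this patch:

    #include <limits.h>
    #include <string.h>

    /* read an integer channel arg, clamping rather than asserting */
    static int read_int_arg(grpc_channel_args *args, const char *key) {
      for (size_t i = 0; i < args->num_args; i++) {
        if (0 == strcmp(args->args[i].key, key)) {
          return grpc_channel_arg_get_integer(
              &args->args[i],
              (grpc_integer_options){0 /* default */, 0 /* min */,
                                     INT_MAX /* max */});
        }
      }
      return 0; /* arg absent: fall back to the default */
    }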
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index b4594817e4..3273addf1d 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -345,7 +345,6 @@ static void dump_pending_tags(grpc_completion_queue *cc) {}
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
- grpc_pollset_worker *worker = NULL;
gpr_timespec now;
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -426,8 +425,8 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_mu_lock(cc->mu);
continue;
} else {
- grpc_error *err = grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc),
- &worker, now, iteration_deadline);
+ grpc_error *err = grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), NULL,
+ now, iteration_deadline);
if (err != GRPC_ERROR_NONE) {
gpr_mu_unlock(cc->mu);
const char *msg = grpc_error_string(err);
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index b46ecac18d..91bd014a0e 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -47,7 +47,6 @@
#include "src/core/lib/channel/handshaker_registry.h"
#include "src/core/lib/channel/http_client_filter.h"
#include "src/core/lib/channel/http_server_filter.h"
-#include "src/core/lib/channel/max_age_filter.h"
#include "src/core/lib/channel/message_size_filter.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/http/parser.h"
@@ -115,9 +114,6 @@ static void register_builtin_channel_init() {
GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, prepend_filter,
(void *)&grpc_server_deadline_filter);
grpc_channel_init_register_stage(
- GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, prepend_filter,
- (void *)&grpc_max_age_filter);
- grpc_channel_init_register_stage(
GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
prepend_filter, (void *)&grpc_message_size_filter);
grpc_channel_init_register_stage(
diff --git a/src/core/lib/surface/init_secure.c b/src/core/lib/surface/init_secure.c
index 921ef87e36..746134676f 100644
--- a/src/core/lib/surface/init_secure.c
+++ b/src/core/lib/surface/init_secure.c
@@ -31,6 +31,8 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/surface/init.h"
#include <limits.h>
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index 0c408aa288..82428c42c0 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -80,16 +80,18 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
-static void lame_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+static void lame_start_transport_stream_op_batch(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- if (op->recv_initial_metadata != NULL) {
- fill_metadata(exec_ctx, elem, op->recv_initial_metadata);
- } else if (op->recv_trailing_metadata != NULL) {
- fill_metadata(exec_ctx, elem, op->recv_trailing_metadata);
+ if (op->recv_initial_metadata) {
+ fill_metadata(exec_ctx, elem,
+ op->payload->recv_initial_metadata.recv_initial_metadata);
+ } else if (op->recv_trailing_metadata) {
+ fill_metadata(exec_ctx, elem,
+ op->payload->recv_trailing_metadata.recv_trailing_metadata);
}
- grpc_transport_stream_op_finish_with_failure(
+ grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, op,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
}
@@ -148,7 +150,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_lame_filter = {
- lame_start_transport_stream_op,
+ lame_start_transport_stream_op_batch,
lame_start_transport_op,
sizeof(call_data),
init_call_elem,
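
Under the batch form, failing an op from a filter goes through
grpc_transport_stream_op_batch_finish_with_failure (defined in transport.c
below), which schedules only the recv closures the batch actually requested. A
sketch of the rejection pattern in a hypothetical filter, under the same
signatures the lame filter uses above:

    static void reject_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                          grpc_transport_stream_op_batch *op) {
      /* bools say which ops are present; the payload carries their data */
      if (op->recv_initial_metadata) {
        fill_metadata(exec_ctx, elem,
                      op->payload->recv_initial_metadata.recv_initial_metadata);
      }
      grpc_transport_stream_op_batch_finish_with_failure(
          exec_ctx, op,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("rejected by filter"));
    }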
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index a123c9ca43..191ee75252 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -154,8 +154,7 @@ struct call_data {
grpc_completion_queue *cq_new;
grpc_metadata_batch *recv_initial_metadata;
- bool recv_idempotent_request;
- bool recv_cacheable_request;
+ uint32_t recv_initial_metadata_flags;
grpc_metadata_array initial_metadata;
request_matcher *request_matcher;
@@ -498,13 +497,7 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
rc->data.batch.details->host = grpc_slice_ref_internal(calld->host);
rc->data.batch.details->method = grpc_slice_ref_internal(calld->path);
rc->data.batch.details->deadline = calld->deadline;
- rc->data.batch.details->flags =
- (calld->recv_idempotent_request
- ? GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST
- : 0) |
- (calld->recv_cacheable_request
- ? GRPC_INITIAL_METADATA_CACHEABLE_REQUEST
- : 0);
+ rc->data.batch.details->flags = calld->recv_initial_metadata_flags;
break;
case REGISTERED_CALL:
*rc->data.registered.deadline = calld->deadline;
@@ -632,7 +625,8 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
if (!grpc_slice_eq(rm->host, calld->host)) continue;
if (!grpc_slice_eq(rm->method, calld->path)) continue;
if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
- !calld->recv_idempotent_request) {
+ 0 == (calld->recv_initial_metadata_flags &
+ GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
@@ -649,7 +643,8 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
if (rm->has_host) continue;
if (!grpc_slice_eq(rm->method, calld->path)) continue;
if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
- !calld->recv_idempotent_request) {
+ 0 == (calld->recv_initial_metadata_flags &
+ GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
@@ -781,22 +776,25 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
}
static void server_mutate_op(grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
call_data *calld = elem->call_data;
- if (op->recv_initial_metadata != NULL) {
- GPR_ASSERT(op->recv_idempotent_request == NULL);
- calld->recv_initial_metadata = op->recv_initial_metadata;
- calld->on_done_recv_initial_metadata = op->recv_initial_metadata_ready;
- op->recv_initial_metadata_ready = &calld->server_on_recv_initial_metadata;
- op->recv_idempotent_request = &calld->recv_idempotent_request;
- op->recv_cacheable_request = &calld->recv_cacheable_request;
+ if (op->recv_initial_metadata) {
+ GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags == NULL);
+ calld->recv_initial_metadata =
+ op->payload->recv_initial_metadata.recv_initial_metadata;
+ calld->on_done_recv_initial_metadata =
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready;
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready =
+ &calld->server_on_recv_initial_metadata;
+ op->payload->recv_initial_metadata.recv_flags =
+ &calld->recv_initial_metadata_flags;
}
}
-static void server_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op *op) {
+static void server_start_transport_stream_op_batch(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
@@ -960,7 +958,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
const grpc_channel_filter grpc_server_top_filter = {
- server_start_transport_stream_op,
+ server_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
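
Collapsing the two bools into recv_initial_metadata_flags lets the matcher test
request properties as bit masks against the GRPC_INITIAL_METADATA_* constants.
A sketch of the test the two call sites above share; the helper name is
illustrative, not part of this patch:

    /* true iff every flag the registered method requires was received */
    static bool method_flags_satisfied(uint32_t required_flags,
                                       uint32_t received_flags) {
      return (required_flags & ~received_flags) == 0;
    }

    /* e.g. in start_new_rpc: */
    if (!method_flags_satisfied(
            rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST,
            calld->recv_initial_metadata_flags)) {
      continue;
    }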
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index d56cb31ee0..82c4e004b7 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -170,7 +170,7 @@ int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
grpc_stream *stream,
- grpc_transport_stream_op *op) {
+ grpc_transport_stream_op_batch *op) {
transport->vtable->perform_stream_op(exec_ctx, transport, stream, op);
}
@@ -213,14 +213,23 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
return transport->vtable->get_endpoint(exec_ctx, transport);
}
-void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
- grpc_transport_stream_op *op,
- grpc_error *error) {
- grpc_closure_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error));
- grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready,
- GRPC_ERROR_REF(error));
+void grpc_transport_stream_op_batch_finish_with_failure(
+ grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
+ grpc_error *error) {
+ if (op->recv_message) {
+ grpc_closure_sched(exec_ctx, op->payload->recv_message.recv_message_ready,
+ GRPC_ERROR_REF(error));
+ }
+ if (op->recv_initial_metadata) {
+ grpc_closure_sched(
+ exec_ctx,
+ op->payload->recv_initial_metadata.recv_initial_metadata_ready,
+ GRPC_ERROR_REF(error));
+ }
grpc_closure_sched(exec_ctx, op->on_complete, error);
- GRPC_ERROR_UNREF(op->cancel_error);
+ if (op->cancel_stream) {
+ GRPC_ERROR_UNREF(op->payload->cancel_stream.cancel_error);
+ }
}
typedef struct {
@@ -249,7 +258,8 @@ grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
typedef struct {
grpc_closure outer_on_complete;
grpc_closure *inner_on_complete;
- grpc_transport_stream_op op;
+ grpc_transport_stream_op_batch op;
+ grpc_transport_stream_op_batch_payload payload;
} made_transport_stream_op;
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
@@ -260,13 +270,13 @@ static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_closure_run(exec_ctx, c, GRPC_ERROR_REF(error));
}
-grpc_transport_stream_op *grpc_make_transport_stream_op(
+grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
grpc_closure *on_complete) {
- made_transport_stream_op *op = gpr_malloc(sizeof(*op));
+ made_transport_stream_op *op = gpr_zalloc(sizeof(*op));
+ op->op.payload = &op->payload;
grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
op, grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete;
- memset(&op->op, 0, sizeof(op->op));
op->op.on_complete = &op->outer_on_complete;
return &op->op;
}
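
grpc_make_transport_stream_op now zero-allocates the wrapper and wires
op.payload to the embedded payload before returning, so callers fill in only
the fields they need. A sketch of a caller issuing a cancel batch this way;
exec_ctx, t, and s are assumed from the surrounding context:

    grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(NULL);
    op->cancel_stream = true;
    op->payload->cancel_stream.cancel_error =
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("transport closing");
    grpc_transport_perform_stream_op(exec_ctx, t, s, op);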
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 950b18aeda..93369cc689 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -109,55 +109,98 @@ void grpc_transport_move_stats(grpc_transport_stream_stats *from,
grpc_transport_stream_stats *to);
typedef struct {
+ void *extra_arg;
grpc_closure closure;
- void *args[2];
-} grpc_transport_private_op_data;
+} grpc_handler_private_op_data;
+
+typedef struct grpc_transport_stream_op_batch_payload
+ grpc_transport_stream_op_batch_payload;
/* Transport stream op: a set of operations to perform on a transport
against a single stream */
-typedef struct grpc_transport_stream_op {
+typedef struct grpc_transport_stream_op_batch {
/** Should be enqueued when all requested operations (excluding recv_message
and recv_initial_metadata which have their own closures) in a given batch
have been completed. */
grpc_closure *on_complete;
+ /** Values for the stream op (fields set are determined by the flags below) */
+ grpc_transport_stream_op_batch_payload *payload;
+
/** Is the completion of this op covered by a poller (if false: the op should
complete independently of some pollset being polled) */
- bool covered_by_poller;
+ bool covered_by_poller : 1;
- /** Send initial metadata to the peer, from the provided metadata batch.
- idempotent_request MUST be set if this is non-null */
- grpc_metadata_batch *send_initial_metadata;
- /** Iff send_initial_metadata != NULL, flags associated with
- send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
- uint32_t send_initial_metadata_flags;
+ /** Send initial metadata to the peer, from the provided metadata batch. */
+ bool send_initial_metadata : 1;
/** Send trailing metadata to the peer, from the provided metadata batch. */
- grpc_metadata_batch *send_trailing_metadata;
+ bool send_trailing_metadata : 1;
/** Send message data to the peer, from the provided byte stream. */
- grpc_byte_stream *send_message;
+ bool send_message : 1;
/** Receive initial metadata from the stream, into provided metadata batch. */
- grpc_metadata_batch *recv_initial_metadata;
- bool *recv_idempotent_request;
- bool *recv_cacheable_request;
- /** Should be enqueued when initial metadata is ready to be processed. */
- grpc_closure *recv_initial_metadata_ready;
+ bool recv_initial_metadata : 1;
/** Receive message data from the stream, into provided byte stream. */
- grpc_byte_stream **recv_message;
- /** Should be enqueued when one message is ready to be processed. */
- grpc_closure *recv_message_ready;
+ bool recv_message : 1;
/** Receive trailing metadata from the stream, into provided metadata batch.
*/
- grpc_metadata_batch *recv_trailing_metadata;
+ bool recv_trailing_metadata : 1;
/** Collect any stats into provided buffer, zero internal stat counters */
- grpc_transport_stream_stats *collect_stats;
+ bool collect_stats : 1;
+
+ /** Cancel this stream with the provided error */
+ bool cancel_stream : 1;
+
+ /***************************************************************************
+ * remaining fields are initialized and used at the discretion of the
+ * current handler of the op */
- /** If != GRPC_ERROR_NONE, forcefully close this stream.
+ grpc_handler_private_op_data handler_private;
+} grpc_transport_stream_op_batch;
+
+struct grpc_transport_stream_op_batch_payload {
+ struct {
+ grpc_metadata_batch *send_initial_metadata;
+ /** Iff send_initial_metadata != NULL, flags associated with
+ send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
+ uint32_t send_initial_metadata_flags;
+ } send_initial_metadata;
+
+ struct {
+ grpc_metadata_batch *send_trailing_metadata;
+ } send_trailing_metadata;
+
+ struct {
+ grpc_byte_stream *send_message;
+ } send_message;
+
+ struct {
+ grpc_metadata_batch *recv_initial_metadata;
+ uint32_t *recv_flags;
+ /** Should be enqueued when initial metadata is ready to be processed. */
+ grpc_closure *recv_initial_metadata_ready;
+ } recv_initial_metadata;
+
+ struct {
+ grpc_byte_stream **recv_message;
+ /** Should be enqueued when one message is ready to be processed. */
+ grpc_closure *recv_message_ready;
+ } recv_message;
+
+ struct {
+ grpc_metadata_batch *recv_trailing_metadata;
+ } recv_trailing_metadata;
+
+ struct {
+ grpc_transport_stream_stats *collect_stats;
+ } collect_stats;
+
+ /** Forcefully close this stream.
The HTTP2 semantics should be:
- server side: if cancel_error has GRPC_ERROR_INT_GRPC_STATUS, and
trailing metadata has not been sent, send trailing metadata with status
@@ -167,17 +210,13 @@ typedef struct grpc_transport_stream_op {
convert to a HTTP2 error code using
grpc_chttp2_grpc_status_to_http2_error. Send a RST_STREAM with this
error. */
- grpc_error *cancel_error;
+ struct {
+ grpc_error *cancel_error;
+ } cancel_stream;
/* Indexes correspond to grpc_context_index enum values */
grpc_call_context_element *context;
-
- /***************************************************************************
- * remaining fields are initialized and used at the discretion of the
- * current handler of the op */
-
- grpc_transport_private_op_data handler_private;
-} grpc_transport_stream_op;
+};
/** Transport op: a set of operations to perform on a transport as a whole */
typedef struct grpc_transport_op {
@@ -210,7 +249,7 @@ typedef struct grpc_transport_op {
* remaining fields are initialized and used at the discretion of the
* transport implementation */
- grpc_transport_private_op_data transport_private;
+ grpc_handler_private_op_data handler_private;
} grpc_transport_op;
/* Returns the amount of memory required to store a grpc_stream for this
@@ -250,11 +289,11 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
grpc_stream *stream,
grpc_closure *then_schedule_closure);
-void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
- grpc_transport_stream_op *op,
- grpc_error *error);
+void grpc_transport_stream_op_batch_finish_with_failure(
+ grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
+ grpc_error *error);
-char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
+char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op);
char *grpc_transport_op_string(grpc_transport_op *op);
/* Send a batch of operations on a transport
@@ -265,11 +304,12 @@ char *grpc_transport_op_string(grpc_transport_op *op);
transport - the transport on which to initiate the stream
stream - the stream on which to send the operations. This must be
non-NULL and previously initialized by the same transport.
- op - a grpc_transport_stream_op specifying the op to perform */
+ op - a grpc_transport_stream_op_batch specifying the op to perform
+ */
void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
grpc_stream *stream,
- grpc_transport_stream_op *op);
+ grpc_transport_stream_op_batch *op);
void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
@@ -301,9 +341,10 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
/* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to
\a on_consumed and then delete the returned transport op */
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed);
-/* Allocate a grpc_transport_stream_op, and preconfigure the on_consumed closure
+/* Allocate a grpc_transport_stream_op_batch, and preconfigure the on_consumed
+ closure
to \a on_consumed and then delete the returned transport op */
-grpc_transport_stream_op *grpc_make_transport_stream_op(
+grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
grpc_closure *on_consumed);
#ifdef __cplusplus
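
Splitting the control bits from the payload makes a batch a small fixed-size
record, while the pointer-heavy payload can be allocated once per stream and
aliased by every batch issued on it. A sketch of that reuse, with assumed names
following the call.c pattern above:

    typedef struct {
      grpc_transport_stream_op_batch batch;
      grpc_transport_stream_op_batch_payload payload; /* reused per stream */
    } stream_batch_storage;

    static void arm_recv_message(stream_batch_storage *s,
                                 grpc_byte_stream **dest,
                                 grpc_closure *ready) {
      s->batch.payload = &s->payload; /* batch aliases the shared payload */
      s->batch.recv_message = true;
      s->payload.recv_message.recv_message = dest;
      s->payload.recv_message.recv_message_ready = ready;
    }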
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index 6f688bf8d2..bbb19a34bd 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -59,7 +59,8 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_perform_stream_op */
void (*perform_stream_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_transport_stream_op *op);
+ grpc_stream *stream,
+ grpc_transport_stream_op_batch *op);
/* implementation of grpc_transport_perform_op */
void (*perform_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index 28360e3784..3a2a793e01 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -71,7 +71,8 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
}
}
-char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
+char *grpc_transport_stream_op_batch_string(
+ grpc_transport_stream_op_batch *op) {
char *tmp;
char *out;
@@ -81,45 +82,49 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
gpr_strvec_add(
&b, gpr_strdup(op->covered_by_poller ? "[COVERED]" : "[UNCOVERED]"));
- if (op->send_initial_metadata != NULL) {
+ if (op->send_initial_metadata) {
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));
- put_metadata_list(&b, *op->send_initial_metadata);
+ put_metadata_list(
+ &b, *op->payload->send_initial_metadata.send_initial_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
- if (op->send_message != NULL) {
+ if (op->send_message) {
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d",
- op->send_message->flags, op->send_message->length);
+ op->payload->send_message.send_message->flags,
+ op->payload->send_message.send_message->length);
gpr_strvec_add(&b, tmp);
}
- if (op->send_trailing_metadata != NULL) {
+ if (op->send_trailing_metadata) {
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_TRAILING_METADATA{"));
- put_metadata_list(&b, *op->send_trailing_metadata);
+ put_metadata_list(
+ &b, *op->payload->send_trailing_metadata.send_trailing_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
- if (op->recv_initial_metadata != NULL) {
+ if (op->recv_initial_metadata) {
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_INITIAL_METADATA"));
}
- if (op->recv_message != NULL) {
+ if (op->recv_message) {
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
}
- if (op->recv_trailing_metadata != NULL) {
+ if (op->recv_trailing_metadata) {
gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_TRAILING_METADATA"));
}
- if (op->cancel_error != GRPC_ERROR_NONE) {
+ if (op->cancel_stream) {
gpr_strvec_add(&b, gpr_strdup(" "));
- const char *msg = grpc_error_string(op->cancel_error);
+ const char *msg =
+ grpc_error_string(op->payload->cancel_stream.cancel_error);
gpr_asprintf(&tmp, "CANCEL:%s", msg);
gpr_strvec_add(&b, tmp);
@@ -204,8 +209,9 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
}
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
- grpc_call_element *elem, grpc_transport_stream_op *op) {
- char *str = grpc_transport_stream_op_string(op);
+ grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
+ char *str = grpc_transport_stream_op_batch_string(op);
gpr_log(file, line, severity, "OP[%s:%p]: %s", elem->filter->name, elem, str);
gpr_free(str);
}
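
For reference, a batch with send_initial_metadata and recv_message armed would
render along these lines (illustrative, assembled from the string-building code
above rather than captured from a run):

    [COVERED] SEND_INITIAL_METADATA{key=... value=...} RECV_MESSAGE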